-rw-r--r--Documentation/ABI/testing/debugfs-driver-genwqe91
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio13
-rw-r--r--Documentation/ABI/testing/sysfs-driver-genwqe62
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-efi20
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-efi-runtime-map34
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-boot_params38
-rw-r--r--Documentation/HOWTO4
-rw-r--r--Documentation/RCU/trace.txt22
-rw-r--r--Documentation/acpi/apei/einj.txt19
-rw-r--r--Documentation/block/null_blk.txt72
-rw-r--r--Documentation/circular-buffers.txt45
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-at91.txt8
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5250-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-palmas.txt6
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt1
-rw-r--r--Documentation/devicetree/bindings/iio/humidity/dht11.txt14
-rw-r--r--Documentation/devicetree/bindings/iio/light/tsl2563.txt19
-rw-r--r--Documentation/devicetree/bindings/iio/magnetometer/hmc5843.txt17
-rw-r--r--Documentation/devicetree/bindings/misc/atmel-ssc.txt5
-rw-r--r--Documentation/devicetree/bindings/misc/bmp085.txt4
-rw-r--r--Documentation/devicetree/bindings/serial/atmel-usart.txt7
-rw-r--r--Documentation/devicetree/bindings/serial/cirrus,clps711x-uart.txt28
-rw-r--r--Documentation/devicetree/bindings/staging/xillybus.txt20
-rw-r--r--Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt22
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt (renamed from Documentation/devicetree/bindings/staging/dwc2.txt)4
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/driver-model/design-patterns.txt116
-rw-r--r--Documentation/driver-model/devres.txt2
-rw-r--r--Documentation/extcon/porting-android-switch-class9
-rw-r--r--Documentation/ja_JP/HOWTO4
-rw-r--r--Documentation/kernel-parameters.txt38
-rw-r--r--Documentation/kmsg/s390/zcrypt20
-rw-r--r--Documentation/ko_KR/HOWTO130
-rw-r--r--Documentation/kobject.txt5
-rw-r--r--Documentation/memory-barriers.txt922
-rw-r--r--Documentation/misc-devices/mei/mei-amt-version.c2
-rw-r--r--Documentation/robust-futex-ABI.txt4
-rw-r--r--Documentation/sysctl/kernel.txt5
-rw-r--r--Documentation/vme_api.txt12
-rw-r--r--Documentation/x86/boot.txt3
-rw-r--r--Documentation/x86/x86_64/mm.txt7
-rw-r--r--Documentation/zh_CN/HOWTO4
-rw-r--r--Documentation/zorro.txt5
-rw-r--r--MAINTAINERS30
-rw-r--r--Makefile22
-rw-r--r--arch/Kconfig67
-rw-r--r--arch/alpha/include/asm/barrier.h25
-rw-r--r--arch/arc/include/asm/Kbuild1
-rw-r--r--arch/arc/include/asm/atomic.h5
-rw-r--r--arch/arc/include/asm/barrier.h5
-rw-r--r--arch/arm/Kconfig13
-rw-r--r--arch/arm/Makefile4
-rw-r--r--arch/arm/boot/compressed/misc.c14
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi2
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi4
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi7
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi7
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi10
-rw-r--r--arch/arm/crypto/aesbs-core.S_shipped2
-rw-r--r--arch/arm/crypto/bsaes-armv7.pl2
-rw-r--r--arch/arm/include/asm/barrier.h15
-rw-r--r--arch/arm/include/asm/io.h2
-rw-r--r--arch/arm/include/asm/memory.h3
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/include/asm/xen/page.h2
-rw-r--r--arch/arm/include/uapi/asm/unistd.h2
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/devtree.c2
-rw-r--r--arch/arm/kernel/perf_event.c4
-rw-r--r--arch/arm/kernel/perf_event_cpu.c2
-rw-r--r--arch/arm/kernel/traps.c13
-rw-r--r--arch/arm/kvm/arm.c30
-rw-r--r--arch/arm/mach-clps711x/devices.c21
-rw-r--r--arch/arm/mach-footbridge/dc21285-timer.c5
-rw-r--r--arch/arm/mach-highbank/highbank.c1
-rw-r--r--arch/arm/mach-omap2/board-ldp.c7
-rw-r--r--arch/arm/mach-omap2/omap4-common.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/lubbock.h2
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c11
-rw-r--r--arch/arm/mach-shmobile/board-bockw.c2
-rw-r--r--arch/arm/mach-shmobile/board-kzm9g.c2
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c4
-rw-r--r--arch/arm/mach-sunxi/Kconfig1
-rw-r--r--arch/arm/mm/flush.c6
-rw-r--r--arch/arm/mm/init.c2
-rw-r--r--arch/arm/net/bpf_jit_32.c6
-rw-r--r--arch/arm64/Kconfig27
-rw-r--r--arch/arm64/boot/dts/foundation-v8.dts2
-rw-r--r--arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi6
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/barrier.h50
-rw-r--r--arch/arm64/include/asm/cmpxchg.h28
-rw-r--r--arch/arm64/include/asm/cpu_ops.h6
-rw-r--r--arch/arm64/include/asm/cputype.h28
-rw-r--r--arch/arm64/include/asm/debug-monitors.h21
-rw-r--r--arch/arm64/include/asm/dma-contiguous.h29
-rw-r--r--arch/arm64/include/asm/futex.h1
-rw-r--r--arch/arm64/include/asm/hardirq.h2
-rw-r--r--arch/arm64/include/asm/insn.h108
-rw-r--r--arch/arm64/include/asm/io.h2
-rw-r--r--arch/arm64/include/asm/jump_label.h52
-rw-r--r--arch/arm64/include/asm/memory.h3
-rw-r--r--arch/arm64/include/asm/percpu.h41
-rw-r--r--arch/arm64/include/asm/proc-fns.h3
-rw-r--r--arch/arm64/include/asm/smp_plat.h13
-rw-r--r--arch/arm64/include/asm/suspend.h27
-rw-r--r--arch/arm64/include/asm/uaccess.h25
-rw-r--r--arch/arm64/include/asm/word-at-a-time.h94
-rw-r--r--arch/arm64/include/uapi/asm/hwcap.h6
-rw-r--r--arch/arm64/kernel/Makefile4
-rw-r--r--arch/arm64/kernel/arm64ksyms.c5
-rw-r--r--arch/arm64/kernel/asm-offsets.c11
-rw-r--r--arch/arm64/kernel/debug-monitors.c88
-rw-r--r--arch/arm64/kernel/entry.S4
-rw-r--r--arch/arm64/kernel/fpsimd.c36
-rw-r--r--arch/arm64/kernel/head.S10
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c203
-rw-r--r--arch/arm64/kernel/insn.c304
-rw-r--r--arch/arm64/kernel/jump_label.c58
-rw-r--r--arch/arm64/kernel/module.c157
-rw-r--r--arch/arm64/kernel/perf_event.c108
-rw-r--r--arch/arm64/kernel/process.c14
-rw-r--r--arch/arm64/kernel/setup.c122
-rw-r--r--arch/arm64/kernel/sleep.S184
-rw-r--r--arch/arm64/kernel/smp.c23
-rw-r--r--arch/arm64/kernel/stacktrace.c2
-rw-r--r--arch/arm64/kernel/suspend.c132
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S3
-rw-r--r--arch/arm64/lib/Makefile8
-rw-r--r--arch/arm64/lib/strncpy_from_user.S50
-rw-r--r--arch/arm64/lib/strnlen_user.S47
-rw-r--r--arch/arm64/mm/dma-mapping.c35
-rw-r--r--arch/arm64/mm/init.c3
-rw-r--r--arch/arm64/mm/proc.S69
-rw-r--r--arch/avr32/include/asm/barrier.h17
-rw-r--r--arch/blackfin/include/asm/barrier.h18
-rw-r--r--arch/cris/include/asm/Kbuild1
-rw-r--r--arch/cris/include/asm/barrier.h25
-rw-r--r--arch/frv/include/asm/barrier.h8
-rw-r--r--arch/hexagon/include/asm/Kbuild1
-rw-r--r--arch/hexagon/include/asm/atomic.h6
-rw-r--r--arch/hexagon/include/asm/barrier.h4
-rw-r--r--arch/ia64/Kconfig12
-rw-r--r--arch/ia64/Makefile2
-rw-r--r--arch/ia64/configs/xen_domu_defconfig199
-rw-r--r--arch/ia64/include/asm/acpi.h2
-rw-r--r--arch/ia64/include/asm/barrier.h23
-rw-r--r--arch/ia64/include/asm/machvec.h2
-rw-r--r--arch/ia64/include/asm/machvec_xen.h22
-rw-r--r--arch/ia64/include/asm/meminit.h1
-rw-r--r--arch/ia64/include/asm/paravirt.h1
-rw-r--r--arch/ia64/include/asm/pvclock-abi.h2
-rw-r--r--arch/ia64/include/asm/sync_bitops.h51
-rw-r--r--arch/ia64/include/asm/xen/events.h41
-rw-r--r--arch/ia64/include/asm/xen/hypercall.h265
-rw-r--r--arch/ia64/include/asm/xen/hypervisor.h61
-rw-r--r--arch/ia64/include/asm/xen/inst.h486
-rw-r--r--arch/ia64/include/asm/xen/interface.h363
-rw-r--r--arch/ia64/include/asm/xen/irq.h44
-rw-r--r--arch/ia64/include/asm/xen/minstate.h143
-rw-r--r--arch/ia64/include/asm/xen/page-coherent.h38
-rw-r--r--arch/ia64/include/asm/xen/page.h65
-rw-r--r--arch/ia64/include/asm/xen/patchlist.h38
-rw-r--r--arch/ia64/include/asm/xen/privop.h135
-rw-r--r--arch/ia64/include/asm/xen/xcom_hcall.h51
-rw-r--r--arch/ia64/include/asm/xen/xencomm.h42
-rw-r--r--arch/ia64/include/uapi/asm/break.h9
-rw-r--r--arch/ia64/kernel/acpi.c3
-rw-r--r--arch/ia64/kernel/asm-offsets.c32
-rw-r--r--arch/ia64/kernel/head.S3
-rw-r--r--arch/ia64/kernel/nr-irqs.c4
-rw-r--r--arch/ia64/kernel/paravirt_inst.h3
-rw-r--r--arch/ia64/kernel/paravirt_patchlist.h4
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S6
-rw-r--r--arch/ia64/xen/Kconfig25
-rw-r--r--arch/ia64/xen/Makefile37
-rw-r--r--arch/ia64/xen/gate-data.S3
-rw-r--r--arch/ia64/xen/grant-table.c94
-rw-r--r--arch/ia64/xen/hypercall.S88
-rw-r--r--arch/ia64/xen/hypervisor.c97
-rw-r--r--arch/ia64/xen/irq_xen.c443
-rw-r--r--arch/ia64/xen/irq_xen.h34
-rw-r--r--arch/ia64/xen/machvec.c4
-rw-r--r--arch/ia64/xen/suspend.c59
-rw-r--r--arch/ia64/xen/time.c257
-rw-r--r--arch/ia64/xen/time.h24
-rw-r--r--arch/ia64/xen/xcom_hcall.c441
-rw-r--r--arch/ia64/xen/xen_pv_ops.c1141
-rw-r--r--arch/ia64/xen/xencomm.c106
-rw-r--r--arch/ia64/xen/xenivt.S52
-rw-r--r--arch/ia64/xen/xensetup.S80
-rw-r--r--arch/m32r/include/asm/barrier.h80
-rw-r--r--arch/m68k/Kconfig24
-rw-r--r--arch/m68k/amiga/chipram.c2
-rw-r--r--arch/m68k/amiga/config.c63
-rw-r--r--arch/m68k/amiga/platform.c9
-rw-r--r--arch/m68k/apollo/config.c26
-rw-r--r--arch/m68k/atari/ataints.c3
-rw-r--r--arch/m68k/atari/config.c10
-rw-r--r--arch/m68k/atari/debug.c5
-rw-r--r--arch/m68k/bvme6000/config.c6
-rw-r--r--arch/m68k/configs/amiga_defconfig50
-rw-r--r--arch/m68k/configs/apollo_defconfig49
-rw-r--r--arch/m68k/configs/atari_defconfig50
-rw-r--r--arch/m68k/configs/bvme6000_defconfig48
-rw-r--r--arch/m68k/configs/hp300_defconfig49
-rw-r--r--arch/m68k/configs/mac_defconfig51
-rw-r--r--arch/m68k/configs/multi_defconfig53
-rw-r--r--arch/m68k/configs/mvme147_defconfig48
-rw-r--r--arch/m68k/configs/mvme16x_defconfig49
-rw-r--r--arch/m68k/configs/q40_defconfig50
-rw-r--r--arch/m68k/configs/sun3_defconfig49
-rw-r--r--arch/m68k/configs/sun3x_defconfig49
-rw-r--r--arch/m68k/emu/natfeat.c3
-rw-r--r--arch/m68k/hp300/config.c10
-rw-r--r--arch/m68k/include/asm/amigahw.h28
-rw-r--r--arch/m68k/include/asm/apollohw.h11
-rw-r--r--arch/m68k/include/asm/atarihw.h2
-rw-r--r--arch/m68k/include/asm/barrier.h14
-rw-r--r--arch/m68k/include/asm/bootinfo.h360
-rw-r--r--arch/m68k/include/asm/hp300hw.h20
-rw-r--r--arch/m68k/include/asm/kexec.h29
-rw-r--r--arch/m68k/include/asm/mac_via.h2
-rw-r--r--arch/m68k/include/asm/macintosh.h83
-rw-r--r--arch/m68k/include/asm/mc146818rtc.h10
-rw-r--r--arch/m68k/include/asm/mvme16xhw.h17
-rw-r--r--arch/m68k/include/asm/setup.h5
-rw-r--r--arch/m68k/include/asm/timex.h10
-rw-r--r--arch/m68k/include/uapi/asm/Kbuild8
-rw-r--r--arch/m68k/include/uapi/asm/bootinfo-amiga.h63
-rw-r--r--arch/m68k/include/uapi/asm/bootinfo-apollo.h28
-rw-r--r--arch/m68k/include/uapi/asm/bootinfo-atari.h44
-rw-r--r--arch/m68k/include/uapi/asm/bootinfo-hp300.h50
-rw-r--r--arch/m68k/include/uapi/asm/bootinfo-mac.h119
-rw-r--r--arch/m68k/include/uapi/asm/bootinfo-q40.h16
-rw-r--r--arch/m68k/include/uapi/asm/bootinfo-vme.h70
-rw-r--r--arch/m68k/include/uapi/asm/bootinfo.h174
-rw-r--r--arch/m68k/include/uapi/asm/setup.h87
-rw-r--r--arch/m68k/kernel/Makefile3
-rw-r--r--arch/m68k/kernel/asm-offsets.c3
-rw-r--r--arch/m68k/kernel/bootinfo_proc.c80
-rw-r--r--arch/m68k/kernel/head.S16
-rw-r--r--arch/m68k/kernel/machine_kexec.c58
-rw-r--r--arch/m68k/kernel/relocate_kernel.S159
-rw-r--r--arch/m68k/kernel/setup_mm.c62
-rw-r--r--arch/m68k/kernel/time.c4
-rw-r--r--arch/m68k/kernel/traps.c232
-rw-r--r--arch/m68k/mac/config.c34
-rw-r--r--arch/m68k/mac/iop.c5
-rw-r--r--arch/m68k/mac/misc.c2
-rw-r--r--arch/m68k/mac/oss.c1
-rw-r--r--arch/m68k/mac/psc.c3
-rw-r--r--arch/m68k/mac/via.c1
-rw-r--r--arch/m68k/mm/fault.c26
-rw-r--r--arch/m68k/mm/init.c2
-rw-r--r--arch/m68k/mm/kmap.c10
-rw-r--r--arch/m68k/mm/motorola.c2
-rw-r--r--arch/m68k/mvme147/config.c7
-rw-r--r--arch/m68k/mvme16x/config.c30
-rw-r--r--arch/m68k/q40/config.c4
-rw-r--r--arch/m68k/sun3/dvma.c6
-rw-r--r--arch/m68k/sun3/mmu_emu.c3
-rw-r--r--arch/m68k/sun3/sun3dvma.c8
-rw-r--r--arch/m68k/sun3x/prom.c1
-rw-r--r--arch/metag/include/asm/barrier.h15
-rw-r--r--arch/metag/include/asm/smp.h2
-rw-r--r--arch/metag/kernel/dma.c5
-rw-r--r--arch/metag/kernel/smp.c15
-rw-r--r--arch/metag/kernel/topology.c1
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/barrier.h27
-rw-r--r--arch/mips/Kconfig14
-rw-r--r--arch/mips/Makefile4
-rw-r--r--arch/mips/ar7/setup.c1
-rw-r--r--arch/mips/emma/markeins/setup.c3
-rw-r--r--arch/mips/include/asm/barrier.h15
-rw-r--r--arch/mips/include/asm/cacheops.h2
-rw-r--r--arch/mips/include/asm/r4kcache.h51
-rw-r--r--arch/mips/mm/c-r4k.c11
-rw-r--r--arch/mips/netlogic/xlp/setup.c1
-rw-r--r--arch/mips/netlogic/xlr/setup.c1
-rw-r--r--arch/mips/sibyte/swarm/setup.c2
-rw-r--r--arch/mn10300/include/asm/Kbuild1
-rw-r--r--arch/mn10300/include/asm/barrier.h37
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/parisc/include/asm/barrier.h35
-rw-r--r--arch/parisc/include/asm/cacheflush.h12
-rw-r--r--arch/parisc/include/asm/page.h5
-rw-r--r--arch/parisc/include/uapi/asm/socket.h2
-rw-r--r--arch/parisc/kernel/cache.c35
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/boot/dts/mpc5125twr.dts6
-rw-r--r--arch/powerpc/include/asm/barrier.h21
-rw-r--r--arch/powerpc/include/asm/exception-64s.h2
-rw-r--r--arch/powerpc/include/asm/setup.h1
-rw-r--r--arch/powerpc/include/asm/spinlock.h2
-rw-r--r--arch/powerpc/include/asm/unaligned.h7
-rw-r--r--arch/powerpc/include/asm/uprobes.h5
-rw-r--r--arch/powerpc/kernel/head_64.S2
-rw-r--r--arch/powerpc/kernel/prom_init.c22
-rw-r--r--arch/powerpc/kernel/setup_32.c3
-rw-r--r--arch/powerpc/kernel/setup_64.c3
-rw-r--r--arch/powerpc/kernel/uprobes.c2
-rw-r--r--arch/powerpc/lib/copyuser_64.S53
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c7
-rw-r--r--arch/powerpc/platforms/powernv/eeh-ioda.c20
-rw-r--r--arch/powerpc/platforms/powernv/pci.h4
-rw-r--r--arch/powerpc/platforms/pseries/setup.c2
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/include/asm/barrier.h15
-rw-r--r--arch/s390/include/asm/compat.h3
-rw-r--r--arch/s390/include/asm/cpu_mf.h181
-rw-r--r--arch/s390/include/asm/css_chars.h2
-rw-r--r--arch/s390/include/asm/pci.h1
-rw-r--r--arch/s390/include/asm/perf_event.h80
-rw-r--r--arch/s390/include/asm/qdio.h35
-rw-r--r--arch/s390/include/asm/sclp.h4
-rw-r--r--arch/s390/include/asm/smp.h2
-rw-r--r--arch/s390/include/uapi/asm/zcrypt.h65
-rw-r--r--arch/s390/kernel/Makefile3
-rw-r--r--arch/s390/kernel/compat_signal.c5
-rw-r--r--arch/s390/kernel/entry64.S8
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c1
-rw-r--r--arch/s390/kernel/perf_cpum_cf_events.c322
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c1641
-rw-r--r--arch/s390/kernel/perf_event.c174
-rw-r--r--arch/s390/kernel/process.c14
-rw-r--r--arch/s390/kernel/ptrace.c27
-rw-r--r--arch/s390/kernel/s390_ksyms.c2
-rw-r--r--arch/s390/kernel/setup.c3
-rw-r--r--arch/s390/kernel/smp.c44
-rw-r--r--arch/s390/kvm/priv.c2
-rw-r--r--arch/s390/lib/uaccess_pt.c4
-rw-r--r--arch/s390/mm/pgtable.c4
-rw-r--r--arch/s390/net/bpf_jit_comp.c29
-rw-r--r--arch/s390/oprofile/hwsampler.c78
-rw-r--r--arch/s390/oprofile/hwsampler.h52
-rw-r--r--arch/s390/oprofile/init.c23
-rw-r--r--arch/s390/pci/pci.c16
-rw-r--r--arch/s390/pci/pci_dma.c13
-rw-r--r--arch/s390/pci/pci_event.c28
-rw-r--r--arch/score/include/asm/Kbuild1
-rw-r--r--arch/score/include/asm/barrier.h16
-rw-r--r--arch/sh/Kconfig15
-rw-r--r--arch/sh/Makefile4
-rw-r--r--arch/sh/include/asm/barrier.h21
-rw-r--r--arch/sh/kernel/sh_ksyms_32.c5
-rw-r--r--arch/sparc/include/asm/barrier_32.h12
-rw-r--r--arch/sparc/include/asm/barrier_64.h15
-rw-r--r--arch/sparc/include/asm/uaccess_64.h4
-rw-r--r--arch/sparc/kernel/iommu.c2
-rw-r--r--arch/sparc/kernel/ioport.c5
-rw-r--r--arch/sparc/kernel/kgdb_64.c1
-rw-r--r--arch/sparc/kernel/smp_64.c3
-rw-r--r--arch/sparc/net/bpf_jit_comp.c17
-rw-r--r--arch/tile/include/asm/barrier.h68
-rw-r--r--arch/unicore32/include/asm/barrier.h11
-rw-r--r--arch/x86/Kconfig118
-rw-r--r--arch/x86/Makefile8
-rw-r--r--arch/x86/boot/Makefile2
-rw-r--r--arch/x86/boot/bioscall.S6
-rw-r--r--arch/x86/boot/boot.h10
-rw-r--r--arch/x86/boot/compressed/Makefile2
-rw-r--r--arch/x86/boot/compressed/aslr.c316
-rw-r--r--arch/x86/boot/compressed/cmdline.c2
-rw-r--r--arch/x86/boot/compressed/cpuflags.c12
-rw-r--r--arch/x86/boot/compressed/head_32.S10
-rw-r--r--arch/x86/boot/compressed/head_64.S16
-rw-r--r--arch/x86/boot/compressed/misc.c18
-rw-r--r--arch/x86/boot/compressed/misc.h37
-rw-r--r--arch/x86/boot/copy.S22
-rw-r--r--arch/x86/boot/cpucheck.c100
-rw-r--r--arch/x86/boot/cpuflags.c104
-rw-r--r--arch/x86/boot/cpuflags.h19
-rw-r--r--arch/x86/boot/header.S9
-rw-r--r--arch/x86/include/asm/archrandom.h21
-rw-r--r--arch/x86/include/asm/barrier.h43
-rw-r--r--arch/x86/include/asm/cpufeature.h1
-rw-r--r--arch/x86/include/asm/efi.h78
-rw-r--r--arch/x86/include/asm/fpu-internal.h13
-rw-r--r--arch/x86/include/asm/futex.h21
-rw-r--r--arch/x86/include/asm/hw_irq.h3
-rw-r--r--arch/x86/include/asm/intel-mid.h48
-rw-r--r--arch/x86/include/asm/iosf_mbi.h90
-rw-r--r--arch/x86/include/asm/irq.h1
-rw-r--r--arch/x86/include/asm/mce.h1
-rw-r--r--arch/x86/include/asm/microcode.h15
-rw-r--r--arch/x86/include/asm/microcode_amd.h7
-rw-r--r--arch/x86/include/asm/mpspec.h1
-rw-r--r--arch/x86/include/asm/mwait.h43
-rw-r--r--arch/x86/include/asm/page.h1
-rw-r--r--arch/x86/include/asm/page_32.h4
-rw-r--r--arch/x86/include/asm/page_64_types.h15
-rw-r--r--arch/x86/include/asm/pgtable-2level.h100
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h2
-rw-r--r--arch/x86/include/asm/pgtable_types.h3
-rw-r--r--arch/x86/include/asm/processor.h42
-rw-r--r--arch/x86/include/asm/ptrace.h1
-rw-r--r--arch/x86/include/asm/setup.h3
-rw-r--r--arch/x86/include/asm/smp.h1
-rw-r--r--arch/x86/include/asm/timer.h78
-rw-r--r--arch/x86/include/asm/tsc.h3
-rw-r--r--arch/x86/include/asm/uaccess.h124
-rw-r--r--arch/x86/include/asm/uaccess_64.h4
-rw-r--r--arch/x86/include/asm/xsave.h14
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h2
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h1
-rw-r--r--arch/x86/include/uapi/asm/stat.h42
-rw-r--r--arch/x86/kernel/Makefile13
-rw-r--r--arch/x86/kernel/acpi/cstate.c23
-rw-r--r--arch/x86/kernel/apic/apic.c66
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c1
-rw-r--r--arch/x86/kernel/apic/apic_noop.c1
-rw-r--r--arch/x86/kernel/apic/io_apic.c20
-rw-r--r--arch/x86/kernel/apic/ipi.c1
-rw-r--r--arch/x86/kernel/apic/summit_32.c1
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c1
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c1
-rw-r--r--arch/x86/kernel/cpu/Makefile3
-rw-r--r--arch/x86/kernel/cpu/amd.c23
-rw-r--r--arch/x86/kernel/cpu/centaur.c1
-rw-r--r--arch/x86/kernel/cpu/common.c7
-rw-r--r--arch/x86/kernel/cpu/cyrix.c1
-rw-r--r--arch/x86/kernel/cpu/intel.c32
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-apei.c14
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c12
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_intel.c1
-rw-r--r--arch/x86/kernel/cpu/mcheck/p5.c1
-rw-r--r--arch/x86/kernel/cpu/mcheck/winchip.c1
-rw-r--r--arch/x86/kernel/cpu/microcode/Makefile7
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c (renamed from arch/x86/kernel/microcode_amd.c)15
-rw-r--r--arch/x86/kernel/cpu/microcode/amd_early.c (renamed from arch/x86/kernel/microcode_amd_early.c)239
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c (renamed from arch/x86/kernel/microcode_core.c)0
-rw-r--r--arch/x86/kernel/cpu/microcode/core_early.c (renamed from arch/x86/kernel/microcode_core_early.c)0
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c (renamed from arch/x86/kernel/microcode_intel.c)2
-rw-r--r--arch/x86/kernel/cpu/microcode/intel_early.c (renamed from arch/x86/kernel/microcode_intel_early.c)10
-rw-r--r--arch/x86/kernel/cpu/microcode/intel_lib.c (renamed from arch/x86/kernel/microcode_intel_lib.c)0
-rw-r--r--arch/x86/kernel/cpu/perf_event.c16
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_ibs.c53
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_rapl.c679
-rw-r--r--arch/x86/kernel/cpu/rdrand.c14
-rw-r--r--arch/x86/kernel/cpu/transmeta.c1
-rw-r--r--arch/x86/kernel/cpu/umc.c1
-rw-r--r--arch/x86/kernel/crash.c1
-rw-r--r--arch/x86/kernel/doublefault.c1
-rw-r--r--arch/x86/kernel/entry_32.S4
-rw-r--r--arch/x86/kernel/entry_64.S2
-rw-r--r--arch/x86/kernel/hw_breakpoint.c1
-rw-r--r--arch/x86/kernel/iosf_mbi.c226
-rw-r--r--arch/x86/kernel/irq.c89
-rw-r--r--arch/x86/kernel/irqinit.c4
-rw-r--r--arch/x86/kernel/kgdb.c1
-rw-r--r--arch/x86/kernel/ksysfs.c340
-rw-r--r--arch/x86/kernel/machine_kexec_32.c1
-rw-r--r--arch/x86/kernel/pci-nommu.c1
-rw-r--r--arch/x86/kernel/process_32.c1
-rw-r--r--arch/x86/kernel/setup.c52
-rw-r--r--arch/x86/kernel/smpboot.c8
-rw-r--r--arch/x86/kernel/traps.c22
-rw-r--r--arch/x86/kernel/tsc.c328
-rw-r--r--arch/x86/kernel/tsc_msr.c127
-rw-r--r--arch/x86/kernel/tsc_sync.c1
-rw-r--r--arch/x86/kernel/xsave.c10
-rw-r--r--arch/x86/kvm/lapic.c10
-rw-r--r--arch/x86/kvm/vmx.c3
-rw-r--r--arch/x86/lib/copy_user_64.S12
-rw-r--r--arch/x86/lib/delay.c1
-rw-r--r--arch/x86/lib/x86-opcode-map.txt4
-rw-r--r--arch/x86/mm/fault.c18
-rw-r--r--arch/x86/mm/hugetlbpage.c9
-rw-r--r--arch/x86/mm/init_32.c3
-rw-r--r--arch/x86/mm/kmmio.c1
-rw-r--r--arch/x86/mm/numa.c10
-rw-r--r--arch/x86/mm/pageattr-test.c1
-rw-r--r--arch/x86/mm/pageattr.c461
-rw-r--r--arch/x86/net/bpf_jit_comp.c14
-rw-r--r--arch/x86/pci/fixup.c1
-rw-r--r--arch/x86/pci/intel_mid_pci.c6
-rw-r--r--arch/x86/platform/efi/efi.c355
-rw-r--r--arch/x86/platform/efi/efi_32.c12
-rw-r--r--arch/x86/platform/efi/efi_64.c120
-rw-r--r--arch/x86/platform/efi/efi_stub_64.S54
-rw-r--r--arch/x86/platform/intel-mid/Makefile4
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_emc1403.c4
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_lis331.c4
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_max7315.c2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_tca6416.c4
-rw-r--r--arch/x86/platform/intel-mid/early_printk_intel_mid.c1
-rw-r--r--arch/x86/platform/intel-mid/intel-mid.c64
-rw-r--r--arch/x86/platform/intel-mid/intel_mid_weak_decls.h19
-rw-r--r--arch/x86/platform/intel-mid/mfld.c75
-rw-r--r--arch/x86/platform/intel-mid/mrfl.c103
-rw-r--r--arch/x86/platform/intel-mid/sfi.c46
-rw-r--r--arch/x86/platform/iris/iris.c1
-rw-r--r--arch/x86/platform/uv/tlb_uv.c66
-rw-r--r--arch/x86/realmode/init.c26
-rw-r--r--arch/x86/realmode/rm/reboot.S1
-rw-r--r--arch/x86/realmode/rm/trampoline_32.S1
-rw-r--r--arch/x86/realmode/rm/trampoline_64.S1
-rw-r--r--arch/x86/syscalls/syscall_32.tbl2
-rw-r--r--arch/x86/syscalls/syscall_64.tbl2
-rw-r--r--arch/x86/tools/relocs.c20
-rw-r--r--arch/x86/vdso/vclock_gettime.c8
-rw-r--r--arch/x86/vdso/vdso.S1
-rw-r--r--arch/x86/vdso/vdsox32.S1
-rw-r--r--arch/xtensa/include/asm/barrier.h9
-rw-r--r--block/blk-mq-sysfs.c13
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/ac.c4
-rw-r--r--drivers/acpi/acpi_extlog.c18
-rw-r--r--drivers/acpi/acpi_pad.c5
-rw-r--r--drivers/acpi/apei/Kconfig1
-rw-r--r--drivers/acpi/apei/apei-base.c4
-rw-r--r--drivers/acpi/apei/einj.c58
-rw-r--r--drivers/acpi/apei/erst.c2
-rw-r--r--drivers/acpi/apei/ghes.c39
-rw-r--r--drivers/acpi/battery.c22
-rw-r--r--drivers/acpi/bus.c10
-rw-r--r--drivers/acpi/processor_idle.c15
-rw-r--r--drivers/ata/ahci.c21
-rw-r--r--drivers/ata/ahci_imx.c3
-rw-r--r--drivers/ata/libata-core.c19
-rw-r--r--drivers/ata/libata-scsi.c21
-rw-r--r--drivers/ata/sata_sis.c4
-rw-r--r--drivers/base/Makefile2
-rw-r--r--drivers/base/bus.c13
-rw-r--r--drivers/base/component.c382
-rw-r--r--drivers/base/core.c7
-rw-r--r--drivers/base/devtmpfs.c2
-rw-r--r--drivers/base/firmware_class.c93
-rw-r--r--drivers/block/null_blk.c112
-rw-r--r--drivers/block/skd_main.c4
-rw-r--r--drivers/block/z2ram.c7
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/char/agp/amd64-agp.c2
-rw-r--r--drivers/char/i8k.c358
-rw-r--r--drivers/char/lp.c2
-rw-r--r--drivers/char/nwbutton.c5
-rw-r--r--drivers/char/pcmcia/synclink_cs.c4
-rw-r--r--drivers/char/tpm/tpm_ppi.c15
-rw-r--r--drivers/char/ttyprintk.c2
-rw-r--r--drivers/clk/clk-divider.c2
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c10
-rw-r--r--drivers/clk/samsung/clk-exynos4.c2
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c14
-rw-r--r--drivers/clocksource/Kconfig4
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arm_global_timer.c4
-rw-r--r--drivers/clocksource/bcm_kona_timer.c6
-rw-r--r--drivers/clocksource/cadence_ttc_timer.c26
-rw-r--r--drivers/clocksource/clksrc-of.c4
-rw-r--r--drivers/clocksource/cs5535-clockevt.c2
-rw-r--r--drivers/clocksource/dw_apb_timer.c3
-rw-r--r--drivers/clocksource/nomadik-mtu.c2
-rw-r--r--drivers/clocksource/samsung_pwm_timer.c2
-rw-r--r--drivers/clocksource/sh_cmt.c23
-rw-r--r--drivers/clocksource/sh_mtu2.c4
-rw-r--r--drivers/clocksource/sh_tmu.c4
-rw-r--r--drivers/clocksource/sun4i_timer.c11
-rw-r--r--drivers/clocksource/tegra20_timer.c2
-rw-r--r--drivers/clocksource/time-armada-370-xp.c18
-rw-r--r--drivers/clocksource/time-orion.c4
-rw-r--r--drivers/clocksource/timer-sun5i.c192
-rw-r--r--drivers/clocksource/vt8500_timer.c2
-rw-r--r--drivers/cpufreq/cpufreq.c104
-rw-r--r--drivers/cpufreq/intel_pstate.c8
-rw-r--r--drivers/cpuidle/cpuidle-calxeda.c2
-rw-r--r--drivers/crypto/ixp4xx_crypto.c4
-rw-r--r--drivers/dma/ioat/dma.c11
-rw-r--r--drivers/edac/amd64_edac.c135
-rw-r--r--drivers/edac/amd76x_edac.c2
-rw-r--r--drivers/edac/e752x_edac.c6
-rw-r--r--drivers/edac/e7xxx_edac.c2
-rw-r--r--drivers/edac/edac_device.c3
-rw-r--r--drivers/edac/edac_mc_sysfs.c2
-rw-r--r--drivers/edac/edac_stub.c19
-rw-r--r--drivers/edac/i3000_edac.c2
-rw-r--r--drivers/edac/i3200_edac.c2
-rw-r--r--drivers/edac/i5000_edac.c2
-rw-r--r--drivers/edac/i5100_edac.c2
-rw-r--r--drivers/edac/i5400_edac.c2
-rw-r--r--drivers/edac/i7300_edac.c2
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/edac/i82443bxgx_edac.c2
-rw-r--r--drivers/edac/i82860_edac.c2
-rw-r--r--drivers/edac/i82875p_edac.c2
-rw-r--r--drivers/edac/i82975x_edac.c2
-rw-r--r--drivers/edac/mpc85xx_edac.c98
-rw-r--r--drivers/edac/mpc85xx_edac.h7
-rw-r--r--drivers/edac/r82600_edac.c2
-rw-r--r--drivers/edac/sb_edac.c10
-rw-r--r--drivers/edac/x38_edac.c2
-rw-r--r--drivers/extcon/Kconfig10
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/extcon-arizona.c73
-rw-r--r--drivers/extcon/extcon-gpio.c32
-rw-r--r--drivers/extcon/extcon-max14577.c752
-rw-r--r--drivers/extcon/extcon-palmas.c17
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/dmi-sysfs.c3
-rw-r--r--drivers/firmware/efi/Kconfig15
-rw-r--r--drivers/firmware/efi/Makefile3
-rw-r--r--drivers/firmware/efi/efi.c45
-rw-r--r--drivers/firmware/efi/runtime-map.c181
-rw-r--r--drivers/gpio/gpiolib.c4
-rw-r--r--drivers/gpu/drm/drm_modes.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c34
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c28
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c8
-rw-r--r--drivers/gpu/drm/i915/intel_display.c30
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/core/subdev.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/instmem.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/ic.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c2
-rw-r--r--drivers/gpu/drm/qxl/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c83
-rw-r--r--drivers/gpu/drm/radeon/cik.c12
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c8
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c4
-rw-r--r--drivers/gpu/drm/radeon/ni.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/si.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/hv/hv.c2
-rw-r--r--drivers/hwmon/coretemp.c60
-rw-r--r--drivers/hwmon/da9052-hwmon.c4
-rw-r--r--drivers/hwmon/fam15h_power.c2
-rw-r--r--drivers/hwmon/k10temp.c3
-rw-r--r--drivers/hwmon/k8temp.c2
-rw-r--r--drivers/hwmon/nct6775.c38
-rw-r--r--drivers/hwmon/sis5595.c2
-rw-r--r--drivers/hwmon/via686a.c2
-rw-r--r--drivers/hwmon/vt8231.c2
-rw-r--r--drivers/ide/buddha.c2
-rw-r--r--drivers/idle/intel_idle.c22
-rw-r--r--drivers/iio/Kconfig2
-rw-r--r--drivers/iio/Makefile2
-rw-r--r--drivers/iio/accel/bma180.c7
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c12
-rw-r--r--drivers/iio/adc/ad7266.c21
-rw-r--r--drivers/iio/adc/max1363.c8
-rw-r--r--drivers/iio/adc/mcp3422.c9
-rw-r--r--drivers/iio/adc/viperboard_adc.c20
-rw-r--r--drivers/iio/dac/ad5064.c7
-rw-r--r--drivers/iio/dac/ad5360.c7
-rw-r--r--drivers/iio/dac/ad5380.c7
-rw-r--r--drivers/iio/dac/ad5421.c26
-rw-r--r--drivers/iio/dac/ad5446.c9
-rw-r--r--drivers/iio/dac/ad5449.c7
-rw-r--r--drivers/iio/dac/ad5504.c46
-rw-r--r--drivers/iio/dac/ad5624r_spi.c7
-rw-r--r--drivers/iio/dac/ad5686.c11
-rw-r--r--drivers/iio/dac/ad5755.c21
-rw-r--r--drivers/iio/dac/ad5764.c7
-rw-r--r--drivers/iio/dac/ad5791.c55
-rw-r--r--drivers/iio/dac/max517.c1
-rw-r--r--drivers/iio/dac/mcp4725.c1
-rw-r--r--drivers/iio/gyro/adis16130.c9
-rw-r--r--drivers/iio/gyro/adxrs450.c14
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c11
-rw-r--r--drivers/iio/humidity/Kconfig15
-rw-r--r--drivers/iio/humidity/Makefile5
-rw-r--r--drivers/iio/humidity/dht11.c294
-rw-r--r--drivers/iio/industrialio-buffer.c33
-rw-r--r--drivers/iio/industrialio-core.c102
-rw-r--r--drivers/iio/industrialio-event.c160
-rw-r--r--drivers/iio/industrialio-trigger.c40
-rw-r--r--drivers/iio/kfifo_buf.c23
-rw-r--r--drivers/iio/light/Kconfig11
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/adjd_s311.c7
-rw-r--r--drivers/iio/light/apds9300.c8
-rw-r--r--drivers/iio/light/cm32181.c379
-rw-r--r--drivers/iio/light/cm36651.c35
-rw-r--r--drivers/iio/light/gp2ap020a00f.c8
-rw-r--r--drivers/iio/light/hid-sensor-als.c11
-rw-r--r--drivers/iio/light/tcs3472.c7
-rw-r--r--drivers/iio/light/tsl2563.c12
-rw-r--r--drivers/iio/light/vcnl4000.c11
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c12
-rw-r--r--drivers/iio/magnetometer/mag3110.c6
-rw-r--r--drivers/iio/orientation/Kconfig19
-rw-r--r--drivers/iio/orientation/Makefile6
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c428
-rw-r--r--drivers/iio/pressure/Kconfig12
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/mpl3115.c329
-rw-r--r--drivers/infiniband/core/iwcm.c11
-rw-r--r--drivers/infiniband/core/uverbs.h10
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c17
-rw-r--r--drivers/infiniband/core/uverbs_main.c27
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c78
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c3
-rw-r--r--drivers/input/input.c4
-rw-r--r--drivers/input/serio/serport.c28
-rw-r--r--drivers/input/touchscreen/zforce_ts.c21
-rw-r--r--drivers/isdn/hisax/hfc_pci.c4
-rw-r--r--drivers/isdn/hisax/telespci.c4
-rw-r--r--drivers/leds/leds-lp5521.c12
-rw-r--r--drivers/leds/leds-lp5523.c12
-rw-r--r--drivers/macintosh/Kconfig2
-rw-r--r--drivers/md/bcache/alloc.c2
-rw-r--r--drivers/md/bcache/bcache.h12
-rw-r--r--drivers/md/bcache/btree.c27
-rw-r--r--drivers/md/bcache/movinggc.c21
-rw-r--r--drivers/md/bcache/super.c2
-rw-r--r--drivers/md/bcache/sysfs.c50
-rw-r--r--drivers/md/bcache/util.c8
-rw-r--r--drivers/md/bcache/util.h2
-rw-r--r--drivers/md/bcache/writeback.c53
-rw-r--r--drivers/md/bitmap.c2
-rw-r--r--drivers/md/bitmap.h2
-rw-r--r--drivers/md/md.c18
-rw-r--r--drivers/md/md.h13
-rw-r--r--drivers/md/raid1.c3
-rw-r--r--drivers/md/raid10.c12
-rw-r--r--drivers/md/raid5.c7
-rw-r--r--drivers/mfd/rtsx_pcr.c10
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/ad525x_dpot.c4
-rw-r--r--drivers/misc/bmp085-i2c.c2
-rw-r--r--drivers/misc/bmp085-spi.c2
-rw-r--r--drivers/misc/bmp085.c39
-rw-r--r--drivers/misc/bmp085.h2
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c1
-rw-r--r--drivers/misc/genwqe/Kconfig13
-rw-r--r--drivers/misc/genwqe/Makefile7
-rw-r--r--drivers/misc/genwqe/card_base.c1205
-rw-r--r--drivers/misc/genwqe/card_base.h557
-rw-r--r--drivers/misc/genwqe/card_ddcb.c1376
-rw-r--r--drivers/misc/genwqe/card_ddcb.h188
-rw-r--r--drivers/misc/genwqe/card_debugfs.c500
-rw-r--r--drivers/misc/genwqe/card_dev.c1414
-rw-r--r--drivers/misc/genwqe/card_sysfs.c288
-rw-r--r--drivers/misc/genwqe/card_utils.c944
-rw-r--r--drivers/misc/genwqe/genwqe_driver.h77
-rw-r--r--drivers/misc/lkdtm.c7
-rw-r--r--drivers/misc/mei/amthif.c6
-rw-r--r--drivers/misc/mei/client.c27
-rw-r--r--drivers/misc/mei/debugfs.c4
-rw-r--r--drivers/misc/mei/hbm.c239
-rw-r--r--drivers/misc/mei/hbm.h7
-rw-r--r--drivers/misc/mei/hw-me.c40
-rw-r--r--drivers/misc/mei/hw.h3
-rw-r--r--drivers/misc/mei/init.c278
-rw-r--r--drivers/misc/mei/interrupt.c122
-rw-r--r--drivers/misc/mei/main.c2
-rw-r--r--drivers/misc/mei/mei_dev.h33
-rw-r--r--drivers/misc/mei/nfc.c20
-rw-r--r--drivers/misc/mei/pci-me.c27
-rw-r--r--drivers/misc/mei/wd.c1
-rw-r--r--drivers/misc/mic/host/mic_device.h5
-rw-r--r--drivers/misc/mic/host/mic_main.c2
-rw-r--r--drivers/misc/mic/host/mic_virtio.c2
-rw-r--r--drivers/misc/mic/host/mic_x100.c36
-rw-r--r--drivers/misc/sgi-xp/xpc_channel.c5
-rw-r--r--drivers/misc/ti-st/st_core.c2
-rw-r--r--drivers/misc/ti-st/st_kim.c1
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c10
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c45
-rw-r--r--drivers/net/bonding/bond_main.c5
-rw-r--r--drivers/net/ethernet/8390/hydra.c2
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c4
-rw-r--r--drivers/net/ethernet/amd/a2065.c13
-rw-r--r--drivers/net/ethernet/amd/ariadne.c13
-rw-r--r--drivers/net/ethernet/arc/emac_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h51
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c28
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c94
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c35
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c254
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c28
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c59
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c103
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h73
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c33
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c40
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c33
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/lantiq_etop.c3
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c41
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c4
-rw-r--r--drivers/net/ethernet/tile/tilegx.c3
-rw-r--r--drivers/net/ethernet/via/via-rhine.c1
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/yam.c1
-rw-r--r--drivers/net/hyperv/netvsc_drv.c20
-rw-r--r--drivers/net/macvlan.c26
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/team/team.c3
-rw-r--r--drivers/net/tun.c3
-rw-r--r--drivers/net/usb/Kconfig6
-rw-r--r--drivers/net/usb/dm9601.c56
-rw-r--r--drivers/net/usb/hso.c13
-rw-r--r--drivers/net/usb/mcs7830.c19
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/virtio_net.c11
-rw-r--r--drivers/net/vxlan.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c52
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c5
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c10
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/mwifiex/main.c3
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c4
-rw-r--r--drivers/net/xen-netback/common.h19
-rw-r--r--drivers/net/xen-netback/interface.c11
-rw-r--r--drivers/net/xen-netback/netback.c18
-rw-r--r--drivers/of/Kconfig2
-rw-r--r--drivers/of/address.c8
-rw-r--r--drivers/of/fdt.c12
-rw-r--r--drivers/of/irq.c5
-rw-r--r--drivers/parport/parport_mfc3.c2
-rw-r--r--drivers/parport/parport_pc.c20
-rw-r--r--drivers/parport/parport_serial.c5
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c30
-rw-r--r--drivers/pci/pci-acpi.c21
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c2
-rw-r--r--drivers/pcmcia/electra_cf.c2
-rw-r--r--drivers/phy/Kconfig6
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/phy-core.c44
-rw-r--r--drivers/phy/phy-mvebu-sata.c137
-rw-r--r--drivers/power/Kconfig1
-rw-r--r--drivers/power/power_supply_core.c12
-rw-r--r--drivers/powercap/intel_rapl.c13
-rw-r--r--drivers/rtc/rtc-cmos.c52
-rw-r--r--drivers/s390/block/dasd.c2
-rw-r--r--drivers/s390/char/sclp.h1
-rw-r--r--drivers/s390/char/sclp_cmd.c2
-rw-r--r--drivers/s390/char/sclp_early.c125
-rw-r--r--drivers/s390/char/tty3270.c9
-rw-r--r--drivers/s390/cio/blacklist.c6
-rw-r--r--drivers/s390/cio/ccwgroup.c12
-rw-r--r--drivers/s390/cio/chsc.c73
-rw-r--r--drivers/s390/cio/chsc.h51
-rw-r--r--drivers/s390/cio/css.c26
-rw-r--r--drivers/s390/cio/css.h1
-rw-r--r--drivers/s390/cio/device.c29
-rw-r--r--drivers/s390/cio/qdio_main.c91
-rw-r--r--drivers/s390/crypto/ap_bus.c31
-rw-r--r--drivers/s390/crypto/ap_bus.h4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c109
-rw-r--r--drivers/s390/crypto/zcrypt_api.h2
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c20
-rw-r--r--drivers/s390/crypto/zcrypt_error.h18
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c12
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c260
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.h2
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c11
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c12
-rw-r--r--drivers/scsi/a2091.c2
-rw-r--r--drivers/scsi/a3000.c2
-rw-r--r--drivers/scsi/a4000t.c2
-rw-r--r--drivers/scsi/gvp11.c2
-rw-r--r--drivers/scsi/zorro7xx.c2
-rw-r--r--drivers/staging/Kconfig6
-rw-r--r--drivers/staging/Makefile3
-rw-r--r--drivers/staging/android/Kconfig2
-rw-r--r--drivers/staging/android/Makefile2
-rw-r--r--drivers/staging/android/alarm-dev.c8
-rw-r--r--drivers/staging/android/ion/Kconfig35
-rw-r--r--drivers/staging/android/ion/Makefile10
-rw-r--r--drivers/staging/android/ion/compat_ion.c177
-rw-r--r--drivers/staging/android/ion/compat_ion.h30
-rw-r--r--drivers/staging/android/ion/ion.c1549
-rw-r--r--drivers/staging/android/ion/ion.h204
-rw-r--r--drivers/staging/android/ion/ion_carveout_heap.c194
-rw-r--r--drivers/staging/android/ion/ion_chunk_heap.c195
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c218
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c158
-rw-r--r--drivers/staging/android/ion/ion_heap.c318
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c195
-rw-r--r--drivers/staging/android/ion/ion_priv.h360
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c488
-rw-r--r--drivers/staging/android/ion/ion_test.c282
-rw-r--r--drivers/staging/android/ion/tegra/Makefile1
-rw-r--r--drivers/staging/android/ion/tegra/tegra_ion.c84
-rw-r--r--drivers/staging/android/sync.h50
-rw-r--r--drivers/staging/android/uapi/ion.h196
-rw-r--r--drivers/staging/android/uapi/ion_test.h70
-rw-r--r--drivers/staging/bcm/Adapter.h2
-rw-r--r--drivers/staging/bcm/Bcmchar.c142
-rw-r--r--drivers/staging/bcm/Bcmnet.c3
-rw-r--r--drivers/staging/bcm/DDRInit.c2042
-rw-r--r--drivers/staging/bcm/InterfaceDld.c140
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.c230
-rw-r--r--drivers/staging/bcm/InterfaceInit.c10
-rw-r--r--drivers/staging/bcm/InterfaceRx.c187
-rw-r--r--drivers/staging/bcm/InterfaceTx.c151
-rw-r--r--drivers/staging/bcm/PHSModule.c4
-rw-r--r--drivers/staging/bcm/Qos.c6
-rw-r--r--drivers/staging/bcm/nvm.c80
-rw-r--r--drivers/staging/btmtk_usb/Kconfig11
-rw-r--r--drivers/staging/btmtk_usb/Makefile1
-rw-r--r--drivers/staging/btmtk_usb/README14
-rw-r--r--drivers/staging/btmtk_usb/TODO10
-rw-r--r--drivers/staging/btmtk_usb/btmtk_usb.c1810
-rw-r--r--drivers/staging/btmtk_usb/btmtk_usb.h138
-rw-r--r--drivers/staging/ced1401/ced_ioc.c3
-rw-r--r--drivers/staging/ced1401/usb1401.c1
-rw-r--r--drivers/staging/comedi/Kconfig6
-rw-r--r--drivers/staging/comedi/Makefile2
-rw-r--r--drivers/staging/comedi/comedi_buf.c99
-rw-r--r--drivers/staging/comedi/comedi_fops.c527
-rw-r--r--drivers/staging/comedi/comedi_internal.h4
-rw-r--r--drivers/staging/comedi/comedidev.h40
-rw-r--r--drivers/staging/comedi/drivers.c34
-rw-r--r--drivers/staging/comedi/drivers/8255.c6
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c2
-rw-r--r--drivers/staging/comedi/drivers/Makefile2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c6
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_035.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1032.c6
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1516.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_16xx.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2032.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2200.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3200.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci8164.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c36
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c89
-rw-r--r--drivers/staging/comedi/drivers/adq12b.c29
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c128
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c2
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1724.c6
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c2
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c13
-rw-r--r--drivers/staging/comedi/drivers/amcc_s5933.h8
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_common.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_pci.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c4
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c62
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c109
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci263.c2
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c54
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c57
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c270
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c19
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c2
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c9
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c2
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c2
-rw-r--r--drivers/staging/comedi/drivers/das08.c75
-rw-r--r--drivers/staging/comedi/drivers/das08_pci.c2
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c107
-rw-r--r--drivers/staging/comedi/drivers/das1800.c139
-rw-r--r--drivers/staging/comedi/drivers/das6402.c11
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c94
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c109
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c149
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c70
-rw-r--r--drivers/staging/comedi/drivers/dt2815.c19
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c188
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c54
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c1
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c16
-rw-r--r--drivers/staging/comedi/drivers/fl512.c21
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c80
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.c15
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c2
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c2
-rw-r--r--drivers/staging/comedi/drivers/me4000.c52
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c2
-rw-r--r--drivers/staging/comedi/drivers/mf6x4.c354
-rw-r--r--drivers/staging/comedi/drivers/mite.c145
-rw-r--r--drivers/staging/comedi/drivers/mite.h12
-rw-r--r--drivers/staging/comedi/drivers/mpc624.c19
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c708
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c111
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio.c15
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio16d.c95
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_pci.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c631
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c179
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c63
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c315
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.h209
-rw-r--r--drivers/staging/comedi/drivers/ni_tio_internal.h431
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c88
-rw-r--r--drivers/staging/comedi/drivers/pcl812.c330
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c195
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c249
-rw-r--r--drivers/staging/comedi/drivers/pcm3724.c6
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c1476
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c428
-rw-r--r--drivers/staging/comedi/drivers/plx9080.h13
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c7
-rw-r--r--drivers/staging/comedi/drivers/s626.c21
-rw-r--r--drivers/staging/comedi/drivers/skel.c2
-rw-r--r--drivers/staging/comedi/drivers/unioxx5.c68
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c1
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c6
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c9
-rw-r--r--drivers/staging/comedi/kcomedilib/Makefile2
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c74
-rw-r--r--drivers/staging/comedi/proc.c6
-rw-r--r--drivers/staging/comedi/range.c9
-rw-r--r--drivers/staging/crystalhd/bc_dts_glob_lnx.h2
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.c4
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.h2
-rw-r--r--drivers/staging/crystalhd/crystalhd_fw_if.h2
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.c2
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.h6
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.c2
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.h3
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.c4
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.h2
-rw-r--r--drivers/staging/cxt1e1/comet.c60
-rw-r--r--drivers/staging/cxt1e1/comet.h601
-rw-r--r--drivers/staging/cxt1e1/functions.c2
-rw-r--r--drivers/staging/cxt1e1/musycc.c4
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c10
-rw-r--r--drivers/staging/cxt1e1/pmcc4_private.h2
-rw-r--r--drivers/staging/cxt1e1/sbeid.c323
-rw-r--r--drivers/staging/dgap/dgap_conf.h6
-rw-r--r--drivers/staging/dgap/dgap_driver.c18
-rw-r--r--drivers/staging/dgap/dgap_driver.h1
-rw-r--r--drivers/staging/dgap/dgap_fep5.c112
-rw-r--r--drivers/staging/dgap/dgap_parse.c1
-rw-r--r--drivers/staging/dgap/dgap_trace.c17
-rw-r--r--drivers/staging/dgap/dgap_tty.c180
-rw-r--r--drivers/staging/dgap/downld.c168
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c253
-rw-r--r--drivers/staging/dgnc/dgnc_trace.c19
-rw-r--r--drivers/staging/dgrp/dgrp_driver.c1
-rw-r--r--drivers/staging/dgrp/dgrp_net_ops.c330
-rw-r--r--drivers/staging/dgrp/dgrp_tty.c4
-rw-r--r--drivers/staging/dwc2/TODO33
-rw-r--r--drivers/staging/et131x/README4
-rw-r--r--drivers/staging/et131x/et131x.c841
-rw-r--r--drivers/staging/et131x/et131x.h40
-rw-r--r--drivers/staging/frontier/alphatrack.c1
-rw-r--r--drivers/staging/frontier/tranzport.c1
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/boot.h304
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c766
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_debug.c2
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_download.c133
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c1309
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_proc.c14
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.c5
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.h5
-rw-r--r--drivers/staging/fwserial/Kconfig20
-rw-r--r--drivers/staging/fwserial/fwserial.c151
-rw-r--r--drivers/staging/fwserial/fwserial.h24
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c44
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c8
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c1
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c40
-rw-r--r--drivers/staging/gdm72xx/gdm_qos.c2
-rw-r--r--drivers/staging/gdm72xx/gdm_usb.c5
-rw-r--r--drivers/staging/gdm72xx/sdio_boot.c1
-rw-r--r--drivers/staging/goldfish/goldfish_nand.c1
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c7
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c8
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c16
-rw-r--r--drivers/staging/iio/adc/ad7280a.c28
-rw-r--r--drivers/staging/iio/adc/ad7291.c8
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c7
-rw-r--r--drivers/staging/iio/adc/ad7816.c12
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c6
-rw-r--r--drivers/staging/iio/adc/lpc32xx_adc.c12
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c311
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c6
-rw-r--r--drivers/staging/iio/addac/adt7316-spi.c6
-rw-r--r--drivers/staging/iio/addac/adt7316.c12
-rw-r--r--drivers/staging/iio/addac/adt7316.h1
-rw-r--r--drivers/staging/iio/cdc/ad7150.c8
-rw-r--r--drivers/staging/iio/cdc/ad7746.c14
-rw-r--r--drivers/staging/iio/frequency/ad9832.h6
-rw-r--r--drivers/staging/iio/frequency/ad9834.h4
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c10
-rw-r--r--drivers/staging/iio/iio_simple_dummy.c8
-rw-r--r--drivers/staging/iio/light/isl29018.c13
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c40
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c7
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c10
-rw-r--r--drivers/staging/imx-drm/Kconfig6
-rw-r--r--drivers/staging/imx-drm/Makefile1
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c3
-rw-r--r--drivers/staging/imx-drm/imx-hdmi.c1916
-rw-r--r--drivers/staging/imx-drm/imx-hdmi.h1032
-rw-r--r--drivers/staging/imx-drm/imx-ldb.c5
-rw-r--r--drivers/staging/imx-drm/imx-tve.c2
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c1
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c3
-rw-r--r--drivers/staging/imx-drm/ipuv3-plane.c12
-rw-r--r--drivers/staging/imx-drm/parallel-display.c3
-rw-r--r--drivers/staging/keucr/smcommon.h2
-rw-r--r--drivers/staging/keucr/smil.h8
-rw-r--r--drivers/staging/keucr/smilecc.c2
-rw-r--r--drivers/staging/keucr/smilmain.c101
-rw-r--r--drivers/staging/keucr/smilsub.c37
-rw-r--r--drivers/staging/keucr/smscsi.c28
-rw-r--r--drivers/staging/keucr/usb.c1
-rw-r--r--drivers/staging/line6/driver.c7
-rw-r--r--drivers/staging/line6/pcm.c1
-rw-r--r--drivers/staging/line6/usbdefs.h7
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/curproc.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h210
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h64
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/kp30.h150
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-fs.h92
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h1
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h5
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h2
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h11
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h4
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c3
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c358
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h6
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c307
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c124
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c16
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c21
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c39
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c10
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c63
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c30
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c7
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c64
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c105
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h8
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c8
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c5
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h5
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c11
-rw-r--r--drivers/staging/lustre/lustre/Kconfig2
-rw-r--r--drivers/staging/lustre/lustre/fid/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c2
-rw-r--r--drivers/staging/lustre/lustre/fld/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c2
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c7
-rw-r--r--drivers/staging/lustre/lustre/fld/lproc_fld.c3
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h6
-rw-r--r--drivers/staging/lustre/lustre/include/dt_object.h2
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_acl.h18
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_debug.h47
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_intent.h2
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_lite.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h8
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h19
-rw-r--r--drivers/staging/lustre/lustre/include/lu_target.h91
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/liblustreapi.h43
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h72
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h42
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustreapi.h310
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_debug.h19
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h90
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ha.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h11
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h13
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h9
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h245
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h7
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h17
-rw-r--r--drivers/staging/lustre/lustre/include/md_object.h4
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h15
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h12
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c18
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_misc.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c45
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c15
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c57
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c9
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c49
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c23
-rw-r--r--drivers/staging/lustre/lustre/libcfs/debug.c42
-rw-r--r--drivers/staging/lustre/lustre/libcfs/hash.c12
-rw-r--r--drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c7
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c6
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c13
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c11
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c36
-rw-r--r--drivers/staging/lustre/lustre/libcfs/lwt.c266
-rw-r--r--drivers/staging/lustre/lustre/libcfs/module.c55
-rw-r--r--drivers/staging/lustre/lustre/libcfs/nidstrings.c34
-rw-r--r--drivers/staging/lustre/lustre/libcfs/tracefile.c22
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c34
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c24
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c657
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h76
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c76
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c41
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c14
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c61
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c104
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c617
-rw-r--r--drivers/staging/lustre/lustre/lmv/Makefile4
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c6
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c5
-rw-r--r--drivers/staging/lustre/lustre/lov/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h16
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c15
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c35
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c20
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c14
-rw-r--r--drivers/staging/lustre/lustre/lov/lproc_lov.c40
-rw-r--r--drivers/staging/lustre/lustre/lvfs/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c760
-rw-r--r--drivers/staging/lustre/lustre/lvfs/lvfs_lib.c2
-rw-r--r--drivers/staging/lustre/lustre/lvfs/lvfs_linux.c1
-rw-r--r--drivers/staging/lustre/lustre/mdc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c3
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h3
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c31
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c113
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c86
-rw-r--r--drivers/staging/lustre/lustre/mgc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/mgc/libmgc.c3
-rw-r--r--drivers/staging/lustre/lustre/mgc/lproc_mgc.c3
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c416
-rw-r--r--drivers/staging/lustre/lustre/obdclass/capa.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c20
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c21
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c214
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_test.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/local_storage.c11
-rw-r--r--drivers/staging/lustre/lustre/obdclass/local_storage.h3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c356
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c30
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c3
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c4
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c25
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c75
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_asn1.h6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_err.h10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_mech_switch.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c40
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c73
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_server.c450
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c81
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c375
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c70
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h16
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c50
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c77
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wirehdr.c47
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c86
-rw-r--r--drivers/staging/media/as102/as102_drv.c1
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c1
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c2
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c4
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c2
-rw-r--r--drivers/staging/media/go7007/go7007-driver.c1
-rw-r--r--drivers/staging/media/go7007/go7007-fw.c4
-rw-r--r--drivers/staging/media/go7007/go7007-i2c.c1
-rw-r--r--drivers/staging/media/go7007/go7007-loader.c1
-rw-r--r--drivers/staging/media/go7007/go7007-usb.c1
-rw-r--r--drivers/staging/media/go7007/go7007-v4l2.c1
-rw-r--r--drivers/staging/media/go7007/s2250-board.c1
-rw-r--r--drivers/staging/media/go7007/saa7134-go7007.c2
-rw-r--r--drivers/staging/media/go7007/snd-go7007.c1
-rw-r--r--drivers/staging/media/lirc/lirc_igorplugusb.c4
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c12
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c1
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c6
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c4
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-core.c2
-rw-r--r--drivers/staging/netlogic/xlr_net.c3
-rw-r--r--drivers/staging/nvec/nvec.c2
-rw-r--r--drivers/staging/octeon/ethernet-mdio.h1
-rw-r--r--drivers/staging/octeon/ethernet-rx.c1
-rw-r--r--drivers/staging/octeon/ethernet-tx.c1
-rw-r--r--drivers/staging/octeon/ethernet.c1
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c6
-rw-r--r--drivers/staging/ozwpan/ozeltbuf.c3
-rw-r--r--drivers/staging/ozwpan/ozpd.c1
-rw-r--r--drivers/staging/ozwpan/ozproto.c3
-rw-r--r--drivers/staging/ozwpan/ozusbsvc.c1
-rw-r--r--drivers/staging/ozwpan/ozusbsvc1.c1
-rw-r--r--drivers/staging/panel/panel.c4
-rw-r--r--drivers/staging/phison/phison.c3
-rw-r--r--drivers/staging/rtl8187se/ieee80211/dot11d.c132
-rw-r--r--drivers/staging/rtl8187se/ieee80211/dot11d.h66
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h125
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c4
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c13
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c53
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c1
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c39
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c188
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c64
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c13
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c8
-rw-r--r--drivers/staging/rtl8187se/r8180.h35
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c43
-rw-r--r--drivers/staging/rtl8187se/r8180_dm.h2
-rw-r--r--drivers/staging/rtl8187se/r8180_hw.h4
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225.h3
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.c257
-rw-r--r--drivers/staging/rtl8187se/r8185b_init.c17
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c92
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_br_ext.c87
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c67
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_io.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c36
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c163
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c99
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp.c23
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_p2p.c27
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c33
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c71
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sreset.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c122
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c136
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c292
-rw-r--r--drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c918
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_HWConfig.c361
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RTL8188E.c163
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c62
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_interface.c102
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c16
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_mp.c40
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c12
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c12
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_ops_linux.c1
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h2
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h4
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h29
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h90
-rw-r--r--drivers/staging/rtl8188eu/include/odm_HWConfig.h4
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h2
-rw-r--r--drivers/staging/rtl8188eu/include/odm_debug.h15
-rw-r--r--drivers/staging/rtl8188eu/include/odm_interface.h54
-rw-r--r--drivers/staging/rtl8188eu/include/odm_precomp.h7
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h143
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h6
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h70
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h105
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_eeprom.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_efuse.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_io.h36
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_set.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_iol.h8
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h6
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h62
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h14
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_security.h2
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops.h4
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c125
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c16
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c139
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c6
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c22
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c8
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c12
-rw-r--r--drivers/staging/rtl8192e/dot11d.c2
-rw-r--r--drivers/staging/rtl8192e/dot11d.h5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c13
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c24
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h3
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c10
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h37
-rw-r--r--drivers/staging/rtl8192e/rtllib.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c8
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_debug.h6
-rw-r--r--drivers/staging/rtl8192e/rtllib_endianfree.h44
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c76
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c32
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c16
-rw-r--r--drivers/staging/rtl8192u/r8192U.h1
-rw-r--r--drivers/staging/rtl8712/os_intfs.c1
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c1
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c1
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c1
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c3
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c1
-rw-r--r--drivers/staging/rtl8712/usb_intf.c5
-rw-r--r--drivers/staging/rts5139/ms.c2
-rw-r--r--drivers/staging/rts5139/rts51x.c1
-rw-r--r--drivers/staging/rts5139/rts51x_card.c10
-rw-r--r--drivers/staging/rts5139/rts51x_card.h8
-rw-r--r--drivers/staging/rts5139/rts51x_scsi.c1
-rw-r--r--drivers/staging/rts5208/Kconfig15
-rw-r--r--drivers/staging/rts5208/Makefile6
-rw-r--r--drivers/staging/rts5208/TODO7
-rw-r--r--drivers/staging/rts5208/debug.h43
-rw-r--r--drivers/staging/rts5208/general.c35
-rw-r--r--drivers/staging/rts5208/general.h31
-rw-r--r--drivers/staging/rts5208/ms.c4208
-rw-r--r--drivers/staging/rts5208/ms.h227
-rw-r--r--drivers/staging/rts5208/rtsx.c1071
-rw-r--r--drivers/staging/rts5208/rtsx.h185
-rw-r--r--drivers/staging/rts5208/rtsx_card.c1126
-rw-r--r--drivers/staging/rts5208/rtsx_card.h1098
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c1979
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h1002
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c3370
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.h143
-rw-r--r--drivers/staging/rts5208/rtsx_sys.h50
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c769
-rw-r--r--drivers/staging/rts5208/rtsx_transport.h66
-rw-r--r--drivers/staging/rts5208/sd.c4525
-rw-r--r--drivers/staging/rts5208/sd.h301
-rw-r--r--drivers/staging/rts5208/spi.c877
-rw-r--r--drivers/staging/rts5208/spi.h65
-rw-r--r--drivers/staging/rts5208/trace.h93
-rw-r--r--drivers/staging/rts5208/xd.c2088
-rw-r--r--drivers/staging/rts5208/xd.h188
-rw-r--r--drivers/staging/sb105x/sb_mp_register.h8
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.c4
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.h1
-rw-r--r--drivers/staging/sbe-2t3e3/ctrl.c14
-rw-r--r--drivers/staging/sep/sep_crypto.c3
-rw-r--r--drivers/staging/sep/sep_main.c3
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c18
-rw-r--r--drivers/staging/silicom/bpctl_mod.c2
-rw-r--r--drivers/staging/silicom/bypasslib/bypass.c170
-rw-r--r--drivers/staging/slicoss/README1
-rw-r--r--drivers/staging/slicoss/slicoss.c23
-rw-r--r--drivers/staging/sm7xxfb/sm7xxfb.c4
-rw-r--r--drivers/staging/speakup/main.c2
-rw-r--r--drivers/staging/speakup/serialio.c4
-rw-r--r--drivers/staging/speakup/serialio.h26
-rw-r--r--drivers/staging/tidspbridge/Makefile2
-rw-r--r--drivers/staging/tidspbridge/gen/gh.c148
-rw-r--r--drivers/staging/tidspbridge/gen/uuidutil.c85
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/gh.h12
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/uuidutil.h18
-rw-r--r--drivers/staging/tidspbridge/pmgr/cmm.c7
-rw-r--r--drivers/staging/tidspbridge/pmgr/dbll.c98
-rw-r--r--drivers/staging/tidspbridge/pmgr/dev.c6
-rw-r--r--drivers/staging/tidspbridge/pmgr/dmm.c20
-rw-r--r--drivers/staging/tidspbridge/pmgr/dspapi.c12
-rw-r--r--drivers/staging/tidspbridge/rmgr/dbdcd.c108
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c33
-rw-r--r--drivers/staging/usbip/stub_rx.c20
-rw-r--r--drivers/staging/usbip/usbip_common.c5
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_common.c2
-rw-r--r--drivers/staging/usbip/userspace/libsrc/vhci_driver.c69
-rw-r--r--drivers/staging/usbip/userspace/libsrc/vhci_driver.h2
-rw-r--r--drivers/staging/usbip/userspace/src/Makefile.am2
-rw-r--r--drivers/staging/usbip/userspace/src/usbip.c6
-rw-r--r--drivers/staging/usbip/userspace/src/usbip.h1
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_port.c57
-rw-r--r--drivers/staging/usbip/vhci_hcd.c15
-rw-r--r--drivers/staging/vme/devices/vme_user.c14
-rw-r--r--drivers/staging/vme/devices/vme_user.h26
-rw-r--r--drivers/staging/vt6655/80211hdr.h2
-rw-r--r--drivers/staging/vt6655/baseband.c4
-rw-r--r--drivers/staging/vt6655/bssdb.c354
-rw-r--r--drivers/staging/vt6655/card.c10
-rw-r--r--drivers/staging/vt6655/channel.c16
-rw-r--r--drivers/staging/vt6655/datarate.c8
-rw-r--r--drivers/staging/vt6655/device.h1
-rw-r--r--drivers/staging/vt6655/device_main.c62
-rw-r--r--drivers/staging/vt6655/dpc.c40
-rw-r--r--drivers/staging/vt6655/hostap.c14
-rw-r--r--drivers/staging/vt6655/iwctl.c4
-rw-r--r--drivers/staging/vt6655/key.c44
-rw-r--r--drivers/staging/vt6655/mac.c4
-rw-r--r--drivers/staging/vt6655/power.c9
-rw-r--r--drivers/staging/vt6655/rf.c2
-rw-r--r--drivers/staging/vt6655/rxtx.c12
-rw-r--r--drivers/staging/vt6655/vntwifi.c6
-rw-r--r--drivers/staging/vt6655/wcmd.c22
-rw-r--r--drivers/staging/vt6655/wctl.c6
-rw-r--r--drivers/staging/vt6655/wmgr.c125
-rw-r--r--drivers/staging/vt6655/wpa.c2
-rw-r--r--drivers/staging/vt6655/wpa2.c18
-rw-r--r--drivers/staging/vt6655/wpactl.c44
-rw-r--r--drivers/staging/vt6655/wpactl.h12
-rw-r--r--drivers/staging/vt6655/wroute.c50
-rw-r--r--drivers/staging/vt6656/Makefile1
-rw-r--r--drivers/staging/vt6656/aes_ccmp.c16
-rw-r--r--drivers/staging/vt6656/baseband.c335
-rw-r--r--drivers/staging/vt6656/bssdb.c2271
-rw-r--r--drivers/staging/vt6656/bssdb.h4
-rw-r--r--drivers/staging/vt6656/card.c108
-rw-r--r--drivers/staging/vt6656/channel.c3
-rw-r--r--drivers/staging/vt6656/datarate.c2
-rw-r--r--drivers/staging/vt6656/datarate.h1
-rw-r--r--drivers/staging/vt6656/desc.h8
-rw-r--r--drivers/staging/vt6656/device.h30
-rw-r--r--drivers/staging/vt6656/device_cfg.h2
-rw-r--r--drivers/staging/vt6656/dpc.c49
-rw-r--r--drivers/staging/vt6656/int.c60
-rw-r--r--drivers/staging/vt6656/iwctl.c20
-rw-r--r--drivers/staging/vt6656/main_usb.c553
-rw-r--r--drivers/staging/vt6656/mib.c489
-rw-r--r--drivers/staging/vt6656/mib.h378
-rw-r--r--drivers/staging/vt6656/rf.c7
-rw-r--r--drivers/staging/vt6656/rndis.h30
-rw-r--r--drivers/staging/vt6656/rxtx.c344
-rw-r--r--drivers/staging/vt6656/rxtx.h12
-rw-r--r--drivers/staging/vt6656/tkip.c4
-rw-r--r--drivers/staging/vt6656/usbpipe.c10
-rw-r--r--drivers/staging/vt6656/wcmd.c1712
-rw-r--r--drivers/staging/vt6656/wmgr.c10
-rw-r--r--drivers/staging/vt6656/wpa.c12
-rw-r--r--drivers/staging/vt6656/wpa2.c16
-rw-r--r--drivers/staging/vt6656/wpactl.c2
-rw-r--r--drivers/staging/winbond/mds.c101
-rw-r--r--drivers/staging/wlags49_h2/debug.h56
-rw-r--r--drivers/staging/wlags49_h2/sta_h25.c6
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.c67
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.h2
-rw-r--r--drivers/staging/wlags49_h2/wl_enc.c15
-rw-r--r--drivers/staging/wlags49_h2/wl_enc.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_main.c155
-rw-r--r--drivers/staging/wlags49_h2/wl_main.h2
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c97
-rw-r--r--drivers/staging/wlags49_h2/wl_pci.c1578
-rw-r--r--drivers/staging/wlags49_h2/wl_pci.h109
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.c134
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.c17
-rw-r--r--drivers/staging/wlags49_h2/wl_util.c59
-rw-r--r--drivers/staging/wlags49_h2/wl_version.h34
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c188
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.h2
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c18
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h4
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c4
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h6
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c6
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c7
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c10
-rw-r--r--drivers/staging/xgifb/XGI_main.h2
-rw-r--r--drivers/staging/xillybus/Kconfig2
-rw-r--r--drivers/staging/xillybus/xillybus_of.c26
-rw-r--r--drivers/staging/xillybus/xillybus_pcie.c4
-rw-r--r--drivers/staging/zsmalloc/Kconfig13
-rw-r--r--drivers/staging/zsmalloc/zsmalloc-main.c86
-rw-r--r--drivers/staging/zsmalloc/zsmalloc.h9
-rw-r--r--drivers/thermal/intel_powerclamp.c6
-rw-r--r--drivers/tty/amiserial.c26
-rw-r--r--drivers/tty/cyclades.c2
-rw-r--r--drivers/tty/goldfish.c1
-rw-r--r--drivers/tty/hvc/hvc_console.c2
-rw-r--r--drivers/tty/hvc/hvsi_lib.c1
-rw-r--r--drivers/tty/ipwireless/tty.c1
-rw-r--r--drivers/tty/n_gsm.c63
-rw-r--r--drivers/tty/n_r3964.c2
-rw-r--r--drivers/tty/n_tty.c113
-rw-r--r--drivers/tty/rocket.c2
-rw-r--r--drivers/tty/serial/8250/8250_core.c4
-rw-r--r--drivers/tty/serial/8250/8250_dw.c9
-rw-r--r--drivers/tty/serial/8250/8250_em.c1
-rw-r--r--drivers/tty/serial/8250/8250_pci.c9
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c1
-rw-r--r--drivers/tty/serial/8250/serial_cs.c1
-rw-r--r--drivers/tty/serial/Kconfig21
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/amba-pl010.c15
-rw-r--r--drivers/tty/serial/amba-pl011.c84
-rw-r--r--drivers/tty/serial/atmel_serial.c45
-rw-r--r--drivers/tty/serial/clps711x.c454
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c1
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c1
-rw-r--r--drivers/tty/serial/imx.c8
-rw-r--r--drivers/tty/serial/kgdb_nmi.c1
-rw-r--r--drivers/tty/serial/mxs-auart.c9
-rw-r--r--drivers/tty/serial/of_serial.c1
-rw-r--r--drivers/tty/serial/pch_uart.c3
-rw-r--r--drivers/tty/serial/rp2.c2
-rw-r--r--drivers/tty/serial/sc26xx.c749
-rw-r--r--drivers/tty/serial/serial_core.c8
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c39
-rw-r--r--drivers/tty/synclink.c4
-rw-r--r--drivers/tty/synclink_gt.c4
-rw-r--r--drivers/tty/synclinkmp.c4
-rw-r--r--drivers/tty/tty_buffer.c111
-rw-r--r--drivers/tty/tty_ldisc.c1
-rw-r--r--drivers/tty/tty_port.c1
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_mf624.c2
-rw-r--r--drivers/usb/Kconfig2
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/dwc2/Kconfig (renamed from drivers/staging/dwc2/Kconfig)0
-rw-r--r--drivers/usb/dwc2/Makefile (renamed from drivers/staging/dwc2/Makefile)0
-rw-r--r--drivers/usb/dwc2/core.c (renamed from drivers/staging/dwc2/core.c)378
-rw-r--r--drivers/usb/dwc2/core.h (renamed from drivers/staging/dwc2/core.h)62
-rw-r--r--drivers/usb/dwc2/core_intr.c (renamed from drivers/staging/dwc2/core_intr.c)10
-rw-r--r--drivers/usb/dwc2/hcd.c (renamed from drivers/staging/dwc2/hcd.c)22
-rw-r--r--drivers/usb/dwc2/hcd.h (renamed from drivers/staging/dwc2/hcd.h)4
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c (renamed from drivers/staging/dwc2/hcd_ddma.c)8
-rw-r--r--drivers/usb/dwc2/hcd_intr.c (renamed from drivers/staging/dwc2/hcd_intr.c)6
-rw-r--r--drivers/usb/dwc2/hcd_queue.c (renamed from drivers/staging/dwc2/hcd_queue.c)195
-rw-r--r--drivers/usb/dwc2/hw.h (renamed from drivers/staging/dwc2/hw.h)0
-rw-r--r--drivers/usb/dwc2/pci.c (renamed from drivers/staging/dwc2/pci.c)2
-rw-r--r--drivers/usb/dwc2/platform.c (renamed from drivers/staging/dwc2/platform.c)60
-rw-r--r--drivers/video/amifb.c2
-rw-r--r--drivers/video/cirrusfb.c4
-rw-r--r--drivers/video/macfb.c1
-rw-r--r--drivers/video/valkyriefb.c1
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/vme/Kconfig2
-rw-r--r--drivers/vme/boards/vme_vmivme7805.c2
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c2
-rw-r--r--drivers/vme/bridges/vme_tsi148.c2
-rw-r--r--drivers/vme/vme.c25
-rw-r--r--drivers/w1/masters/mxc_w1.c31
-rw-r--r--drivers/zorro/Makefile3
-rw-r--r--drivers/zorro/names.c11
-rw-r--r--drivers/zorro/proc.c10
-rw-r--r--drivers/zorro/zorro-driver.c11
-rw-r--r--drivers/zorro/zorro-sysfs.c22
-rw-r--r--drivers/zorro/zorro.c27
-rw-r--r--drivers/zorro/zorro.h5
-rw-r--r--fs/Makefile2
-rw-r--r--fs/cifs/cifsproto.h7
-rw-r--r--fs/cifs/cifssmb.c6
-rw-r--r--fs/cifs/dir.c11
-rw-r--r--fs/cifs/inode.c6
-rw-r--r--fs/cifs/link.c26
-rw-r--r--fs/dcache.c7
-rw-r--r--fs/eventpoll.c4
-rw-r--r--fs/ext2/super.c1
-rw-r--r--fs/ext4/ext4.h10
-rw-r--r--fs/ext4/ext4_jbd2.c9
-rw-r--r--fs/ext4/extents.c45
-rw-r--r--fs/ext4/inode.c12
-rw-r--r--fs/ext4/mballoc.c17
-rw-r--r--fs/ext4/super.c21
-rw-r--r--fs/fs-writeback.c15
-rw-r--r--fs/gfs2/aops.c30
-rw-r--r--fs/gfs2/glock.c2
-rw-r--r--fs/gfs2/glops.c10
-rw-r--r--fs/gfs2/log.c4
-rw-r--r--fs/gfs2/meta_io.c5
-rw-r--r--fs/gfs2/ops_fstype.c12
-rw-r--r--fs/jbd2/journal.c18
-rw-r--r--fs/jbd2/recovery.c2
-rw-r--r--fs/jbd2/transaction.c16
-rw-r--r--fs/kernfs/Makefile5
-rw-r--r--fs/kernfs/dir.c1073
-rw-r--r--fs/kernfs/file.c867
-rw-r--r--fs/kernfs/inode.c377
-rw-r--r--fs/kernfs/kernfs-internal.h122
-rw-r--r--fs/kernfs/mount.c165
-rw-r--r--fs/kernfs/symlink.c151
-rw-r--r--fs/namespace.c4
-rw-r--r--fs/nilfs2/segment.c10
-rw-r--r--fs/sysfs/Makefile2
-rw-r--r--fs/sysfs/dir.c1075
-rw-r--r--fs/sysfs/file.c961
-rw-r--r--fs/sysfs/group.c102
-rw-r--r--fs/sysfs/inode.c331
-rw-r--r--fs/sysfs/mount.c184
-rw-r--r--fs/sysfs/symlink.c219
-rw-r--r--fs/sysfs/sysfs.h236
-rw-r--r--fs/xfs/xfs_attr_remote.c2
-rw-r--r--fs/xfs/xfs_bmap_util.c1
-rw-r--r--include/acpi/acpi_bus.h4
-rw-r--r--include/asm-generic/barrier.h55
-rw-r--r--include/drm/drm_pciids.h2
-rw-r--r--include/linux/auxvec.h2
-rw-r--r--include/linux/bottom_half.h32
-rw-r--r--include/linux/compiler.h9
-rw-r--r--include/linux/component.h32
-rw-r--r--include/linux/context_tracking.h10
-rw-r--r--include/linux/context_tracking_state.h11
-rw-r--r--include/linux/crash_dump.h2
-rw-r--r--include/linux/edac.h28
-rw-r--r--include/linux/efi.h17
-rw-r--r--include/linux/extcon/extcon-gpio.h1
-rw-r--r--include/linux/firmware.h7
-rw-r--r--include/linux/hardirq.h1
-rw-r--r--include/linux/hid-sensor-ids.h12
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/iio/buffer.h3
-rw-r--r--include/linux/iio/events.h4
-rw-r--r--include/linux/iio/iio.h115
-rw-r--r--include/linux/iio/types.h1
-rw-r--r--include/linux/init.h2
-rw-r--r--include/linux/init_task.h10
-rw-r--r--include/linux/irqdesc.h8
-rw-r--r--include/linux/jump_label.h19
-rw-r--r--include/linux/kernel.h9
-rw-r--r--include/linux/kernfs.h376
-rw-r--r--include/linux/kobj_completion.h18
-rw-r--r--include/linux/kobject.h2
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/memory.h1
-rw-r--r--include/linux/mfd/arizona/registers.h9
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h1
-rw-r--r--include/linux/netdevice.h34
-rw-r--r--include/linux/percpu-defs.h1
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/platform_data/hwmon-s3c.h10
-rw-r--r--include/linux/platform_data/max197.h5
-rw-r--r--include/linux/platform_data/sht15.h5
-rw-r--r--include/linux/preempt.h37
-rw-r--r--include/linux/preempt_mask.h16
-rw-r--r--include/linux/rculist.h4
-rw-r--r--include/linux/rcupdate.h153
-rw-r--r--include/linux/rcutiny.h2
-rw-r--r--include/linux/rcutree.h36
-rw-r--r--include/linux/rtmutex.h18
-rw-r--r--include/linux/rtnetlink.h5
-rw-r--r--include/linux/rwlock_api_smp.h12
-rw-r--r--include/linux/sched.h141
-rw-r--r--include/linux/sched/deadline.h24
-rw-r--r--include/linux/sched/rt.h5
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/seqlock.h27
-rw-r--r--include/linux/skbuff.h9
-rw-r--r--include/linux/spinlock.h10
-rw-r--r--include/linux/spinlock_api_smp.h12
-rw-r--r--include/linux/spinlock_api_up.h16
-rw-r--r--include/linux/syscalls.h6
-rw-r--r--include/linux/sysfs.h47
-rw-r--r--include/linux/tick.h8
-rw-r--r--include/linux/tty.h8
-rw-r--r--include/linux/tty_flip.h11
-rw-r--r--include/linux/tty_ldisc.h6
-rw-r--r--include/linux/uaccess.h5
-rw-r--r--include/linux/uprobes.h52
-rw-r--r--include/linux/vme.h3
-rw-r--r--include/linux/vtime.h4
-rw-r--r--include/linux/zorro.h121
-rw-r--r--include/net/busy_poll.h19
-rw-r--r--include/net/if_inet6.h1
-rw-r--r--include/net/llc_pdu.h2
-rw-r--r--include/net/sctp/structs.h3
-rw-r--r--include/rdma/ib_verbs.h2
-rw-r--r--include/trace/events/ras.h10
-rw-r--r--include/uapi/asm-generic/statfs.h2
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/uapi/linux/Kbuild2
-rw-r--r--include/uapi/linux/genwqe/genwqe_card.h500
-rw-r--r--include/uapi/linux/input.h3
-rw-r--r--include/uapi/linux/kexec.h1
-rw-r--r--include/uapi/linux/perf_event.h1
-rw-r--r--include/uapi/linux/sched.h6
-rw-r--r--include/uapi/linux/zorro.h113
-rw-r--r--include/uapi/linux/zorro_ids.h (renamed from include/linux/zorro_ids.h)0
-rw-r--r--include/xen/interface/callback.h2
-rw-r--r--include/xen/interface/io/protocols.h3
-rw-r--r--init/Kconfig2
-rw-r--r--kernel/cgroup.c50
-rw-r--r--kernel/context_tracking.c8
-rw-r--r--kernel/cpu/idle.c17
-rw-r--r--kernel/events/core.c31
-rw-r--r--kernel/events/ring_buffer.c42
-rw-r--r--kernel/events/uprobes.c60
-rw-r--r--kernel/fork.c14
-rw-r--r--kernel/freezer.c6
-rw-r--r--kernel/futex.c203
-rw-r--r--kernel/hrtimer.c3
-rw-r--r--kernel/locking/lockdep.c4
-rw-r--r--kernel/locking/mutex-debug.c7
-rw-r--r--kernel/locking/rtmutex-debug.c8
-rw-r--r--kernel/locking/rtmutex.c166
-rw-r--r--kernel/locking/rtmutex_common.h23
-rw-r--r--kernel/panic.c2
-rw-r--r--kernel/posix-cpu-timers.c327
-rw-r--r--kernel/power/console.c1
-rw-r--r--kernel/rcu/rcu.h5
-rw-r--r--kernel/rcu/srcu.c57
-rw-r--r--kernel/rcu/torture.c75
-rw-r--r--kernel/rcu/tree.c97
-rw-r--r--kernel/rcu/tree.h12
-rw-r--r--kernel/rcu/tree_plugin.h102
-rw-r--r--kernel/rcu/tree_trace.c3
-rw-r--r--kernel/rcu/update.c5
-rw-r--r--kernel/sched/Makefile5
-rw-r--r--kernel/sched/clock.c78
-rw-r--r--kernel/sched/core.c822
-rw-r--r--kernel/sched/cpudeadline.c216
-rw-r--r--kernel/sched/cpudeadline.h33
-rw-r--r--kernel/sched/deadline.c1640
-rw-r--r--kernel/sched/debug.c4
-rw-r--r--kernel/sched/fair.c85
-rw-r--r--kernel/sched/rt.c2
-rw-r--r--kernel/sched/sched.h146
-rw-r--r--kernel/sched/stop_task.c2
-rw-r--r--kernel/softirq.c92
-rw-r--r--kernel/sysctl.c7
-rw-r--r--kernel/time/sched_clock.c6
-rw-r--r--kernel/time/tick-broadcast.c6
-rw-r--r--kernel/time/tick-common.c1
-rw-r--r--kernel/time/tick-internal.h5
-rw-r--r--kernel/time/tick-sched.c42
-rw-r--r--kernel/time/timekeeping.c53
-rw-r--r--kernel/trace/ring_buffer.c2
-rw-r--r--kernel/trace/trace_sched_wakeup.c65
-rw-r--r--kernel/trace/trace_selftest.c33
-rw-r--r--lib/Kconfig.debug9
-rw-r--r--lib/kobject.c95
-rw-r--r--lib/percpu_counter.c4
-rw-r--r--mm/fremap.c8
-rw-r--r--mm/huge_memory.c9
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/memory-failure.c10
-rw-r--r--mm/mlock.c44
-rw-r--r--mm/util.c5
-rw-r--r--net/8021q/vlan_dev.c19
-rw-r--r--net/batman-adv/bat_iv_ogm.c36
-rw-r--r--net/batman-adv/distributed-arp-table.c6
-rw-r--r--net/batman-adv/fragmentation.c8
-rw-r--r--net/batman-adv/icmp_socket.c6
-rw-r--r--net/batman-adv/main.c18
-rw-r--r--net/batman-adv/network-coding.c22
-rw-r--r--net/batman-adv/packet.h124
-rw-r--r--net/batman-adv/routing.c30
-rw-r--r--net/batman-adv/send.c10
-rw-r--r--net/batman-adv/soft-interface.c18
-rw-r--r--net/batman-adv/translation-table.c6
-rw-r--r--net/bluetooth/hci_sock.c26
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/core/dev.c31
-rw-r--r--net/core/filter.c30
-rw-r--r--net/core/flow_dissector.c10
-rw-r--r--net/core/neighbour.c2
-rw-r--r--net/core/netpoll.c13
-rw-r--r--net/dccp/probe.c19
-rw-r--r--net/ieee802154/6lowpan.c2
-rw-r--r--net/ieee802154/nl-phy.c6
-rw-r--r--net/ipv4/gre_offload.c11
-rw-r--r--net/ipv4/inet_diag.c21
-rw-r--r--net/ipv4/ip_gre.c1
-rw-r--r--net/ipv4/ip_output.c5
-rw-r--r--net/ipv4/ipmr.c7
-rw-r--r--net/ipv4/tcp.c4
-rw-r--r--net/ipv4/tcp_metrics.c51
-rw-r--r--net/ipv4/udp.c6
-rw-r--r--net/ipv4/udp_offload.c37
-rw-r--r--net/ipv6/addrconf.c61
-rw-r--r--net/ipv6/ip6_output.c36
-rw-r--r--net/ipv6/ip6_tunnel.c21
-rw-r--r--net/ipv6/ip6_vti.c30
-rw-r--r--net/ipv6/ip6mr.c7
-rw-r--r--net/ipv6/route.c4
-rw-r--r--net/ipv6/sit.c8
-rw-r--r--net/llc/af_llc.c5
-rw-r--r--net/mac80211/iface.c6
-rw-r--r--net/mac80211/tx.c23
-rw-r--r--net/netfilter/ipvs/ip_vs_nfct.c6
-rw-r--r--net/netfilter/nf_conntrack_seqadj.c5
-rw-r--r--net/netfilter/nf_conntrack_timestamp.c1
-rw-r--r--net/netfilter/nf_nat_irc.c32
-rw-r--r--net/netfilter/nf_tables_api.c26
-rw-r--r--net/netfilter/nfnetlink_log.c1
-rw-r--r--net/netfilter/nft_exthdr.c2
-rw-r--r--net/nfc/core.c2
-rw-r--r--net/rds/ib.c3
-rw-r--r--net/rds/ib_recv.c7
-rw-r--r--net/rose/af_rose.c16
-rw-r--r--net/sched/act_csum.c10
-rw-r--r--net/sched/act_gact.c7
-rw-r--r--net/sched/act_ipt.c8
-rw-r--r--net/sched/act_nat.c10
-rw-r--r--net/sched/act_pedit.c8
-rw-r--r--net/sched/act_police.c4
-rw-r--r--net/sched/act_simple.c9
-rw-r--r--net/sched/act_skbedit.c7
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sctp/outqueue.c32
-rw-r--r--net/tipc/link.c1
-rw-r--r--net/tipc/port.c45
-rw-r--r--net/tipc/port.h6
-rw-r--r--net/tipc/socket.c46
-rw-r--r--net/wireless/radiotap.c4
-rw-r--r--net/wireless/sme.c22
-rw-r--r--samples/kobject/kset-example.c1
-rw-r--r--scripts/gcc-goto.sh2
-rw-r--r--scripts/kconfig/streamline_config.pl7
-rw-r--r--scripts/link-vmlinux.sh4
-rw-r--r--security/selinux/hooks.c28
-rw-r--r--security/selinux/include/objsec.h5
-rw-r--r--tools/Makefile12
-rw-r--r--tools/hv/hv_kvp_daemon.c1
-rw-r--r--tools/hv/hv_vss_daemon.c1
-rw-r--r--tools/include/asm/bug.h (renamed from tools/perf/util/include/asm/bug.h)9
-rw-r--r--tools/include/linux/compiler.h (renamed from tools/perf/util/include/linux/compiler.h)12
-rw-r--r--tools/lib/api/Makefile (renamed from tools/lib/lk/Makefile)18
-rw-r--r--tools/lib/api/fs/debugfs.c (renamed from tools/lib/lk/debugfs.c)0
-rw-r--r--tools/lib/api/fs/debugfs.h (renamed from tools/lib/lk/debugfs.h)6
-rw-r--r--tools/lib/lockdep/Makefile251
-rw-r--r--tools/lib/lockdep/common.c33
-rw-r--r--tools/lib/lockdep/include/liblockdep/common.h50
-rw-r--r--tools/lib/lockdep/include/liblockdep/mutex.h70
-rw-r--r--tools/lib/lockdep/include/liblockdep/rwlock.h86
-rwxr-xr-xtools/lib/lockdep/lockdep3
-rw-r--r--tools/lib/lockdep/lockdep.c2
-rw-r--r--tools/lib/lockdep/lockdep_internals.h1
-rw-r--r--tools/lib/lockdep/lockdep_states.h1
-rw-r--r--tools/lib/lockdep/preload.c447
-rw-r--r--tools/lib/lockdep/rbtree.c1
-rw-r--r--tools/lib/lockdep/run_tests.sh27
-rw-r--r--tools/lib/lockdep/tests/AA.c13
-rw-r--r--tools/lib/lockdep/tests/ABBA.c13
-rw-r--r--tools/lib/lockdep/tests/ABBCCA.c15
-rw-r--r--tools/lib/lockdep/tests/ABBCCDDA.c17
-rw-r--r--tools/lib/lockdep/tests/ABCABC.c15
-rw-r--r--tools/lib/lockdep/tests/ABCDBCDA.c17
-rw-r--r--tools/lib/lockdep/tests/ABCDBDDA.c17
-rw-r--r--tools/lib/lockdep/tests/WW.c13
-rw-r--r--tools/lib/lockdep/tests/common.h12
-rw-r--r--tools/lib/lockdep/tests/unlock_balance.c12
-rw-r--r--tools/lib/lockdep/uinclude/asm/hweight.h3
-rw-r--r--tools/lib/lockdep/uinclude/asm/sections.h3
-rw-r--r--tools/lib/lockdep/uinclude/linux/bitops.h3
-rw-r--r--tools/lib/lockdep/uinclude/linux/compiler.h7
-rw-r--r--tools/lib/lockdep/uinclude/linux/debug_locks.h12
-rw-r--r--tools/lib/lockdep/uinclude/linux/delay.h3
-rw-r--r--tools/lib/lockdep/uinclude/linux/export.h7
-rw-r--r--tools/lib/lockdep/uinclude/linux/ftrace.h3
-rw-r--r--tools/lib/lockdep/uinclude/linux/gfp.h3
-rw-r--r--tools/lib/lockdep/uinclude/linux/hardirq.h11
-rw-r--r--tools/lib/lockdep/uinclude/linux/hash.h1
-rw-r--r--tools/lib/lockdep/uinclude/linux/interrupt.h3
-rw-r--r--tools/lib/lockdep/uinclude/linux/irqflags.h38
-rw-r--r--tools/lib/lockdep/uinclude/linux/kallsyms.h32
-rw-r--r--tools/lib/lockdep/uinclude/linux/kern_levels.h25
-rw-r--r--tools/lib/lockdep/uinclude/linux/kernel.h44
-rw-r--r--tools/lib/lockdep/uinclude/linux/kmemcheck.h8
-rw-r--r--tools/lib/lockdep/uinclude/linux/linkage.h3
-rw-r--r--tools/lib/lockdep/uinclude/linux/list.h1
-rw-r--r--tools/lib/lockdep/uinclude/linux/lockdep.h55
-rw-r--r--tools/lib/lockdep/uinclude/linux/module.h6
-rw-r--r--tools/lib/lockdep/uinclude/linux/mutex.h3
-rw-r--r--tools/lib/lockdep/uinclude/linux/poison.h1
-rw-r--r--tools/lib/lockdep/uinclude/linux/prefetch.h6
-rw-r--r--tools/lib/lockdep/uinclude/linux/proc_fs.h3
-rw-r--r--tools/lib/lockdep/uinclude/linux/rbtree.h1
-rw-r--r--tools/lib/lockdep/uinclude/linux/rbtree_augmented.h2
-rw-r--r--tools/lib/lockdep/uinclude/linux/rcu.h16
-rw-r--r--tools/lib/lockdep/uinclude/linux/seq_file.h3
-rw-r--r--tools/lib/lockdep/uinclude/linux/spinlock.h25
-rw-r--r--tools/lib/lockdep/uinclude/linux/stacktrace.h32
-rw-r--r--tools/lib/lockdep/uinclude/linux/stringify.h7
-rw-r--r--tools/lib/lockdep/uinclude/linux/types.h58
-rw-r--r--tools/lib/lockdep/uinclude/trace/events/lock.h3
-rw-r--r--tools/lib/symbol/kallsyms.c58
-rw-r--r--tools/lib/symbol/kallsyms.h24
-rw-r--r--tools/lib/traceevent/Makefile191
-rw-r--r--tools/lib/traceevent/event-parse.c244
-rw-r--r--tools/lib/traceevent/event-parse.h86
-rw-r--r--tools/lib/traceevent/event-plugin.c215
-rw-r--r--tools/lib/traceevent/event-utils.h4
-rw-r--r--tools/lib/traceevent/parse-filter.c673
-rw-r--r--tools/lib/traceevent/parse-utils.c44
-rw-r--r--tools/lib/traceevent/plugin_cfg80211.c30
-rw-r--r--tools/lib/traceevent/plugin_function.c163
-rw-r--r--tools/lib/traceevent/plugin_hrtimer.c88
-rw-r--r--tools/lib/traceevent/plugin_jbd2.c77
-rw-r--r--tools/lib/traceevent/plugin_kmem.c94
-rw-r--r--tools/lib/traceevent/plugin_kvm.c465
-rw-r--r--tools/lib/traceevent/plugin_mac80211.c102
-rw-r--r--tools/lib/traceevent/plugin_sched_switch.c160
-rw-r--r--tools/lib/traceevent/plugin_scsi.c429
-rw-r--r--tools/lib/traceevent/plugin_xen.c136
-rw-r--r--tools/lib/traceevent/trace-seq.c67
-rw-r--r--tools/perf/Documentation/perf-archive.txt6
-rw-r--r--tools/perf/Documentation/perf-kvm.txt34
-rw-r--r--tools/perf/Documentation/perf-record.txt20
-rw-r--r--tools/perf/Documentation/perf-report.txt9
-rw-r--r--tools/perf/Documentation/perf-script.txt14
-rw-r--r--tools/perf/Documentation/perf-stat.txt2
-rw-r--r--tools/perf/Documentation/perf-timechart.txt42
-rw-r--r--tools/perf/Documentation/perf-top.txt5
-rw-r--r--tools/perf/MANIFEST6
-rw-r--r--tools/perf/Makefile13
-rw-r--r--tools/perf/Makefile.perf66
-rw-r--r--tools/perf/arch/common.c3
-rw-r--r--tools/perf/builtin-annotate.c17
-rw-r--r--tools/perf/builtin-diff.c103
-rw-r--r--tools/perf/builtin-evlist.c2
-rw-r--r--tools/perf/builtin-inject.c67
-rw-r--r--tools/perf/builtin-kvm.c25
-rw-r--r--tools/perf/builtin-mem.c5
-rw-r--r--tools/perf/builtin-probe.c67
-rw-r--r--tools/perf/builtin-record.c201
-rw-r--r--tools/perf/builtin-report.c433
-rw-r--r--tools/perf/builtin-sched.c2
-rw-r--r--tools/perf/builtin-script.c266
-rw-r--r--tools/perf/builtin-stat.c186
-rw-r--r--tools/perf/builtin-timechart.c754
-rw-r--r--tools/perf/builtin-top.c88
-rw-r--r--tools/perf/builtin-trace.c98
-rw-r--r--tools/perf/config/Makefile136
-rw-r--r--tools/perf/config/Makefile.arch22
-rw-r--r--tools/perf/config/feature-checks/.gitignore2
-rw-r--r--tools/perf/config/feature-checks/Makefile129
-rw-r--r--tools/perf/config/feature-checks/test-all.c5
-rw-r--r--tools/perf/config/feature-checks/test-stackprotector.c6
-rw-r--r--tools/perf/config/feature-checks/test-volatile-register-var.c6
-rw-r--r--tools/perf/config/utilities.mak7
-rw-r--r--tools/perf/perf-completion.sh (renamed from tools/perf/bash_completion)104
-rw-r--r--tools/perf/perf.c2
-rw-r--r--tools/perf/perf.h6
-rw-r--r--tools/perf/tests/attr/test-record-no-inherit2
-rw-r--r--tools/perf/tests/code-reading.c9
-rw-r--r--tools/perf/tests/evsel-roundtrip-name.c2
-rw-r--r--tools/perf/tests/hists_link.c4
-rw-r--r--tools/perf/tests/keep-tracking.c9
-rw-r--r--tools/perf/tests/make58
-rw-r--r--tools/perf/tests/mmap-basic.c25
-rw-r--r--tools/perf/tests/open-syscall-tp-fields.c26
-rw-r--r--tools/perf/tests/parse-events.c12
-rw-r--r--tools/perf/tests/perf-record.c29
-rwxr-xr-xtools/perf/tests/perf-targz-src-pkg21
-rw-r--r--tools/perf/tests/perf-time-to-tsc.c8
-rw-r--r--tools/perf/tests/sw-clock.c18
-rw-r--r--tools/perf/tests/task-exit.c33
-rw-r--r--tools/perf/ui/browser.c8
-rw-r--r--tools/perf/ui/browser.h2
-rw-r--r--tools/perf/ui/browsers/header.c127
-rw-r--r--tools/perf/ui/browsers/hists.c68
-rw-r--r--tools/perf/ui/browsers/scripts.c3
-rw-r--r--tools/perf/ui/gtk/hists.c2
-rw-r--r--tools/perf/ui/gtk/util.c3
-rw-r--r--tools/perf/ui/stdio/hist.c2
-rw-r--r--tools/perf/ui/tui/util.c19
-rw-r--r--tools/perf/util/alias.c6
-rw-r--r--tools/perf/util/annotate.c75
-rw-r--r--tools/perf/util/annotate.h9
-rw-r--r--tools/perf/util/build-id.c2
-rw-r--r--tools/perf/util/build-id.h2
-rw-r--r--tools/perf/util/callchain.c46
-rw-r--r--tools/perf/util/callchain.h8
-rw-r--r--tools/perf/util/cgroup.c6
-rw-r--r--tools/perf/util/color.c15
-rw-r--r--tools/perf/util/color.h1
-rw-r--r--tools/perf/util/comm.c21
-rw-r--r--tools/perf/util/comm.h2
-rw-r--r--tools/perf/util/data.c6
-rw-r--r--tools/perf/util/data.h14
-rw-r--r--tools/perf/util/debug.c31
-rw-r--r--tools/perf/util/debug.h2
-rw-r--r--tools/perf/util/dso.c134
-rw-r--r--tools/perf/util/dso.h31
-rw-r--r--tools/perf/util/event.c61
-rw-r--r--tools/perf/util/event.h7
-rw-r--r--tools/perf/util/evlist.c123
-rw-r--r--tools/perf/util/evlist.h81
-rw-r--r--tools/perf/util/evsel.c75
-rw-r--r--tools/perf/util/evsel.h7
-rw-r--r--tools/perf/util/header.c69
-rw-r--r--tools/perf/util/header.h10
-rw-r--r--tools/perf/util/help.c7
-rw-r--r--tools/perf/util/hist.c49
-rw-r--r--tools/perf/util/hist.h3
-rw-r--r--tools/perf/util/machine.c36
-rw-r--r--tools/perf/util/map.c17
-rw-r--r--tools/perf/util/map.h2
-rw-r--r--tools/perf/util/parse-events.c43
-rw-r--r--tools/perf/util/parse-options.c21
-rw-r--r--tools/perf/util/parse-options.h8
-rw-r--r--tools/perf/util/pmu.c142
-rw-r--r--tools/perf/util/pmu.h3
-rw-r--r--tools/perf/util/probe-event.c258
-rw-r--r--tools/perf/util/probe-event.h7
-rw-r--r--tools/perf/util/probe-finder.c33
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/python.c3
-rw-r--r--tools/perf/util/record.c52
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c22
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c28
-rw-r--r--tools/perf/util/session.c110
-rw-r--r--tools/perf/util/session.h10
-rw-r--r--tools/perf/util/setup.py4
-rw-r--r--tools/perf/util/sort.c22
-rw-r--r--tools/perf/util/srcline.c72
-rw-r--r--tools/perf/util/strbuf.c2
-rw-r--r--tools/perf/util/strfilter.c2
-rw-r--r--tools/perf/util/string.c2
-rw-r--r--tools/perf/util/strlist.c3
-rw-r--r--tools/perf/util/svghelper.c235
-rw-r--r--tools/perf/util/svghelper.h17
-rw-r--r--tools/perf/util/symbol-elf.c8
-rw-r--r--tools/perf/util/symbol-minimal.c4
-rw-r--r--tools/perf/util/symbol.c165
-rw-r--r--tools/perf/util/symbol.h16
-rw-r--r--tools/perf/util/target.c11
-rw-r--r--tools/perf/util/target.h17
-rw-r--r--tools/perf/util/thread.c7
-rw-r--r--tools/perf/util/thread.h12
-rw-r--r--tools/perf/util/thread_map.c20
-rw-r--r--tools/perf/util/top.c2
-rw-r--r--tools/perf/util/top.h2
-rw-r--r--tools/perf/util/trace-event-info.c12
-rw-r--r--tools/perf/util/trace-event-parse.c13
-rw-r--r--tools/perf/util/trace-event-read.c20
-rw-r--r--tools/perf/util/trace-event-scripting.c3
-rw-r--r--tools/perf/util/trace-event.c82
-rw-r--r--tools/perf/util/trace-event.h16
-rw-r--r--tools/perf/util/unwind.c28
-rw-r--r--tools/perf/util/util.c136
-rw-r--r--tools/perf/util/util.h26
-rw-r--r--tools/perf/util/values.c14
-rw-r--r--tools/perf/util/vdso.c2
-rw-r--r--tools/scripts/Makefile.include4
-rwxr-xr-xtools/testing/ktest/ktest.pl158
-rw-r--r--tools/testing/ktest/sample.conf21
-rw-r--r--tools/testing/selftests/rcutorture/.gitignore6
-rw-r--r--tools/testing/selftests/rcutorture/bin/config2frag.sh25
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configNR_CPUS.sh45
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configcheck.sh54
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh74
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/cpus2use.sh41
-rw-r--r--tools/testing/selftests/rcutorture/bin/functions.sh198
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh71
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh44
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-rcu.sh192
-rw-r--r--tools/testing/selftests/rcutorture/bin/kvm.sh210
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-build.sh57
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh41
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-rcutorture.sh106
-rw-r--r--tools/testing/selftests/rcutorture/configs/CFLIST13
-rw-r--r--tools/testing/selftests/rcutorture/configs/SRCU-N8
-rw-r--r--tools/testing/selftests/rcutorture/configs/SRCU-N.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/SRCU-P8
-rw-r--r--tools/testing/selftests/rcutorture/configs/SRCU-P.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/TINY0113
-rw-r--r--tools/testing/selftests/rcutorture/configs/TINY0213
-rw-r--r--tools/testing/selftests/rcutorture/configs/TREE0123
-rw-r--r--tools/testing/selftests/rcutorture/configs/TREE01.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/TREE0226
-rw-r--r--tools/testing/selftests/rcutorture/configs/TREE0323
-rw-r--r--tools/testing/selftests/rcutorture/configs/TREE0425
-rw-r--r--tools/testing/selftests/rcutorture/configs/TREE04.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/TREE0525
-rw-r--r--tools/testing/selftests/rcutorture/configs/TREE05.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/TREE0626
-rw-r--r--tools/testing/selftests/rcutorture/configs/TREE0724
-rw-r--r--tools/testing/selftests/rcutorture/configs/TREE0826
-rw-r--r--tools/testing/selftests/rcutorture/configs/TREE08-T26
-rw-r--r--tools/testing/selftests/rcutorture/configs/TREE0921
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/CFLIST14
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/N1-S-T-NH-SD-SMP-HP18
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/N2-2-t-nh-sd-SMP-hp20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/N3-3-T-nh-SD-SMP-hp22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/N4-A-t-NH-sd-SMP-HP18
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/N5-U-T-NH-sd-SMP-hp22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/NT1-nh23
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/NT3-NH20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/P1-S-T-NH-SD-SMP-HP19
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/P2-2-t-nh-sd-SMP-hp20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/P3-3-T-nh-SD-SMP-hp20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/P4-A-t-NH-sd-SMP-HP22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/P5-U-T-NH-sd-SMP-hp28
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/PT1-nh23
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/PT2-NH22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v0.0/ver_functions.sh35
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/CFLIST17
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/N1-S-T-NH-SD-SMP-HP19
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/N2-2-t-nh-sd-SMP-hp20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/N3-3-T-nh-SD-SMP-hp22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/N4-A-t-NH-sd-SMP-HP18
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/N5-U-T-NH-sd-SMP-hp22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/N6---t-nh-SD-smp-hp19
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/N7-4-T-NH-SD-SMP-HP26
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/N8-2-T-NH-SD-SMP-HP22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/NT1-nh23
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/NT3-NH20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/P1-S-T-NH-SD-SMP-HP20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/P2-2-t-nh-sd-SMP-hp20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/P3-3-T-nh-SD-SMP-hp20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/P4-A-t-NH-sd-SMP-HP22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/P5-U-T-NH-sd-SMP-hp28
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/P6---t-nh-SD-smp-hp18
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP30
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP-all30
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP-none30
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-hp30
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/PT1-nh23
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.12/PT2-NH22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/CFLIST14
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/N1-S-T-NH-SD-SMP-HP19
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/N2-2-t-nh-sd-SMP-hp20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/N3-3-T-nh-SD-SMP-hp22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/N4-A-t-NH-sd-SMP-HP18
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/N5-U-T-NH-sd-SMP-hp22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/NT1-nh23
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/NT3-NH20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/P1-S-T-NH-SD-SMP-HP20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/P2-2-t-nh-sd-SMP-hp20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/P3-3-T-nh-SD-SMP-hp20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/P4-A-t-NH-sd-SMP-HP22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/P5-U-T-NH-sd-SMP-hp28
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/PT1-nh23
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/PT2-NH22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.3/ver_functions.sh41
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/CFLIST14
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/N1-S-T-NH-SD-SMP-HP19
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/N2-2-t-nh-sd-SMP-hp20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/N3-3-T-nh-SD-SMP-hp22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/N4-A-t-NH-sd-SMP-HP18
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/N5-U-T-NH-sd-SMP-hp22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/NT1-nh23
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/NT3-NH20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/P1-S-T-NH-SD-SMP-HP20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/P2-2-t-nh-sd-SMP-hp20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/P3-3-T-nh-SD-SMP-hp20
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/P4-A-t-NH-sd-SMP-HP22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/P5-U-T-NH-sd-SMP-hp28
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/PT1-nh23
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/PT2-NH22
-rw-r--r--tools/testing/selftests/rcutorture/configs/v3.5/ver_functions.sh46
-rw-r--r--tools/testing/selftests/rcutorture/configs/ver_functions.sh46
-rw-r--r--tools/testing/selftests/rcutorture/doc/TINY_RCU.txt40
-rw-r--r--tools/testing/selftests/rcutorture/doc/TREE_RCU-Kconfig.txt95
-rw-r--r--tools/testing/selftests/rcutorture/doc/initrd.txt90
-rw-r--r--tools/testing/selftests/rcutorture/doc/rcu-test-image.txt42
-rw-r--r--tools/vm/Makefile14
-rw-r--r--tools/vm/page-types.c2
2345 files changed, 99757 insertions, 48880 deletions
diff --git a/Documentation/ABI/testing/debugfs-driver-genwqe b/Documentation/ABI/testing/debugfs-driver-genwqe
new file mode 100644
index 0000000..1c2f256
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-driver-genwqe
@@ -0,0 +1,91 @@
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/ddcb_info
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: DDCB queue dump used for debugging queueing problems.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/curr_regs
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Dump of the current error registers.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/curr_dbg_uid0
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Internal chip state of UID0 (unit id 0).
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/curr_dbg_uid1
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Internal chip state of UID1.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/curr_dbg_uid2
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Internal chip state of UID2.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/prev_regs
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Dump of the error registers before the last reset of
+ the card occurred.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/prev_dbg_uid0
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Internal chip state of UID0 before card was reset.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/prev_dbg_uid1
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Internal chip state of UID1 before card was reset.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/prev_dbg_uid2
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Internal chip state of UID2 before card was reset.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/info
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Comprehensive summary of bitstream and software versions,
+ plus the bitstream currently in use and its clocking information.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/err_inject
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Possibility to inject error cases to ensure that the driver's
+ error handling code works well.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/vf<0..14>_jobtimeout_msec
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Default VF timeout is 250ms. Testing might require 1000ms.
+ Writing 0 selects the card's default value.
+
+ The timeout depends on the max number of available cards
+ in the system and the maximum allowed queue size.
+
+ The driver ensures that the settings are done just before
+ the VFs get enabled. Changing the timeouts in flight is not
+ possible.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/jobtimer
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Dump job timeout register values for PF and VFs.
+ Only available for PF.
+
+What: /sys/kernel/debug/genwqe/genwqe<n>_card/queue_working_time
+Date: Dec 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Dump queue working time register values for PF and VFs.
+ Only available for PF.
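+
+ Example (a minimal sketch; assumes card 0, debugfs mounted at
+ /sys/kernel/debug, and access from the PF):
+
+   cat /sys/kernel/debug/genwqe/genwqe0_card/info
+   cat /sys/kernel/debug/genwqe/genwqe0_card/ddcb_info
+   echo 1000 > /sys/kernel/debug/genwqe/genwqe0_card/vf0_jobtimeout_msec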
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index b20e829..6e02c50 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -197,6 +197,19 @@ Description:
Raw pressure measurement from channel Y. Units after
application of scale and offset are kilopascal.
+What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_raw
+KernelVersion: 3.14
+Contact: linux-iio@vger.kernel.org
+Description:
+ Raw humidity measurement of air. Units after application of
+ scale and offset are milli percent.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_input
+KernelVersion: 3.14
+Contact: linux-iio@vger.kernel.org
+Description:
+ Scaled humidity measurement in milli percent.
+
What: /sys/bus/iio/devices/iio:deviceX/in_accel_offset
What: /sys/bus/iio/devices/iio:deviceX/in_accel_x_offset
What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_offset
diff --git a/Documentation/ABI/testing/sysfs-driver-genwqe b/Documentation/ABI/testing/sysfs-driver-genwqe
new file mode 100644
index 0000000..1870737
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-genwqe
@@ -0,0 +1,62 @@
+What: /sys/class/genwqe/genwqe<n>_card/version
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Unique bitstream identification e.g.
+ '0000000330336283.00000000475a4950'.
+
+What: /sys/class/genwqe/genwqe<n>_card/appid
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Identifies the currently active card application e.g. 'GZIP'
+ for compression/decompression.
+
+What: /sys/class/genwqe/genwqe<n>_card/type
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Type of the card e.g. 'GenWQE5-A7'.
+
+What: /sys/class/genwqe/genwqe<n>_card/curr_bitstream
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Currently active bitstream. 1 is default, 0 is backup.
+
+What: /sys/class/genwqe/genwqe<n>_card/next_bitstream
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Interface to set the next bitstream to be used.
+
+What: /sys/class/genwqe/genwqe<n>_card/tempsens
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Interface to read the card's temperature sense register.
+
+What: /sys/class/genwqe/genwqe<n>_card/freerunning_timer
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Interface to read the card's free running timer.
+ Used for performance and utilization measurements.
+
+What: /sys/class/genwqe/genwqe<n>_card/queue_working_time
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Interface to read queue working time.
+ Used for performance and utilization measurements.
+
+What: /sys/class/genwqe/genwqe<n>_card/state
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: State of the card: "unused", "used", "error".
+
+What: /sys/class/genwqe/genwqe<n>_card/base_clock
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Base clock frequency of the card.
+
+What: /sys/class/genwqe/genwqe<n>_card/device/sriov_numvfs
+Date: Oct 2013
+Contact: haver@linux.vnet.ibm.com
+Description: Enable VFs (1..15):
+ sudo sh -c 'echo 15 > \
+ /sys/bus/pci/devices/0000\:1b\:00.0/sriov_numvfs'
+ Disable VFs:
+ Write a 0 into the same sysfs entry.
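+
+ Example (a sketch only; assumes the first card shows up as
+ genwqe0_card):
+
+   cat /sys/class/genwqe/genwqe0_card/version
+   cat /sys/class/genwqe/genwqe0_card/appid
+   cat /sys/class/genwqe/genwqe0_card/state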
diff --git a/Documentation/ABI/testing/sysfs-firmware-efi b/Documentation/ABI/testing/sysfs-firmware-efi
new file mode 100644
index 0000000..05874da
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-firmware-efi
@@ -0,0 +1,20 @@
+What: /sys/firmware/efi/fw_vendor
+Date: December 2013
+Contact: Dave Young <dyoung@redhat.com>
+Description: It shows the physical address of the firmware vendor field in
+ the EFI system table.
+Users: Kexec
+
+What: /sys/firmware/efi/runtime
+Date: December 2013
+Contact: Dave Young <dyoung@redhat.com>
+Description: It shows the physical address of the runtime services table
+ entry in the EFI system table.
+Users: Kexec
+
+What: /sys/firmware/efi/config_table
+Date: December 2013
+Contact: Dave Young <dyoung@redhat.com>
+Description: It shows the physical address of the config table entry in the
+ EFI system table.
+Users: Kexec
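+
+ Example (illustrative only; the values are platform-specific
+ physical addresses):
+
+   cat /sys/firmware/efi/fw_vendor
+   cat /sys/firmware/efi/runtime
+   cat /sys/firmware/efi/config_table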
diff --git a/Documentation/ABI/testing/sysfs-firmware-efi-runtime-map b/Documentation/ABI/testing/sysfs-firmware-efi-runtime-map
new file mode 100644
index 0000000..c61b9b3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-firmware-efi-runtime-map
@@ -0,0 +1,34 @@
+What: /sys/firmware/efi/runtime-map/
+Date: December 2013
+Contact: Dave Young <dyoung@redhat.com>
+Description: Switching efi runtime services to virtual mode requires
+ that all efi memory ranges which have the runtime attribute
+ bit set be mapped to virtual addresses.
+
+ The efi runtime services can only be switched to virtual
+ mode once without rebooting. The kexec kernel must maintain
+ the same physical to virtual address mappings as the first
+ kernel. The mappings are exported to sysfs so userspace tools
+ can reassemble them and pass them into the kexec kernel.
+
+ /sys/firmware/efi/runtime-map/ is the directory the kernel
+ exports that information in.
+
+ Subdirectories are named with the number of the memory range:
+
+ /sys/firmware/efi/runtime-map/0
+ /sys/firmware/efi/runtime-map/1
+ /sys/firmware/efi/runtime-map/2
+ /sys/firmware/efi/runtime-map/3
+ ...
+
+ Each subdirectory contains five files:
+
+ attribute : The attributes of the memory range.
+ num_pages : The size of the memory range in pages.
+ phys_addr : The physical address of the memory range.
+ type : The type of the memory range.
+ virt_addr : The virtual address of the memory range.
+
+ The values above are all hexadecimal numbers with the '0x' prefix.
+Users: Kexec
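+
+ Example (a sketch of how userspace might walk the map; the
+ entries and their values differ from machine to machine):
+
+   for d in /sys/firmware/efi/runtime-map/*; do
+       echo "$d: type=$(cat $d/type) phys=$(cat $d/phys_addr)" \
+            "virt=$(cat $d/virt_addr) pages=$(cat $d/num_pages)"
+   done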
diff --git a/Documentation/ABI/testing/sysfs-kernel-boot_params b/Documentation/ABI/testing/sysfs-kernel-boot_params
new file mode 100644
index 0000000..eca38ce
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-boot_params
@@ -0,0 +1,38 @@
+What: /sys/kernel/boot_params
+Date: December 2013
+Contact: Dave Young <dyoung@redhat.com>
+Description: The /sys/kernel/boot_params directory contains two
+ files, "data" and "version", and one subdirectory, "setup_data".
+ It is used to export the kernel boot parameters of an x86
+ platform to userspace for kexec and debugging purposes.
+
+ If there's no setup_data in boot_params the subdirectory will
+ not be created.
+
+ The "data" file is the binary representation of struct
+ boot_params.
+
+ The "version" file is the string representation of the boot
+ protocol version.
+
+ The "setup_data" subdirectory contains the setup_data data
+ structure from boot_params. setup_data is maintained in the
+ kernel as a linked list. The "setup_data" subdirectory contains
+ one subdirectory per list node, named with the index of the
+ node. Each list node subdirectory contains two files, "type"
+ and "data". The "type" file is the string representation of the
+ setup_data type. The "data" file is the binary representation
+ of the setup_data payload.
+
+ The whole boot_params directory structure is like below:
+ /sys/kernel/boot_params
+ |__ data
+ |__ setup_data
+ | |__ 0
+ | | |__ data
+ | | |__ type
+ | |__ 1
+ | |__ data
+ | |__ type
+ |__ version
+
+Users: Kexec
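+
+ Example (illustrative only; the setup_data/0 node is present only
+ when the firmware actually passed setup_data):
+
+   cat /sys/kernel/boot_params/version
+   hexdump -C /sys/kernel/boot_params/data | head
+   cat /sys/kernel/boot_params/setup_data/0/type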
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 27faae3..57cf5ef 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -112,7 +112,7 @@ required reading:
Other excellent descriptions of how to create patches properly are:
"The Perfect Patch"
- http://kerneltrap.org/node/3737
+ http://www.ozlabs.org/~akpm/stuff/tpp.txt
"Linux kernel patch submission format"
http://linux.yyz.us/patch-format.html
@@ -579,7 +579,7 @@ all time. It should describe the patch completely, containing:
For more details on what this should all look like, please see the
ChangeLog section of the document:
"The Perfect Patch"
- http://userweb.kernel.org/~akpm/stuff/tpp.txt
+ http://www.ozlabs.org/~akpm/stuff/tpp.txt
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index f3778f8..910870b 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -396,14 +396,14 @@ o Each element of the form "3/3 ..>. 0:7 ^0" represents one rcu_node
The output of "cat rcu/rcu_sched/rcu_pending" looks as follows:
- 0!np=26111 qsp=29 rpq=5386 cbr=1 cng=570 gpc=3674 gps=577 nn=15903
- 1!np=28913 qsp=35 rpq=6097 cbr=1 cng=448 gpc=3700 gps=554 nn=18113
- 2!np=32740 qsp=37 rpq=6202 cbr=0 cng=476 gpc=4627 gps=546 nn=20889
- 3 np=23679 qsp=22 rpq=5044 cbr=1 cng=415 gpc=3403 gps=347 nn=14469
- 4!np=30714 qsp=4 rpq=5574 cbr=0 cng=528 gpc=3931 gps=639 nn=20042
- 5 np=28910 qsp=2 rpq=5246 cbr=0 cng=428 gpc=4105 gps=709 nn=18422
- 6!np=38648 qsp=5 rpq=7076 cbr=0 cng=840 gpc=4072 gps=961 nn=25699
- 7 np=37275 qsp=2 rpq=6873 cbr=0 cng=868 gpc=3416 gps=971 nn=25147
+ 0!np=26111 qsp=29 rpq=5386 cbr=1 cng=570 gpc=3674 gps=577 nn=15903 ndw=0
+ 1!np=28913 qsp=35 rpq=6097 cbr=1 cng=448 gpc=3700 gps=554 nn=18113 ndw=0
+ 2!np=32740 qsp=37 rpq=6202 cbr=0 cng=476 gpc=4627 gps=546 nn=20889 ndw=0
+ 3 np=23679 qsp=22 rpq=5044 cbr=1 cng=415 gpc=3403 gps=347 nn=14469 ndw=0
+ 4!np=30714 qsp=4 rpq=5574 cbr=0 cng=528 gpc=3931 gps=639 nn=20042 ndw=0
+ 5 np=28910 qsp=2 rpq=5246 cbr=0 cng=428 gpc=4105 gps=709 nn=18422 ndw=0
+ 6!np=38648 qsp=5 rpq=7076 cbr=0 cng=840 gpc=4072 gps=961 nn=25699 ndw=0
+ 7 np=37275 qsp=2 rpq=6873 cbr=0 cng=868 gpc=3416 gps=971 nn=25147 ndw=0
The fields are as follows:
@@ -432,6 +432,10 @@ o "gpc" is the number of times that an old grace period had
o "gps" is the number of times that a new grace period had started,
but this CPU was not yet aware of it.
+o "ndw" is the number of times that a wakeup of an rcuo
+ callback-offload kthread had to be deferred in order to avoid
+ deadlock.
+
o "nn" is the number of times that this CPU needed nothing.
@@ -443,7 +447,7 @@ The output of "cat rcu/rcuboost" looks as follows:
balk: nt=0 egt=6541 bt=0 nb=0 ny=126 nos=0
This information is output only for rcu_preempt. Each two-line entry
-corresponds to a leaf rcu_node strcuture. The fields are as follows:
+corresponds to a leaf rcu_node structure. The fields are as follows:
o "n:m" is the CPU-number range for the corresponding two-line
entry. In the sample output above, the first entry covers
diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/acpi/apei/einj.txt
index a58b63d..f51861b 100644
--- a/Documentation/acpi/apei/einj.txt
+++ b/Documentation/acpi/apei/einj.txt
@@ -45,11 +45,22 @@ directory apei/einj. The following files are provided.
injection. Before this, please specify all necessary error
parameters.
+- flags
+ Present for kernel versions 3.13 and above. Used to specify which
+ of param{1..4} are valid and should be used by the BIOS during injection.
+ Value is a bitmask as specified in ACPI5.0 spec for the
+ SET_ERROR_TYPE_WITH_ADDRESS data structure:
+ Bit 0 - Processor APIC field valid (see param3 below)
+ Bit 1 - Memory address and mask valid (param1 and param2)
+ Bit 2 - PCIe (seg,bus,dev,fn) valid (param4 below)
+ If set to zero, legacy behaviour is used where the type of injection
+ specifies just one bit set, and param1 is multiplexed.
+
- param1
This file is used to set the first error parameter value. Effect of
parameter depends on error_type specified. For example, if error
type is memory related type, the param1 should be a valid physical
- memory address.
+ memory address. [Unless "flags" is set - see above.]
- param2
This file is used to set the second error parameter value. Effect of
@@ -58,6 +69,12 @@ directory apei/einj. The following files are provided.
address mask. Linux requires page or narrower granularity, say,
0xfffffffffffff000.
+- param3
+ Used when the 0x1 bit is set in "flags" to specify the APIC id.
+
+- param4
+ Used when the 0x4 bit is set in "flags" to specify the target PCIe device.
+
- notrigger
The EINJ mechanism is a two step process. First inject the error, then
perform some actions to trigger it. Setting "notrigger" to 1 skips the
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
new file mode 100644
index 0000000..b2830b4
--- /dev/null
+++ b/Documentation/block/null_blk.txt
@@ -0,0 +1,72 @@
+Null block device driver
+================================================================================
+
+I. Overview
+
+The null block device (/dev/nullb*) is used for benchmarking the various
+block-layer implementations. It emulates a block device of X gigabytes in size.
+The following instances are possible:
+
+ Single-queue block-layer
+ - Request-based.
+ - Single submission queue per device.
+ - Implements IO scheduling algorithms (CFQ, Deadline, noop).
+ Multi-queue block-layer
+ - Request-based.
+ - Configurable submission queues per device.
+ No block-layer (Known as bio-based)
+ - Bio-based. IO requests are submitted directly to the device driver.
+ - Directly accepts bio data structure and returns them.
+
+All of them have a completion queue for each core in the system.
+
+II. Module parameters applicable for all instances:
+
+queue_mode=[0-2]: Default: 2-Multi-queue
+ Selects which block-layer the module should instantiate with.
+
+ 0: Bio-based.
+ 1: Single-queue.
+ 2: Multi-queue.
+
+home_node=[0--nr_nodes]: Default: NUMA_NO_NODE
+ Selects what CPU node the data structures are allocated from.
+
+gb=[Size in GB]: Default: 250GB
+ The size of the device reported to the system.
+
+bs=[Block size (in bytes)]: Default: 512 bytes
+ The block size reported to the system.
+
+nr_devices=[Number of devices]: Default: 2
+ Number of block devices instantiated. They are instantiated as /dev/nullb0,
+ etc.
+
+irq_mode=[0-2]: Default: 1-Soft-irq
+ The completion mode used for completing IOs to the block-layer.
+
+ 0: None.
+ 1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the overhead
+ seen when IOs are issued from a CPU node other than the home node the
+ device is connected to.
+ 2: Timer: Waits a specific period (completion_nsec) for each IO before
+ completion.
+
+completion_nsec=[ns]: Default: 10,000ns
+ Used in combination with irq_mode=2 (timer). The time each completion event must wait.
+
+submit_queues=[0..nr_cpus]:
+ The number of submission queues attached to the device driver. If unset, it
+ defaults to 1 for single-queue and bio-based instances. For multi-queue,
+ it is ignored when the use_per_node_hctx module parameter is 1.
+
+hw_queue_depth=[0..qdepth]: Default: 64
+ The hardware queue depth of the device.
+
+III: Multi-queue specific parameters
+
+use_per_node_hctx=[0/1]: Default: 0
+ 0: The number of submit queues is set to the value of the submit_queues
+ parameter.
+ 1: The multi-queue block layer is instantiated with a hardware dispatch
+ queue for each CPU node in the system.
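+
+IV: Example
+
+A minimal sketch of loading the driver (the parameter values below are
+only an illustration, not recommendations):
+
+  modprobe null_blk queue_mode=2 nr_devices=2 gb=4 bs=4096 \
+           submit_queues=4 hw_queue_depth=64
+  ls /dev/nullb*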
diff --git a/Documentation/circular-buffers.txt b/Documentation/circular-buffers.txt
index 8117e5b..88951b1 100644
--- a/Documentation/circular-buffers.txt
+++ b/Documentation/circular-buffers.txt
@@ -160,6 +160,7 @@ The producer will look something like this:
spin_lock(&producer_lock);
unsigned long head = buffer->head;
+ /* The spin_unlock() and next spin_lock() provide needed ordering. */
unsigned long tail = ACCESS_ONCE(buffer->tail);
if (CIRC_SPACE(head, tail, buffer->size) >= 1) {
@@ -168,9 +169,8 @@ The producer will look something like this:
produce_item(item);
- smp_wmb(); /* commit the item before incrementing the head */
-
- buffer->head = (head + 1) & (buffer->size - 1);
+ smp_store_release(&buffer->head,
+ (head + 1) & (buffer->size - 1));
/* wake_up() will make sure that the head is committed before
* waking anyone up */
@@ -183,9 +183,14 @@ This will instruct the CPU that the contents of the new item must be written
before the head index makes it available to the consumer and then instructs the
CPU that the revised head index must be written before the consumer is woken.
-Note that wake_up() doesn't have to be the exact mechanism used, but whatever
-is used must guarantee a (write) memory barrier between the update of the head
-index and the change of state of the consumer, if a change of state occurs.
+Note that wake_up() does not guarantee any sort of barrier unless something
+is actually awakened. We therefore cannot rely on it for ordering. However,
+there is always one element of the array left empty. Therefore, the
+producer must produce two elements before it could possibly corrupt the
+element currently being read by the consumer. Therefore, the unlock-lock
+pair between consecutive invocations of the consumer provides the necessary
+ordering between the read of the index indicating that the consumer has
+vacated a given element and the write by the producer to that same element.
THE CONSUMER
@@ -195,21 +200,20 @@ The consumer will look something like this:
spin_lock(&consumer_lock);
- unsigned long head = ACCESS_ONCE(buffer->head);
+ /* Read index before reading contents at that index. */
+ unsigned long head = smp_load_acquire(&buffer->head);
unsigned long tail = buffer->tail;
if (CIRC_CNT(head, tail, buffer->size) >= 1) {
- /* read index before reading contents at that index */
- smp_read_barrier_depends();
/* extract one item from the buffer */
struct item *item = buffer[tail];
consume_item(item);
- smp_mb(); /* finish reading descriptor before incrementing tail */
-
- buffer->tail = (tail + 1) & (buffer->size - 1);
+ /* Finish reading descriptor before incrementing tail. */
+ smp_store_release(&buffer->tail,
+ (tail + 1) & (buffer->size - 1));
}
spin_unlock(&consumer_lock);
@@ -218,12 +222,17 @@ This will instruct the CPU to make sure the index is up to date before reading
the new item, and then it shall make sure the CPU has finished reading the item
before it writes the new tail pointer, which will erase the item.
-
-Note the use of ACCESS_ONCE() in both algorithms to read the opposition index.
-This prevents the compiler from discarding and reloading its cached value -
-which some compilers will do across smp_read_barrier_depends(). This isn't
-strictly needed if you can be sure that the opposition index will _only_ be
-used the once.
+Note the use of ACCESS_ONCE() and smp_load_acquire() to read the
+opposition index. This prevents the compiler from discarding and
+reloading its cached value - which some compilers will do across
+smp_read_barrier_depends(). This isn't strictly needed if you can
+be sure that the opposition index will _only_ be used the once.
+The smp_load_acquire() additionally forces the CPU to order against
+subsequent memory references. Similarly, smp_store_release() is used
+in both algorithms to write the thread's index. This documents the
+fact that we are writing to something that can be read concurrently,
+prevents the compiler from tearing the store, and enforces ordering
+against previous accesses.
===============
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt
index 1196290..78530e6 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt
@@ -20,6 +20,10 @@ TC/TCLIB Timer required properties:
- interrupts: Should contain all interrupts for the TC block
Note that you can specify several interrupt cells if the TC
block has one interrupt per channel.
+- clock-names: tuple listing input clock names.
+ Required elements: "t0_clk"
+ Optional elements: "t1_clk", "t2_clk"
+- clocks: phandles to input clocks.
Examples:
@@ -28,6 +32,8 @@ One interrupt per TC block:
compatible = "atmel,at91rm9200-tcb";
reg = <0xfff7c000 0x100>;
interrupts = <18 4>;
+ clocks = <&tcb0_clk>;
+ clock-names = "t0_clk";
};
One interrupt per TC channel in a TC block:
@@ -35,6 +41,8 @@ One interrupt per TC channel in a TC block:
compatible = "atmel,at91rm9200-tcb";
reg = <0xfffdc000 0x100>;
interrupts = <26 4 27 4 28 4>;
+ clocks = <&tcb1_clk>;
+ clock-names = "t0_clk";
};
RSTC Reset Controller required properties:
diff --git a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
index 46f5c79..0f2f920 100644
--- a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
@@ -159,6 +159,8 @@ clock which they consume.
mixer 343
hdmi 344
g2d 345
+ mdma0 346
+ smmu_mdma0 347
[Clock Muxes]
diff --git a/Documentation/devicetree/bindings/extcon/extcon-palmas.txt b/Documentation/devicetree/bindings/extcon/extcon-palmas.txt
index 7dab6a8..45414bb 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-palmas.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-palmas.txt
@@ -2,7 +2,11 @@ EXTCON FOR PALMAS/TWL CHIPS
PALMAS USB COMPARATOR
Required Properties:
- - compatible : Should be "ti,palmas-usb" or "ti,twl6035-usb"
+ - compatible: should contain one of:
+ * "ti,palmas-usb-vid".
+ * "ti,twl6035-usb-vid".
+ * "ti,palmas-usb" (DEPRECATED - use "ti,palmas-usb-vid").
+ * "ti,twl6035-usb" (DEPRECATED - use "ti,twl6035-usb-vid").
Optional Properties:
- ti,wakeup : To enable the wakeup comparator in probe
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index b1cb341..c65f71c 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -16,6 +16,7 @@ adt7461 +/-1C TDM Extended Temp Range I.C
at,24c08 i2c serial eeprom (24cxx)
atmel,24c02 i2c serial eeprom (24cxx)
atmel,at97sc3204t i2c trusted platform module (TPM)
+capella,cm32181 CM32181: Ambient Light Sensor
catalyst,24c32 i2c serial eeprom
dallas,ds1307 64 x 8, Serial, I2C Real-Time Clock
dallas,ds1338 I2C RTC with 56-Byte NV RAM
diff --git a/Documentation/devicetree/bindings/iio/humidity/dht11.txt b/Documentation/devicetree/bindings/iio/humidity/dht11.txt
new file mode 100644
index 0000000..ecc24c19
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/humidity/dht11.txt
@@ -0,0 +1,14 @@
+* DHT11 humidity/temperature sensor (and compatibles like DHT22)
+
+Required properties:
+ - compatible: Should be "dht11"
+ - gpios: Should specify the GPIO connected to the sensor's data
+ line, see "gpios property" in
+ Documentation/devicetree/bindings/gpio/gpio.txt.
+
+Example:
+
+humidity_sensor {
+ compatible = "dht11";
+ gpios = <&gpio0 6 0>;
+};
diff --git a/Documentation/devicetree/bindings/iio/light/tsl2563.txt b/Documentation/devicetree/bindings/iio/light/tsl2563.txt
new file mode 100644
index 0000000..f91e809
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/tsl2563.txt
@@ -0,0 +1,19 @@
+* AMS TAOS TSL2563 ambient light sensor
+
+Required properties:
+
+ - compatible : should be "amstaos,tsl2563"
+ - reg : the I2C address of the sensor
+
+Optional properties:
+
+ - amstaos,cover-comp-gain : integer used as multiplier for gain
+ compensation (default = 1)
+
+Example:
+
+tsl2563@29 {
+ compatible = "amstaos,tsl2563";
+ reg = <0x29>;
+ amstaos,cover-comp-gain = <16>;
+};
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/hmc5843.txt b/Documentation/devicetree/bindings/iio/magnetometer/hmc5843.txt
new file mode 100644
index 0000000..90d5f34
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/magnetometer/hmc5843.txt
@@ -0,0 +1,17 @@
+* Honeywell HMC5843 magnetometer sensor
+
+Required properties:
+
+ - compatible : should be "honeywell,hmc5843"
+ - reg : the I2C address of the magnetometer - typically 0x1e
+
+Optional properties:
+
+ - gpios : should be device tree identifier of the magnetometer DRDY pin
+
+Example:
+
+hmc5843@1e {
+ compatible = "honeywell,hmc5843";
+ reg = <0x1e>;
+};
diff --git a/Documentation/devicetree/bindings/misc/atmel-ssc.txt b/Documentation/devicetree/bindings/misc/atmel-ssc.txt
index a45ae08..60960b2 100644
--- a/Documentation/devicetree/bindings/misc/atmel-ssc.txt
+++ b/Documentation/devicetree/bindings/misc/atmel-ssc.txt
@@ -6,6 +6,9 @@ Required properties:
- atmel,at91sam9g45-ssc: support dma transfer
- reg: Should contain SSC registers location and length
- interrupts: Should contain SSC interrupt
+- clock-names: tuple listing input clock names.
+ Required elements: "pclk"
+- clocks: phandles to input clocks.
Required properties for devices compatible with "atmel,at91sam9g45-ssc":
@@ -20,6 +23,8 @@ ssc0: ssc@fffbc000 {
compatible = "atmel,at91rm9200-ssc";
reg = <0xfffbc000 0x4000>;
interrupts = <14 4 5>;
+ clocks = <&ssc0_clk>;
+ clock-names = "pclk";
};
- DMA transfer:
diff --git a/Documentation/devicetree/bindings/misc/bmp085.txt b/Documentation/devicetree/bindings/misc/bmp085.txt
index 91dfda2..d7a6deb 100644
--- a/Documentation/devicetree/bindings/misc/bmp085.txt
+++ b/Documentation/devicetree/bindings/misc/bmp085.txt
@@ -8,6 +8,8 @@ Optional properties:
- temp-measurement-period: temperature measurement period (milliseconds)
- default-oversampling: default oversampling value to be used at startup,
value range is 0-3 with rising sensitivity.
+- interrupt-parent: should be the phandle for the interrupt controller
+- interrupts: interrupt mapping for IRQ
Example:
@@ -17,4 +19,6 @@ pressure@77 {
chip-id = <10>;
temp-measurement-period = <100>;
default-oversampling = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <25 IRQ_TYPE_EDGE_RISING>;
};
diff --git a/Documentation/devicetree/bindings/serial/atmel-usart.txt b/Documentation/devicetree/bindings/serial/atmel-usart.txt
index 2191dcb..9c5d19a 100644
--- a/Documentation/devicetree/bindings/serial/atmel-usart.txt
+++ b/Documentation/devicetree/bindings/serial/atmel-usart.txt
@@ -6,6 +6,9 @@ Required properties:
additional mode or an USART new feature.
- reg: Should contain registers location and length
- interrupts: Should contain interrupt
+- clock-names: tuple listing input clock names.
+ Required elements: "usart"
+- clocks: phandles to input clocks.
Optional properties:
- atmel,use-dma-rx: use of PDC or DMA for receiving data
@@ -26,6 +29,8 @@ Example:
compatible = "atmel,at91sam9260-usart";
reg = <0xfff8c000 0x4000>;
interrupts = <7>;
+ clocks = <&usart0_clk>;
+ clock-names = "usart";
atmel,use-dma-rx;
atmel,use-dma-tx;
};
@@ -35,6 +40,8 @@ Example:
compatible = "atmel,at91sam9260-usart";
reg = <0xf001c000 0x100>;
interrupts = <12 4 5>;
+ clocks = <&usart0_clk>;
+ clock-names = "usart";
atmel,use-dma-rx;
atmel,use-dma-tx;
dmas = <&dma0 2 0x3>,
diff --git a/Documentation/devicetree/bindings/serial/cirrus,clps711x-uart.txt b/Documentation/devicetree/bindings/serial/cirrus,clps711x-uart.txt
new file mode 100644
index 0000000..12f3cf8
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/cirrus,clps711x-uart.txt
@@ -0,0 +1,28 @@
+* Cirrus Logic CLPS711X Universal Asynchronous Receiver/Transmitter (UART)
+
+Required properties:
+- compatible: Should be "cirrus,clps711x-uart".
+- reg: Address and length of the register set for the device.
+- interrupts: Should contain UART TX and RX interrupt.
+- clocks: Should contain UART core clock number.
+- syscon: Phandle to the SYSCON node, which contains the UART control bits.
+
+Optional properties:
+- uart-use-ms: Indicates that the UART has modem signals (DCD, DSR, CTS).
+
+Note: Each UART port should have an alias correctly numbered
+in "aliases" node.
+
+Example:
+ aliases {
+ serial0 = &uart1;
+ };
+
+ uart1: uart@80000480 {
+ compatible = "cirrus,clps711x-uart";
+ reg = <0x80000480 0x80>;
+ interrupts = <12 13>;
+ clocks = <&clks 11>;
+ syscon = <&syscon1>;
+ uart-use-ms;
+ };
diff --git a/Documentation/devicetree/bindings/staging/xillybus.txt b/Documentation/devicetree/bindings/staging/xillybus.txt
new file mode 100644
index 0000000..9e316dc
--- /dev/null
+++ b/Documentation/devicetree/bindings/staging/xillybus.txt
@@ -0,0 +1,20 @@
+* Xillybus driver for generic FPGA interface
+
+Required properties:
+- compatible: Should be "xillybus,xillybus-1.00.a"
+- reg: Address and length of the register set for the device
+- interrupts: Contains one interrupt node, typically consisting of three cells.
+- interrupt-parent: the phandle for the interrupt controller that
+ services interrupts for this device.
+
+Optional properties:
+- dma-coherent: Present if DMA operations are coherent
+
+Example:
+
+ xillybus@ff200400 {
+ compatible = "xillybus,xillybus-1.00.a";
+ reg = < 0xff200400 0x00000080 >;
+ interrupts = < 0 40 1 >;
+ interrupt-parent = <&intc>;
+ } ;
diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt b/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt
new file mode 100644
index 0000000..7c26154
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt
@@ -0,0 +1,22 @@
+Allwinner SoCs High Speed Timer Controller
+
+Required properties:
+
+- compatible : should be "allwinner,sun5i-a13-hstimer" or
+ "allwinner,sun7i-a20-hstimer"
+- reg : Specifies base physical address and size of the registers.
+- interrupts : The interrupts of these timers (2 for the sun5i IP, 4 for the sun7i
+ one)
+- clocks: phandle to the source clock (usually the AHB clock)
+
+Example:
+
+timer@01c60000 {
+ compatible = "allwinner,sun7i-a20-hstimer";
+ reg = <0x01c60000 0x1000>;
+ interrupts = <0 51 1>,
+ <0 52 1>,
+ <0 53 1>,
+ <0 54 1>;
+ clocks = <&ahb1_gates 19>;
+};
diff --git a/Documentation/devicetree/bindings/staging/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index a1753ed..b8b6871 100644
--- a/Documentation/devicetree/bindings/staging/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -2,7 +2,9 @@ Platform DesignWare HS OTG USB 2.0 controller
-----------------------------------------------------
Required properties:
-- compatible : "snps,dwc2"
+- compatible : One of:
+ - brcm,bcm2835-usb: The DWC2 USB controller instance in the BCM2835 SoC.
+ - snps,dwc2: A generic DWC2 USB controller with default parameters.
- reg : Should contain 1 register range (address and length)
- interrupts : Should contain 1 interrupt
- clocks: clock provider specifier
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index edbb8d8..f29cd78 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -9,6 +9,7 @@ aeroflexgaisler Aeroflex Gaisler AB
ak Asahi Kasei Corp.
altr Altera Corp.
amcc Applied Micro Circuits Corporation (APM, formally AMCC)
+amstaos AMS-Taos Inc.
apm Applied Micro Circuits Corporation (APM)
arm ARM Ltd.
atmel Atmel Corporation
diff --git a/Documentation/driver-model/design-patterns.txt b/Documentation/driver-model/design-patterns.txt
new file mode 100644
index 0000000..ba7b2df
--- /dev/null
+++ b/Documentation/driver-model/design-patterns.txt
@@ -0,0 +1,116 @@
+
+Device Driver Design Patterns
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This document describes a few common design patterns found in device drivers.
+It is likely that subsystem maintainers will ask driver developers to
+conform to these design patterns.
+
+1. State Container
+2. container_of()
+
+
+1. State Container
+~~~~~~~~~~~~~~~~~~
+
+While the kernel contains a few device drivers that assume that they will
+only be probed() once on a certain system (singletons), it is customary to
+assume that the device the driver binds to will appear in several instances.
+This means that the probe() function and all callbacks need to be reentrant.
+
+The most common way to achieve this is to use the state container design
+pattern. It usually has this form:
+
+struct foo {
+ spinlock_t lock; /* Example member */
+ (...)
+};
+
+static int foo_probe(...)
+{
+ struct foo *foo;
+
+ foo = devm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);
+ if (!foo)
+ return -ENOMEM;
+ spin_lock_init(&foo->lock);
+ (...)
+}
+
+This will create an instance of struct foo in memory every time probe() is
+called. This is our state container for this instance of the device driver.
+Of course it is then necessary to always pass this instance of the
+state around to all functions that need access to the state and its members.
+
+For example, if the driver is registering an interrupt handler, you would
+pass around a pointer to struct foo like this:
+
+static irqreturn_t foo_handler(int irq, void *arg)
+{
+ struct foo *foo = arg;
+ (...)
+}
+
+static int foo_probe(...)
+{
+ struct foo *foo;
+
+ (...)
+ ret = request_irq(irq, foo_handler, 0, "foo", foo);
+}
+
+This way you always get a pointer back to the correct instance of foo in
+your interrupt handler.
+
+
+2. container_of()
+~~~~~~~~~~~~~~~~~
+
+Continuing on the above example we add an offloaded work:
+
+struct foo {
+ spinlock_t lock;
+ struct workqueue_struct *wq;
+ struct work_struct offload;
+ (...)
+};
+
+static void foo_work(struct work_struct *work)
+{
+ struct foo *foo = container_of(work, struct foo, offload);
+
+ (...)
+}
+
+static irqreturn_t foo_handler(int irq, void *arg)
+{
+ struct foo *foo = arg;
+
+ queue_work(foo->wq, &foo->offload);
+ (...)
+}
+
+static int foo_probe(...)
+{
+ struct foo *foo;
+
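+ /*
+ * foo is assumed to have been allocated earlier in probe(), as in
+ * the state container example above (e.g. with devm_kzalloc()).
+ */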
+ foo->wq = create_singlethread_workqueue("foo-wq");
+ INIT_WORK(&foo->offload, foo_work);
+ (...)
+}
+
+The design pattern is the same for an hrtimer or something similar that
+passes a single argument, a pointer to a struct member, to the callback.
+
+container_of() is a macro defined in <linux/kernel.h>.
+
+What container_of() does is to obtain a pointer to the containing struct from
+a pointer to a member by a simple subtraction using the offsetof() macro from
+standard C, which allows something similar to object-oriented behaviours.
+Notice that the contained member must not be a pointer, but an actual member,
+for this to work.
+
+We can see here that we avoid having global pointers to our struct foo *
+instance this way, while still keeping the number of parameters passed to the
+work function to a single pointer.
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 5bdc8cb..4f7897e 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -242,6 +242,8 @@ IIO
devm_iio_device_free()
devm_iio_trigger_alloc()
devm_iio_trigger_free()
+ devm_iio_device_register()
+ devm_iio_device_unregister()
IO region
devm_request_region()
diff --git a/Documentation/extcon/porting-android-switch-class b/Documentation/extcon/porting-android-switch-class
index 5377f63..49c81ca 100644
--- a/Documentation/extcon/porting-android-switch-class
+++ b/Documentation/extcon/porting-android-switch-class
@@ -50,7 +50,7 @@ so that they are still compatible with legacy userspace processes.
Extcon's extended features for switch device drivers with
complex features usually required magic numbers in state
value of switch_dev. With extcon, such magic numbers that
- support multiple cables (
+ support multiple cables are no longer required or supported.
1. Define cable names at edev->supported_cable.
2. (Recommended) remove print_state callback.
@@ -114,11 +114,8 @@ exclusive, the two cables cannot be in ATTACHED state simulteneously.
****** ABI Location
- If "CONFIG_ANDROID" is enabled and "CONFIG_ANDROID_SWITCH" is
-disabled, /sys/class/switch/* are created as symbolic links to
-/sys/class/extcon/*. Because CONFIG_ANDROID_SWITCH creates
-/sys/class/switch directory, we disable symboling linking if
-CONFIG_ANDROID_SWITCH is enabled.
+ If "CONFIG_ANDROID" is enabled, /sys/class/switch/* are created
+as symbolic links to /sys/class/extcon/*.
The two files of switch class, name and state, are provided with
extcon, too. When the multistate support (STEP 2 of CHAPTER 1.) is
diff --git a/Documentation/ja_JP/HOWTO b/Documentation/ja_JP/HOWTO
index 8148a47..0091a82 100644
--- a/Documentation/ja_JP/HOWTO
+++ b/Documentation/ja_JP/HOWTO
@@ -149,7 +149,7 @@ we also recommend sending it to linux-api@ver.kernel.org.
Other excellent descriptions of how to create patches properly are:
"The Perfect Patch"
- http://userweb.kernel.org/~akpm/stuff/tpp.txt
+ http://www.ozlabs.org/~akpm/stuff/tpp.txt
"Linux kernel patch submission format"
http://linux.yyz.us/patch-format.html
@@ -622,7 +622,7 @@ the Linux kernel community does not accept large chunks of code all at
For more details on what all of this should look like, please see the
ChangeLog section of the document:
"The Perfect Patch"
- http://userweb.kernel.org/~akpm/stuff/tpp.txt
+ http://www.ozlabs.org/~akpm/stuff/tpp.txt
Each of these things can sometimes be very difficult to do, and it can
take years to perfect these practices. This is a continuous process of improvement.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 50680a5..4252af6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -774,6 +774,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
disable= [IPV6]
See Documentation/networking/ipv6.txt.
+ disable_cpu_apicid= [X86,APIC,SMP]
+ Format: <int>
+ The initial APIC ID of the CPU that should be
+ disabled at boot. This is mostly used by the
+ kdump second kernel, which disables the BSP so
+ that it can wake up multiple CPUs without
+ causing a system reset or hang due to sending
+ INIT from an AP to the BSP.
+
disable_ddw [PPC/PSERIES]
Disable Dynamic DMA Window support. Use this
to work around buggy firmware.
@@ -881,6 +890,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
The xen output can only be used by Xen PV guests.
+ edac_report= [HW,EDAC] Control how EDAC events are reported.
+ Format: {"on" | "off" | "force"}
+ on: enable EDAC to report H/W events. May be overridden
+ by another, higher priority error reporting module.
+ off: disable H/W event reporting through EDAC.
+ force: enforce the use of EDAC to report H/W events.
+ default: on.
+
ekgdboc= [X86,KGDB] Allow early kernel console debugging
ekgdboc=kbd
@@ -890,6 +907,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
edd= [EDD]
Format: {"off" | "on" | "skip[mbr]"}
+ efi= [EFI]
+ Format: { "old_map" }
+ old_map [X86-64]: switch to the old ioremap-based EFI
+ runtime services mapping. 32-bit still uses this one by
+ default.
+
efi_no_storage_paranoia [EFI; X86]
Using this parameter you can use more than 50% of
your efi variable storage. Use this parameter only if
@@ -1529,6 +1552,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
* atapi_dmadir: Enable ATAPI DMADIR bridge support
+ * disable: Disable this device.
+
If there are multiple matching configurations changing
the same attribute, the last one is used.
@@ -1992,6 +2017,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
noapic [SMP,APIC] Tells the kernel to not make use of any
IOAPICs that may be present in the system.
+ nokaslr [X86]
+ Disable kernel base offset ASLR (Address Space
+ Layout Randomization) if built into the kernel.
+
noautogroup Disable scheduler automatic task group creation.
nobats [PPC] Do not use BATs for mapping kernel lowmem
@@ -2625,7 +2654,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
for RCU-preempt, and "s" for RCU-sched, and "N"
is the CPU number. This reduces OS jitter on the
offloaded CPUs, which can be useful for HPC and
-
real-time workloads. It can also improve energy
efficiency for asymmetric multiprocessors.
@@ -2641,8 +2669,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
periodically wake up to do the polling.
rcutree.blimit= [KNL]
- Set maximum number of finished RCU callbacks to process
- in one batch.
+ Set maximum number of finished RCU callbacks to
+ process in one batch.
rcutree.rcu_fanout_leaf= [KNL]
Increase the number of CPUs assigned to each
@@ -2661,8 +2689,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
value is one, and maximum value is HZ.
rcutree.qhimark= [KNL]
- Set threshold of queued
- RCU callbacks over which batch limiting is disabled.
+ Set threshold of queued RCU callbacks beyond which
+ batch limiting is disabled.
rcutree.qlowmark= [KNL]
Set threshold of queued RCU callbacks below which
diff --git a/Documentation/kmsg/s390/zcrypt b/Documentation/kmsg/s390/zcrypt
new file mode 100644
index 0000000..7fb2087
--- /dev/null
+++ b/Documentation/kmsg/s390/zcrypt
@@ -0,0 +1,20 @@
+/*?
+ * Text: "Cryptographic device %x failed and was set offline\n"
+ * Severity: Error
+ * Parameter:
+ * @1: device index
+ * Description:
+ * A cryptographic device failed to process a cryptographic request.
+ * The cryptographic device driver could not correct the error and
+ * set the device offline. The application that issued the
+ * request received an indication that the request has failed.
+ * User action:
+ * Use the lszcrypt command to confirm that the cryptographic
+ * hardware is still configured to your LPAR or z/VM guest virtual
+ * machine. If the device is available to your Linux instance the
+ * command output contains a line that begins with 'card<device index>',
+ * where <device index> is the two-digit decimal number in the message text.
+ * After ensuring that the device is available, use the chzcrypt command to
+ * set it online again.
+ * If the error persists, contact your support organization.
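+ * Example (the device index 6 below is only an illustration; use the
+ * index reported in the message):
+ *   lszcrypt
+ *   chzcrypt -e 6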
+ */
diff --git a/Documentation/ko_KR/HOWTO b/Documentation/ko_KR/HOWTO
index 680e646..dc2ff8f 100644
--- a/Documentation/ko_KR/HOWTO
+++ b/Documentation/ko_KR/HOWTO
@@ -122,7 +122,7 @@ it is recommended to send it to the maintainer at mtk.manpages@gmail.com.
There are other excellent documents on how to create proper patches:
"The Perfect Patch"
- http://userweb.kernel.org/~akpm/stuff/tpp.txt
+ http://www.ozlabs.org/~akpm/stuff/tpp.txt
"Linux kernel patch submission format"
http://linux.yyz.us/patch-format.html
@@ -213,7 +213,7 @@ they are generated from Documentation/DocBook/ in PDF, Postscript, HTML
is the Linux Cross-Reference project, which presents the source code in a
self-referential, indexed web page format. An excellent, up-to-date
repository of the kernel code may be found at:
- http://users.sosdg.org/~qiyong/lxr/
+ http://lxr.linux.no/+trees
The development process
@@ -222,20 +222,20 @@ they are generated from Documentation/DocBook/ in PDF, Postscript, HTML
The Linux kernel development process currently consists of a few different
main kernel "branches" and lots of different subsystem-specific kernel
branches. These different branches are:
- - main 2.6.x kernel tree
- - 2.6.x.y -stable kernel tree
- - 2.6.x -git kernel patches
- - 2.6.x -mm kernel patches
+ - main 3.x kernel tree
+ - 3.x.y -stable kernel tree
+ - 3.x -git kernel patches
- subsystem-specific kernel trees and patches
+ - the 3.x -next kernel tree for integration tests
-2.6.x kernel tree
+3.x kernel tree
---------------
-2.6.x kernels are maintained by Linus Torvalds and can be found at kernel.org in the pub/linux/kernel/v2.6/
+3.x kernels are maintained by Linus Torvalds and can be found at kernel.org in the pub/linux/kernel/v3.x/
directory. The development process is as follows:
- As soon as a new kernel is released, a two-week window opens. During this period
maintainers can submit big diffs to Linus. Usually these patches have already
- been in the -mm kernel for a few weeks. The preferred way to submit big changes
+ been in the -next kernel for a few weeks. The preferred way to submit big changes
is to use git (the kernel's source management tool; more information can be found at http://git.or.cz/),
but sending them as plain patch files is
also fine.
@@ -262,20 +262,20 @@ there is a posting from Andrew Morton:
"It is released according to the state of the bugs, not according to a
preconceived timeline."
-2.6.x.y -stable kernel tree
+3.x.y -stable kernel tree
------------------------
-Kernels with 4-part version numbers are the -stable kernels. For a given 2.6.x
+Kernels with 3-part version numbers are the -stable kernels. For a given 3.x
kernel they contain relatively small and critical fixes for security
problems or significant regressions that have been discovered.
This is the recommended branch for users who want the most recent stable
kernel and are not interested in helping test development or experimental versions.
-If no 2.6.x.y kernel is available, then the highest numbered 2.6.x
+If no 3.x.y kernel is available, then the highest numbered 3.x
kernel is the current stable kernel.
-2.6.x.y kernels are maintained by the "stable" team <stable@kernel.org> and are released almost
+3.x.y kernels are maintained by the "stable" team <stable@vger.kernel.org> and are released almost
every other week.
The file Documentation/stable_kernel_rules.txt in the kernel tree documents what
@@ -283,84 +283,46 @@ there is a posting from Andrew Morton:
and how the release process proceeds.
-2.6.x -git patches
+3.x -git patches
------------------
These are daily snapshots of Linus' kernel tree which are managed in a git
repository (hence the name). These patches are usually released daily and
represent the current state of Linus' tree. They are more experimental than
-rc kernels since they are generated automatically without even a cursory glance to see if they are sane.
-2.6.x -mm kernel patches
----------------------
-These are experimental kernel patches released by Andrew Morton. Andrew
-takes all of the different subsystem kernel trees and patches and mushes
-them together, along with a lot of patches that have come in via the
-linux-kernel mailing list. This tree serves as a proving ground for new
-features and patches. Once a patch has proved its worth in -mm for a while,
-Andrew or the subsystem maintainer sends it on to Linus for inclusion in mainline.
-
-It is heavily encouraged that all new patches get tested in the -mm tree
-before they are sent to Linus for inclusion in the main kernel tree.
-
-These kernels are not appropriate for running on systems that are supposed
-to be stable, and they are more risky to run than any of the other branches.
-
-If you wish to help out with the kernel development process, please test
-and use these kernel releases and provide feedback to the linux-kernel
-mailing list if you have any problems, or if everything works properly.
-
-In addition to all the other experimental patches, these kernels usually
-also contain some changes from the mainline -git kernels available at the time of release.
-
-The -mm kernels are not released on a fixed schedule, but usually a few
--mm kernels are released between each -rc kernel (1 to 3 is common).
-
Subsystem-specific kernel trees and patches
-------------------------------
-Many of the other kernel subsystem developers expose their development
-trees so that others can see what is happening in the different areas of
-the kernel. These trees are merged into the -mm kernel releases as described above.
-
-The following lists some of the available kernel trees:
- git trees:
- - Kbuild development tree, Sam Ravnborg < sam@ravnborg.org>
- git.kernel.org:/pub/scm/linux/kernel/git/sam/kbuild.git
-
- - ACPI development tree, Len Brown <len.brown@intel.com >
- git.kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
-
- - Block development tree, Jens Axboe <jens.axboe@oracle.com>
- git.kernel.org:/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git
-
- - DRM development tree, Dave Airlie <airlied@linux.ie>
- git.kernel.org:/pub/scm/linux/kernel/git/airlied/drm-2.6.git
-
- - ia64 development tree, Tony Luck < tony.luck@intel.com>
- git.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6.git
-
- - infiniband, Roland Dreier <rolandd@cisco.com >
- git.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband.git
-
- - libata, Jeff Garzik <jgarzik@pobox.com>
- git.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev.git
-
- - network drivers, Jeff Garzik <jgarzik@pobox.com>
- git.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6.git
-
- - pcmcia, Dominik Brodowski < linux@dominikbrodowski.net>
- git.kernel.org:/pub/scm/linux/kernel/git/brodo/pcmcia-2.6.git
-
- - SCSI, James Bottomley < James.Bottomley@SteelEye.com>
- git.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
-
- quilt trees:
- - USB, PCI, Driver Core, and I2C, Greg Kroah-Hartman < gregkh@linuxfoundation.org>
- kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
- - x86-64, partly i386, Andi Kleen < ak@suse.de>
- ftp.firstfloor.org:/pub/ak/x86_64/quilt/
-
- Other kernel trees can be found at http://kernel.org/git and in the
- MAINTAINERS file.
+The maintainers of the various kernel subsystems --- and also many kernel
+subsystem developers --- expose their current state of development in
+source repositories. That way, others can see what is happening in the
+different areas of the kernel. In areas where development is rapid, a
+developer may be asked to base his or her submissions onto such a subsystem
+kernel tree, so that conflicts between the submission and other work that
+is already ongoing are avoided.
+
+Most of these repositories are git trees, but there are also other SCMs in
+use, or patch queues being published as quilt series. These subsystem
+repositories are listed in the MAINTAINERS file. Many of them can be
+browsed at http://git.kernel.org.
+
+Before a proposed patch is committed to such a subsystem tree, it is
+subject to review, which primarily happens on mailing lists (see the
+respective section below). For several kernel subsystems, this review
+process is tracked with the tool patchwork. Patchwork offers a web
+interface which shows patch postings, any comments on a patch or revisions
+to it, and maintainers can mark patches as under review, accepted, or
+rejected. Most of these patchwork sites are listed at
+http://patchwork.kernel.org/ or http://patchwork.ozlabs.org/.
+
+3.x -next kernel tree for integration tests
+-----------------------------------------
+Before updates from subsystem trees are merged into the mainline 3.x tree,
+they need to be integration-tested. For this purpose, a special testing
+repository exists into which virtually all subsystem trees are pulled on
+an almost daily basis:
+ http://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git
+ http://linux.f-seidel.de/linux-next/pmwiki/
+
+This way, the -next kernel gives a summary outlook on what is expected to
+go into the mainline kernel at the next merge period. Adventurous testers
+are very welcome to runtime-test the -next kernel.
Reporting bugs
---------
@@ -597,7 +559,7 @@ there might also be a female developer named Pat. Linux
For more details on what this should look like, see the ChangeLog section of the document:
"The Perfect Patch"
- http://userweb.kernel.org/~akpm/stuff/tpp.txt
+ http://www.ozlabs.org/~akpm/stuff/tpp.txt
diff --git a/Documentation/kobject.txt b/Documentation/kobject.txt
index c5182bb..f87241d 100644
--- a/Documentation/kobject.txt
+++ b/Documentation/kobject.txt
@@ -342,7 +342,10 @@ kset use:
When you are finished with the kset, call:
void kset_unregister(struct kset *kset);
-to destroy it.
+to destroy it. This removes the kset from sysfs and decrements its reference
+count. When the reference count goes to zero, the kset will be released.
+Because other references to the kset may still exist, the release may happen
+after kset_unregister() returns.
An example of using a kset can be seen in the
samples/kobject/kset-example.c file in the kernel tree.
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index c8c42e6..102dc19 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -194,18 +194,22 @@ There are some minimal guarantees that may be expected of a CPU:
(*) On any given CPU, dependent memory accesses will be issued in order, with
respect to itself. This means that for:
- Q = P; D = *Q;
+ ACCESS_ONCE(Q) = P; smp_read_barrier_depends(); D = ACCESS_ONCE(*Q);
the CPU will issue the following memory operations:
Q = LOAD P, D = LOAD *Q
- and always in that order.
+ and always in that order. On most systems, smp_read_barrier_depends()
+ does nothing, but it is required for DEC Alpha. The ACCESS_ONCE()
+ is required to prevent compiler mischief. Please note that you
+ should normally use something like rcu_dereference() instead of
+ open-coding smp_read_barrier_depends().
(*) Overlapping loads and stores within a particular CPU will appear to be
ordered within that CPU. This means that for:
- a = *X; *X = b;
+ a = ACCESS_ONCE(*X); ACCESS_ONCE(*X) = b;
the CPU will only issue the following sequence of memory operations:
@@ -213,7 +217,7 @@ There are some minimal guarantees that may be expected of a CPU:
And for:
- *X = c; d = *X;
+ ACCESS_ONCE(*X) = c; d = ACCESS_ONCE(*X);
the CPU will only issue:
@@ -224,6 +228,12 @@ There are some minimal guarantees that may be expected of a CPU:
And there are a number of things that _must_ or _must_not_ be assumed:
+ (*) It _must_not_ be assumed that the compiler will do what you want with
+ memory references that are not protected by ACCESS_ONCE(). Without
+ ACCESS_ONCE(), the compiler is within its rights to do all sorts
+ of "creative" transformations, which are covered in the Compiler
+ Barrier section.
+
(*) It _must_not_ be assumed that independent loads and stores will be issued
in the order given. This means that for:
@@ -371,33 +381,44 @@ Memory barriers come in four basic varieties:
And a couple of implicit varieties:
- (5) LOCK operations.
+ (5) ACQUIRE operations.
This acts as a one-way permeable barrier. It guarantees that all memory
- operations after the LOCK operation will appear to happen after the LOCK
- operation with respect to the other components of the system.
+ operations after the ACQUIRE operation will appear to happen after the
+ ACQUIRE operation with respect to the other components of the system.
+ ACQUIRE operations include LOCK operations and smp_load_acquire()
+ operations.
- Memory operations that occur before a LOCK operation may appear to happen
- after it completes.
+ Memory operations that occur before an ACQUIRE operation may appear to
+ happen after it completes.
- A LOCK operation should almost always be paired with an UNLOCK operation.
+ An ACQUIRE operation should almost always be paired with a RELEASE
+ operation.
- (6) UNLOCK operations.
+ (6) RELEASE operations.
This also acts as a one-way permeable barrier. It guarantees that all
- memory operations before the UNLOCK operation will appear to happen before
- the UNLOCK operation with respect to the other components of the system.
+ memory operations before the RELEASE operation will appear to happen
+ before the RELEASE operation with respect to the other components of the
+ system. RELEASE operations include UNLOCK operations and
+ smp_store_release() operations.
- Memory operations that occur after an UNLOCK operation may appear to
+ Memory operations that occur after a RELEASE operation may appear to
happen before it completes.
- LOCK and UNLOCK operations are guaranteed to appear with respect to each
- other strictly in the order specified.
+ The use of ACQUIRE and RELEASE operations generally precludes the need
+ for other sorts of memory barrier (but note the exceptions mentioned in
+ the subsection "MMIO write barrier"). In addition, a RELEASE+ACQUIRE
+ pair is -not- guaranteed to act as a full memory barrier. However, after
+ an ACQUIRE on a given variable, all memory accesses preceding any prior
+ RELEASE on that same variable are guaranteed to be visible. In other
+ words, within a given variable's critical section, all accesses of all
+ previous critical sections for that variable are guaranteed to have
+ completed.
- The use of LOCK and UNLOCK operations generally precludes the need for
- other sorts of memory barrier (but note the exceptions mentioned in the
- subsection "MMIO write barrier").
+ This means that ACQUIRE acts as a minimal "acquire" operation and
+ RELEASE acts as a minimal "release" operation.
Memory barriers are only required where there's a possibility of interaction
@@ -450,14 +471,14 @@ The usage requirements of data dependency barriers are a little subtle, and
it's not always obvious that they're needed. To illustrate, consider the
following sequence of events:
- CPU 1 CPU 2
- =============== ===============
+ CPU 1 CPU 2
+ =============== ===============
{ A == 1, B == 2, C = 3, P == &A, Q == &C }
B = 4;
<write barrier>
- P = &B
- Q = P;
- D = *Q;
+ ACCESS_ONCE(P) = &B
+ Q = ACCESS_ONCE(P);
+ D = *Q;
There's a clear data dependency here, and it would seem that by the end of the
sequence, Q must be either &A or &B, and that:
@@ -477,15 +498,15 @@ Alpha).
To deal with this, a data dependency barrier or better must be inserted
between the address load and the data load:
- CPU 1 CPU 2
- =============== ===============
+ CPU 1 CPU 2
+ =============== ===============
{ A == 1, B == 2, C = 3, P == &A, Q == &C }
B = 4;
<write barrier>
- P = &B
- Q = P;
- <data dependency barrier>
- D = *Q;
+ ACCESS_ONCE(P) = &B
+ Q = ACCESS_ONCE(P);
+ <data dependency barrier>
+ D = *Q;
This enforces the occurrence of one of the two implications, and prevents the
third possibility from arising.
@@ -500,25 +521,26 @@ odd-numbered bank is idle, one can see the new value of the pointer P (&B),
but the old value of the variable B (2).
-Another example of where data dependency barriers might by required is where a
+Another example of where data dependency barriers might be required is where a
number is read from memory and then used to calculate the index for an array
access:
- CPU 1 CPU 2
- =============== ===============
+ CPU 1 CPU 2
+ =============== ===============
{ M[0] == 1, M[1] == 2, M[3] = 3, P == 0, Q == 3 }
M[1] = 4;
<write barrier>
- P = 1
- Q = P;
- <data dependency barrier>
- D = M[Q];
+ ACCESS_ONCE(P) = 1
+ Q = ACCESS_ONCE(P);
+ <data dependency barrier>
+ D = M[Q];
-The data dependency barrier is very important to the RCU system, for example.
-See rcu_dereference() in include/linux/rcupdate.h. This permits the current
-target of an RCU'd pointer to be replaced with a new modified target, without
-the replacement target appearing to be incompletely initialised.
+The data dependency barrier is very important to the RCU system,
+for example. See rcu_assign_pointer() and rcu_dereference() in
+include/linux/rcupdate.h. This permits the current target of an RCU'd
+pointer to be replaced with a new modified target, without the replacement
+target appearing to be incompletely initialised.
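+
+As a rough sketch (for illustration only; 'gp' is assumed to be a global
+RCU-protected pointer, 'p' and 'q' pointers to a structure with an integer
+field 'a', and allocation-failure handling is omitted), the usual pattern
+looks something like this:
+
+        /* Updater */
+        p = kmalloc(sizeof(*p), GFP_KERNEL);
+        p->a = 42;
+        rcu_assign_pointer(gp, p);
+
+        /* Reader */
+        rcu_read_lock();
+        q = rcu_dereference(gp);
+        if (q)
+                do_something_with(q->a);
+        rcu_read_unlock();
+
+The rcu_assign_pointer() supplies the write barrier on the updater side and
+rcu_dereference() supplies the data dependency barrier on the reader side,
+so a reader that sees the new pointer cannot see the pre-initialisation
+contents of the structure.
+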
See also the subsection on "Cache Coherency" for a more thorough example.
@@ -530,24 +552,190 @@ A control dependency requires a full read memory barrier, not simply a data
dependency barrier to make it work correctly. Consider the following bit of
code:
- q = &a;
- if (p) {
- <data dependency barrier>
- q = &b;
+ q = ACCESS_ONCE(a);
+ if (q) {
+ <data dependency barrier> /* BUG: No data dependency!!! */
+ p = ACCESS_ONCE(b);
}
- x = *q;
This will not have the desired effect because there is no actual data
-dependency, but rather a control dependency that the CPU may short-circuit by
-attempting to predict the outcome in advance. In such a case what's actually
-required is:
+dependency, but rather a control dependency that the CPU may short-circuit
+by attempting to predict the outcome in advance, so that other CPUs see
+the load from b as having happened before the load from a. In such a
+case what's actually required is:
- q = &a;
- if (p) {
+ q = ACCESS_ONCE(a);
+ if (q) {
<read barrier>
- q = &b;
+ p = ACCESS_ONCE(b);
+ }
+
+However, stores are not speculated. This means that ordering -is- provided
+in the following example:
+
+ q = ACCESS_ONCE(a);
+ if (q) {
+ ACCESS_ONCE(b) = p;
+ }
+
+Please note that ACCESS_ONCE() is not optional! Without the ACCESS_ONCE(),
+the compiler is within its rights to transform this example:
+
+ q = a;
+ if (q) {
+ b = p; /* BUG: Compiler can reorder!!! */
+ do_something();
+ } else {
+ b = p; /* BUG: Compiler can reorder!!! */
+ do_something_else();
+ }
+
+into this, which of course defeats the ordering:
+
+ b = p;
+ q = a;
+ if (q)
+ do_something();
+ else
+ do_something_else();
+
+Worse yet, if the compiler is able to prove (say) that the value of
+variable 'a' is always non-zero, it would be well within its rights
+to optimize the original example by eliminating the "if" statement
+as follows:
+
+ q = a;
+ b = p; /* BUG: Compiler can reorder!!! */
+ do_something();
+
+The solution is again ACCESS_ONCE(), which preserves the ordering between
+the load from variable 'a' and the store to variable 'b':
+
+ q = ACCESS_ONCE(a);
+ if (q) {
+ ACCESS_ONCE(b) = p;
+ do_something();
+ } else {
+ ACCESS_ONCE(b) = p;
+ do_something_else();
+ }
+
+You could also use barrier() to prevent the compiler from moving
+the stores to variable 'b', but barrier() would not prevent the
+compiler from proving to itself that a==1 always, so ACCESS_ONCE()
+is also needed.
+
+It is important to note that control dependencies absolutely require a
+conditional. For example, the following "optimized" version of
+the above example breaks ordering:
+
+ q = ACCESS_ONCE(a);
+ ACCESS_ONCE(b) = p; /* BUG: No ordering vs. load from a!!! */
+ if (q) {
+ /* ACCESS_ONCE(b) = p; -- moved up, BUG!!! */
+ do_something();
+ } else {
+ /* ACCESS_ONCE(b) = p; -- moved up, BUG!!! */
+ do_something_else();
}
- x = *q;
+
+It is of course legal for the prior load to be part of the conditional,
+for example, as follows:
+
+ if (ACCESS_ONCE(a) > 0) {
+ ACCESS_ONCE(b) = q / 2;
+ do_something();
+ } else {
+ ACCESS_ONCE(b) = q / 3;
+ do_something_else();
+ }
+
+This will again ensure that the load from variable 'a' is ordered before the
+stores to variable 'b'.
+
+In addition, you need to be careful what you do with the local variable 'q',
+otherwise the compiler might be able to guess the value and again remove
+the needed conditional. For example:
+
+ q = ACCESS_ONCE(a);
+ if (q % MAX) {
+ ACCESS_ONCE(b) = p;
+ do_something();
+ } else {
+ ACCESS_ONCE(b) = p;
+ do_something_else();
+ }
+
+If MAX is defined to be 1, then the compiler knows that (q % MAX) is
+equal to zero, in which case the compiler is within its rights to
+transform the above code into the following:
+
+ q = ACCESS_ONCE(a);
+ ACCESS_ONCE(b) = p;
+ do_something_else();
+
+This transformation loses the ordering between the load from variable 'a'
+and the store to variable 'b'. If you are relying on this ordering, you
+should do something like the following:
+
+ q = ACCESS_ONCE(a);
+ BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. */
+ if (q % MAX) {
+ ACCESS_ONCE(b) = p;
+ do_something();
+ } else {
+ ACCESS_ONCE(b) = p;
+ do_something_else();
+ }
+
+Finally, control dependencies do -not- provide transitivity. This is
+demonstrated by two related examples:
+
+ CPU 0 CPU 1
+ ===================== =====================
+ r1 = ACCESS_ONCE(x); r2 = ACCESS_ONCE(y);
+ if (r1 >= 0) if (r2 >= 0)
+ ACCESS_ONCE(y) = 1; ACCESS_ONCE(x) = 1;
+
+ assert(!(r1 == 1 && r2 == 1));
+
+The above two-CPU example will never trigger the assert(). However,
+if control dependencies guaranteed transitivity (which they do not),
+then adding the following two CPUs would guarantee a related assertion:
+
+ CPU 2 CPU 3
+ ===================== =====================
+ ACCESS_ONCE(x) = 2; ACCESS_ONCE(y) = 2;
+
+ assert(!(r1 == 2 && r2 == 2 && x == 1 && y == 1)); /* FAILS!!! */
+
+But because control dependencies do -not- provide transitivity, the
+above assertion can fail after the combined four-CPU example completes.
+If you need the four-CPU example to provide ordering, you will need
+smp_mb() between the loads and stores in the CPU 0 and CPU 1 code fragments.
+
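+For example (a sketch only), CPU 0's fragment would then read:
+
+        r1 = ACCESS_ONCE(x);
+        if (r1 >= 0) {
+                smp_mb();
+                ACCESS_ONCE(y) = 1;
+        }
+
+with a corresponding smp_mb() placed between the load and the store in
+CPU 1's fragment, so that the ordering is supplied by the full barriers
+rather than by the control dependencies.
+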
+In summary:
+
+ (*) Control dependencies can order prior loads against later stores.
+ However, they do -not- guarantee any other sort of ordering:
+ Not prior loads against later loads, nor prior stores against
+ later anything. If you need these other forms of ordering,
+ use smp_rmb(), smp_wmb(), or, in the case of prior stores and
+ later loads, smp_mb().
+
+ (*) Control dependencies require at least one run-time conditional
+ between the prior load and the subsequent store. If the compiler
+ is able to optimize the conditional away, it will have also
+ optimized away the ordering. Careful use of ACCESS_ONCE() can
+ help to preserve the needed conditional.
+
+ (*) Control dependencies require that the compiler avoid reordering the
+ dependency into nonexistence. Careful use of ACCESS_ONCE() or
+ barrier() can help to preserve your control dependency. Please
+ see the Compiler Barrier section for more information.
+
+ (*) Control dependencies do -not- provide transitivity. If you
+ need transitivity, use smp_mb().
SMP BARRIER PAIRING
@@ -561,23 +749,23 @@ barrier, though a general barrier would also be viable. Similarly a read
barrier or a data dependency barrier should always be paired with at least an
write barrier, though, again, a general barrier is viable:
- CPU 1 CPU 2
- =============== ===============
- a = 1;
+ CPU 1 CPU 2
+ =============== ===============
+ ACCESS_ONCE(a) = 1;
<write barrier>
- b = 2; x = b;
- <read barrier>
- y = a;
+ ACCESS_ONCE(b) = 2; x = ACCESS_ONCE(b);
+ <read barrier>
+ y = ACCESS_ONCE(a);
Or:
- CPU 1 CPU 2
- =============== ===============================
+ CPU 1 CPU 2
+ =============== ===============================
a = 1;
<write barrier>
- b = &a; x = b;
- <data dependency barrier>
- y = *x;
+ ACCESS_ONCE(b) = &a; x = ACCESS_ONCE(b);
+ <data dependency barrier>
+ y = *x;
Basically, the read barrier always has to be there, even though it can be of
the "weaker" type.
@@ -586,13 +774,13 @@ the "weaker" type.
match the loads after the read barrier or the data dependency barrier, and vice
versa:
- CPU 1 CPU 2
- =============== ===============
- a = 1; }---- --->{ v = c
- b = 2; } \ / { w = d
- <write barrier> \ <read barrier>
- c = 3; } / \ { x = a;
- d = 4; }---- --->{ y = b;
+ CPU 1 CPU 2
+ =================== ===================
+ ACCESS_ONCE(a) = 1; }---- --->{ v = ACCESS_ONCE(c);
+ ACCESS_ONCE(b) = 2; } \ / { w = ACCESS_ONCE(d);
+ <write barrier> \ <read barrier>
+ ACCESS_ONCE(c) = 3; } / \ { x = ACCESS_ONCE(a);
+ ACCESS_ONCE(d) = 4; }---- --->{ y = ACCESS_ONCE(b);
EXAMPLES OF MEMORY BARRIER SEQUENCES
@@ -882,12 +1070,12 @@ cache it for later use.
Consider:
- CPU 1 CPU 2
+ CPU 1 CPU 2
======================= =======================
- LOAD B
- DIVIDE } Divide instructions generally
- DIVIDE } take a long time to perform
- LOAD A
+ LOAD B
+ DIVIDE } Divide instructions generally
+ DIVIDE } take a long time to perform
+ LOAD A
Which might appear as this:
@@ -910,13 +1098,13 @@ Which might appear as this:
Placing a read barrier or a data dependency barrier just before the second
load:
- CPU 1 CPU 2
+ CPU 1 CPU 2
======================= =======================
- LOAD B
- DIVIDE
- DIVIDE
+ LOAD B
+ DIVIDE
+ DIVIDE
<read barrier>
- LOAD A
+ LOAD A
will force any value speculatively obtained to be reconsidered to an extent
dependent on the type of barrier used. If there was no change made to the
@@ -1042,10 +1230,277 @@ compiler from moving the memory accesses either side of it to the other side:
barrier();
-This is a general barrier - lesser varieties of compiler barrier do not exist.
+This is a general barrier -- there are no read-read or write-write variants
+of barrier(). However, ACCESS_ONCE() can be thought of as a weak form
+of barrier() that affects only the specific accesses flagged by the
+ACCESS_ONCE().
+
+The barrier() function has the following effects:
+
+ (*) Prevents the compiler from reordering accesses following the
+ barrier() to precede any accesses preceding the barrier().
+ One example use for this property is to ease communication between
+ interrupt-handler code and the code that was interrupted.
+
+ (*) Within a loop, forces the compiler to load the variables used
+ in that loop's conditional on each pass through that loop.
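+
+ For example (an illustration only; 'need_to_stop' stands in for some
+ shared flag), in a wait loop such as:
+
+        while (!need_to_stop)
+                barrier();
+
+ the barrier() forces the compiler to reload 'need_to_stop' on each
+ pass through the loop rather than hoisting the load out of the loop.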
+
+The ACCESS_ONCE() function can prevent any number of optimizations that,
+while perfectly safe in single-threaded code, can be fatal in concurrent
+code. Here are some examples of these sorts of optimizations:
+
+ (*) The compiler is within its rights to merge successive loads from
+ the same variable. Such merging can cause the compiler to "optimize"
+ the following code:
+
+ while (tmp = a)
+ do_something_with(tmp);
+
+ into the following code, which, although in some sense legitimate
+ for single-threaded code, is almost certainly not what the developer
+ intended:
+
+ if (tmp = a)
+ for (;;)
+ do_something_with(tmp);
+
+ Use ACCESS_ONCE() to prevent the compiler from doing this to you:
+
+ while (tmp = ACCESS_ONCE(a))
+ do_something_with(tmp);
+
+ (*) The compiler is within its rights to reload a variable, for example,
+ in cases where high register pressure prevents the compiler from
+ keeping all data of interest in registers. The compiler might
+ therefore optimize the variable 'tmp' out of our previous example:
+
+ while (tmp = a)
+ do_something_with(tmp);
+
+ This could result in the following code, which is perfectly safe in
+ single-threaded code, but can be fatal in concurrent code:
+
+ while (a)
+ do_something_with(a);
+
+ For example, the optimized version of this code could result in
+ passing a zero to do_something_with() in the case where the variable
+ a was modified by some other CPU between the "while" statement and
+ the call to do_something_with().
+
+ Again, use ACCESS_ONCE() to prevent the compiler from doing this:
+
+ while (tmp = ACCESS_ONCE(a))
+ do_something_with(tmp);
+
+ Note that if the compiler runs short of registers, it might save
+ tmp onto the stack. The overhead of this saving and later restoring
+ is why compilers reload variables. Doing so is perfectly safe for
+ single-threaded code, so you need to tell the compiler about cases
+ where it is not safe.
+
+ (*) The compiler is within its rights to omit a load entirely if it knows
+ what the value will be. For example, if the compiler can prove that
+ the value of variable 'a' is always zero, it can optimize this code:
+
+ while (tmp = a)
+ do_something_with(tmp);
-The compiler barrier has no direct effect on the CPU, which may then reorder
-things however it wishes.
+ Into this:
+
+ do { } while (0);
+
+ This transformation is a win for single-threaded code because it gets
+ rid of a load and a branch. The problem is that the compiler will
+ carry out its proof assuming that the current CPU is the only one
+ updating variable 'a'. If variable 'a' is shared, then the compiler's
+ proof will be erroneous. Use ACCESS_ONCE() to tell the compiler
+ that it doesn't know as much as it thinks it does:
+
+ while (tmp = ACCESS_ONCE(a))
+ do_something_with(tmp);
+
+ But please note that the compiler is also closely watching what you
+ do with the value after the ACCESS_ONCE(). For example, suppose you
+ do the following and MAX is a preprocessor macro with the value 1:
+
+ while ((tmp = ACCESS_ONCE(a)) % MAX)
+ do_something_with(tmp);
+
+ Then the compiler knows that the result of the "%" operator applied
+ to MAX will always be zero, again allowing the compiler to optimize
+ the code into near-nonexistence. (It will still load from the
+ variable 'a'.)
+
+ (*) Similarly, the compiler is within its rights to omit a store entirely
+ if it knows that the variable already has the value being stored.
+ Again, the compiler assumes that the current CPU is the only one
+ storing into the variable, which can cause the compiler to do the
+ wrong thing for shared variables. For example, suppose you have
+ the following:
+
+ a = 0;
+ /* Code that does not store to variable a. */
+ a = 0;
+
+ The compiler sees that the value of variable 'a' is already zero, so
+ it might well omit the second store. This would come as a fatal
+ surprise if some other CPU might have stored to variable 'a' in the
+ meantime.
+
+ Use ACCESS_ONCE() to prevent the compiler from making this sort of
+ wrong guess:
+
+ ACCESS_ONCE(a) = 0;
+ /* Code that does not store to variable a. */
+ ACCESS_ONCE(a) = 0;
+
+ (*) The compiler is within its rights to reorder memory accesses unless
+ you tell it not to. For example, consider the following interaction
+ between process-level code and an interrupt handler:
+
+ void process_level(void)
+ {
+ msg = get_message();
+ flag = true;
+ }
+
+ void interrupt_handler(void)
+ {
+ if (flag)
+ process_message(msg);
+ }
+
+ There is nothing to prevent the compiler from transforming
+ process_level() to the following; in fact, this might well be a
+ win for single-threaded code:
+
+ void process_level(void)
+ {
+ flag = true;
+ msg = get_message();
+ }
+
+ If the interrupt occurs between these two statements, then
+ interrupt_handler() might be passed a garbled msg. Use ACCESS_ONCE()
+ to prevent this as follows:
+
+ void process_level(void)
+ {
+ ACCESS_ONCE(msg) = get_message();
+ ACCESS_ONCE(flag) = true;
+ }
+
+ void interrupt_handler(void)
+ {
+ if (ACCESS_ONCE(flag))
+ process_message(ACCESS_ONCE(msg));
+ }
+
+ Note that the ACCESS_ONCE() wrappers in interrupt_handler()
+ are needed if this interrupt handler can itself be interrupted
+ by something that also accesses 'flag' and 'msg', for example,
+ a nested interrupt or an NMI. Otherwise, ACCESS_ONCE() is not
+ needed in interrupt_handler() other than for documentation purposes.
+ (Note also that nested interrupts do not typically occur in modern
+ Linux kernels; in fact, if an interrupt handler returns with
+ interrupts enabled, you will get a WARN_ONCE() splat.)
+
+ You should assume that the compiler can move ACCESS_ONCE() past
+ code not containing ACCESS_ONCE(), barrier(), or similar primitives.
+
+ This effect could also be achieved using barrier(), but ACCESS_ONCE()
+ is more selective: With ACCESS_ONCE(), the compiler need only forget
+ the contents of the indicated memory locations, while with barrier()
+ the compiler must discard the value of all memory locations that
+ it has currently cached in any machine registers. Of course,
+ the compiler must also respect the order in which the ACCESS_ONCE()s
+ occur, though the CPU of course need not do so.
+
+ (*) The compiler is within its rights to invent stores to a variable,
+ as in the following example:
+
+ if (a)
+ b = a;
+ else
+ b = 42;
+
+ The compiler might save a branch by optimizing this as follows:
+
+ b = 42;
+ if (a)
+ b = a;
+
+ In single-threaded code, this is not only safe, but also saves
+ a branch. Unfortunately, in concurrent code, this optimization
+ could cause some other CPU to see a spurious value of 42 -- even
+ if variable 'a' was never zero -- when loading variable 'b'.
+ Use ACCESS_ONCE() to prevent this as follows:
+
+ if (a)
+ ACCESS_ONCE(b) = a;
+ else
+ ACCESS_ONCE(b) = 42;
+
+ The compiler can also invent loads. These are usually less
+ damaging, but they can result in cache-line bouncing and thus in
+ poor performance and scalability. Use ACCESS_ONCE() to prevent
+ invented loads.
+
+ (*) For aligned memory locations whose size allows them to be accessed
+ with a single memory-reference instruction, ACCESS_ONCE() prevents
+ "load tearing" and "store tearing," in which a single large access
+ is replaced by multiple smaller accesses. For example, given an
+ architecture having 16-bit store instructions with 7-bit immediate
+ fields, the compiler might be tempted to use two 16-bit
+ store-immediate instructions to implement the following 32-bit store:
+
+ p = 0x00010002;
+
+ Please note that GCC really does use this sort of optimization,
+ which is not surprising given that it would likely take more
+ than two instructions to build the constant and then store it.
+ This optimization can therefore be a win in single-threaded code.
+ In fact, a recent bug (since fixed) caused GCC to incorrectly use
+ this optimization in a volatile store. In the absence of such bugs,
+ use of ACCESS_ONCE() prevents store tearing in the following example:
+
+ ACCESS_ONCE(p) = 0x00010002;
+
+ Use of packed structures can also result in load and store tearing,
+ as in this example:
+
+ struct __attribute__((__packed__)) foo {
+ short a;
+ int b;
+ short c;
+ };
+ struct foo foo1, foo2;
+ ...
+
+ foo2.a = foo1.a;
+ foo2.b = foo1.b;
+ foo2.c = foo1.c;
+
+ Because there are no ACCESS_ONCE() wrappers and no volatile markings,
+ the compiler would be well within its rights to implement these three
+ assignment statements as a pair of 32-bit loads followed by a pair
+ of 32-bit stores. This would result in load tearing on 'foo1.b'
+ and store tearing on 'foo2.b'. ACCESS_ONCE() again prevents tearing
+ in this example:
+
+ foo2.a = foo1.a;
+ ACCESS_ONCE(foo2.b) = ACCESS_ONCE(foo1.b);
+ foo2.c = foo1.c;
+
+All that aside, it is never necessary to use ACCESS_ONCE() on a variable
+that has been marked volatile. For example, because 'jiffies' is marked
+volatile, it is never necessary to say ACCESS_ONCE(jiffies). The reason
+for this is that ACCESS_ONCE() is implemented as a volatile cast, which
+has no effect when its argument is already marked volatile.
+
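+For reference, ACCESS_ONCE() is defined in include/linux/compiler.h as
+(roughly) the following, which is why it composes harmlessly with a
+variable that is already volatile:
+
+        #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+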
+Please note that these compiler barriers have no direct effect on the CPU,
+which may then reorder things however it wishes.
CPU MEMORY BARRIERS
@@ -1135,7 +1590,7 @@ There are some more advanced barrier functions:
clear_bit( ... );
This prevents memory operations before the clear leaking to after it. See
- the subsection on "Locking Functions" with reference to UNLOCK operation
+ the subsection on "Locking Functions" with reference to RELEASE operation
implications.
See Documentation/atomic_ops.txt for more information. See the "Atomic
@@ -1169,8 +1624,8 @@ provide more substantial guarantees, but these may not be relied upon outside
of arch specific code.
-LOCKING FUNCTIONS
------------------
+ACQUIRING FUNCTIONS
+-------------------
The Linux kernel has a number of locking constructs:
@@ -1181,65 +1636,107 @@ The Linux kernel has a number of locking constructs:
(*) R/W semaphores
(*) RCU
-In all cases there are variants on "LOCK" operations and "UNLOCK" operations
+In all cases there are variants on "ACQUIRE" operations and "RELEASE" operations
for each construct. These operations all imply certain barriers:
- (1) LOCK operation implication:
+ (1) ACQUIRE operation implication:
- Memory operations issued after the LOCK will be completed after the LOCK
- operation has completed.
+ Memory operations issued after the ACQUIRE will be completed after the
+ ACQUIRE operation has completed.
- Memory operations issued before the LOCK may be completed after the LOCK
- operation has completed.
+ Memory operations issued before the ACQUIRE may be completed after the
+ ACQUIRE operation has completed. An smp_mb__before_spinlock(), combined
+ with a following ACQUIRE, orders prior loads against subsequent stores
+ and prior stores against subsequent stores. Note that this is
+ weaker than smp_mb()! The smp_mb__before_spinlock() primitive is free on
+ many architectures.
- (2) UNLOCK operation implication:
+ (2) RELEASE operation implication:
- Memory operations issued before the UNLOCK will be completed before the
- UNLOCK operation has completed.
+ Memory operations issued before the RELEASE will be completed before the
+ RELEASE operation has completed.
- Memory operations issued after the UNLOCK may be completed before the
- UNLOCK operation has completed.
+ Memory operations issued after the RELEASE may be completed before the
+ RELEASE operation has completed.
- (3) LOCK vs LOCK implication:
+ (3) ACQUIRE vs ACQUIRE implication:
- All LOCK operations issued before another LOCK operation will be completed
- before that LOCK operation.
+ All ACQUIRE operations issued before another ACQUIRE operation will be
+ completed before that ACQUIRE operation.
- (4) LOCK vs UNLOCK implication:
+ (4) ACQUIRE vs RELEASE implication:
- All LOCK operations issued before an UNLOCK operation will be completed
- before the UNLOCK operation.
+ All ACQUIRE operations issued before a RELEASE operation will be
+ completed before the RELEASE operation.
- All UNLOCK operations issued before a LOCK operation will be completed
- before the LOCK operation.
+ (5) Failed conditional ACQUIRE implication:
- (5) Failed conditional LOCK implication:
-
- Certain variants of the LOCK operation may fail, either due to being
- unable to get the lock immediately, or due to receiving an unblocked
+ Certain locking variants of the ACQUIRE operation may fail, either due to
+ being unable to get the lock immediately, or due to receiving an unblocked
signal whilst asleep waiting for the lock to become available. Failed
locks do not imply any sort of barrier.
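+
+ For example (a sketch only), spin_trylock() provides ACQUIRE semantics
+ only on the path where it actually obtains the lock:
+
+        if (spin_trylock(&lock)) {
+                /* The ACQUIRE orders the accesses in here as usual. */
+                do_something();
+                spin_unlock(&lock);
+        } else {
+                /* The failed attempt implies no barrier at all. */
+                do_something_else();
+        }
+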
-Therefore, from (1), (2) and (4) an UNLOCK followed by an unconditional LOCK is
-equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
-
-[!] Note: one of the consequences of LOCKs and UNLOCKs being only one-way
- barriers is that the effects of instructions outside of a critical section
- may seep into the inside of the critical section.
+[!] Note: one of the consequences of lock ACQUIREs and RELEASEs being only
+one-way barriers is that the effects of instructions outside of a critical
+section may seep into the inside of the critical section.
-A LOCK followed by an UNLOCK may not be assumed to be full memory barrier
-because it is possible for an access preceding the LOCK to happen after the
-LOCK, and an access following the UNLOCK to happen before the UNLOCK, and the
-two accesses can themselves then cross:
+An ACQUIRE followed by a RELEASE may not be assumed to be a full memory
+barrier because it is possible for an access preceding the ACQUIRE to
+happen after the ACQUIRE, and an access following the RELEASE to happen
+before the RELEASE, and the two accesses can themselves then cross:
*A = a;
- LOCK
- UNLOCK
+ ACQUIRE M
+ RELEASE M
*B = b;
may occur as:
- LOCK, STORE *B, STORE *A, UNLOCK
+ ACQUIRE M, STORE *B, STORE *A, RELEASE M
+
+This same reordering can of course occur if the lock's ACQUIRE and RELEASE are
+to the same lock variable, but only from the perspective of another CPU not
+holding that lock.
+
+In short, a RELEASE followed by an ACQUIRE may -not- be assumed to be a full
+memory barrier because it is possible for a preceding RELEASE to pass a
+later ACQUIRE from the viewpoint of the CPU, but not from the viewpoint
+of the compiler. Note that deadlocks cannot be introduced by this
+interchange because if such a deadlock threatened, the RELEASE would
+simply complete.
+
+If it is necessary for a RELEASE-ACQUIRE pair to produce a full barrier, the
+ACQUIRE can be followed by an smp_mb__after_unlock_lock() invocation. This
+will produce a full barrier if either (a) the RELEASE and the ACQUIRE are
+executed by the same CPU or task, or (b) the RELEASE and ACQUIRE act on the
+same variable. The smp_mb__after_unlock_lock() primitive is free on many
+architectures. Without smp_mb__after_unlock_lock(), the critical sections
+corresponding to the RELEASE and the ACQUIRE can cross:
+
+ *A = a;
+ RELEASE M
+ ACQUIRE N
+ *B = b;
+
+could occur as:
+
+ ACQUIRE N, STORE *B, STORE *A, RELEASE M
+
+With smp_mb__after_unlock_lock(), they cannot, so that:
+
+ *A = a;
+ RELEASE M
+ ACQUIRE N
+ smp_mb__after_unlock_lock();
+ *B = b;
+
+will always occur as either of the following:
+
+ STORE *A, RELEASE, ACQUIRE, STORE *B
+ STORE *A, ACQUIRE, RELEASE, STORE *B
+
+If the RELEASE and ACQUIRE were instead both operating on the same lock
+variable, only the first of these two alternatives can occur.
Locks and semaphores may not provide any guarantee of ordering on UP compiled
systems, and so cannot be counted on in such a situation to actually achieve
@@ -1253,33 +1750,33 @@ As an example, consider the following:
*A = a;
*B = b;
- LOCK
+ ACQUIRE
*C = c;
*D = d;
- UNLOCK
+ RELEASE
*E = e;
*F = f;
The following sequence of events is acceptable:
- LOCK, {*F,*A}, *E, {*C,*D}, *B, UNLOCK
+ ACQUIRE, {*F,*A}, *E, {*C,*D}, *B, RELEASE
[+] Note that {*F,*A} indicates a combined access.
But none of the following are:
- {*F,*A}, *B, LOCK, *C, *D, UNLOCK, *E
- *A, *B, *C, LOCK, *D, UNLOCK, *E, *F
- *A, *B, LOCK, *C, UNLOCK, *D, *E, *F
- *B, LOCK, *C, *D, UNLOCK, {*F,*A}, *E
+ {*F,*A}, *B, ACQUIRE, *C, *D, RELEASE, *E
+ *A, *B, *C, ACQUIRE, *D, RELEASE, *E, *F
+ *A, *B, ACQUIRE, *C, RELEASE, *D, *E, *F
+ *B, ACQUIRE, *C, *D, RELEASE, {*F,*A}, *E
INTERRUPT DISABLING FUNCTIONS
-----------------------------
-Functions that disable interrupts (LOCK equivalent) and enable interrupts
-(UNLOCK equivalent) will act as compiler barriers only. So if memory or I/O
+Functions that disable interrupts (ACQUIRE equivalent) and enable interrupts
+(RELEASE equivalent) will act as compiler barriers only. So if memory or I/O
barriers are required in such a situation, they must be provided from some
other means.
@@ -1418,75 +1915,81 @@ Other functions that imply barriers:
(*) schedule() and similar imply full memory barriers.
-=================================
-INTER-CPU LOCKING BARRIER EFFECTS
-=================================
+===================================
+INTER-CPU ACQUIRING BARRIER EFFECTS
+===================================
On SMP systems locking primitives give a more substantial form of barrier: one
that does affect memory access ordering on other CPUs, within the context of
conflict on any particular lock.
-LOCKS VS MEMORY ACCESSES
-------------------------
+ACQUIRES VS MEMORY ACCESSES
+---------------------------
Consider the following: the system has a pair of spinlocks (M) and (Q), and
three CPUs; then should the following sequence of events occur:
CPU 1 CPU 2
=============================== ===============================
- *A = a; *E = e;
- LOCK M LOCK Q
- *B = b; *F = f;
- *C = c; *G = g;
- UNLOCK M UNLOCK Q
- *D = d; *H = h;
+ ACCESS_ONCE(*A) = a; ACCESS_ONCE(*E) = e;
+ ACQUIRE M ACQUIRE Q
+ ACCESS_ONCE(*B) = b; ACCESS_ONCE(*F) = f;
+ ACCESS_ONCE(*C) = c; ACCESS_ONCE(*G) = g;
+ RELEASE M RELEASE Q
+ ACCESS_ONCE(*D) = d; ACCESS_ONCE(*H) = h;
Then there is no guarantee as to what order CPU 3 will see the accesses to *A
through *H occur in, other than the constraints imposed by the separate locks
on the separate CPUs. It might, for example, see:
- *E, LOCK M, LOCK Q, *G, *C, *F, *A, *B, UNLOCK Q, *D, *H, UNLOCK M
+ *E, ACQUIRE M, ACQUIRE Q, *G, *C, *F, *A, *B, RELEASE Q, *D, *H, RELEASE M
But it won't see any of:
- *B, *C or *D preceding LOCK M
- *A, *B or *C following UNLOCK M
- *F, *G or *H preceding LOCK Q
- *E, *F or *G following UNLOCK Q
+ *B, *C or *D preceding ACQUIRE M
+ *A, *B or *C following RELEASE M
+ *F, *G or *H preceding ACQUIRE Q
+ *E, *F or *G following RELEASE Q
However, if the following occurs:
CPU 1 CPU 2
=============================== ===============================
- *A = a;
- LOCK M [1]
- *B = b;
- *C = c;
- UNLOCK M [1]
- *D = d; *E = e;
- LOCK M [2]
- *F = f;
- *G = g;
- UNLOCK M [2]
- *H = h;
+ ACCESS_ONCE(*A) = a;
+ ACQUIRE M [1]
+ ACCESS_ONCE(*B) = b;
+ ACCESS_ONCE(*C) = c;
+ RELEASE M [1]
+ ACCESS_ONCE(*D) = d; ACCESS_ONCE(*E) = e;
+ ACQUIRE M [2]
+ smp_mb__after_unlock_lock();
+ ACCESS_ONCE(*F) = f;
+ ACCESS_ONCE(*G) = g;
+ RELEASE M [2]
+ ACCESS_ONCE(*H) = h;
CPU 3 might see:
- *E, LOCK M [1], *C, *B, *A, UNLOCK M [1],
- LOCK M [2], *H, *F, *G, UNLOCK M [2], *D
+ *E, ACQUIRE M [1], *C, *B, *A, RELEASE M [1],
+ ACQUIRE M [2], *H, *F, *G, RELEASE M [2], *D
But assuming CPU 1 gets the lock first, CPU 3 won't see any of:
- *B, *C, *D, *F, *G or *H preceding LOCK M [1]
- *A, *B or *C following UNLOCK M [1]
- *F, *G or *H preceding LOCK M [2]
- *A, *B, *C, *E, *F or *G following UNLOCK M [2]
+ *B, *C, *D, *F, *G or *H preceding ACQUIRE M [1]
+ *A, *B or *C following RELEASE M [1]
+ *F, *G or *H preceding ACQUIRE M [2]
+ *A, *B, *C, *E, *F or *G following RELEASE M [2]
+Note that the smp_mb__after_unlock_lock() is critically important here:
+without it, CPU 3 might well see some of the above orderings, as the
+accesses are not guaranteed to be seen in order unless CPU 3 holds lock M.
-LOCKS VS I/O ACCESSES
----------------------
+
+ACQUIRES VS I/O ACCESSES
+------------------------
Under certain circumstances (especially involving NUMA), I/O accesses within
two spinlocked sections on two different CPUs may be seen as interleaved by the
@@ -1687,28 +2190,30 @@ explicit lock operations, described later). These include:
xchg();
cmpxchg();
- atomic_xchg();
- atomic_cmpxchg();
- atomic_inc_return();
- atomic_dec_return();
- atomic_add_return();
- atomic_sub_return();
- atomic_inc_and_test();
- atomic_dec_and_test();
- atomic_sub_and_test();
- atomic_add_negative();
- atomic_add_unless(); /* when succeeds (returns 1) */
+ atomic_xchg(); atomic_long_xchg();
+ atomic_cmpxchg(); atomic_long_cmpxchg();
+ atomic_inc_return(); atomic_long_inc_return();
+ atomic_dec_return(); atomic_long_dec_return();
+ atomic_add_return(); atomic_long_add_return();
+ atomic_sub_return(); atomic_long_sub_return();
+ atomic_inc_and_test(); atomic_long_inc_and_test();
+ atomic_dec_and_test(); atomic_long_dec_and_test();
+ atomic_sub_and_test(); atomic_long_sub_and_test();
+ atomic_add_negative(); atomic_long_add_negative();
test_and_set_bit();
test_and_clear_bit();
test_and_change_bit();
-These are used for such things as implementing LOCK-class and UNLOCK-class
+ /* when it succeeds (returns 1) */
+ atomic_add_unless(); atomic_long_add_unless();
+
+These are used for such things as implementing ACQUIRE-class and RELEASE-class
operations and adjusting reference counters towards object destruction, and as
such the implicit memory barrier effects are necessary.
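+
+For example (illustration only, assuming an object 'obj' with an atomic_t
+'refcount' field), the implicit full barrier is part of what makes the
+usual reference-counting idiom safe:
+
+        if (atomic_dec_and_test(&obj->refcount))
+                kfree(obj);
+
+Without it, this CPU's earlier accesses to the object could be reordered
+past the decrement and collide with another CPU freeing the object.
+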
The following operations are potential problems as they do _not_ imply memory
-barriers, but might be used for implementing such things as UNLOCK-class
+barriers, but might be used for implementing such things as RELEASE-class
operations:
atomic_set();
@@ -1750,7 +2255,7 @@ The following operations are special locking primitives:
clear_bit_unlock();
__clear_bit_unlock();
-These implement LOCK-class and UNLOCK-class operations. These should be used in
+These implement ACQUIRE-class and RELEASE-class operations. These should be used in
preference to other operations when implementing locking primitives, because
their implementations can be optimised on many architectures.
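+
+For example (a sketch only, with 'flags' assumed to be a suitably aligned
+unsigned long), a simple bit lock might be built from them as follows,
+using bit 0 of 'flags' as the lock:
+
+        while (test_and_set_bit_lock(0, &flags))        /* ACQUIRE on success */
+                cpu_relax();
+        /* ... critical section ... */
+        clear_bit_unlock(0, &flags);                    /* RELEASE */
+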
@@ -1887,8 +2392,8 @@ functions:
space should suffice for PCI.
[*] NOTE! attempting to load from the same location as was written to may
- cause a malfunction - consider the 16550 Rx/Tx serial registers for
- example.
+ cause a malfunction - consider the 16550 Rx/Tx serial registers for
+ example.
Used with prefetchable I/O memory, an mmiowb() barrier may be required to
force stores to be ordered.
@@ -1955,19 +2460,19 @@ barriers for the most part act at the interface between the CPU and its cache
:
+--------+ +--------+ : +--------+ +-----------+
| | | | : | | | | +--------+
- | CPU | | Memory | : | CPU | | | | |
- | Core |--->| Access |----->| Cache |<-->| | | |
+ | CPU | | Memory | : | CPU | | | | |
+ | Core |--->| Access |----->| Cache |<-->| | | |
| | | Queue | : | | | |--->| Memory |
- | | | | : | | | | | |
- +--------+ +--------+ : +--------+ | | | |
+ | | | | : | | | | | |
+ +--------+ +--------+ : +--------+ | | | |
: | Cache | +--------+
: | Coherency |
: | Mechanism | +--------+
+--------+ +--------+ : +--------+ | | | |
| | | | : | | | | | |
| CPU | | Memory | : | CPU | | |--->| Device |
- | Core |--->| Access |----->| Cache |<-->| | | |
- | | | Queue | : | | | | | |
+ | Core |--->| Access |----->| Cache |<-->| | | |
+ | | | Queue | : | | | | | |
| | | | : | | | | +--------+
+--------+ +--------+ : +--------+ +-----------+
:
@@ -2090,7 +2595,7 @@ CPU's caches by some other cache event:
p = &v; q = p;
<D:request p>
<B:modify p=&v> <D:commit p=&v>
- <D:read p>
+ <D:read p>
x = *q;
<C:read *q> Reads from v before v updated in cache
<C:unbusy>
@@ -2115,7 +2620,7 @@ queue before processing any further requests:
p = &v; q = p;
<D:request p>
<B:modify p=&v> <D:commit p=&v>
- <D:read p>
+ <D:read p>
smp_read_barrier_depends()
<C:unbusy>
<C:commit v=2>
@@ -2177,11 +2682,11 @@ A programmer might take it for granted that the CPU will perform memory
operations in exactly the order specified, so that if the CPU is, for example,
given the following piece of code to execute:
- a = *A;
- *B = b;
- c = *C;
- d = *D;
- *E = e;
+ a = ACCESS_ONCE(*A);
+ ACCESS_ONCE(*B) = b;
+ c = ACCESS_ONCE(*C);
+ d = ACCESS_ONCE(*D);
+ ACCESS_ONCE(*E) = e;
they would then expect that the CPU will complete the memory operation for each
instruction before moving on to the next one, leading to a definite sequence of
@@ -2228,12 +2733,12 @@ However, it is guaranteed that a CPU will be self-consistent: it will see its
_own_ accesses appear to be correctly ordered, without the need for a memory
barrier. For instance with the following code:
- U = *A;
- *A = V;
- *A = W;
- X = *A;
- *A = Y;
- Z = *A;
+ U = ACCESS_ONCE(*A);
+ ACCESS_ONCE(*A) = V;
+ ACCESS_ONCE(*A) = W;
+ X = ACCESS_ONCE(*A);
+ ACCESS_ONCE(*A) = Y;
+ Z = ACCESS_ONCE(*A);
and assuming no intervention by an external influence, it can be assumed that
the final result will appear to be:
@@ -2250,7 +2755,12 @@ accesses:
in that order, but, without intervention, the sequence may have almost any
combination of elements combined or discarded, provided the program's view of
-the world remains consistent.
+the world remains consistent. Note that ACCESS_ONCE() is -not- optional
+in the above example, as there are architectures where a given CPU might
+interchange successive loads to the same location. On such architectures,
+ACCESS_ONCE() does whatever is necessary to prevent this, for example, on
+Itanium the volatile casts used by ACCESS_ONCE() cause GCC to emit the
+special ld.acq and st.rel instructions that prevent such reordering.
The compiler may also combine, discard or defer elements of the sequence before
the CPU even sees them.
@@ -2264,13 +2774,13 @@ may be reduced to:
*A = W;
-since, without a write barrier, it can be assumed that the effect of the
-storage of V to *A is lost. Similarly:
+since, without either a write barrier or an ACCESS_ONCE(), it can be
+assumed that the effect of the storage of V to *A is lost. Similarly:
*A = Y;
Z = *A;
-may, without a memory barrier, be reduced to:
+may, without a memory barrier or an ACCESS_ONCE(), be reduced to:
*A = Y;
Z = Y;
diff --git a/Documentation/misc-devices/mei/mei-amt-version.c b/Documentation/misc-devices/mei/mei-amt-version.c
index 49e4f77..57d0d87 100644
--- a/Documentation/misc-devices/mei/mei-amt-version.c
+++ b/Documentation/misc-devices/mei/mei-amt-version.c
@@ -115,8 +115,6 @@ static bool mei_init(struct mei *me, const uuid_le *guid,
struct mei_client *cl;
struct mei_connect_client_data data;
- mei_deinit(me);
-
me->verbose = verbose;
me->fd = open("/dev/mei", O_RDWR);
diff --git a/Documentation/robust-futex-ABI.txt b/Documentation/robust-futex-ABI.txt
index fd1cd8a..16eb314 100644
--- a/Documentation/robust-futex-ABI.txt
+++ b/Documentation/robust-futex-ABI.txt
@@ -146,8 +146,8 @@ On removal:
1) set the 'list_op_pending' word to the address of the 'lock entry'
to be removed,
2) remove the lock entry for this lock from the 'head' list,
- 2) release the futex lock, and
- 2) clear the 'lock_op_pending' word.
+ 3) release the futex lock, and
+ 4) clear the 'lock_op_pending' word.
On exit, the kernel will consider the address stored in
'list_op_pending' and the address of each 'lock word' found by walking
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 26b7ee4..6d48640 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -428,11 +428,6 @@ rate for each task.
numa_balancing_scan_size_mb is how many megabytes worth of pages are
scanned for a given scan.
-numa_balancing_settle_count is how many scan periods must complete before
-the schedule balancer stops pushing the task towards a preferred node. This
-gives the scheduler a chance to place the task on an alternative node if the
-preferred node is overloaded.
-
numa_balancing_migrate_deferred is how many page migrations get skipped
unconditionally, after a page migration is skipped because a page is shared
with other tasks. This reduces page migration overhead, and determines
diff --git a/Documentation/vme_api.txt b/Documentation/vme_api.txt
index 856efa3..ffe6e22 100644
--- a/Documentation/vme_api.txt
+++ b/Documentation/vme_api.txt
@@ -393,4 +393,14 @@ Slot Detection
This function returns the slot ID of the provided bridge.
- int vme_slot_get(struct vme_dev *dev);
+ int vme_slot_num(struct vme_dev *dev);
+
+
+Bus Detection
+=============
+
+This function returns the bus ID of the provided bridge.
+
+ int vme_bus_num(struct vme_dev *dev);
+
+
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index f4f268c..cb81741d 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -608,6 +608,9 @@ Protocol: 2.12+
- If 1, the kernel supports the 64-bit EFI handoff entry point
given at handover_offset + 0x200.
+ Bit 4 (read): XLF_EFI_KEXEC
+ - If 1, the kernel supports kexec EFI boot with EFI runtime support.
+
Field name: cmdline_size
Type: read
Offset/size: 0x238/4
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 881582f..c584a51 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -28,4 +28,11 @@ reference.
Current X86-64 implementations only support 40 bits of address space,
but we support up to 46 bits. This expands into MBZ space in the page tables.
+->trampoline_pgd:
+
+We map EFI runtime services in the aforementioned PGD in the virtual
+range of 64GB (arbitrarily set, can be raised if needed):
+
+0xffffffef00000000 - 0xffffffff00000000
+
-Andi Kleen, Jul 2004
diff --git a/Documentation/zh_CN/HOWTO b/Documentation/zh_CN/HOWTO
index 7fba5aa..6c914aa 100644
--- a/Documentation/zh_CN/HOWTO
+++ b/Documentation/zh_CN/HOWTO
@@ -112,7 +112,7 @@ Linux内核代码中包含有大量的文档。这些文档对于学习如何与
其他关于如何正确地生成补丁的优秀文档包括:
"The Perfect Patch"
- http://userweb.kernel.org/~akpm/stuff/tpp.txt
+ http://www.ozlabs.org/~akpm/stuff/tpp.txt
"Linux kernel patch submission format"
http://linux.yyz.us/patch-format.html
@@ -515,7 +515,7 @@ Linux内核社区并不喜欢一下接收大段的代码。修改需要被恰当
想了解它具体应该看起来像什么，请查阅以下文档中的“ChangeLog”章节:
“The Perfect Patch”
- http://userweb.kernel.org/~akpm/stuff/tpp.txt
+ http://www.ozlabs.org/~akpm/stuff/tpp.txt
这些事情有时候做起来很难。要在任何方面都做到完美可能需要好几年时间。这是
diff --git a/Documentation/zorro.txt b/Documentation/zorro.txt
index d5829d1..90a64d5 100644
--- a/Documentation/zorro.txt
+++ b/Documentation/zorro.txt
@@ -95,8 +95,9 @@ The treatment of these regions depends on the type of Zorro space:
-------------
linux/include/linux/zorro.h
-linux/include/asm-{m68k,ppc}/zorro.h
-linux/include/linux/zorro_ids.h
+linux/include/uapi/linux/zorro.h
+linux/include/uapi/linux/zorro_ids.h
+linux/arch/m68k/include/asm/zorro.h
linux/drivers/zorro
/proc/bus/zorro
diff --git a/MAINTAINERS b/MAINTAINERS
index d5e4ff3..88ae0ec 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -783,7 +783,7 @@ F: arch/arm/boot/dts/sama*.dts
F: arch/arm/boot/dts/sama*.dtsi
ARM/CALXEDA HIGHBANK ARCHITECTURE
-M: Rob Herring <rob.herring@calxeda.com>
+M: Rob Herring <robh@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-highbank/
@@ -1368,6 +1368,9 @@ T: git git://git.xilinx.com/linux-xlnx.git
S: Supported
F: arch/arm/mach-zynq/
F: drivers/cpuidle/cpuidle-zynq.c
+N: zynq
+N: xilinx
+F: drivers/clocksource/cadence_ttc_timer.c
ARM SMMU DRIVER
M: Will Deacon <will.deacon@arm.com>
@@ -2616,7 +2619,7 @@ S: Maintained
F: drivers/platform/x86/dell-laptop.c
DELL LAPTOP SMM DRIVER
-S: Orphan
+M: Guenter Roeck <linux@roeck-us.net>
F: drivers/char/i8k.c
F: include/uapi/linux/i8k.h
@@ -2635,7 +2638,7 @@ DESIGNWARE USB2 DRD IP DRIVER
M: Paul Zimmerman <paulz@synopsys.com>
L: linux-usb@vger.kernel.org
S: Maintained
-F: drivers/staging/dwc2/
+F: drivers/usb/dwc2/
DESIGNWARE USB3 DRD IP DRIVER
M: Felipe Balbi <balbi@ti.com>
@@ -2825,8 +2828,10 @@ F: include/uapi/drm/
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Daniel Vetter <daniel.vetter@ffwll.ch>
+M: Jani Nikula <jani.nikula@linux.intel.com>
L: intel-gfx@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
+Q: http://patchwork.freedesktop.org/project/intel-gfx/
T: git git://people.freedesktop.org/~danvet/drm-intel
S: Supported
F: drivers/gpu/drm/i915/
@@ -3330,6 +3335,7 @@ EXTERNAL CONNECTOR SUBSYSTEM (EXTCON)
M: MyungJoo Ham <myungjoo.ham@samsung.com>
M: Chanwoo Choi <cw00.choi@samsung.com>
L: linux-kernel@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/extcon.git
S: Maintained
F: drivers/extcon/
F: Documentation/extcon/
@@ -5136,6 +5142,11 @@ F: drivers/lguest/
F: include/linux/lguest*.h
F: tools/lguest/
+LIBLOCKDEP
+M: Sasha Levin <sasha.levin@oracle.com>
+S: Maintained
+F: tools/lib/lockdep/
+
LINUX FOR IBM pSERIES (RS/6000)
M: Paul Mackerras <paulus@au.ibm.com>
W: http://www.ibm.com/linux/ltc/projects/ppc
@@ -6256,7 +6267,7 @@ F: drivers/i2c/busses/i2c-ocores.c
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Grant Likely <grant.likely@linaro.org>
-M: Rob Herring <rob.herring@calxeda.com>
+M: Rob Herring <robh+dt@kernel.org>
L: devicetree@vger.kernel.org
W: http://fdt.secretlab.ca
T: git git://git.secretlab.ca/git/linux-2.6.git
@@ -6268,7 +6279,7 @@ K: of_get_property
K: of_match_table
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
-M: Rob Herring <rob.herring@calxeda.com>
+M: Rob Herring <robh+dt@kernel.org>
M: Pawel Moll <pawel.moll@arm.com>
M: Mark Rutland <mark.rutland@arm.com>
M: Ian Campbell <ijc+devicetree@hellion.org.uk>
@@ -7094,6 +7105,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: Documentation/RCU/torture.txt
F: kernel/rcu/torture.c
+RCUTORTURE TEST FRAMEWORK
+M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+F: tools/testing/selftests/rcutorture
+
RDC R-321X SoC
M: Florian Fainelli <florian@openwrt.org>
S: Maintained
@@ -9226,6 +9243,7 @@ F: include/media/videobuf2-*
VIRTIO CONSOLE DRIVER
M: Amit Shah <amit.shah@redhat.com>
+L: virtio-dev@lists.oasis-open.org
L: virtualization@lists.linux-foundation.org
S: Maintained
F: drivers/char/virtio_console.c
@@ -9235,6 +9253,7 @@ F: include/uapi/linux/virtio_console.h
VIRTIO CORE, NET AND BLOCK DRIVERS
M: Rusty Russell <rusty@rustcorp.com.au>
M: "Michael S. Tsirkin" <mst@redhat.com>
+L: virtio-dev@lists.oasis-open.org
L: virtualization@lists.linux-foundation.org
S: Maintained
F: drivers/virtio/
@@ -9247,6 +9266,7 @@ F: include/uapi/linux/virtio_*.h
VIRTIO HOST (VHOST)
M: "Michael S. Tsirkin" <mst@redhat.com>
L: kvm@vger.kernel.org
+L: virtio-dev@lists.oasis-open.org
L: virtualization@lists.linux-foundation.org
L: netdev@vger.kernel.org
S: Maintained
diff --git a/Makefile b/Makefile
index 14d592c..455fd48 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION =
NAME = One Giant Leap for Frogkind
# *DOCUMENTATION*
@@ -595,10 +595,24 @@ ifneq ($(CONFIG_FRAME_WARN),0)
KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
endif
-# Force gcc to behave correct even for buggy distributions
-ifndef CONFIG_CC_STACKPROTECTOR
-KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
+# Handle stack protector mode.
+ifdef CONFIG_CC_STACKPROTECTOR_REGULAR
+ stackp-flag := -fstack-protector
+ ifeq ($(call cc-option, $(stackp-flag)),)
+ $(warning Cannot use CONFIG_CC_STACKPROTECTOR_REGULAR: \
+ -fstack-protector not supported by compiler)
+ endif
+else ifdef CONFIG_CC_STACKPROTECTOR_STRONG
+ stackp-flag := -fstack-protector-strong
+ ifeq ($(call cc-option, $(stackp-flag)),)
+ $(warning Cannot use CONFIG_CC_STACKPROTECTOR_STRONG: \
+ -fstack-protector-strong not supported by compiler)
+ endif
+else
+ # Force off for distro compilers that enable stack protector by default.
+ stackp-flag := $(call cc-option, -fno-stack-protector)
endif
+KBUILD_CFLAGS += $(stackp-flag)
# This warning generated too much noise in a regular build.
# Use make W=1 to enable this warning (see scripts/Makefile.build)
diff --git a/arch/Kconfig b/arch/Kconfig
index f1cf895..80bbb8c 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -336,6 +336,73 @@ config SECCOMP_FILTER
See Documentation/prctl/seccomp_filter.txt for details.
+config HAVE_CC_STACKPROTECTOR
+ bool
+ help
+ An arch should select this symbol if:
+ - its compiler supports the -fstack-protector option
+ - it has implemented a stack canary (e.g. __stack_chk_guard)
+
+config CC_STACKPROTECTOR
+ def_bool n
+ help
+ Set when a stack-protector mode is enabled, so that the build
+ can enable kernel-side support for the GCC feature.
+
+choice
+ prompt "Stack Protector buffer overflow detection"
+ depends on HAVE_CC_STACKPROTECTOR
+ default CC_STACKPROTECTOR_NONE
+ help
+ This option turns on the "stack-protector" GCC feature. This
+ feature puts, at the beginning of functions, a canary value on
+ the stack just before the return address, and validates
+ the value just before actually returning. Stack based buffer
+ overflows (that need to overwrite this return address) now also
+ overwrite the canary, which gets detected and the attack is then
+ neutralized via a kernel panic.
+
+config CC_STACKPROTECTOR_NONE
+ bool "None"
+ help
+ Disable "stack-protector" GCC feature.
+
+config CC_STACKPROTECTOR_REGULAR
+ bool "Regular"
+ select CC_STACKPROTECTOR
+ help
+ Functions will have the stack-protector canary logic added if they
+ have an 8-byte or larger character array on the stack.
+
+ This feature requires gcc version 4.2 or above, or a distribution
+ gcc with the feature backported ("-fstack-protector").
+
+ On an x86 "defconfig" build, this feature adds canary checks to
+ about 3% of all kernel functions, which increases kernel code size
+ by about 0.3%.
+
+config CC_STACKPROTECTOR_STRONG
+ bool "Strong"
+ select CC_STACKPROTECTOR
+ help
+ Functions will have the stack-protector canary logic added in any
+ of the following conditions:
+
+ - local variable's address used as part of the right hand side of an
+ assignment or function argument
+ - local variable is an array (or union containing an array),
+ regardless of array type or length
+ - uses register local variables
+
+ This feature requires gcc version 4.9 or above, or a distribution
+ gcc with the feature backported ("-fstack-protector-strong").
+
+ On an x86 "defconfig" build, this feature adds canary checks to
+ about 20% of all kernel functions, which increases the kernel code
+ size by about 2%.
+
+endchoice
+
config HAVE_CONTEXT_TRACKING
bool
help
diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h
index ce8860a..3832bdb 100644
--- a/arch/alpha/include/asm/barrier.h
+++ b/arch/alpha/include/asm/barrier.h
@@ -3,33 +3,18 @@
#include <asm/compiler.h>
-#define mb() \
-__asm__ __volatile__("mb": : :"memory")
+#define mb() __asm__ __volatile__("mb": : :"memory")
+#define rmb() __asm__ __volatile__("mb": : :"memory")
+#define wmb() __asm__ __volatile__("wmb": : :"memory")
-#define rmb() \
-__asm__ __volatile__("mb": : :"memory")
-
-#define wmb() \
-__asm__ __volatile__("wmb": : :"memory")
-
-#define read_barrier_depends() \
-__asm__ __volatile__("mb": : :"memory")
+#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
#ifdef CONFIG_SMP
#define __ASM_SMP_MB "\tmb\n"
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
#else
#define __ASM_SMP_MB
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
#endif
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
+#include <asm-generic/barrier.h>
#endif /* __BARRIER_H */
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 5943f7f..9ae21c1 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -1,4 +1,5 @@
generic-y += auxvec.h
+generic-y += barrier.h
generic-y += bugs.h
generic-y += bitsperlong.h
generic-y += clkdev.h
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 83f03ca..03e494f 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -190,6 +190,11 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
#endif /* !CONFIG_ARC_HAS_LLSC */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
/**
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
index f6cb7c4..c32245c 100644
--- a/arch/arc/include/asm/barrier.h
+++ b/arch/arc/include/asm/barrier.h
@@ -30,11 +30,6 @@
#define smp_wmb() barrier()
#endif
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
#define smp_read_barrier_depends() do { } while (0)
#endif
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c1f1a7e..9c909fc 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -30,6 +30,7 @@ config ARM
select HAVE_BPF_JIT
select HAVE_CONTEXT_TRACKING
select HAVE_C_RECORDMCOUNT
+ select HAVE_CC_STACKPROTECTOR
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_ATTRS
@@ -1856,18 +1857,6 @@ config SECCOMP
and the task is only allowed to execute a few safe syscalls
defined by each seccomp mode.
-config CC_STACKPROTECTOR
- bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
- help
- This option turns on the -fstack-protector GCC feature. This
- feature puts, at the beginning of functions, a canary value on
- the stack just before the return address, and validates
- the value just before actually returning. Stack based buffer
- overflows (that need to overwrite this return address) now also
- overwrite the canary, which gets detected and the attack is then
- neutralized via a kernel panic.
- This feature requires gcc version 4.2 or above.
-
config SWIOTLB
def_bool y
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c99b108..55b4255 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -40,10 +40,6 @@ ifeq ($(CONFIG_FRAME_POINTER),y)
KBUILD_CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
endif
-ifeq ($(CONFIG_CC_STACKPROTECTOR),y)
-KBUILD_CFLAGS +=-fstack-protector
-endif
-
ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
KBUILD_CPPFLAGS += -mbig-endian
AS += -EB
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index 31bd43b..d4f891f 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -127,6 +127,18 @@ asmlinkage void __div0(void)
error("Attempting division by 0!");
}
+unsigned long __stack_chk_guard;
+
+void __stack_chk_guard_setup(void)
+{
+ __stack_chk_guard = 0x000a0dff;
+}
+
+void __stack_chk_fail(void)
+{
+ error("stack-protector: Kernel stack is corrupted\n");
+}
+
extern int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x));
@@ -137,6 +149,8 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
{
int ret;
+ __stack_chk_guard_setup();
+
output_data = (unsigned char *)output_start;
free_mem_ptr = free_mem_ptr_p;
free_mem_end_ptr = free_mem_ptr_end_p;
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 9db5047..177becd 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -559,7 +559,7 @@
compatible = "arm,pl330", "arm,primecell";
reg = <0x10800000 0x1000>;
interrupts = <0 33 0>;
- clocks = <&clock 271>;
+ clocks = <&clock 346>;
clock-names = "apb_pclk";
#dma-cells = <1>;
#dma-channels = <8>;
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 46e1d7e..9987dd0e 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -241,7 +241,7 @@
sdhi0: sdhi@ee100000 {
compatible = "renesas,sdhi-r8a7790";
- reg = <0 0xee100000 0 0x100>;
+ reg = <0 0xee100000 0 0x200>;
interrupt-parent = <&gic>;
interrupts = <0 165 4>;
cap-sd-highspeed;
@@ -250,7 +250,7 @@
sdhi1: sdhi@ee120000 {
compatible = "renesas,sdhi-r8a7790";
- reg = <0 0xee120000 0 0x100>;
+ reg = <0 0xee120000 0 0x200>;
interrupt-parent = <&gic>;
interrupts = <0 166 4>;
cap-sd-highspeed;
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 5247674..e674c94 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -332,5 +332,12 @@
clock-frequency = <100000>;
status = "disabled";
};
+
+ timer@01c60000 {
+ compatible = "allwinner,sun5i-a13-hstimer";
+ reg = <0x01c60000 0x1000>;
+ interrupts = <82>, <83>;
+ clocks = <&ahb_gates 28>;
+ };
};
};
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index ce8ef2a..1ccd75d 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -273,5 +273,12 @@
clock-frequency = <100000>;
status = "disabled";
};
+
+ timer@01c60000 {
+ compatible = "allwinner,sun5i-a13-hstimer";
+ reg = <0x01c60000 0x1000>;
+ interrupts = <82>, <83>;
+ clocks = <&ahb_gates 28>;
+ };
};
};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 367611a..0135039 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -395,6 +395,16 @@
status = "disabled";
};
+ hstimer@01c60000 {
+ compatible = "allwinner,sun7i-a20-hstimer";
+ reg = <0x01c60000 0x1000>;
+ interrupts = <0 81 1>,
+ <0 82 1>,
+ <0 83 1>,
+ <0 84 1>;
+ clocks = <&ahb_gates 28>;
+ };
+
gic: interrupt-controller@01c81000 {
compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
reg = <0x01c81000 0x1000>,
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
index 64205d4..71e5fc7 100644
--- a/arch/arm/crypto/aesbs-core.S_shipped
+++ b/arch/arm/crypto/aesbs-core.S_shipped
@@ -58,7 +58,7 @@
# define VFP_ABI_FRAME 0
# define BSAES_ASM_EXTENDED_KEY
# define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_ARCH__ 7
#endif
#ifdef __thumb__
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
index f3d96d9..be068db 100644
--- a/arch/arm/crypto/bsaes-armv7.pl
+++ b/arch/arm/crypto/bsaes-armv7.pl
@@ -701,7 +701,7 @@ $code.=<<___;
# define VFP_ABI_FRAME 0
# define BSAES_ASM_EXTENDED_KEY
# define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_ARCH__ 7
#endif
#ifdef __thumb__
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 60f15e2..2f59f74 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -59,6 +59,21 @@
#define smp_wmb() dmb(ishst)
#endif
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)
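
The new smp_store_release()/smp_load_acquire() pair is aimed at the classic publish/consume pattern: the writer stores its data and then publishes a flag with release semantics; the reader observes the flag with acquire semantics and is then guaranteed to see the data. A minimal userspace analogue of that pattern using C11 atomics rather than the kernel macros (the payload/ready variables and the file name are illustrative only, not taken from the patch):

    /* Userspace sketch of the release/acquire message-passing pattern
     * that smp_store_release()/smp_load_acquire() are meant for.
     * Build with: cc -std=c11 -pthread acqrel.c
     */
    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>

    static int payload;            /* plain data, published by the writer   */
    static _Atomic int ready;      /* flag: release store, acquire load     */

    static void *producer(void *arg)
    {
        payload = 42;                                   /* 1: write data    */
        atomic_store_explicit(&ready, 1,
                              memory_order_release);    /* 2: publish flag  */
        return NULL;
    }

    static void *consumer(void *arg)
    {
        while (!atomic_load_explicit(&ready, memory_order_acquire))
            ;                                           /* spin until seen  */
        printf("payload = %d\n", payload);              /* guaranteed 42    */
        return NULL;
    }

    int main(void)
    {
        pthread_t p, c;
        pthread_create(&p, NULL, producer, NULL);
        pthread_create(&c, NULL, consumer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
    }
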
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 3c597c2..fbeb39c 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -329,7 +329,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
*/
#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
+#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC)
#define iounmap __arm_iounmap
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 6976b03..8756e4b 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -347,7 +347,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
+ && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
#endif
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 141baa3..acabef1 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -15,7 +15,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls (380)
+#define __NR_syscalls (384)
#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
#define __ARCH_WANT_STAT64
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 75579a9..3759cac 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -117,6 +117,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
return __set_phys_to_machine(pfn, mfn);
}
-#define xen_remap(cookie, size) ioremap_cached((cookie), (size));
+#define xen_remap(cookie, size) ioremap_cache((cookie), (size));
#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index af33b44..fb5584d 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -406,6 +406,8 @@
#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
#define __NR_kcmp (__NR_SYSCALL_BASE+378)
#define __NR_finit_module (__NR_SYSCALL_BASE+379)
+#define __NR_sched_setattr (__NR_SYSCALL_BASE+380)
+#define __NR_sched_getattr (__NR_SYSCALL_BASE+381)
/*
* This may need to be greater than __NR_last_syscall+1 in order to
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index c6ca7e3..166e945 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -389,6 +389,8 @@
CALL(sys_process_vm_writev)
CALL(sys_kcmp)
CALL(sys_finit_module)
+/* 380 */ CALL(sys_sched_setattr)
+ CALL(sys_sched_getattr)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 739c3df..34d5fd5 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -171,7 +171,7 @@ void __init arm_dt_init_cpu_maps(void)
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
- return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
+ return phys_id == cpu_logical_map(cpu);
}
static const void * __init arch_get_next_mach(const char *const **match)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index bc3f2ef..789d846 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -99,10 +99,6 @@ int armpmu_event_set_period(struct perf_event *event)
s64 period = hwc->sample_period;
int ret = 0;
- /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
- if (unlikely(period != hwc->last_period))
- left = period - (hwc->last_period - left);
-
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index d85055c..20d553c 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -254,7 +254,7 @@ static int probe_current_pmu(struct arm_pmu *pmu)
static int cpu_pmu_device_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
- int (*init_fn)(struct arm_pmu *);
+ const int (*init_fn)(struct arm_pmu *);
struct device_node *node = pdev->dev.of_node;
struct arm_pmu *pmu;
int ret = -ENODEV;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 7940241..4636d56 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -36,7 +36,13 @@
#include <asm/system_misc.h>
#include <asm/opcodes.h>
-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+static const char *handler[]= {
+ "prefetch abort",
+ "data abort",
+ "address exception",
+ "interrupt",
+ "undefined instruction",
+};
void *vectors_page;
@@ -425,9 +431,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
instr2 = __mem_to_opcode_thumb16(instr2);
instr = __opcode_thumb32_compose(instr, instr2);
}
- } else if (get_user(instr, (u32 __user *)pc)) {
+ } else {
+ if (get_user(instr, (u32 __user *)pc))
+ goto die_sig;
instr = __mem_to_opcode_arm(instr);
- goto die_sig;
}
if (call_undef_hook(regs, instr) == 0)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 2a700e0..b18165c 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -17,6 +17,7 @@
*/
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
@@ -853,6 +854,33 @@ static struct notifier_block hyp_init_cpu_nb = {
.notifier_call = hyp_init_cpu_notify,
};
+#ifdef CONFIG_CPU_PM
+static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
+ unsigned long cmd,
+ void *v)
+{
+ if (cmd == CPU_PM_EXIT) {
+ cpu_init_hyp_mode(NULL);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block hyp_init_cpu_pm_nb = {
+ .notifier_call = hyp_init_cpu_pm_notifier,
+};
+
+static void __init hyp_cpu_pm_init(void)
+{
+ cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
+}
+#else
+static inline void hyp_cpu_pm_init(void)
+{
+}
+#endif
+
/**
* Inits Hyp-mode on all online CPUs
*/
@@ -1013,6 +1041,8 @@ int kvm_arch_init(void *opaque)
goto out_err;
}
+ hyp_cpu_pm_init();
+
kvm_coproc_table_init();
return 0;
out_err:
diff --git a/arch/arm/mach-clps711x/devices.c b/arch/arm/mach-clps711x/devices.c
index fb77d14..2001488 100644
--- a/arch/arm/mach-clps711x/devices.c
+++ b/arch/arm/mach-clps711x/devices.c
@@ -61,8 +61,29 @@ static void __init clps711x_add_syscon(void)
&clps711x_syscon_res[i], 1);
}
+static const struct resource clps711x_uart1_res[] __initconst = {
+ DEFINE_RES_MEM(CLPS711X_PHYS_BASE + UARTDR1, SZ_128),
+ DEFINE_RES_IRQ(IRQ_UTXINT1),
+ DEFINE_RES_IRQ(IRQ_URXINT1),
+};
+
+static const struct resource clps711x_uart2_res[] __initconst = {
+ DEFINE_RES_MEM(CLPS711X_PHYS_BASE + UARTDR2, SZ_128),
+ DEFINE_RES_IRQ(IRQ_UTXINT2),
+ DEFINE_RES_IRQ(IRQ_URXINT2),
+};
+
+static void __init clps711x_add_uart(void)
+{
+ platform_device_register_simple("clps711x-uart", 0, clps711x_uart1_res,
+ ARRAY_SIZE(clps711x_uart1_res));
+ platform_device_register_simple("clps711x-uart", 1, clps711x_uart2_res,
+ ARRAY_SIZE(clps711x_uart2_res));
+};
+
void __init clps711x_devices_init(void)
{
clps711x_add_gpio();
clps711x_add_syscon();
+ clps711x_add_uart();
}
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index 9ee78f7..782f6c7 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = {
void __init footbridge_timer_init(void)
{
struct clock_event_device *ce = &ckevt_dc21285;
+ unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);
- clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
+ clocksource_register_hz(&cksrc_dc21285, rate);
setup_irq(ce->irq, &footbridge_timer_irq);
ce->cpumask = cpumask_of(smp_processor_id());
- clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff);
+ clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
}
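
The DIV_ROUND_CLOSEST() form computes exactly the same round-to-nearest value as the old open-coded "(mem_fclk_21285 + 8) / 16", since the macro adds half the divisor (here 8) before dividing. A tiny standalone check, with a hypothetical clock rate and a simplified macro matching the kernel's behaviour for positive operands:

    #include <stdio.h>

    /* Same rounding as the kernel's DIV_ROUND_CLOSEST() for positive values. */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        unsigned long mem_fclk_21285 = 50000000;        /* hypothetical Hz */

        printf("%lu %lu\n",
               (mem_fclk_21285 + 8) / 16,               /* old open-coded form */
               DIV_ROUND_CLOSEST(mem_fclk_21285, 16));  /* new form, same value */
        return 0;
    }
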
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index bd3bf66..c7de89b 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -53,6 +53,7 @@ static void __init highbank_scu_map_io(void)
static void highbank_l2x0_disable(void)
{
+ outer_flush_all();
/* Disable PL310 L2 Cache controller */
highbank_smc1(0x102, 0x0);
}
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 4ec8d82..44a59c3 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -242,12 +242,18 @@ static void __init ldp_display_init(void)
static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
{
+ int res;
+
/* LCD enable GPIO */
ldp_lcd_pdata.enable_gpio = gpio + 7;
/* Backlight enable GPIO */
ldp_lcd_pdata.backlight_gpio = gpio + 15;
+ res = platform_device_register(&ldp_lcd_device);
+ if (res)
+ pr_err("Unable to register LCD: %d\n", res);
+
return 0;
}
@@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
static struct platform_device *ldp_devices[] __initdata = {
&ldp_gpio_keys_device,
- &ldp_lcd_device,
};
#ifdef CONFIG_OMAP_MUX
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index b39efd4..c0ab9b2 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -162,6 +162,7 @@ void __iomem *omap4_get_l2cache_base(void)
static void omap4_l2x0_disable(void)
{
+ outer_flush_all();
/* Disable PL310 L2 Cache controller */
omap_smc1(0x102, 0x0);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 56cebb0..d23c77f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
/* gpmc */
static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
- { .irq = 20 },
+ { .irq = 20 + OMAP_INTC_START, },
{ .irq = -1 }
};
@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
};
static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
- { .irq = 52 },
+ { .irq = 52 + OMAP_INTC_START, },
{ .irq = -1 }
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index d337429..4c3b1e6 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -2165,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
};
static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
- { .irq = 20 },
+ { .irq = 20 + OMAP_INTC_START, },
{ .irq = -1 }
};
@@ -2999,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
- { .irq = 24 },
+ { .irq = 24 + OMAP_INTC_START, },
{ .irq = -1 }
};
@@ -3041,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
- { .irq = 28 },
+ { .irq = 28 + OMAP_INTC_START, },
{ .irq = -1 }
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index db32d53..18f333c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = {
.class = &dra7xx_uart_hwmod_class,
.clkdm_name = "l4per_clkdm",
.main_clk = "uart1_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS,
.prcm = {
.omap4 = {
.clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h
index 2a086e8..958cd6af 100644
--- a/arch/arm/mach-pxa/include/mach/lubbock.h
+++ b/arch/arm/mach-pxa/include/mach/lubbock.h
@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
+#include <mach/irqs.h>
+
#define LUBBOCK_ETH_PHYS PXA_CS3_PHYS
#define LUBBOCK_FPGA_PHYS PXA_CS2_PHYS
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 958e3cb..8ea87bd 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -483,7 +483,7 @@ static struct platform_device lcdc0_device = {
.id = 0,
.dev = {
.platform_data = &lcdc0_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -580,7 +580,7 @@ static struct platform_device hdmi_lcdc_device = {
.id = 1,
.dev = {
.platform_data = &hdmi_lcdc_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
REGULATOR_SUPPLY("vqmmc", "sh_mmcif"),
};
+/* Fixed 5.0V regulator used by LCD backlight */
+static struct regulator_consumer_supply fixed5v0_power_consumers[] = {
+ REGULATOR_SUPPLY("power", "pwm-backlight.0"),
+};
+
/* Fixed 3.3V regulator to be used by SDHI0 */
static struct regulator_consumer_supply vcc_sdhi0_consumers[] = {
REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
@@ -1196,6 +1201,8 @@ static void __init eva_init(void)
regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+ regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers,
+ ARRAY_SIZE(fixed5v0_power_consumers), 5000000);
pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map));
pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup));
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index 38611526..3c4995a 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -679,7 +679,7 @@ static void __init bockw_init(void)
.id = i,
.data = &rsnd_card_info[i],
.size_data = sizeof(struct asoc_simple_card_info),
- .dma_mask = ~0,
+ .dma_mask = DMA_BIT_MASK(32),
};
platform_device_register_full(&cardinfo);
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index fe689b7..bc40b85 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -334,7 +334,7 @@ static struct platform_device lcdc_device = {
.resource = lcdc_resources,
.dev = {
.platform_data = &lcdc_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index af06753..e721d2c 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -409,7 +409,7 @@ static struct platform_device lcdc_device = {
.resource = lcdc_resources,
.dev = {
.platform_data = &lcdc_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -499,7 +499,7 @@ static struct platform_device hdmi_lcdc_device = {
.id = 1,
.dev = {
.platform_data = &hdmi_lcdc_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index c9e72c8..bce0d42 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -12,3 +12,4 @@ config ARCH_SUNXI
select PINCTRL_SUNXI
select SPARSE_IRQ
select SUN4I_TIMER
+ select SUN5I_HSTIMER
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 6d5ba9a..3387e60 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -175,16 +175,16 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
unsigned long i;
if (cache_is_vipt_nonaliasing()) {
for (i = 0; i < (1 << compound_order(page)); i++) {
- void *addr = kmap_atomic(page);
+ void *addr = kmap_atomic(page + i);
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
kunmap_atomic(addr);
}
} else {
for (i = 0; i < (1 << compound_order(page)); i++) {
- void *addr = kmap_high_get(page);
+ void *addr = kmap_high_get(page + i);
if (addr) {
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
- kunmap_high(page);
+ kunmap_high(page + i);
}
}
}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 1f7b19a..3e8f106e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
#ifdef CONFIG_ZONE_DMA
if (mdesc->dma_zone_size) {
arm_dma_zone_size = mdesc->dma_zone_size;
- arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
+ arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
} else
arm_dma_limit = 0xffffffff;
arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 9ed155a..271b5e9 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -641,10 +641,10 @@ load_ind:
emit(ARM_MUL(r_A, r_A, r_X), ctx);
break;
case BPF_S_ALU_DIV_K:
- /* current k == reciprocal_value(userspace k) */
+ if (k == 1)
+ break;
emit_mov_i(r_scratch, k, ctx);
- /* A = top 32 bits of the product */
- emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
+ emit_udiv(r_A, r_A, r_scratch, ctx);
break;
case BPF_S_ALU_DIV_X:
update_on_xread(ctx);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6d4dd22..dd4327f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2,6 +2,7 @@ config ARM64
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS
@@ -11,19 +12,27 @@ config ARM64
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
+ select CPU_PM if (SUSPEND || CPU_IDLE)
+ select DCACHE_WORD_ACCESS
select GENERIC_CLOCKEVENTS
+ select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
select HARDIRQS_SW_RESEND
+ select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_TRACEHOOK
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_GENERIC_DMA_COHERENT
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_MEMBLOCK
@@ -275,6 +284,24 @@ config SYSVIPC_COMPAT
endmenu
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+config ARCH_SUSPEND_POSSIBLE
+ def_bool y
+
+config ARM64_CPU_SUSPEND
+ def_bool PM_SLEEP
+
+endmenu
+
+menu "CPU Power Management"
+
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
source "net/Kconfig"
source "drivers/Kconfig"
diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
index 519c4b2..4a06090 100644
--- a/arch/arm64/boot/dts/foundation-v8.dts
+++ b/arch/arm64/boot/dts/foundation-v8.dts
@@ -224,7 +224,7 @@
virtio_block@0130000 {
compatible = "virtio,mmio";
- reg = <0x130000 0x1000>;
+ reg = <0x130000 0x200>;
interrupts = <42>;
};
};
diff --git a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
index b45e5f3..2f2ecd2 100644
--- a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
@@ -183,6 +183,12 @@
clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>;
clock-names = "clcdclk", "apb_pclk";
};
+
+ virtio_block@0130000 {
+ compatible = "virtio,mmio";
+ reg = <0x130000 0x200>;
+ interrupts = <42>;
+ };
};
v2m_fixed_3v3: fixedregulator@0 {
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 519f89f..d0ff25d 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -26,7 +26,6 @@ generic-y += mman.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += pci.h
-generic-y += percpu.h
generic-y += poll.h
generic-y += posix_types.h
generic-y += resource.h
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index d4a6333..78e20ba 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -35,10 +35,60 @@
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
#else
+
#define smp_mb() asm volatile("dmb ish" : : : "memory")
#define smp_rmb() asm volatile("dmb ishld" : : : "memory")
#define smp_wmb() asm volatile("dmb ishst" : : : "memory")
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 4: \
+ asm volatile ("stlr %w1, %0" \
+ : "=Q" (*p) : "r" (v) : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("stlr %1, %0" \
+ : "=Q" (*p) : "r" (v) : "memory"); \
+ break; \
+ } \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1; \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 4: \
+ asm volatile ("ldar %w0, %1" \
+ : "=r" (___p1) : "Q" (*p) : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("ldar %0, %1" \
+ : "=r" (___p1) : "Q" (*p) : "memory"); \
+ break; \
+ } \
+ ___p1; \
+})
+
#endif
#define read_barrier_depends() do { } while(0)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 3914c0d..56166d7 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -158,17 +158,23 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
return ret;
}
-#define cmpxchg(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
-
-#define cmpxchg_local(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
+ sizeof(*(ptr))); \
+ __ret; \
+})
+
+#define cmpxchg_local(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))); \
+ __ret; \
+})
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
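
The rewrite wraps each macro body in a GCC statement expression that returns a __typeof__(*(ptr)) temporary rather than a bare cast expression; one practical effect of this idiom is that callers can ignore the result without tripping "value computed is not used" warnings. A minimal userspace illustration of the same construction (the __atomic builtin is only a stand-in for __cmpxchg_mb(), and the statement-expression/__typeof__ extensions are GCC/Clang specific):

    #include <stdio.h>

    /* Type-preserving compare-and-swap wrapper built the same way as the
     * reworked cmpxchg(): a statement expression with a typed temporary. */
    #define my_cmpxchg(ptr, o, n)                                        \
    ({                                                                   \
        __typeof__(*(ptr)) __old = (o);                                  \
        __atomic_compare_exchange_n((ptr), &__old, (n), 0,               \
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
        __old;  /* value observed before the exchange attempt */         \
    })

    int main(void)
    {
        long v = 5;

        long seen = my_cmpxchg(&v, 5, 9);   /* succeeds: seen == 5, v == 9 */
        printf("seen=%ld v=%ld\n", seen, v);

        seen = my_cmpxchg(&v, 5, 1);        /* fails: seen == 9, v stays 9 */
        printf("seen=%ld v=%ld\n", seen, v);
        return 0;
    }
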
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index c4cdb5e..15241307 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -39,6 +39,9 @@ struct device_node;
* from the cpu to be killed.
* @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
* cpu being killed.
+ * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
+ * to wrong parameters or error conditions. Called from the
+ * CPU being suspended. Must be called with IRQs disabled.
*/
struct cpu_operations {
const char *name;
@@ -50,6 +53,9 @@ struct cpu_operations {
int (*cpu_disable)(unsigned int cpu);
void (*cpu_die)(unsigned int cpu);
#endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+ int (*cpu_suspend)(unsigned long);
+#endif
};
extern const struct cpu_operations *cpu_ops[NR_CPUS];
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 5fe138e..c404fb0 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -16,23 +16,23 @@
#ifndef __ASM_CPUTYPE_H
#define __ASM_CPUTYPE_H
-#define ID_MIDR_EL1 "midr_el1"
-#define ID_MPIDR_EL1 "mpidr_el1"
-#define ID_CTR_EL0 "ctr_el0"
-
-#define ID_AA64PFR0_EL1 "id_aa64pfr0_el1"
-#define ID_AA64DFR0_EL1 "id_aa64dfr0_el1"
-#define ID_AA64AFR0_EL1 "id_aa64afr0_el1"
-#define ID_AA64ISAR0_EL1 "id_aa64isar0_el1"
-#define ID_AA64MMFR0_EL1 "id_aa64mmfr0_el1"
-
#define INVALID_HWID ULONG_MAX
#define MPIDR_HWID_BITMASK 0xff00ffffff
+#define MPIDR_LEVEL_BITS_SHIFT 3
+#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT)
+#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+
+#define MPIDR_LEVEL_SHIFT(level) \
+ (((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+ ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
+
#define read_cpuid(reg) ({ \
u64 __val; \
- asm("mrs %0, " reg : "=r" (__val)); \
+ asm("mrs %0, " #reg : "=r" (__val)); \
__val; \
})
@@ -54,12 +54,12 @@
*/
static inline u32 __attribute_const__ read_cpuid_id(void)
{
- return read_cpuid(ID_MIDR_EL1);
+ return read_cpuid(MIDR_EL1);
}
static inline u64 __attribute_const__ read_cpuid_mpidr(void)
{
- return read_cpuid(ID_MPIDR_EL1);
+ return read_cpuid(MPIDR_EL1);
}
static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
@@ -74,7 +74,7 @@ static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
static inline u32 __attribute_const__ read_cpuid_cachetype(void)
{
- return read_cpuid(ID_CTR_EL0);
+ return read_cpuid(CTR_EL0);
}
#endif /* __ASSEMBLY__ */
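
For reference, with MPIDR_LEVEL_BITS_SHIFT = 3 the macros above place affinity level 0 in bits [7:0], level 1 in [15:8], level 2 in [23:16] and level 3 in [39:32]; MPIDR_LEVEL_SHIFT() evaluates to 0, 8, 16 and 32 respectively. A standalone check with a made-up MPIDR value:

    #include <stdio.h>
    #include <stdint.h>

    /* Copied from the macros added above. */
    #define MPIDR_LEVEL_BITS_SHIFT  3
    #define MPIDR_LEVEL_BITS        (1 << MPIDR_LEVEL_BITS_SHIFT)
    #define MPIDR_LEVEL_MASK        ((1 << MPIDR_LEVEL_BITS) - 1)
    #define MPIDR_LEVEL_SHIFT(level) \
        (((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
    #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
        ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)

    int main(void)
    {
        /* Hypothetical MPIDR: Aff3=1, Aff2=0, Aff1=2, Aff0=1. */
        uint64_t mpidr = 0x0000000100000201ULL;
        int level;

        for (level = 0; level < 4; level++)
            printf("Aff%d = %llu (shift %d)\n", level,
                   (unsigned long long)MPIDR_AFFINITY_LEVEL(mpidr, level),
                   MPIDR_LEVEL_SHIFT(level));
        return 0;
    }
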
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index a2232d0..6231479 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -62,6 +62,27 @@ struct task_struct;
#define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */
+#define DBG_HOOK_HANDLED 0
+#define DBG_HOOK_ERROR 1
+
+struct step_hook {
+ struct list_head node;
+ int (*fn)(struct pt_regs *regs, unsigned int esr);
+};
+
+void register_step_hook(struct step_hook *hook);
+void unregister_step_hook(struct step_hook *hook);
+
+struct break_hook {
+ struct list_head node;
+ u32 esr_val;
+ u32 esr_mask;
+ int (*fn)(struct pt_regs *regs, unsigned int esr);
+};
+
+void register_break_hook(struct break_hook *hook);
+void unregister_break_hook(struct break_hook *hook);
+
u8 debug_monitors_arch(void);
void enable_debug_monitors(enum debug_el el);
diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h
new file mode 100644
index 0000000..d6aacb6
--- /dev/null
+++ b/arch/arm64/include/asm/dma-contiguous.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_DMA_CONTIGUOUS_H
+#define _ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_DMA_CMA
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
+#endif
+
+#endif
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index c582fa3..78cc3ab 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -30,6 +30,7 @@
" cbnz %w3, 1b\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
+" .align 2\n" \
"4: mov %w0, %w5\n" \
" b 3b\n" \
" .popsection\n" \
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 990c051..ae4801d 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 4
+#define NR_IPI 5
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
new file mode 100644
index 0000000..c44ad39
--- /dev/null
+++ b/arch/arm64/include/asm/insn.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_INSN_H
+#define __ASM_INSN_H
+#include <linux/types.h>
+
+/* A64 instructions are always 32 bits. */
+#define AARCH64_INSN_SIZE 4
+
+/*
+ * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
+ * Section C3.1 "A64 instruction index by encoding":
+ * AArch64 main encoding table
+ * Bit position
+ * 28 27 26 25 Encoding Group
+ * 0 0 - - Unallocated
+ * 1 0 0 - Data processing, immediate
+ * 1 0 1 - Branch, exception generation and system instructions
+ * - 1 - 0 Loads and stores
+ * - 1 0 1 Data processing - register
+ * 0 1 1 1 Data processing - SIMD and floating point
+ * 1 1 1 1 Data processing - SIMD and floating point
+ * "-" means "don't care"
+ */
+enum aarch64_insn_encoding_class {
+ AARCH64_INSN_CLS_UNKNOWN, /* UNALLOCATED */
+ AARCH64_INSN_CLS_DP_IMM, /* Data processing - immediate */
+ AARCH64_INSN_CLS_DP_REG, /* Data processing - register */
+ AARCH64_INSN_CLS_DP_FPSIMD, /* Data processing - SIMD and FP */
+ AARCH64_INSN_CLS_LDST, /* Loads and stores */
+ AARCH64_INSN_CLS_BR_SYS, /* Branch, exception generation and
+ * system instructions */
+};
+
+enum aarch64_insn_hint_op {
+ AARCH64_INSN_HINT_NOP = 0x0 << 5,
+ AARCH64_INSN_HINT_YIELD = 0x1 << 5,
+ AARCH64_INSN_HINT_WFE = 0x2 << 5,
+ AARCH64_INSN_HINT_WFI = 0x3 << 5,
+ AARCH64_INSN_HINT_SEV = 0x4 << 5,
+ AARCH64_INSN_HINT_SEVL = 0x5 << 5,
+};
+
+enum aarch64_insn_imm_type {
+ AARCH64_INSN_IMM_ADR,
+ AARCH64_INSN_IMM_26,
+ AARCH64_INSN_IMM_19,
+ AARCH64_INSN_IMM_16,
+ AARCH64_INSN_IMM_14,
+ AARCH64_INSN_IMM_12,
+ AARCH64_INSN_IMM_9,
+ AARCH64_INSN_IMM_MAX
+};
+
+enum aarch64_insn_branch_type {
+ AARCH64_INSN_BRANCH_NOLINK,
+ AARCH64_INSN_BRANCH_LINK,
+};
+
+#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
+static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
+{ return (code & (mask)) == (val); } \
+static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
+{ return (val); }
+
+__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000)
+__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000)
+__AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001)
+__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002)
+__AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003)
+__AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
+__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
+
+#undef __AARCH64_INSN_FUNCS
+
+bool aarch64_insn_is_nop(u32 insn);
+
+int aarch64_insn_read(void *addr, u32 *insnp);
+int aarch64_insn_write(void *addr, u32 insn);
+enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
+u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
+ u32 insn, u64 imm);
+u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
+u32 aarch64_insn_gen_nop(void);
+
+bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
+
+int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
+int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
+int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
+#endif /* __ASM_INSN_H */
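
Each __AARCH64_INSN_FUNCS() line above expands into a predicate plus a value accessor, e.g. aarch64_insn_is_b() and aarch64_insn_get_b_value(). A standalone sketch of that expansion, checked against two hand-picked encodings ("b ." encodes as 0x14000000 and "nop" is the hint 0xd503201f):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Same generator as in the header: an is_<insn>() predicate plus the
     * canonical opcode value for each (mask, value) pair. */
    #define __AARCH64_INSN_FUNCS(abbr, mask, val)                       \
    static inline bool aarch64_insn_is_##abbr(uint32_t code)            \
    { return (code & (mask)) == (val); }                                \
    static inline uint32_t aarch64_insn_get_##abbr##_value(void)        \
    { return (val); }

    __AARCH64_INSN_FUNCS(b,    0xFC000000, 0x14000000)
    __AARCH64_INSN_FUNCS(bl,   0xFC000000, 0x94000000)
    __AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)

    int main(void)
    {
        uint32_t branch_self = 0x14000000;   /* "b ."           */
        uint32_t nop         = 0xD503201F;   /* "nop" (hint #0) */

        printf("is_b(b .)    = %d\n", aarch64_insn_is_b(branch_self));   /* 1 */
        printf("is_bl(b .)   = %d\n", aarch64_insn_is_bl(branch_self));  /* 0 */
        printf("is_hint(nop) = %d\n", aarch64_insn_is_hint(nop));        /* 1 */
        return 0;
    }
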
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 5727697..4cc813e 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -229,7 +229,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
extern void __iounmap(volatile void __iomem *addr);
extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
-#define PROT_DEFAULT (pgprot_default | PTE_DIRTY)
+#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
new file mode 100644
index 0000000..076a1c7
--- /dev/null
+++ b/arch/arm64/include/asm/jump_label.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * Based on arch/arm/include/asm/jump_label.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_JUMP_LABEL_H
+#define __ASM_JUMP_LABEL_H
+#include <linux/types.h>
+#include <asm/insn.h>
+
+#ifdef __KERNEL__
+
+#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
+
+static __always_inline bool arch_static_branch(struct static_key *key)
+{
+ asm goto("1: nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".align 3\n\t"
+ ".quad 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i"(key) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+#endif /* __KERNEL__ */
+
+typedef u64 jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* __ASM_JUMP_LABEL_H */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 3776217..9dc5dc3 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -146,8 +146,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
- ((void *)(kaddr) < (void *)high_memory))
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#endif
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
new file mode 100644
index 0000000..13fb0b3
--- /dev/null
+++ b/arch/arm64/include/asm/percpu.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_PERCPU_H
+#define __ASM_PERCPU_H
+
+static inline void set_my_cpu_offset(unsigned long off)
+{
+ asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
+}
+
+static inline unsigned long __my_cpu_offset(void)
+{
+ unsigned long off;
+ register unsigned long *sp asm ("sp");
+
+ /*
+ * We want to allow caching the value, so avoid using volatile and
+ * instead use a fake stack read to hazard against barrier().
+ */
+ asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp));
+
+ return off;
+}
+#define __my_cpu_offset __my_cpu_offset()
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ASM_PERCPU_H */
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 7cdf466..0c657bb 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -26,11 +26,14 @@
#include <asm/page.h>
struct mm_struct;
+struct cpu_suspend_ctx;
extern void cpu_cache_off(void);
extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
+extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
#include <asm/memory.h>
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index ed43a0d..59e2823 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -21,6 +21,19 @@
#include <asm/types.h>
+struct mpidr_hash {
+ u64 mask;
+ u32 shift_aff[4];
+ u32 bits;
+};
+
+extern struct mpidr_hash mpidr_hash;
+
+static inline u32 mpidr_hash_size(void)
+{
+ return 1 << mpidr_hash.bits;
+}
+
/*
* Logical CPU mapping.
*/
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
new file mode 100644
index 0000000..e9c149c
--- /dev/null
+++ b/arch/arm64/include/asm/suspend.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_SUSPEND_H
+#define __ASM_SUSPEND_H
+
+#define NR_CTX_REGS 11
+
+/*
+ * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
+ * the stack, which must be 16-byte aligned on v8
+ */
+struct cpu_suspend_ctx {
+ /*
+ * This struct must be kept in sync with
+ * cpu_do_{suspend/resume} in mm/proc.S
+ */
+ u64 ctx_regs[NR_CTX_REGS];
+ u64 sp;
+} __aligned(16);
+
+struct sleep_save_sp {
+ phys_addr_t *save_ptr_stash;
+ phys_addr_t save_ptr_stash_phys;
+};
+
+extern void cpu_resume(void);
+extern int cpu_suspend(unsigned long);
+
+#endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 7ecc2b2..6c0f684 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -100,6 +100,7 @@ static inline void set_fs(mm_segment_t fs)
})
#define access_ok(type, addr, size) __range_ok(addr, size)
+#define user_addr_max get_fs
/*
* The "__xxx" versions of the user access functions do not verify the address
@@ -240,9 +241,6 @@ extern unsigned long __must_check __copy_to_user(void __user *to, const void *fr
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
-extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
-
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
@@ -276,24 +274,9 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return n;
}
-static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
-{
- long res = -EFAULT;
- if (access_ok(VERIFY_READ, src, 1))
- res = __strncpy_from_user(dst, src, count);
- return res;
-}
-
-#define strlen_user(s) strnlen_user(s, ~0UL >> 1)
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
-static inline long __must_check strnlen_user(const char __user *s, long n)
-{
- unsigned long res = 0;
-
- if (__addr_ok(s))
- res = __strnlen_user(s, n);
-
- return res;
-}
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASM_UACCESS_H */
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
new file mode 100644
index 0000000..aab5bf0
--- /dev/null
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_WORD_AT_A_TIME_H
+#define __ASM_WORD_AT_A_TIME_H
+
+#ifndef __AARCH64EB__
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
+ const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+#define prep_zero_mask(a, bits, c) (bits)
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return fls64(mask) >> 3;
+}
+
+#define zero_bytemask(mask) (mask)
+
+#else /* __AARCH64EB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long ret, offset;
+
+ /* Load word from unaligned pointer addr */
+ asm(
+ "1: ldr %0, %3\n"
+ "2:\n"
+ " .pushsection .fixup,\"ax\"\n"
+ " .align 2\n"
+ "3: and %1, %2, #0x7\n"
+ " bic %2, %2, #0x7\n"
+ " ldr %0, [%2]\n"
+ " lsl %1, %1, #0x3\n"
+#ifndef __AARCH64EB__
+ " lsr %0, %0, %1\n"
+#else
+ " lsl %0, %0, %1\n"
+#endif
+ " b 2b\n"
+ " .popsection\n"
+ " .pushsection __ex_table,\"a\"\n"
+ " .align 3\n"
+ " .quad 1b, 3b\n"
+ " .popsection"
+ : "=&r" (ret), "=&r" (offset)
+ : "r" (addr), "Q" (*(unsigned long *)addr));
+
+ return ret;
+}
+
+#endif /* __ASM_WORD_AT_A_TIME_H */
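
On little-endian, has_zero() marks the first zero byte by setting bit 7 at that byte position, create_zero_mask() converts the marker into a mask covering the bytes before it, and find_zero() turns that back into a byte index. A standalone walk-through of the three steps on the eight bytes "abc\0xyzw"; the constants are folded in directly and fls64() is emulated with __builtin_clzll(), so this only illustrates the arithmetic, it is not the kernel code:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define REPEAT_BYTE(x)  ((~0ULL / 0xff) * (x))     /* 0x0101... * x */

    static uint64_t has_zero(uint64_t a, uint64_t *bits)
    {
        /* Bit 7 of the first zero byte gets set in the returned mask. */
        uint64_t mask = ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
        *bits = mask;
        return mask;
    }

    static uint64_t create_zero_mask(uint64_t bits)
    {
        /* Turn the 0x80 marker into a mask covering all earlier bytes. */
        bits = (bits - 1) & ~bits;
        return bits >> 7;
    }

    static uint64_t find_zero(uint64_t mask)
    {
        /* fls64(mask) >> 3, with fls64() open-coded via __builtin_clzll(). */
        return mask ? (uint64_t)(64 - __builtin_clzll(mask)) >> 3 : 0;
    }

    int main(void)
    {
        uint64_t word, bits;

        memcpy(&word, "abc\0" "xyzw", sizeof(word));   /* NUL is byte 3 */

        if (has_zero(word, &bits))
            printf("bytes before the NUL: %llu\n",     /* prints 3 */
                   (unsigned long long)find_zero(create_zero_mask(bits)));
        return 0;
    }
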
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 9b12476..73cf0f5 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -22,6 +22,10 @@
#define HWCAP_FP (1 << 0)
#define HWCAP_ASIMD (1 << 1)
#define HWCAP_EVTSTRM (1 << 2)
-
+#define HWCAP_AES (1 << 3)
+#define HWCAP_PMULL (1 << 4)
+#define HWCAP_SHA1 (1 << 5)
+#define HWCAP_SHA2 (1 << 6)
+#define HWCAP_CRC32 (1 << 7)
#endif /* _UAPI__ASM_HWCAP_H */
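
Userspace sees these bits through the ELF auxiliary vector. A minimal probe, assuming a libc that provides getauxval(3); the bit values are copied from the header above since they are part of the hwcap ABI, but which bits are actually reported depends on the CPU and kernel:

    #include <stdio.h>
    #include <sys/auxv.h>

    #define HWCAP_AES    (1 << 3)
    #define HWCAP_PMULL  (1 << 4)
    #define HWCAP_SHA1   (1 << 5)
    #define HWCAP_SHA2   (1 << 6)
    #define HWCAP_CRC32  (1 << 7)

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);

        printf("aes:   %s\n", (hwcap & HWCAP_AES)   ? "yes" : "no");
        printf("pmull: %s\n", (hwcap & HWCAP_PMULL) ? "yes" : "no");
        printf("sha1:  %s\n", (hwcap & HWCAP_SHA1)  ? "yes" : "no");
        printf("sha2:  %s\n", (hwcap & HWCAP_SHA2)  ? "yes" : "no");
        printf("crc32: %s\n", (hwcap & HWCAP_CRC32) ? "yes" : "no");
        return 0;
    }
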
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 5ba2fd4..2d4554b 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -9,7 +9,7 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
- hyp-stub.o psci.o cpu_ops.o
+ hyp-stub.o psci.o cpu_ops.o insn.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o
@@ -18,6 +18,8 @@ arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
+arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index e7ee770..338b568 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -29,13 +29,10 @@
#include <asm/checksum.h>
- /* user mem (segment) */
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
+ /* user mem (segment) */
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 666e231..646f888 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -25,6 +25,8 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/cputable.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
#include <asm/vdso_datapage.h>
#include <linux/kbuild.h>
@@ -138,5 +140,14 @@ int main(void)
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
#endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+ DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
+ DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
+ DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask));
+ DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff));
+ DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp));
+ DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
+ DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
+#endif
return 0;
}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 4ae6857..636ba8b 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -187,6 +187,48 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
regs->pstate = spsr;
}
+/* EL1 Single Step Handler hooks */
+static LIST_HEAD(step_hook);
+DEFINE_RWLOCK(step_hook_lock);
+
+void register_step_hook(struct step_hook *hook)
+{
+ write_lock(&step_hook_lock);
+ list_add(&hook->node, &step_hook);
+ write_unlock(&step_hook_lock);
+}
+
+void unregister_step_hook(struct step_hook *hook)
+{
+ write_lock(&step_hook_lock);
+ list_del(&hook->node);
+ write_unlock(&step_hook_lock);
+}
+
+/*
+ * Call registered single step handlers.
+ * There is no syndrome info to check for determining the handler,
+ * so we call all the registered handlers until the right one is
+ * found, i.e. the one that returns DBG_HOOK_HANDLED (zero).
+ */
+static int call_step_hook(struct pt_regs *regs, unsigned int esr)
+{
+ struct step_hook *hook;
+ int retval = DBG_HOOK_ERROR;
+
+ read_lock(&step_hook_lock);
+
+ list_for_each_entry(hook, &step_hook, node) {
+ retval = hook->fn(regs, esr);
+ if (retval == DBG_HOOK_HANDLED)
+ break;
+ }
+
+ read_unlock(&step_hook_lock);
+
+ return retval;
+}
+
static int single_step_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -214,7 +256,9 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
*/
user_rewind_single_step(current);
} else {
- /* TODO: route to KGDB */
+ if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
+
pr_warning("Unexpected kernel single-step exception at EL1\n");
/*
* Re-enable stepping since we know that we will be
@@ -226,11 +270,53 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
return 0;
}
+/*
+ * The breakpoint handler is re-entrant, since another breakpoint can
+ * be hit from within the handler itself, especially with kprobes.
+ * Use reader/writer locks instead of a plain spinlock.
+ */
+static LIST_HEAD(break_hook);
+DEFINE_RWLOCK(break_hook_lock);
+
+void register_break_hook(struct break_hook *hook)
+{
+ write_lock(&break_hook_lock);
+ list_add(&hook->node, &break_hook);
+ write_unlock(&break_hook_lock);
+}
+
+void unregister_break_hook(struct break_hook *hook)
+{
+ write_lock(&break_hook_lock);
+ list_del(&hook->node);
+ write_unlock(&break_hook_lock);
+}
+
+static int call_break_hook(struct pt_regs *regs, unsigned int esr)
+{
+ struct break_hook *hook;
+ int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
+
+ read_lock(&break_hook_lock);
+ list_for_each_entry(hook, &break_hook, node)
+ if ((esr & hook->esr_mask) == hook->esr_val)
+ fn = hook->fn;
+ read_unlock(&break_hook_lock);
+
+ return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
+}
+
static int brk_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
siginfo_t info;
+ if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
+
+ pr_warn("unexpected brk exception at %lx, esr=0x%x\n",
+ (long)instruction_pointer(regs), esr);
+
if (!user_mode(regs))
return -EFAULT;
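
A client of the new hooks fills in a break_hook whose esr_mask/esr_val select the BRK encoding it owns and whose handler returns DBG_HOOK_HANDLED to consume the exception. A hedged kernel-style sketch of such a user (the ESR value, the handler body and the pc adjustment are illustrative only, not taken from this patch):

    /* Kernel-style sketch only: a hypothetical user of the break_hook API. */
    #include <linux/kernel.h>
    #include <asm/debug-monitors.h>
    #include <asm/ptrace.h>

    /* Illustrative ESR match for one BRK immediate owned by this client. */
    #define MY_BRK_ESR_VAL   0xf2000123      /* hypothetical value */
    #define MY_BRK_ESR_MASK  0xffffffff

    static int my_brk_handler(struct pt_regs *regs, unsigned int esr)
    {
        pr_info("brk hook hit at %lx, esr=0x%x\n",
                (long)instruction_pointer(regs), esr);
        regs->pc += 4;                  /* step over the BRK instruction */
        return DBG_HOOK_HANDLED;        /* exception consumed */
    }

    static struct break_hook my_break_hook = {
        .esr_val  = MY_BRK_ESR_VAL,
        .esr_mask = MY_BRK_ESR_MASK,
        .fn       = my_brk_handler,
    };

    /* From the client's init/exit paths:
     *     register_break_hook(&my_break_hook);
     *     unregister_break_hook(&my_break_hook);
     */
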
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 4d2c6f3..39ac630 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -288,6 +288,8 @@ el1_dbg:
/*
* Debug exception handling
*/
+ cmp x24, #ESR_EL1_EC_BRK64 // if BRK64
+ cinc x24, x24, eq // set bit '0'
tbz x24, #0, el1_inv // EL1 only
mrs x0, far_el1
mov x2, sp // struct pt_regs
@@ -314,7 +316,7 @@ el1_irq:
#ifdef CONFIG_PREEMPT
get_thread_info tsk
- ldr w24, [tsk, #TI_PREEMPT] // restore preempt count
+ ldr w24, [tsk, #TI_PREEMPT] // get preempt count
cbnz w24, 1f // preempt count != 0
ldr x0, [tsk, #TI_FLAGS] // get flags
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index bb785d2..4aef42a 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
@@ -113,6 +114,39 @@ EXPORT_SYMBOL(kernel_neon_end);
#endif /* CONFIG_KERNEL_MODE_NEON */
+#ifdef CONFIG_CPU_PM
+static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ if (current->mm)
+ fpsimd_save_state(&current->thread.fpsimd_state);
+ break;
+ case CPU_PM_EXIT:
+ if (current->mm)
+ fpsimd_load_state(&current->thread.fpsimd_state);
+ break;
+ case CPU_PM_ENTER_FAILED:
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fpsimd_cpu_pm_notifier_block = {
+ .notifier_call = fpsimd_cpu_pm_notifier,
+};
+
+static void fpsimd_pm_init(void)
+{
+ cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
+}
+
+#else
+static inline void fpsimd_pm_init(void) { }
+#endif /* CONFIG_CPU_PM */
+
/*
* FP/SIMD support code initialisation.
*/
@@ -131,6 +165,8 @@ static int __init fpsimd_init(void)
else
elf_hwcap |= HWCAP_ASIMD;
+ fpsimd_pm_init();
+
return 0;
}
late_initcall(fpsimd_init);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c68cca5..0b281ff 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -482,8 +482,6 @@ ENDPROC(__create_page_tables)
.type __switch_data, %object
__switch_data:
.quad __mmap_switched
- .quad __data_loc // x4
- .quad _data // x5
.quad __bss_start // x6
.quad _end // x7
.quad processor_id // x4
@@ -498,15 +496,7 @@ __switch_data:
__mmap_switched:
adr x3, __switch_data + 8
- ldp x4, x5, [x3], #16
ldp x6, x7, [x3], #16
- cmp x4, x5 // Copy data segment if needed
-1: ccmp x5, x6, #4, ne
- b.eq 2f
- ldr x16, [x4], #8
- str x16, [x5], #8
- b 1b
-2:
1: cmp x6, x7
b.hs 2f
str xzr, [x6], #8 // Clear BSS
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index ff516f6..f17f581 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -20,6 +20,7 @@
#define pr_fmt(fmt) "hw-breakpoint: " fmt
+#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
@@ -169,15 +170,68 @@ static enum debug_el debug_exception_level(int privilege)
}
}
-/*
- * Install a perf counter breakpoint.
+enum hw_breakpoint_ops {
+ HW_BREAKPOINT_INSTALL,
+ HW_BREAKPOINT_UNINSTALL,
+ HW_BREAKPOINT_RESTORE
+};
+
+/**
+ * hw_breakpoint_slot_setup - Find and set up a perf event slot according to
+ * the requested operation
+ *
+ * @slots: pointer to array of slots
+ * @max_slots: max number of slots
+ * @bp: perf_event to setup
+ * @ops: operation to be carried out on the slot
+ *
+ * Return:
+ * slot index on success
+ * -ENOSPC if no slot is available/matches
+ * -EINVAL on wrong operations parameter
*/
-int arch_install_hw_breakpoint(struct perf_event *bp)
+static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
+ struct perf_event *bp,
+ enum hw_breakpoint_ops ops)
+{
+ int i;
+ struct perf_event **slot;
+
+ for (i = 0; i < max_slots; ++i) {
+ slot = &slots[i];
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ if (!*slot) {
+ *slot = bp;
+ return i;
+ }
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ if (*slot == bp) {
+ *slot = NULL;
+ return i;
+ }
+ break;
+ case HW_BREAKPOINT_RESTORE:
+ if (*slot == bp)
+ return i;
+ break;
+ default:
+ pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
+ return -EINVAL;
+ }
+ }
+ return -ENOSPC;
+}
+
+static int hw_breakpoint_control(struct perf_event *bp,
+ enum hw_breakpoint_ops ops)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- struct perf_event **slot, **slots;
+ struct perf_event **slots;
struct debug_info *debug_info = &current->thread.debug;
int i, max_slots, ctrl_reg, val_reg, reg_enable;
+ enum debug_el dbg_el = debug_exception_level(info->ctrl.privilege);
u32 ctrl;
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
@@ -196,67 +250,54 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
reg_enable = !debug_info->wps_disabled;
}
- for (i = 0; i < max_slots; ++i) {
- slot = &slots[i];
-
- if (!*slot) {
- *slot = bp;
- break;
- }
- }
-
- if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
- return -ENOSPC;
+ i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);
- /* Ensure debug monitors are enabled at the correct exception level. */
- enable_debug_monitors(debug_exception_level(info->ctrl.privilege));
+ if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
+ return i;
- /* Setup the address register. */
- write_wb_reg(val_reg, i, info->address);
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ /*
+ * Ensure debug monitors are enabled at the correct exception
+ * level.
+ */
+ enable_debug_monitors(dbg_el);
+ /* Fall through */
+ case HW_BREAKPOINT_RESTORE:
+ /* Setup the address register. */
+ write_wb_reg(val_reg, i, info->address);
+
+ /* Setup the control register. */
+ ctrl = encode_ctrl_reg(info->ctrl);
+ write_wb_reg(ctrl_reg, i,
+ reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ /* Reset the control register. */
+ write_wb_reg(ctrl_reg, i, 0);
- /* Setup the control register. */
- ctrl = encode_ctrl_reg(info->ctrl);
- write_wb_reg(ctrl_reg, i, reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
+ /*
+ * Release the debug monitors for the correct exception
+ * level.
+ */
+ disable_debug_monitors(dbg_el);
+ break;
+ }
return 0;
}
-void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+/*
+ * Install a perf counter breakpoint.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- struct perf_event **slot, **slots;
- int i, max_slots, base;
-
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
- /* Breakpoint */
- base = AARCH64_DBG_REG_BCR;
- slots = this_cpu_ptr(bp_on_reg);
- max_slots = core_num_brps;
- } else {
- /* Watchpoint */
- base = AARCH64_DBG_REG_WCR;
- slots = this_cpu_ptr(wp_on_reg);
- max_slots = core_num_wrps;
- }
-
- /* Remove the breakpoint. */
- for (i = 0; i < max_slots; ++i) {
- slot = &slots[i];
-
- if (*slot == bp) {
- *slot = NULL;
- break;
- }
- }
-
- if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
- return;
-
- /* Reset the control register. */
- write_wb_reg(base, i, 0);
+ return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
+}
- /* Release the debug monitors for the correct exception level. */
- disable_debug_monitors(debug_exception_level(info->ctrl.privilege));
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+ hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}
static int get_hbp_len(u8 hbp_len)
@@ -806,18 +847,36 @@ void hw_breakpoint_thread_switch(struct task_struct *next)
/*
* CPU initialisation.
*/
-static void reset_ctrl_regs(void *unused)
+static void hw_breakpoint_reset(void *unused)
{
int i;
-
- for (i = 0; i < core_num_brps; ++i) {
- write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
- write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
+ struct perf_event **slots;
+ /*
+	 * When a CPU goes through cold boot, it does not have any installed
+	 * slots, so it is safe to share the same function for restoring and
+	 * resetting breakpoints; when a CPU is hotplugged in, it goes
+	 * through the slots, which are all empty, hence it just resets the
+	 * control and value of the debug registers.
+	 * When this function is triggered on warm boot through a CPU PM
+	 * notifier, some slots might be initialized; if so, they are
+	 * reprogrammed according to the debug slots' content.
+ */
+ for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
+ if (slots[i]) {
+ hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
+ } else {
+ write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
+ write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
+ }
}
- for (i = 0; i < core_num_wrps; ++i) {
- write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
- write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
+ for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
+ if (slots[i]) {
+ hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
+ } else {
+ write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
+ write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
+ }
}
}
@@ -827,7 +886,7 @@ static int hw_breakpoint_reset_notify(struct notifier_block *self,
{
int cpu = (long)hcpu;
if (action == CPU_ONLINE)
- smp_call_function_single(cpu, reset_ctrl_regs, NULL, 1);
+ smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
return NOTIFY_OK;
}
@@ -835,6 +894,14 @@ static struct notifier_block hw_breakpoint_reset_nb = {
.notifier_call = hw_breakpoint_reset_notify,
};
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
+#else
+static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
+{
+}
+#endif
+
/*
* One-time initialisation.
*/
@@ -850,8 +917,8 @@ static int __init arch_hw_breakpoint_init(void)
* Reset the breakpoint resources. We assume that a halting
* debugger will leave the world in a nice state for us.
*/
- smp_call_function(reset_ctrl_regs, NULL, 1);
- reset_ctrl_regs(NULL);
+ smp_call_function(hw_breakpoint_reset, NULL, 1);
+ hw_breakpoint_reset(NULL);
/* Register debug fault handlers. */
hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
@@ -861,6 +928,8 @@ static int __init arch_hw_breakpoint_init(void)
/* Register hotplug notifier. */
register_cpu_notifier(&hw_breakpoint_reset_nb);
+ /* Register cpu_suspend hw breakpoint restore hook */
+ cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);
return 0;
}
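
The refactoring above folds the install, uninstall and restore paths into a single walk over the per-CPU slot arrays. As a rough stand-alone illustration of that bookkeeping (not kernel code: the array and the opaque cookies are simplified stand-ins for bp_on_reg/wp_on_reg and struct perf_event pointers, and the helper name is invented), a minimal sketch:

	#include <stdio.h>

	enum slot_op { OP_INSTALL, OP_UNINSTALL, OP_RESTORE };

	static int slot_setup(void **slots, int max_slots, void *bp, enum slot_op op)
	{
		int i;

		for (i = 0; i < max_slots; ++i) {
			switch (op) {
			case OP_INSTALL:		/* claim the first free slot */
				if (!slots[i]) {
					slots[i] = bp;
					return i;
				}
				break;
			case OP_UNINSTALL:		/* release the matching slot */
				if (slots[i] == bp) {
					slots[i] = NULL;
					return i;
				}
				break;
			case OP_RESTORE:		/* find it, leave it in place */
				if (slots[i] == bp)
					return i;
				break;
			}
		}
		return -1;			/* the kernel returns -ENOSPC here */
	}

	int main(void)
	{
		void *slots[4] = { 0 };
		int a, b;

		printf("install a   -> slot %d\n", slot_setup(slots, 4, &a, OP_INSTALL));
		printf("install b   -> slot %d\n", slot_setup(slots, 4, &b, OP_INSTALL));
		printf("restore a   -> slot %d\n", slot_setup(slots, 4, &a, OP_RESTORE));
		printf("uninstall a -> slot %d\n", slot_setup(slots, 4, &a, OP_UNINSTALL));
		return 0;
	}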
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
new file mode 100644
index 0000000..92f3683
--- /dev/null
+++ b/arch/arm64/kernel/insn.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/insn.h>
+
+static int aarch64_insn_encoding_class[] = {
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_REG,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_FPSIMD,
+ AARCH64_INSN_CLS_DP_IMM,
+ AARCH64_INSN_CLS_DP_IMM,
+ AARCH64_INSN_CLS_BR_SYS,
+ AARCH64_INSN_CLS_BR_SYS,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_REG,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_FPSIMD,
+};
+
+enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
+{
+ return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
+}
+
+/* NOP is an alias of HINT */
+bool __kprobes aarch64_insn_is_nop(u32 insn)
+{
+ if (!aarch64_insn_is_hint(insn))
+ return false;
+
+ switch (insn & 0xFE0) {
+ case AARCH64_INSN_HINT_YIELD:
+ case AARCH64_INSN_HINT_WFE:
+ case AARCH64_INSN_HINT_WFI:
+ case AARCH64_INSN_HINT_SEV:
+ case AARCH64_INSN_HINT_SEVL:
+ return false;
+ default:
+ return true;
+ }
+}
+
+/*
+ * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+ * little-endian.
+ */
+int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+{
+ int ret;
+ u32 val;
+
+ ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
+ if (!ret)
+ *insnp = le32_to_cpu(val);
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_write(void *addr, u32 insn)
+{
+ insn = cpu_to_le32(insn);
+ return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
+}
+
+static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
+{
+ if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
+ return false;
+
+ return aarch64_insn_is_b(insn) ||
+ aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_svc(insn) ||
+ aarch64_insn_is_hvc(insn) ||
+ aarch64_insn_is_smc(insn) ||
+ aarch64_insn_is_brk(insn) ||
+ aarch64_insn_is_nop(insn);
+}
+
+/*
+ * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
+ * Section B2.6.5 "Concurrent modification and execution of instructions":
+ * Concurrent modification and execution of instructions can lead to the
+ * resulting instruction performing any behavior that can be achieved by
+ * executing any sequence of instructions that can be executed from the
+ * same Exception level, except where the instruction before modification
+ * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
+ * or SMC instruction.
+ */
+bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
+{
+ return __aarch64_insn_hotpatch_safe(old_insn) &&
+ __aarch64_insn_hotpatch_safe(new_insn);
+}
+
+int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+{
+ u32 *tp = addr;
+ int ret;
+
+ /* A64 instructions must be word aligned */
+ if ((uintptr_t)tp & 0x3)
+ return -EINVAL;
+
+ ret = aarch64_insn_write(tp, insn);
+ if (ret == 0)
+ flush_icache_range((uintptr_t)tp,
+ (uintptr_t)tp + AARCH64_INSN_SIZE);
+
+ return ret;
+}
+
+struct aarch64_insn_patch {
+ void **text_addrs;
+ u32 *new_insns;
+ int insn_cnt;
+ atomic_t cpu_count;
+};
+
+static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+{
+ int i, ret = 0;
+ struct aarch64_insn_patch *pp = arg;
+
+ /* The first CPU becomes master */
+ if (atomic_inc_return(&pp->cpu_count) == 1) {
+ for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+ ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+ pp->new_insns[i]);
+ /*
+ * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
+	 * which ends with a "dsb; isb" pair guaranteeing global
+ * visibility.
+ */
+ atomic_set(&pp->cpu_count, -1);
+ } else {
+ while (atomic_read(&pp->cpu_count) != -1)
+ cpu_relax();
+ isb();
+ }
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
+{
+ struct aarch64_insn_patch patch = {
+ .text_addrs = addrs,
+ .new_insns = insns,
+ .insn_cnt = cnt,
+ .cpu_count = ATOMIC_INIT(0),
+ };
+
+ if (cnt <= 0)
+ return -EINVAL;
+
+ return stop_machine(aarch64_insn_patch_text_cb, &patch,
+ cpu_online_mask);
+}
+
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+{
+ int ret;
+ u32 insn;
+
+	/* Unsafe to patch multiple instructions without synchronization */
+ if (cnt == 1) {
+ ret = aarch64_insn_read(addrs[0], &insn);
+ if (ret)
+ return ret;
+
+ if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
+ /*
+ * ARMv8 architecture doesn't guarantee all CPUs see
+ * the new instruction after returning from function
+ * aarch64_insn_patch_text_nosync(). So send IPIs to
+ * all other CPUs to achieve instruction
+ * synchronization.
+ */
+ ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
+ kick_all_cpus_sync();
+ return ret;
+ }
+ }
+
+ return aarch64_insn_patch_text_sync(addrs, insns, cnt);
+}
+
+u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
+ u32 insn, u64 imm)
+{
+ u32 immlo, immhi, lomask, himask, mask;
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_IMM_ADR:
+ lomask = 0x3;
+ himask = 0x7ffff;
+ immlo = imm & lomask;
+ imm >>= 2;
+ immhi = imm & himask;
+ imm = (immlo << 24) | (immhi);
+ mask = (lomask << 24) | (himask);
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_26:
+ mask = BIT(26) - 1;
+ shift = 0;
+ break;
+ case AARCH64_INSN_IMM_19:
+ mask = BIT(19) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_16:
+ mask = BIT(16) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_14:
+ mask = BIT(14) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_12:
+ mask = BIT(12) - 1;
+ shift = 10;
+ break;
+ case AARCH64_INSN_IMM_9:
+ mask = BIT(9) - 1;
+ shift = 12;
+ break;
+ default:
+ pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
+ type);
+ return 0;
+ }
+
+ /* Update the immediate field. */
+ insn &= ~(mask << shift);
+ insn |= (imm & mask) << shift;
+
+ return insn;
+}
+
+u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_branch_type type)
+{
+ u32 insn;
+ long offset;
+
+ /*
+ * PC: A 64-bit Program Counter holding the address of the current
+ * instruction. A64 instructions must be word-aligned.
+ */
+ BUG_ON((pc & 0x3) || (addr & 0x3));
+
+ /*
+	 * B/BL support a [-128M, 128M) offset.
+	 * The ARM64 virtual address arrangement guarantees that all kernel
+	 * and module text is within +/-128M.
+ */
+ offset = ((long)addr - (long)pc);
+ BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
+
+ if (type == AARCH64_INSN_BRANCH_LINK)
+ insn = aarch64_insn_get_bl_value();
+ else
+ insn = aarch64_insn_get_b_value();
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+ offset >> 2);
+}
+
+u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
+{
+ return aarch64_insn_get_hint_value() | op;
+}
+
+u32 __kprobes aarch64_insn_gen_nop(void)
+{
+ return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
+}
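
For reference, here is a stand-alone sketch of the AARCH64_INSN_IMM_26 packing that aarch64_insn_gen_branch_imm() performs above. The helper name is invented; the B/BL opcode values are the architectural ones, and the alignment/range checks the kernel does with BUG_ON() are reduced to a comment:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t gen_branch(uint64_t pc, uint64_t addr, int link)
	{
		uint32_t insn = link ? 0x94000000u : 0x14000000u;	/* BL : B */
		int64_t offset = (int64_t)addr - (int64_t)pc;	/* must stay in [-128M, 128M), word-aligned */
		uint32_t mask = (1u << 26) - 1;			/* 26-bit immediate field */

		/* The immediate is the word offset: drop the two alignment bits. */
		return insn | (((uint64_t)offset >> 2) & mask);
	}

	int main(void)
	{
		/* Branch back by 8 bytes: imm26 = -2, i.e. insn = 0x17fffffe. */
		printf("0x%08x\n", gen_branch(0x1008, 0x1000, 0));
		return 0;
	}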
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
new file mode 100644
index 0000000..263a166
--- /dev/null
+++ b/arch/arm64/kernel/jump_label.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * Based on arch/arm/kernel/jump_label.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <asm/insn.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+static void __arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ bool is_static)
+{
+ void *addr = (void *)entry->code;
+ u32 insn;
+
+ if (type == JUMP_LABEL_ENABLE) {
+ insn = aarch64_insn_gen_branch_imm(entry->code,
+ entry->target,
+ AARCH64_INSN_BRANCH_NOLINK);
+ } else {
+ insn = aarch64_insn_gen_nop();
+ }
+
+ if (is_static)
+ aarch64_insn_patch_text_nosync(addr, insn);
+ else
+ aarch64_insn_patch_text(&addr, &insn, 1);
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ __arch_jump_label_transform(entry, type, false);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ __arch_jump_label_transform(entry, type, true);
+}
+
+#endif /* HAVE_JUMP_LABEL */
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index e2ad0d8..1eb1cc9 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -25,6 +25,10 @@
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
+#include <asm/insn.h>
+
+#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX
+#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16
void *module_alloc(unsigned long size)
{
@@ -94,28 +98,18 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
return 0;
}
-enum aarch64_imm_type {
- INSN_IMM_MOVNZ,
- INSN_IMM_MOVK,
- INSN_IMM_ADR,
- INSN_IMM_26,
- INSN_IMM_19,
- INSN_IMM_16,
- INSN_IMM_14,
- INSN_IMM_12,
- INSN_IMM_9,
-};
-
-static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
+static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
+ int lsb, enum aarch64_insn_imm_type imm_type)
{
- u32 immlo, immhi, lomask, himask, mask;
- int shift;
+ u64 imm, limit = 0;
+ s64 sval;
+ u32 insn = le32_to_cpu(*(u32 *)place);
- /* The instruction stream is always little endian. */
- insn = le32_to_cpu(insn);
+ sval = do_reloc(op, place, val);
+ sval >>= lsb;
+ imm = sval & 0xffff;
- switch (type) {
- case INSN_IMM_MOVNZ:
+ if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
/*
* For signed MOVW relocations, we have to manipulate the
* instruction encoding depending on whether or not the
@@ -134,70 +128,12 @@ static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
*/
imm = ~imm;
}
- case INSN_IMM_MOVK:
- mask = BIT(16) - 1;
- shift = 5;
- break;
- case INSN_IMM_ADR:
- lomask = 0x3;
- himask = 0x7ffff;
- immlo = imm & lomask;
- imm >>= 2;
- immhi = imm & himask;
- imm = (immlo << 24) | (immhi);
- mask = (lomask << 24) | (himask);
- shift = 5;
- break;
- case INSN_IMM_26:
- mask = BIT(26) - 1;
- shift = 0;
- break;
- case INSN_IMM_19:
- mask = BIT(19) - 1;
- shift = 5;
- break;
- case INSN_IMM_16:
- mask = BIT(16) - 1;
- shift = 5;
- break;
- case INSN_IMM_14:
- mask = BIT(14) - 1;
- shift = 5;
- break;
- case INSN_IMM_12:
- mask = BIT(12) - 1;
- shift = 10;
- break;
- case INSN_IMM_9:
- mask = BIT(9) - 1;
- shift = 12;
- break;
- default:
- pr_err("encode_insn_immediate: unknown immediate encoding %d\n",
- type);
- return 0;
+ imm_type = AARCH64_INSN_IMM_MOVK;
}
- /* Update the immediate field. */
- insn &= ~(mask << shift);
- insn |= (imm & mask) << shift;
-
- return cpu_to_le32(insn);
-}
-
-static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
- int lsb, enum aarch64_imm_type imm_type)
-{
- u64 imm, limit = 0;
- s64 sval;
- u32 insn = *(u32 *)place;
-
- sval = do_reloc(op, place, val);
- sval >>= lsb;
- imm = sval & 0xffff;
-
/* Update the instruction with the new encoding. */
- *(u32 *)place = encode_insn_immediate(imm_type, insn, imm);
+ insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+ *(u32 *)place = cpu_to_le32(insn);
/* Shift out the immediate field. */
sval >>= 16;
@@ -206,9 +142,9 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
* For unsigned immediates, the overflow check is straightforward.
* For signed immediates, the sign bit is actually the bit past the
* most significant bit of the field.
- * The INSN_IMM_16 immediate type is unsigned.
+ * The AARCH64_INSN_IMM_16 immediate type is unsigned.
*/
- if (imm_type != INSN_IMM_16) {
+ if (imm_type != AARCH64_INSN_IMM_16) {
sval++;
limit++;
}
@@ -221,11 +157,11 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
}
static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
- int lsb, int len, enum aarch64_imm_type imm_type)
+ int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
u64 imm, imm_mask;
s64 sval;
- u32 insn = *(u32 *)place;
+ u32 insn = le32_to_cpu(*(u32 *)place);
/* Calculate the relocation value. */
sval = do_reloc(op, place, val);
@@ -236,7 +172,8 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
imm = sval & imm_mask;
/* Update the instruction's immediate field. */
- *(u32 *)place = encode_insn_immediate(imm_type, insn, imm);
+ insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+ *(u32 *)place = cpu_to_le32(insn);
/*
* Extract the upper value bits (including the sign bit) and
@@ -318,125 +255,125 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
overflow_check = false;
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_SABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_SABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_SABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G0_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G0:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G1_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G1:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G2_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G2:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
/* Immediate instruction relocations. */
case R_AARCH64_LD_PREL_LO19:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
- INSN_IMM_19);
+ AARCH64_INSN_IMM_19);
break;
case R_AARCH64_ADR_PREL_LO21:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
- INSN_IMM_ADR);
+ AARCH64_INSN_IMM_ADR);
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
- INSN_IMM_ADR);
+ AARCH64_INSN_IMM_ADR);
break;
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST16_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST32_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST64_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST128_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_TSTBR14:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
- INSN_IMM_14);
+ AARCH64_INSN_IMM_14);
break;
case R_AARCH64_CONDBR19:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
- INSN_IMM_19);
+ AARCH64_INSN_IMM_19);
break;
case R_AARCH64_JUMP26:
case R_AARCH64_CALL26:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
- INSN_IMM_26);
+ AARCH64_INSN_IMM_26);
break;
default:
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 0e63c98..5b1cd79 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -22,6 +22,7 @@
#include <linux/bitmap.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/perf_event.h>
@@ -363,26 +364,53 @@ validate_group(struct perf_event *event)
}
static void
+armpmu_disable_percpu_irq(void *data)
+{
+ unsigned int irq = *(unsigned int *)data;
+ disable_percpu_irq(irq);
+}
+
+static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
- int i, irq, irqs;
+ int irq;
+ unsigned int i, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
irqs = min(pmu_device->num_resources, num_possible_cpus());
+ if (!irqs)
+ return;
- for (i = 0; i < irqs; ++i) {
- if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
- continue;
- irq = platform_get_irq(pmu_device, i);
- if (irq >= 0)
- free_irq(irq, armpmu);
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq <= 0)
+ return;
+
+ if (irq_is_percpu(irq)) {
+ on_each_cpu(armpmu_disable_percpu_irq, &irq, 1);
+ free_percpu_irq(irq, &cpu_hw_events);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+ continue;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq > 0)
+ free_irq(irq, armpmu);
+ }
}
}
+static void
+armpmu_enable_percpu_irq(void *data)
+{
+ unsigned int irq = *(unsigned int *)data;
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
- int i, err, irq, irqs;
+ int err, irq;
+ unsigned int i, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
if (!pmu_device) {
@@ -391,39 +419,59 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
}
irqs = min(pmu_device->num_resources, num_possible_cpus());
- if (irqs < 1) {
+ if (!irqs) {
pr_err("no irqs for PMUs defined\n");
return -ENODEV;
}
- for (i = 0; i < irqs; ++i) {
- err = 0;
- irq = platform_get_irq(pmu_device, i);
- if (irq < 0)
- continue;
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq <= 0) {
+ pr_err("failed to get valid irq for PMU device\n");
+ return -ENODEV;
+ }
- /*
- * If we have a single PMU interrupt that we can't shift,
- * assume that we're running on a uniprocessor machine and
- * continue. Otherwise, continue without this interrupt.
- */
- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
- pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, i);
- continue;
- }
+ if (irq_is_percpu(irq)) {
+ err = request_percpu_irq(irq, armpmu->handle_irq,
+ "arm-pmu", &cpu_hw_events);
- err = request_irq(irq, armpmu->handle_irq,
- IRQF_NOBALANCING,
- "arm-pmu", armpmu);
if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
+ pr_err("unable to request percpu IRQ%d for ARM PMU counters\n",
+ irq);
armpmu_release_hardware(armpmu);
return err;
}
- cpumask_set_cpu(i, &armpmu->active_irqs);
+ on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ err = 0;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq <= 0)
+ continue;
+
+ /*
+ * If we have a single PMU interrupt that we can't shift,
+ * assume that we're running on a uniprocessor machine and
+ * continue. Otherwise, continue without this interrupt.
+ */
+ if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+ pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, i);
+ continue;
+ }
+
+ err = request_irq(irq, armpmu->handle_irq,
+ IRQF_NOBALANCING,
+ "arm-pmu", armpmu);
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ armpmu_release_hardware(armpmu);
+ return err;
+ }
+
+ cpumask_set_cpu(i, &armpmu->active_irqs);
+ }
}
return 0;
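
The per-CPU branch added above follows the generic request_percpu_irq()/enable_percpu_irq() pattern rather than per-CPU request_irq() calls. A minimal, hypothetical sketch of that pattern outside the PMU code (example_dev_id, example_handler and example_setup are invented names):

	#include <linux/interrupt.h>
	#include <linux/irq.h>
	#include <linux/percpu.h>
	#include <linux/smp.h>

	static DEFINE_PER_CPU(int, example_dev_id);

	static irqreturn_t example_handler(int irq, void *dev_id)
	{
		/* dev_id points at this CPU's example_dev_id slot. */
		return IRQ_HANDLED;
	}

	static void example_enable_percpu_irq(void *data)
	{
		enable_percpu_irq(*(unsigned int *)data, IRQ_TYPE_NONE);
	}

	static int example_setup(unsigned int irq)
	{
		int err;

		/* One request covers every CPU; each gets its own dev_id slot. */
		err = request_percpu_irq(irq, example_handler, "example",
					 &example_dev_id);
		if (err)
			return err;

		/* The line must still be unmasked locally on each CPU. */
		on_each_cpu(example_enable_percpu_irq, &irq, 1);
		return 0;
	}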
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index de17c89..248a15d 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -33,6 +33,7 @@
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
+#include <linux/cpuidle.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
@@ -98,8 +99,10 @@ void arch_cpu_idle(void)
* This should do all the clock switching and wait for interrupt
* tricks
*/
- cpu_do_idle();
- local_irq_enable();
+ if (cpuidle_idle_call()) {
+ cpu_do_idle();
+ local_irq_enable();
+ }
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -308,6 +311,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
+ unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
@@ -315,9 +319,11 @@ unsigned long get_wchan(struct task_struct *p)
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
+ stack_page = (unsigned long)task_stack_page(p);
do {
- int ret = unwind_frame(&frame);
- if (ret < 0)
+ if (frame.sp < stack_page ||
+ frame.sp >= stack_page + THREAD_SIZE ||
+ unwind_frame(&frame))
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index bd9bbd0..c8e9eff 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -108,20 +108,95 @@ void __init early_print(const char *str, ...)
printk("%s", buf);
}
+void __init smp_setup_processor_id(void)
+{
+ /*
+	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused by
+	 * using a percpu variable early; for example, lockdep will
+	 * access a percpu variable inside lock_release().
+ */
+ set_my_cpu_offset(0);
+}
+
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
return phys_id == cpu_logical_map(cpu);
}
+struct mpidr_hash mpidr_hash;
+#ifdef CONFIG_SMP
+/**
+ * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
+ *			  level in order to build a linear index from an
+ *			  MPIDR value. The resulting algorithm is a
+ *			  collision-free hash carried out through shifting
+ *			  and ORing.
+ */
+static void __init smp_build_mpidr_hash(void)
+{
+ u32 i, affinity, fs[4], bits[4], ls;
+ u64 mask = 0;
+ /*
+	 * Pre-scan the list of MPIDRs and filter out bits that do
+	 * not contribute to affinity levels, i.e. they never toggle.
+ */
+ for_each_possible_cpu(i)
+ mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
+ pr_debug("mask of set bits %#llx\n", mask);
+ /*
+ * Find and stash the last and first bit set at all affinity levels to
+ * check how many bits are required to represent them.
+ */
+ for (i = 0; i < 4; i++) {
+ affinity = MPIDR_AFFINITY_LEVEL(mask, i);
+ /*
+		 * Find the MSB and LSB positions to determine
+		 * how many bits are required to express the
+		 * affinity level.
+ */
+ ls = fls(affinity);
+ fs[i] = affinity ? ffs(affinity) - 1 : 0;
+ bits[i] = ls - fs[i];
+ }
+ /*
+ * An index can be created from the MPIDR_EL1 by isolating the
+ * significant bits at each affinity level and by shifting
+	 * them in order to compress the 32-bit value space to a
+	 * compact set of values. This is equivalent to hashing
+	 * the MPIDR_EL1 through shifting and ORing. It is a collision-free
+	 * hash, though not minimal, since some levels might contain a number
+	 * of CPUs that is not an exact power of 2 and their bit
+	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
+ */
+ mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
+ mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
+ mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
+ (bits[1] + bits[0]);
+ mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
+ fs[3] - (bits[2] + bits[1] + bits[0]);
+ mpidr_hash.mask = mask;
+ mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
+ pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
+ mpidr_hash.shift_aff[0],
+ mpidr_hash.shift_aff[1],
+ mpidr_hash.shift_aff[2],
+ mpidr_hash.shift_aff[3],
+ mpidr_hash.mask,
+ mpidr_hash.bits);
+ /*
+ * 4x is an arbitrary value used to warn on a hash table much bigger
+ * than expected on most systems.
+ */
+ if (mpidr_hash_size() > 4 * num_possible_cpus())
+ pr_warn("Large number of MPIDR hash buckets detected\n");
+ __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
+}
+#endif
+
static void __init setup_processor(void)
{
struct cpu_info *cpu_info;
+ u64 features, block;
- /*
- * locate processor in the list of supported processor
- * types. The linker builds this table for us from the
- * entries in arch/arm/mm/proc.S
- */
cpu_info = lookup_processor_type(read_cpuid_id());
if (!cpu_info) {
printk("CPU configuration botched (ID %08x), unable to continue.\n",
@@ -136,6 +211,37 @@ static void __init setup_processor(void)
sprintf(init_utsname()->machine, ELF_PLATFORM);
elf_hwcap = 0;
+
+ /*
+ * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
+ * The blocks we test below represent incremental functionality
+ * for non-negative values. Negative values are reserved.
+ */
+ features = read_cpuid(ID_AA64ISAR0_EL1);
+ block = (features >> 4) & 0xf;
+ if (!(block & 0x8)) {
+ switch (block) {
+ default:
+ case 2:
+ elf_hwcap |= HWCAP_PMULL;
+ case 1:
+ elf_hwcap |= HWCAP_AES;
+ case 0:
+ break;
+ }
+ }
+
+ block = (features >> 8) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_SHA1;
+
+ block = (features >> 12) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_SHA2;
+
+ block = (features >> 16) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_CRC32;
}
static void __init setup_machine_fdt(phys_addr_t dt_phys)
@@ -236,6 +342,7 @@ void __init setup_arch(char **cmdline_p)
cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
smp_init_cpus();
+ smp_build_mpidr_hash();
#endif
#ifdef CONFIG_VT
@@ -275,6 +382,11 @@ static const char *hwcap_str[] = {
"fp",
"asimd",
"evtstrm",
+ "aes",
+ "pmull",
+ "sha1",
+ "sha2",
+ "crc32",
NULL
};
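
As a worked example of the hash built by smp_build_mpidr_hash() above, the following user-space sketch models the same mask/shift computation for a hypothetical two-cluster, four-CPU topology. The MPIDR values and all names are illustrative; only the Aff0-Aff3 field layout (bits 0, 8, 16 and 32) comes from the architecture:

	#include <stdio.h>
	#include <stdint.h>

	#define LEVEL_SHIFT(l)	((l) == 3 ? 32 : (l) * 8)	/* Aff0..Aff3 offsets */

	int main(void)
	{
		uint64_t mpidr[] = { 0x000, 0x001, 0x100, 0x101 };
		int ncpus = 4, i, l;
		uint64_t mask = 0;
		unsigned int fs[4], bits[4], shift[4];

		/* Bits that never toggle across CPUs carry no information. */
		for (i = 0; i < ncpus; i++)
			mask |= mpidr[i] ^ mpidr[0];

		for (l = 0; l < 4; l++) {
			uint32_t a = (mask >> LEVEL_SHIFT(l)) & 0xff;

			fs[l] = a ? __builtin_ctz(a) : 0;			/* ffs() - 1 */
			bits[l] = (a ? 32 - __builtin_clz(a) : 0) - fs[l];	/* fls() - fs */
		}

		/* Shift each level down so the used bits pack next to each other. */
		shift[0] = LEVEL_SHIFT(0) + fs[0];
		shift[1] = LEVEL_SHIFT(1) + fs[1] - bits[0];
		shift[2] = LEVEL_SHIFT(2) + fs[2] - (bits[1] + bits[0]);
		shift[3] = LEVEL_SHIFT(3) + fs[3] - (bits[2] + bits[1] + bits[0]);

		/* Collision-free indices: 0x000->0, 0x001->1, 0x100->2, 0x101->3. */
		for (i = 0; i < ncpus; i++) {
			uint64_t m = mpidr[i] & mask, idx = 0;

			for (l = 0; l < 4; l++)
				idx |= (m & (0xffULL << LEVEL_SHIFT(l))) >> shift[l];
			printf("MPIDR 0x%03llx -> index %llu\n",
			       (unsigned long long)mpidr[i], (unsigned long long)idx);
		}
		return 0;
	}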
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
new file mode 100644
index 0000000..b192572
--- /dev/null
+++ b/arch/arm64/kernel/sleep.S
@@ -0,0 +1,184 @@
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+
+ .text
+/*
+ * Implementation of MPIDR_EL1 hash algorithm through shifting
+ * and OR'ing.
+ *
+ * @dst: register containing hash result
+ * @rs0: register containing affinity level 0 bit shift
+ * @rs1: register containing affinity level 1 bit shift
+ * @rs2: register containing affinity level 2 bit shift
+ * @rs3: register containing affinity level 3 bit shift
+ * @mpidr: register containing MPIDR_EL1 value
+ * @mask: register containing MPIDR mask
+ *
+ * Pseudo C-code:
+ *
+ *u32 dst;
+ *
+ *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) {
+ * u32 aff0, aff1, aff2, aff3;
+ * u64 mpidr_masked = mpidr & mask;
+ * aff0 = mpidr_masked & 0xff;
+ * aff1 = mpidr_masked & 0xff00;
+ * aff2 = mpidr_masked & 0xff0000;
+ *	aff3 = mpidr_masked & 0xff00000000;
+ * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3);
+ *}
+ * Input registers: rs0, rs1, rs2, rs3, mpidr, mask
+ * Output register: dst
+ * Note: input and output registers must be disjoint register sets
+ (eg: a macro instance with mpidr = x1 and dst = x1 is invalid)
+ */
+ .macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask
+ and \mpidr, \mpidr, \mask // mask out MPIDR bits
+ and \dst, \mpidr, #0xff // mask=aff0
+ lsr \dst ,\dst, \rs0 // dst=aff0>>rs0
+ and \mask, \mpidr, #0xff00 // mask = aff1
+ lsr \mask ,\mask, \rs1
+ orr \dst, \dst, \mask // dst|=(aff1>>rs1)
+ and \mask, \mpidr, #0xff0000 // mask = aff2
+ lsr \mask ,\mask, \rs2
+ orr \dst, \dst, \mask // dst|=(aff2>>rs2)
+ and \mask, \mpidr, #0xff00000000 // mask = aff3
+ lsr \mask ,\mask, \rs3
+ orr \dst, \dst, \mask // dst|=(aff3>>rs3)
+ .endm
+/*
+ * Save CPU state for a suspend. This saves callee registers, and allocates
+ * space on the kernel stack to save the CPU specific registers + some
+ * other data for resume.
+ *
+ * x0 = suspend finisher argument
+ */
+ENTRY(__cpu_suspend)
+ stp x29, lr, [sp, #-96]!
+ stp x19, x20, [sp,#16]
+ stp x21, x22, [sp,#32]
+ stp x23, x24, [sp,#48]
+ stp x25, x26, [sp,#64]
+ stp x27, x28, [sp,#80]
+ mov x2, sp
+ sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
+ mov x1, sp
+ /*
+ * x1 now points to struct cpu_suspend_ctx allocated on the stack
+ */
+ str x2, [x1, #CPU_CTX_SP]
+ ldr x2, =sleep_save_sp
+ ldr x2, [x2, #SLEEP_SAVE_SP_VIRT]
+#ifdef CONFIG_SMP
+ mrs x7, mpidr_el1
+ ldr x9, =mpidr_hash
+ ldr x10, [x9, #MPIDR_HASH_MASK]
+ /*
+	 * The following code relies on the size of the
+	 * struct mpidr_hash members.
+ */
+ ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS]
+ ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
+ compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
+ add x2, x2, x8, lsl #3
+#endif
+ bl __cpu_suspend_finisher
+ /*
+	 * We only get here if the suspend attempt failed.
+	 * A successful cpu_suspend returns from cpu_resume; returning
+	 * through this code path is considered an error.
+	 * If the return value is set to 0, force x0 = -EOPNOTSUPP
+	 * to make sure a proper error condition is propagated.
+ */
+ cmp x0, #0
+ mov x3, #-EOPNOTSUPP
+ csel x0, x3, x0, eq
+ add sp, sp, #CPU_SUSPEND_SZ // rewind stack pointer
+ ldp x19, x20, [sp, #16]
+ ldp x21, x22, [sp, #32]
+ ldp x23, x24, [sp, #48]
+ ldp x25, x26, [sp, #64]
+ ldp x27, x28, [sp, #80]
+ ldp x29, lr, [sp], #96
+ ret
+ENDPROC(__cpu_suspend)
+ .ltorg
+
+/*
+ * x0 must contain the sctlr value retrieved from restored context
+ */
+ENTRY(cpu_resume_mmu)
+ ldr x3, =cpu_resume_after_mmu
+ msr sctlr_el1, x0 // restore sctlr_el1
+ isb
+ br x3 // global jump to virtual address
+ENDPROC(cpu_resume_mmu)
+cpu_resume_after_mmu:
+ mov x0, #0 // return zero on success
+ ldp x19, x20, [sp, #16]
+ ldp x21, x22, [sp, #32]
+ ldp x23, x24, [sp, #48]
+ ldp x25, x26, [sp, #64]
+ ldp x27, x28, [sp, #80]
+ ldp x29, lr, [sp], #96
+ ret
+ENDPROC(cpu_resume_after_mmu)
+
+ .data
+ENTRY(cpu_resume)
+ bl el2_setup // if in EL2 drop to EL1 cleanly
+#ifdef CONFIG_SMP
+ mrs x1, mpidr_el1
+ adr x4, mpidr_hash_ptr
+ ldr x5, [x4]
+ add x8, x4, x5 // x8 = struct mpidr_hash phys address
+ /* retrieve mpidr_hash members to compute the hash */
+ ldr x2, [x8, #MPIDR_HASH_MASK]
+ ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS]
+ ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
+ compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
+ /* x7 contains hash index, let's use it to grab context pointer */
+#else
+ mov x7, xzr
+#endif
+ adr x0, sleep_save_sp
+ ldr x0, [x0, #SLEEP_SAVE_SP_PHYS]
+ ldr x0, [x0, x7, lsl #3]
+ /* load sp from context */
+ ldr x2, [x0, #CPU_CTX_SP]
+ adr x1, sleep_idmap_phys
+ /* load physical address of identity map page table in x1 */
+ ldr x1, [x1]
+ mov sp, x2
+ /*
+ * cpu_do_resume expects x0 to contain context physical address
+ * pointer and x1 to contain physical address of 1:1 page tables
+ */
+ bl cpu_do_resume // PC relative jump, MMU off
+ b cpu_resume_mmu // Resume MMU, never returns
+ENDPROC(cpu_resume)
+
+ .align 3
+mpidr_hash_ptr:
+ /*
+ * offset of mpidr_hash symbol from current location
+ * used to obtain run-time mpidr_hash address with MMU off
+ */
+ .quad mpidr_hash - .
+/*
+ * physical address of identity mapped page tables
+ */
+ .type sleep_idmap_phys, #object
+ENTRY(sleep_idmap_phys)
+ .quad 0
+/*
+ * struct sleep_save_sp {
+ * phys_addr_t *save_ptr_stash;
+ * phys_addr_t save_ptr_stash_phys;
+ * };
+ */
+ .type sleep_save_sp, #object
+ENTRY(sleep_save_sp)
+ .space SLEEP_SAVE_SP_SZ // struct sleep_save_sp
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a0c2ca6..1b7617a 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -61,6 +61,7 @@ enum ipi_msg_type {
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_TIMER,
};
/*
@@ -122,8 +123,6 @@ asmlinkage void secondary_start_kernel(void)
struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id();
- printk("CPU%u: Booted secondary processor\n", cpu);
-
/*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
@@ -132,6 +131,9 @@ asmlinkage void secondary_start_kernel(void)
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+ printk("CPU%u: Booted secondary processor\n", cpu);
+
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
@@ -271,6 +273,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_prepare_boot_cpu(void)
{
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
static void (*smp_cross_call)(const struct cpumask *, unsigned int);
@@ -447,6 +450,7 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_TIMER, "Timer broadcast interrupts"),
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -532,6 +536,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+ case IPI_TIMER:
+ irq_enter();
+ tick_receive_broadcast();
+ irq_exit();
+ break;
+#endif
+
default:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
break;
@@ -544,6 +556,13 @@ void smp_send_reschedule(int cpu)
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+void tick_broadcast(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_TIMER);
+}
+#endif
+
void smp_send_stop(void)
{
unsigned long timeout;
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index d25459f..c3b6c63 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -43,7 +43,7 @@ int unwind_frame(struct stackframe *frame)
low = frame->sp;
high = ALIGN(low, THREAD_SIZE);
- if (fp < low || fp > high || fp & 0xf)
+ if (fp < low || fp > high - 0x18 || fp & 0xf)
return -EINVAL;
frame->sp = fp + 0x10;
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
new file mode 100644
index 0000000..430344e
--- /dev/null
+++ b/arch/arm64/kernel/suspend.c
@@ -0,0 +1,132 @@
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/debug-monitors.h>
+#include <asm/pgtable.h>
+#include <asm/memory.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
+
+extern int __cpu_suspend(unsigned long);
+/*
+ * This is called by __cpu_suspend() to save the state, and do whatever
+ * flushing is required to ensure that when the CPU goes to sleep we have
+ * the necessary data available when the caches are not searched.
+ *
+ * @arg: Argument to pass to suspend operations
+ * @ptr: CPU context virtual address
+ * @save_ptr: address of the location where the context physical address
+ * must be saved
+ */
+int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
+ phys_addr_t *save_ptr)
+{
+ int cpu = smp_processor_id();
+
+ *save_ptr = virt_to_phys(ptr);
+
+ cpu_do_suspend(ptr);
+ /*
+ * Only flush the context that must be retrieved with the MMU
+ * off. VA primitives ensure the flush is applied to all
+ * cache levels so context is pushed to DRAM.
+ */
+ __flush_dcache_area(ptr, sizeof(*ptr));
+ __flush_dcache_area(save_ptr, sizeof(*save_ptr));
+
+ return cpu_ops[cpu]->cpu_suspend(arg);
+}
+
+/*
+ * This hook is provided so that cpu_suspend code can restore HW
+ * breakpoints as early as possible in the resume path, before re-enabling
+ * debug exceptions. This cannot be done from a CPU PM notifier since, by the
+ * time the notifier runs, debug exceptions might have been enabled already,
+ * with the HW breakpoint registers' contents still in an unknown state.
+ */
+void (*hw_breakpoint_restore)(void *);
+void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
+{
+ /* Prevent multiple restore hook initializations */
+ if (WARN_ON(hw_breakpoint_restore))
+ return;
+ hw_breakpoint_restore = hw_bp_restore;
+}
+
+/**
+ * cpu_suspend
+ *
+ * @arg: argument to pass to the finisher function
+ */
+int cpu_suspend(unsigned long arg)
+{
+ struct mm_struct *mm = current->active_mm;
+ int ret, cpu = smp_processor_id();
+ unsigned long flags;
+
+ /*
+	 * If cpu_ops have not been registered or suspend
+	 * has not been initialized, the cpu_suspend call fails early.
+ */
+ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
+ return -EOPNOTSUPP;
+
+ /*
+ * From this point debug exceptions are disabled to prevent
+ * updates to mdscr register (saved and restored along with
+ * general purpose registers) from kernel debuggers.
+ */
+ local_dbg_save(flags);
+
+ /*
+	 * The mm context is saved on the stack; it will be restored when
+	 * the CPU comes out of reset through the identity mapped
+	 * page tables, so that the thread address space is properly
+	 * set up on function return.
+ */
+ ret = __cpu_suspend(arg);
+ if (ret == 0) {
+ cpu_switch_mm(mm->pgd, mm);
+ flush_tlb_all();
+ /*
+ * Restore HW breakpoint registers to sane values
+ * before debug exceptions are possibly reenabled
+ * through local_dbg_restore.
+ */
+ if (hw_breakpoint_restore)
+ hw_breakpoint_restore(NULL);
+ }
+
+ /*
+	 * Restore pstate flags. The OS lock and mdscr have already been
+	 * restored, so from this point onwards, debugging is fully
+	 * re-enabled if it was enabled when the core started shutdown.
+ */
+ local_dbg_restore(flags);
+
+ return ret;
+}
+
+extern struct sleep_save_sp sleep_save_sp;
+extern phys_addr_t sleep_idmap_phys;
+
+static int cpu_suspend_init(void)
+{
+ void *ctx_ptr;
+
+ /* ctx_ptr is an array of physical addresses */
+ ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);
+
+ if (WARN_ON(!ctx_ptr))
+ return -ENOMEM;
+
+ sleep_save_sp.save_ptr_stash = ctx_ptr;
+ sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
+ sleep_idmap_phys = virt_to_phys(idmap_pg_dir);
+ __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
+ __flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys));
+
+ return 0;
+}
+early_initcall(cpu_suspend_init);
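
A minimal sketch of how a platform idle path might call the new cpu_suspend() interface, assuming the cpu_ops back-end registered for the CPU performs the actual power-down when its ->cpu_suspend() method is invoked; the function name is hypothetical:

	static int example_enter_deep_idle(unsigned long state_index)
	{
		int ret;

		/*
		 * Context is saved on the stack; on wake-up execution resumes
		 * here via cpu_resume with the MMU on and HW breakpoints
		 * already restored by the registered hook.
		 */
		ret = cpu_suspend(state_index);
		if (ret)
			pr_warn("deep idle entry failed: %d\n", ret);

		return ret;
	}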
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 5161ad9..4ba7a55 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -99,17 +99,14 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_data = .;
- __data_loc = _data - LOAD_OFFSET;
_sdata = .;
RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
_edata = .;
- _edata_loc = __data_loc + SIZEOF(.data);
BSS_SECTION(0, 0, 0)
_end = .;
STABS_DEBUG
- .comment 0 : { *(.comment) }
}
/*
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 59acc0e..328ce1a9 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,6 +1,4 @@
-lib-y := bitops.o delay.o \
- strncpy_from_user.o strnlen_user.o clear_user.o \
- copy_from_user.o copy_to_user.o copy_in_user.o \
- copy_page.o clear_page.o \
- memchr.o memcpy.o memmove.o memset.o \
+lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
+ copy_to_user.o copy_in_user.o copy_page.o \
+ clear_page.o memchr.o memcpy.o memmove.o memset.o \
strchr.o strrchr.o
diff --git a/arch/arm64/lib/strncpy_from_user.S b/arch/arm64/lib/strncpy_from_user.S
deleted file mode 100644
index 56e448a..0000000
--- a/arch/arm64/lib/strncpy_from_user.S
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Based on arch/arm/lib/strncpy_from_user.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/errno.h>
-
- .text
- .align 5
-
-/*
- * Copy a string from user space to kernel space.
- * x0 = dst, x1 = src, x2 = byte length
- * returns the number of characters copied (strlen of copied string),
- * -EFAULT on exception, or "len" if we fill the whole buffer
- */
-ENTRY(__strncpy_from_user)
- mov x4, x1
-1: subs x2, x2, #1
- bmi 2f
-USER(9f, ldrb w3, [x1], #1 )
- strb w3, [x0], #1
- cbnz w3, 1b
- sub x1, x1, #1 // take NUL character out of count
-2: sub x0, x1, x4
- ret
-ENDPROC(__strncpy_from_user)
-
- .section .fixup,"ax"
- .align 0
-9: strb wzr, [x0] // null terminate
- mov x0, #-EFAULT
- ret
- .previous
diff --git a/arch/arm64/lib/strnlen_user.S b/arch/arm64/lib/strnlen_user.S
deleted file mode 100644
index 7f7b176..0000000
--- a/arch/arm64/lib/strnlen_user.S
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Based on arch/arm/lib/strnlen_user.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/errno.h>
-
- .text
- .align 5
-
-/* Prototype: unsigned long __strnlen_user(const char *str, long n)
- * Purpose : get length of a string in user memory
- * Params : str - address of string in user memory
- * Returns : length of string *including terminator*
- * or zero on exception, or n if too long
- */
-ENTRY(__strnlen_user)
- mov x2, x0
-1: subs x1, x1, #1
- b.mi 2f
-USER(9f, ldrb w3, [x0], #1 )
- cbnz w3, 1b
-2: sub x0, x0, x2
- ret
-ENDPROC(__strnlen_user)
-
- .section .fixup,"ax"
- .align 0
-9: mov x0, #0
- ret
- .previous
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4bd7579..45b5ab5 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -21,6 +21,7 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
@@ -33,17 +34,47 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
+ if (dev == NULL) {
+ WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+ return NULL;
+ }
+
if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
dev->coherent_dma_mask <= DMA_BIT_MASK(32))
flags |= GFP_DMA32;
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ if (IS_ENABLED(CONFIG_DMA_CMA)) {
+ struct page *page;
+
+ page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+ get_order(size));
+ if (!page)
+ return NULL;
+
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+ return page_address(page);
+ } else {
+ return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ }
}
static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+ if (dev == NULL) {
+ WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_DMA_CMA)) {
+ phys_addr_t paddr = dma_to_phys(dev, dma_handle);
+
+ dma_release_from_contiguous(dev,
+ phys_to_page(paddr),
+ size >> PAGE_SHIFT);
+ } else {
+ swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+ }
}
static struct dma_map_ops arm64_swiotlb_dma_ops = {
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 0cb8742..d0b4c2e 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -30,6 +30,7 @@
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>
+#include <linux/dma-contiguous.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -159,6 +160,8 @@ void __init arm64_memblock_init(void)
memblock_reserve(base, size);
}
+ dma_contiguous_reserve(0);
+
memblock_allow_resize();
memblock_dump_all();
}
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 0f7fec5..bed1f1d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -80,6 +80,75 @@ ENTRY(cpu_do_idle)
ret
ENDPROC(cpu_do_idle)
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+/**
+ * cpu_do_suspend - save CPU registers context
+ *
+ * x0: virtual address of context pointer
+ */
+ENTRY(cpu_do_suspend)
+ mrs x2, tpidr_el0
+ mrs x3, tpidrro_el0
+ mrs x4, contextidr_el1
+ mrs x5, mair_el1
+ mrs x6, cpacr_el1
+ mrs x7, ttbr1_el1
+ mrs x8, tcr_el1
+ mrs x9, vbar_el1
+ mrs x10, mdscr_el1
+ mrs x11, oslsr_el1
+ mrs x12, sctlr_el1
+ stp x2, x3, [x0]
+ stp x4, x5, [x0, #16]
+ stp x6, x7, [x0, #32]
+ stp x8, x9, [x0, #48]
+ stp x10, x11, [x0, #64]
+ str x12, [x0, #80]
+ ret
+ENDPROC(cpu_do_suspend)
+
+/**
+ * cpu_do_resume - restore CPU register context
+ *
+ * x0: Physical address of context pointer
+ * x1: ttbr0_el1 to be restored
+ *
+ * Returns:
+ * sctlr_el1 value in x0
+ */
+ENTRY(cpu_do_resume)
+ /*
+ * Invalidate local tlb entries before turning on MMU
+ */
+ tlbi vmalle1
+ ldp x2, x3, [x0]
+ ldp x4, x5, [x0, #16]
+ ldp x6, x7, [x0, #32]
+ ldp x8, x9, [x0, #48]
+ ldp x10, x11, [x0, #64]
+ ldr x12, [x0, #80]
+ msr tpidr_el0, x2
+ msr tpidrro_el0, x3
+ msr contextidr_el1, x4
+ msr mair_el1, x5
+ msr cpacr_el1, x6
+ msr ttbr0_el1, x1
+ msr ttbr1_el1, x7
+ msr tcr_el1, x8
+ msr vbar_el1, x9
+ msr mdscr_el1, x10
+ /*
+ * Restore oslsr_el1 by writing oslar_el1
+ */
+ ubfx x11, x11, #1, #1
+ msr oslar_el1, x11
+ mov x0, x12
+ dsb nsh // Make sure local tlb invalidation completed
+ isb
+ ret
+ENDPROC(cpu_do_resume)
+#endif
+
/*
* cpu_switch_mm(pgd_phys, tsk)
*
diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h
index 0961275..7151007 100644
--- a/arch/avr32/include/asm/barrier.h
+++ b/arch/avr32/include/asm/barrier.h
@@ -8,22 +8,15 @@
#ifndef __ASM_AVR32_BARRIER_H
#define __ASM_AVR32_BARRIER_H
-#define nop() asm volatile("nop")
-
-#define mb() asm volatile("" : : : "memory")
-#define rmb() mb()
-#define wmb() asm volatile("sync 0" : : : "memory")
-#define read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { var = value; mb(); } while(0)
+/*
+ * Weirdest thing ever.. no full barrier, but it has a write barrier!
+ */
+#define wmb() asm volatile("sync 0" : : : "memory")
#ifdef CONFIG_SMP
# error "The AVR32 port does not support SMP"
-#else
-# define smp_mb() barrier()
-# define smp_rmb() barrier()
-# define smp_wmb() barrier()
-# define smp_read_barrier_depends() do { } while(0)
#endif
+#include <asm-generic/barrier.h>
#endif /* __ASM_AVR32_BARRIER_H */
diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h
index ebb1895..19283a1 100644
--- a/arch/blackfin/include/asm/barrier.h
+++ b/arch/blackfin/include/asm/barrier.h
@@ -23,26 +23,10 @@
# define rmb() do { barrier(); smp_check_barrier(); } while (0)
# define wmb() do { barrier(); smp_mark_barrier(); } while (0)
# define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
-#else
-# define mb() barrier()
-# define rmb() barrier()
-# define wmb() barrier()
-# define read_barrier_depends() do { } while (0)
#endif
-#else /* !CONFIG_SMP */
-
-#define mb() barrier()
-#define rmb() barrier()
-#define wmb() barrier()
-#define read_barrier_depends() do { } while (0)
-
#endif /* !CONFIG_SMP */
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define smp_read_barrier_depends() read_barrier_depends()
+#include <asm-generic/barrier.h>
#endif /* _BLACKFIN_BARRIER_H */
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index b06caf6..199b1a9 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -3,6 +3,7 @@ header-y += arch-v10/
header-y += arch-v32/
+generic-y += barrier.h
generic-y += clkdev.h
generic-y += exec.h
generic-y += kvm_para.h
diff --git a/arch/cris/include/asm/barrier.h b/arch/cris/include/asm/barrier.h
deleted file mode 100644
index 198ad7f..0000000
--- a/arch/cris/include/asm/barrier.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __ASM_CRIS_BARRIER_H
-#define __ASM_CRIS_BARRIER_H
-
-#define nop() __asm__ __volatile__ ("nop");
-
-#define barrier() __asm__ __volatile__("": : :"memory")
-#define mb() barrier()
-#define rmb() mb()
-#define wmb() mb()
-#define read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
-#endif
-
-#endif /* __ASM_CRIS_BARRIER_H */
diff --git a/arch/frv/include/asm/barrier.h b/arch/frv/include/asm/barrier.h
index 06776ad..abbef47 100644
--- a/arch/frv/include/asm/barrier.h
+++ b/arch/frv/include/asm/barrier.h
@@ -17,13 +17,7 @@
#define mb() asm volatile ("membar" : : :"memory")
#define rmb() asm volatile ("membar" : : :"memory")
#define wmb() asm volatile ("membar" : : :"memory")
-#define read_barrier_depends() do { } while (0)
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do {} while(0)
-#define set_mb(var, value) \
- do { var = (value); barrier(); } while (0)
+#include <asm-generic/barrier.h>
#endif /* _ASM_BARRIER_H */
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 67c3450..ada843c 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -2,6 +2,7 @@
header-y += ucontext.h
generic-y += auxvec.h
+generic-y += barrier.h
generic-y += bug.h
generic-y += bugs.h
generic-y += clkdev.h
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 8a64ff2..7aae4cb 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -160,8 +160,12 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
-
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
#endif
diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h
index 1041a8e..4e863da 100644
--- a/arch/hexagon/include/asm/barrier.h
+++ b/arch/hexagon/include/asm/barrier.h
@@ -29,10 +29,6 @@
#define smp_read_barrier_depends() barrier()
#define smp_wmb() barrier()
#define smp_mb() barrier()
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
/* Set a value and use a memory barrier. Used by the scheduler somewhere. */
#define set_mb(var, value) \
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 4e4119b..a8c3a11 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -147,9 +147,6 @@ config PARAVIRT
over full virtualization. However, when run without a hypervisor
the kernel is theoretically slower and slightly larger.
-
-source "arch/ia64/xen/Kconfig"
-
endif
choice
@@ -175,7 +172,6 @@ config IA64_GENERIC
SGI-SN2 For SGI Altix systems
SGI-UV For SGI UV systems
Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>
- Xen-domU For xen domU system
If you don't know what to do, choose "generic".
@@ -231,14 +227,6 @@ config IA64_HP_SIM
bool "Ski-simulator"
select SWIOTLB
-config IA64_XEN_GUEST
- bool "Xen guest"
- select SWIOTLB
- depends on XEN
- help
- Build a kernel that runs on Xen guest domain. At this moment only
- 16KB page size in supported.
-
endchoice
choice
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index be7bfa1..f37238f 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -51,11 +51,9 @@ core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/
core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
-core-$(CONFIG_IA64_XEN_GUEST) += arch/ia64/dig/
core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/
core-$(CONFIG_KVM) += arch/ia64/kvm/
-core-$(CONFIG_XEN) += arch/ia64/xen/
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig
deleted file mode 100644
index b025acf..0000000
--- a/arch/ia64/configs/xen_domu_defconfig
+++ /dev/null
@@ -1,199 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=20
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARAVIRT_GUEST=y
-CONFIG_IA64_XEN_GUEST=y
-CONFIG_MCKINLEY=y
-CONFIG_IA64_CYCLONE=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=16
-CONFIG_HOTPLUG_CPU=y
-CONFIG_PERMIT_BSP_REMOVE=y
-CONFIG_FORCE_CPEI_RETARGET=y
-CONFIG_IA64_MCA_RECOVERY=y
-CONFIG_PERFMON=y
-CONFIG_IA64_PALINFO=y
-CONFIG_KEXEC=y
-CONFIG_EFI_VARS=y
-CONFIG_BINFMT_MISC=m
-CONFIG_ACPI_PROCFS=y
-CONFIG_ACPI_BUTTON=m
-CONFIG_ACPI_FAN=m
-CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=m
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_CMD64X=y
-CONFIG_BLK_DEV_PIIX=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_QLOGIC_1280=y
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_BLK_DEV_DM=m
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=y
-CONFIG_FUSION_FC=y
-CONFIG_FUSION_CTL=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_TULIP=y
-CONFIG_TULIP=m
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
-CONFIG_E100=m
-CONFIG_E1000=y
-CONFIG_TIGON3=y
-CONFIG_NETCONSOLE=y
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_GAMEPORT=m
-CONFIG_SERIAL_NONSTANDARD=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=6
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_EFI_RTC=y
-CONFIG_RAW_DRIVER=m
-CONFIG_HPET=y
-CONFIG_AGP=m
-CONFIG_DRM=m
-CONFIG_DRM_TDFX=m
-CONFIG_DRM_R128=m
-CONFIG_DRM_RADEON=m
-CONFIG_DRM_MGA=m
-CONFIG_DRM_SIS=m
-CONFIG_HID_GYRATION=y
-CONFIG_HID_NTRIG=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_HID_TOPSEED=y
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_UHCI_HCD=y
-CONFIG_USB_STORAGE=m
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_REISERFS_FS=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_XFS_FS=y
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_UDF_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
-CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_IA64_GRANULE_16MB=y
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_MD5=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index faa1bf0..d651102 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -111,8 +111,6 @@ static inline const char *acpi_get_sysname (void)
return "uv";
# elif defined (CONFIG_IA64_DIG)
return "dig";
-# elif defined (CONFIG_IA64_XEN_GUEST)
- return "xen";
# elif defined(CONFIG_IA64_DIG_VTD)
return "dig_vtd";
# else
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 60576e0..d0a69aa 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -45,14 +45,37 @@
# define smp_rmb() rmb()
# define smp_wmb() wmb()
# define smp_read_barrier_depends() read_barrier_depends()
+
#else
+
# define smp_mb() barrier()
# define smp_rmb() barrier()
# define smp_wmb() barrier()
# define smp_read_barrier_depends() do { } while(0)
+
#endif
/*
+ * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq,
+ * so there is no need for asm trickery!
+ */
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
+/*
* XXX check on this ---I suspect what Linus really wants here is
* acquire vs release semantics but we can't discuss this stuff with
* Linus just yet. Grrr...
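The new smp_store_release()/smp_load_acquire() pair provides one-way release/acquire ordering without a full barrier; on ia64 a plain volatile access suffices because, as the comment above notes, the compiler already emits st.rel/ld.acq. A hedged usage sketch of the intended message-passing pattern (struct message, flag, producer() and consumer() are illustrative names, not part of the patch):

	struct message { int payload; };
	static struct message msg;
	static int flag;

	static void producer(void)
	{
		msg.payload = 42;		/* A: prepare the data             */
		smp_store_release(&flag, 1);	/* B: publish; A ordered before B  */
	}

	static int consumer(void)
	{
		if (smp_load_acquire(&flag))	/* C: acquire pairs with B         */
			return msg.payload;	/* D: guaranteed to observe A      */
		return -1;			/* not yet published               */
	}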
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 2d1ad4b1..9c39bdf 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -113,8 +113,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# include <asm/machvec_sn2.h>
# elif defined (CONFIG_IA64_SGI_UV)
# include <asm/machvec_uv.h>
-# elif defined (CONFIG_IA64_XEN_GUEST)
-# include <asm/machvec_xen.h>
# elif defined (CONFIG_IA64_GENERIC)
# ifdef MACHVEC_PLATFORM_HEADER
diff --git a/arch/ia64/include/asm/machvec_xen.h b/arch/ia64/include/asm/machvec_xen.h
deleted file mode 100644
index 8b8bd0e..0000000
--- a/arch/ia64/include/asm/machvec_xen.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _ASM_IA64_MACHVEC_XEN_h
-#define _ASM_IA64_MACHVEC_XEN_h
-
-extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_cpu_init_t xen_cpu_init;
-extern ia64_mv_irq_init_t xen_irq_init;
-extern ia64_mv_send_ipi_t xen_platform_send_ipi;
-
-/*
- * This stuff has dual use!
- *
- * For a generic kernel, the macros are used to initialize the
- * platform's machvec structure. When compiling a non-generic kernel,
- * the macros are used directly.
- */
-#define ia64_platform_name "xen"
-#define platform_setup dig_setup
-#define platform_cpu_init xen_cpu_init
-#define platform_irq_init xen_irq_init
-#define platform_send_ipi xen_platform_send_ipi
-
-#endif /* _ASM_IA64_MACHVEC_XEN_h */
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 61c7b17..092f1c9 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -18,7 +18,6 @@
* - crash dumping code reserved region
* - Kernel memory map built from EFI memory map
* - ELF core header
- * - xen start info if CONFIG_XEN
*
* More could be added if necessary
*/
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index b149b88..b53518a 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -75,7 +75,6 @@ void *paravirt_get_gate_section(void);
#ifdef CONFIG_PARAVIRT_GUEST
#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0
-#define PARAVIRT_HYPERVISOR_TYPE_XEN 1
#ifndef __ASSEMBLY__
diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h
index 44ef9ef..42b233b 100644
--- a/arch/ia64/include/asm/pvclock-abi.h
+++ b/arch/ia64/include/asm/pvclock-abi.h
@@ -11,7 +11,7 @@
/*
* These structs MUST NOT be changed.
* They are the ABI between hypervisor and guest OS.
- * Both Xen and KVM are using this.
+ * KVM is using this.
*
* pvclock_vcpu_time_info holds the system time and the tsc timestamp
* of the last update. So the guest can use the tsc delta to get a
diff --git a/arch/ia64/include/asm/sync_bitops.h b/arch/ia64/include/asm/sync_bitops.h
deleted file mode 100644
index 593c12e..0000000
--- a/arch/ia64/include/asm/sync_bitops.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef _ASM_IA64_SYNC_BITOPS_H
-#define _ASM_IA64_SYNC_BITOPS_H
-
-/*
- * Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp>
- *
- * Based on synch_bitops.h which Dan Magenhaimer wrote.
- *
- * bit operations which provide guaranteed strong synchronisation
- * when communicating with Xen or other guest OSes running on other CPUs.
- */
-
-static inline void sync_set_bit(int nr, volatile void *addr)
-{
- set_bit(nr, addr);
-}
-
-static inline void sync_clear_bit(int nr, volatile void *addr)
-{
- clear_bit(nr, addr);
-}
-
-static inline void sync_change_bit(int nr, volatile void *addr)
-{
- change_bit(nr, addr);
-}
-
-static inline int sync_test_and_set_bit(int nr, volatile void *addr)
-{
- return test_and_set_bit(nr, addr);
-}
-
-static inline int sync_test_and_clear_bit(int nr, volatile void *addr)
-{
- return test_and_clear_bit(nr, addr);
-}
-
-static inline int sync_test_and_change_bit(int nr, volatile void *addr)
-{
- return test_and_change_bit(nr, addr);
-}
-
-static inline int sync_test_bit(int nr, const volatile void *addr)
-{
- return test_bit(nr, addr);
-}
-
-#define sync_cmpxchg(ptr, old, new) \
- ((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new)))
-
-#endif /* _ASM_IA64_SYNC_BITOPS_H */
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h
deleted file mode 100644
index baa74c8..0000000
--- a/arch/ia64/include/asm/xen/events.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/******************************************************************************
- * arch/ia64/include/asm/xen/events.h
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-#ifndef _ASM_IA64_XEN_EVENTS_H
-#define _ASM_IA64_XEN_EVENTS_H
-
-enum ipi_vector {
- XEN_RESCHEDULE_VECTOR,
- XEN_IPI_VECTOR,
- XEN_CMCP_VECTOR,
- XEN_CPEP_VECTOR,
-
- XEN_NR_IPIS,
-};
-
-static inline int xen_irqs_disabled(struct pt_regs *regs)
-{
- return !(ia64_psr(regs)->i);
-}
-
-#define irq_ctx_init(cpu) do { } while (0)
-
-#endif /* _ASM_IA64_XEN_EVENTS_H */
diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h
deleted file mode 100644
index ed28bcd..0000000
--- a/arch/ia64/include/asm/xen/hypercall.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/******************************************************************************
- * hypercall.h
- *
- * Linux-specific hypervisor handling.
- *
- * Copyright (c) 2002-2004, K A Fraser
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _ASM_IA64_XEN_HYPERCALL_H
-#define _ASM_IA64_XEN_HYPERCALL_H
-
-#include <xen/interface/xen.h>
-#include <xen/interface/physdev.h>
-#include <xen/interface/sched.h>
-#include <asm/xen/xcom_hcall.h>
-struct xencomm_handle;
-extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
- unsigned long a3, unsigned long a4,
- unsigned long a5, unsigned long cmd);
-
-/*
- * Assembler stubs for hyper-calls.
- */
-
-#define _hypercall0(type, name) \
-({ \
- long __res; \
- __res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\
- (type)__res; \
-})
-
-#define _hypercall1(type, name, a1) \
-({ \
- long __res; \
- __res = __hypercall((unsigned long)a1, \
- 0, 0, 0, 0, __HYPERVISOR_##name); \
- (type)__res; \
-})
-
-#define _hypercall2(type, name, a1, a2) \
-({ \
- long __res; \
- __res = __hypercall((unsigned long)a1, \
- (unsigned long)a2, \
- 0, 0, 0, __HYPERVISOR_##name); \
- (type)__res; \
-})
-
-#define _hypercall3(type, name, a1, a2, a3) \
-({ \
- long __res; \
- __res = __hypercall((unsigned long)a1, \
- (unsigned long)a2, \
- (unsigned long)a3, \
- 0, 0, __HYPERVISOR_##name); \
- (type)__res; \
-})
-
-#define _hypercall4(type, name, a1, a2, a3, a4) \
-({ \
- long __res; \
- __res = __hypercall((unsigned long)a1, \
- (unsigned long)a2, \
- (unsigned long)a3, \
- (unsigned long)a4, \
- 0, __HYPERVISOR_##name); \
- (type)__res; \
-})
-
-#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
-({ \
- long __res; \
- __res = __hypercall((unsigned long)a1, \
- (unsigned long)a2, \
- (unsigned long)a3, \
- (unsigned long)a4, \
- (unsigned long)a5, \
- __HYPERVISOR_##name); \
- (type)__res; \
-})
-
-
-static inline int
-xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
-{
- return _hypercall2(int, sched_op, cmd, arg);
-}
-
-static inline long
-HYPERVISOR_set_timer_op(u64 timeout)
-{
- unsigned long timeout_hi = (unsigned long)(timeout >> 32);
- unsigned long timeout_lo = (unsigned long)timeout;
- return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
-}
-
-static inline int
-xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list,
- int nr_calls)
-{
- return _hypercall2(int, multicall, call_list, nr_calls);
-}
-
-static inline int
-xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg)
-{
- return _hypercall2(int, memory_op, cmd, arg);
-}
-
-static inline int
-xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg)
-{
- return _hypercall2(int, event_channel_op, cmd, arg);
-}
-
-static inline int
-xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg)
-{
- return _hypercall2(int, xen_version, cmd, arg);
-}
-
-static inline int
-xencomm_arch_hypercall_console_io(int cmd, int count,
- struct xencomm_handle *str)
-{
- return _hypercall3(int, console_io, cmd, count, str);
-}
-
-static inline int
-xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg)
-{
- return _hypercall2(int, physdev_op, cmd, arg);
-}
-
-static inline int
-xencomm_arch_hypercall_grant_table_op(unsigned int cmd,
- struct xencomm_handle *uop,
- unsigned int count)
-{
- return _hypercall3(int, grant_table_op, cmd, uop, count);
-}
-
-int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
-
-extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);
-
-static inline int
-xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg)
-{
- return _hypercall2(int, callback_op, cmd, arg);
-}
-
-static inline long
-xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg)
-{
- return _hypercall3(long, vcpu_op, cmd, cpu, arg);
-}
-
-static inline int
-HYPERVISOR_physdev_op(int cmd, void *arg)
-{
- switch (cmd) {
- case PHYSDEVOP_eoi:
- return _hypercall1(int, ia64_fast_eoi,
- ((struct physdev_eoi *)arg)->irq);
- default:
- return xencomm_hypercall_physdev_op(cmd, arg);
- }
-}
-
-static inline long
-xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg)
-{
- return _hypercall1(long, opt_feature, arg);
-}
-
-/* for balloon driver */
-#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0)
-
-/* Use xencomm to do hypercalls. */
-#define HYPERVISOR_sched_op xencomm_hypercall_sched_op
-#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op
-#define HYPERVISOR_callback_op xencomm_hypercall_callback_op
-#define HYPERVISOR_multicall xencomm_hypercall_multicall
-#define HYPERVISOR_xen_version xencomm_hypercall_xen_version
-#define HYPERVISOR_console_io xencomm_hypercall_console_io
-#define HYPERVISOR_memory_op xencomm_hypercall_memory_op
-#define HYPERVISOR_suspend xencomm_hypercall_suspend
-#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op
-#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature
-
-/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */
-#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; })
-
-static inline int
-HYPERVISOR_shutdown(
- unsigned int reason)
-{
- struct sched_shutdown sched_shutdown = {
- .reason = reason
- };
-
- int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
-
- return rc;
-}
-
-/* for netfront.c, netback.c */
-#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */
-
-static inline void
-MULTI_update_va_mapping(
- struct multicall_entry *mcl, unsigned long va,
- pte_t new_val, unsigned long flags)
-{
- mcl->op = __HYPERVISOR_update_va_mapping;
- mcl->result = 0;
-}
-
-static inline void
-MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
- void *uop, unsigned int count)
-{
- mcl->op = __HYPERVISOR_grant_table_op;
- mcl->args[0] = cmd;
- mcl->args[1] = (unsigned long)uop;
- mcl->args[2] = count;
-}
-
-static inline void
-MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
- int count, int *success_count, domid_t domid)
-{
- mcl->op = __HYPERVISOR_mmu_update;
- mcl->args[0] = (unsigned long)req;
- mcl->args[1] = count;
- mcl->args[2] = (unsigned long)success_count;
- mcl->args[3] = domid;
-}
-
-#endif /* _ASM_IA64_XEN_HYPERCALL_H */
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
deleted file mode 100644
index 67455c2..0000000
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/******************************************************************************
- * hypervisor.h
- *
- * Linux-specific hypervisor handling.
- *
- * Copyright (c) 2002-2004, K A Fraser
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _ASM_IA64_XEN_HYPERVISOR_H
-#define _ASM_IA64_XEN_HYPERVISOR_H
-
-#include <linux/err.h>
-#include <xen/interface/xen.h>
-#include <xen/interface/version.h> /* to compile feature.c */
-#include <xen/features.h> /* to comiple xen-netfront.c */
-#include <xen/xen.h>
-#include <asm/xen/hypercall.h>
-
-#ifdef CONFIG_XEN
-extern struct shared_info *HYPERVISOR_shared_info;
-extern struct start_info *xen_start_info;
-
-void __init xen_setup_vcpu_info_placement(void);
-void force_evtchn_callback(void);
-
-/* for drivers/xen/balloon/balloon.c */
-#ifdef CONFIG_XEN_SCRUB_PAGES
-#define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-#else
-#define scrub_pages(_p, _n) ((void)0)
-#endif
-
-/* For setup_arch() in arch/ia64/kernel/setup.c */
-void xen_ia64_enable_opt_feature(void);
-#endif
-
-#endif /* _ASM_IA64_XEN_HYPERVISOR_H */
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h
deleted file mode 100644
index c53a476..0000000
--- a/arch/ia64/include/asm/xen/inst.h
+++ /dev/null
@@ -1,486 +0,0 @@
-/******************************************************************************
- * arch/ia64/include/asm/xen/inst.h
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <asm/xen/privop.h>
-
-#define ia64_ivt xen_ivt
-#define DO_SAVE_MIN XEN_DO_SAVE_MIN
-
-#define __paravirt_switch_to xen_switch_to
-#define __paravirt_leave_syscall xen_leave_syscall
-#define __paravirt_work_processed_syscall xen_work_processed_syscall
-#define __paravirt_leave_kernel xen_leave_kernel
-#define __paravirt_pending_syscall_end xen_work_pending_syscall_end
-#define __paravirt_work_processed_syscall_target \
- xen_work_processed_syscall
-
-#define paravirt_fsyscall_table xen_fsyscall_table
-#define paravirt_fsys_bubble_down xen_fsys_bubble_down
-
-#define MOV_FROM_IFA(reg) \
- movl reg = XSI_IFA; \
- ;; \
- ld8 reg = [reg]
-
-#define MOV_FROM_ITIR(reg) \
- movl reg = XSI_ITIR; \
- ;; \
- ld8 reg = [reg]
-
-#define MOV_FROM_ISR(reg) \
- movl reg = XSI_ISR; \
- ;; \
- ld8 reg = [reg]
-
-#define MOV_FROM_IHA(reg) \
- movl reg = XSI_IHA; \
- ;; \
- ld8 reg = [reg]
-
-#define MOV_FROM_IPSR(pred, reg) \
-(pred) movl reg = XSI_IPSR; \
- ;; \
-(pred) ld8 reg = [reg]
-
-#define MOV_FROM_IIM(reg) \
- movl reg = XSI_IIM; \
- ;; \
- ld8 reg = [reg]
-
-#define MOV_FROM_IIP(reg) \
- movl reg = XSI_IIP; \
- ;; \
- ld8 reg = [reg]
-
-.macro __MOV_FROM_IVR reg, clob
- .ifc "\reg", "r8"
- XEN_HYPER_GET_IVR
- .exitm
- .endif
- .ifc "\clob", "r8"
- XEN_HYPER_GET_IVR
- ;;
- mov \reg = r8
- .exitm
- .endif
-
- mov \clob = r8
- ;;
- XEN_HYPER_GET_IVR
- ;;
- mov \reg = r8
- ;;
- mov r8 = \clob
-.endm
-#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob
-
-.macro __MOV_FROM_PSR pred, reg, clob
- .ifc "\reg", "r8"
- (\pred) XEN_HYPER_GET_PSR;
- .exitm
- .endif
- .ifc "\clob", "r8"
- (\pred) XEN_HYPER_GET_PSR
- ;;
- (\pred) mov \reg = r8
- .exitm
- .endif
-
- (\pred) mov \clob = r8
- (\pred) XEN_HYPER_GET_PSR
- ;;
- (\pred) mov \reg = r8
- (\pred) mov r8 = \clob
-.endm
-#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob
-
-/* assuming ar.itc is read with interrupt disabled. */
-#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
-(pred) movl clob = XSI_ITC_OFFSET; \
- ;; \
-(pred) ld8 clob = [clob]; \
-(pred) mov reg = ar.itc; \
- ;; \
-(pred) add reg = reg, clob; \
- ;; \
-(pred) movl clob = XSI_ITC_LAST; \
- ;; \
-(pred) ld8 clob = [clob]; \
- ;; \
-(pred) cmp.geu.unc pred_clob, p0 = clob, reg; \
- ;; \
-(pred_clob) add reg = 1, clob; \
- ;; \
-(pred) movl clob = XSI_ITC_LAST; \
- ;; \
-(pred) st8 [clob] = reg
-
-
-#define MOV_TO_IFA(reg, clob) \
- movl clob = XSI_IFA; \
- ;; \
- st8 [clob] = reg \
-
-#define MOV_TO_ITIR(pred, reg, clob) \
-(pred) movl clob = XSI_ITIR; \
- ;; \
-(pred) st8 [clob] = reg
-
-#define MOV_TO_IHA(pred, reg, clob) \
-(pred) movl clob = XSI_IHA; \
- ;; \
-(pred) st8 [clob] = reg
-
-#define MOV_TO_IPSR(pred, reg, clob) \
-(pred) movl clob = XSI_IPSR; \
- ;; \
-(pred) st8 [clob] = reg; \
- ;;
-
-#define MOV_TO_IFS(pred, reg, clob) \
-(pred) movl clob = XSI_IFS; \
- ;; \
-(pred) st8 [clob] = reg; \
- ;;
-
-#define MOV_TO_IIP(reg, clob) \
- movl clob = XSI_IIP; \
- ;; \
- st8 [clob] = reg
-
-.macro ____MOV_TO_KR kr, reg, clob0, clob1
- .ifc "\clob0", "r9"
- .error "clob0 \clob0 must not be r9"
- .endif
- .ifc "\clob1", "r8"
- .error "clob1 \clob1 must not be r8"
- .endif
-
- .ifnc "\reg", "r9"
- .ifnc "\clob1", "r9"
- mov \clob1 = r9
- .endif
- mov r9 = \reg
- .endif
- .ifnc "\clob0", "r8"
- mov \clob0 = r8
- .endif
- mov r8 = \kr
- ;;
- XEN_HYPER_SET_KR
-
- .ifnc "\reg", "r9"
- .ifnc "\clob1", "r9"
- mov r9 = \clob1
- .endif
- .endif
- .ifnc "\clob0", "r8"
- mov r8 = \clob0
- .endif
-.endm
-
-.macro __MOV_TO_KR kr, reg, clob0, clob1
- .ifc "\clob0", "r9"
- ____MOV_TO_KR \kr, \reg, \clob1, \clob0
- .exitm
- .endif
- .ifc "\clob1", "r8"
- ____MOV_TO_KR \kr, \reg, \clob1, \clob0
- .exitm
- .endif
-
- ____MOV_TO_KR \kr, \reg, \clob0, \clob1
-.endm
-
-#define MOV_TO_KR(kr, reg, clob0, clob1) \
- __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
-
-
-.macro __ITC_I pred, reg, clob
- .ifc "\reg", "r8"
- (\pred) XEN_HYPER_ITC_I
- .exitm
- .endif
- .ifc "\clob", "r8"
- (\pred) mov r8 = \reg
- ;;
- (\pred) XEN_HYPER_ITC_I
- .exitm
- .endif
-
- (\pred) mov \clob = r8
- (\pred) mov r8 = \reg
- ;;
- (\pred) XEN_HYPER_ITC_I
- ;;
- (\pred) mov r8 = \clob
- ;;
-.endm
-#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob
-
-.macro __ITC_D pred, reg, clob
- .ifc "\reg", "r8"
- (\pred) XEN_HYPER_ITC_D
- ;;
- .exitm
- .endif
- .ifc "\clob", "r8"
- (\pred) mov r8 = \reg
- ;;
- (\pred) XEN_HYPER_ITC_D
- ;;
- .exitm
- .endif
-
- (\pred) mov \clob = r8
- (\pred) mov r8 = \reg
- ;;
- (\pred) XEN_HYPER_ITC_D
- ;;
- (\pred) mov r8 = \clob
- ;;
-.endm
-#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob
-
-.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
- .ifc "\reg", "r8"
- (\pred_i)XEN_HYPER_ITC_I
- ;;
- (\pred_d)XEN_HYPER_ITC_D
- ;;
- .exitm
- .endif
- .ifc "\clob", "r8"
- mov r8 = \reg
- ;;
- (\pred_i)XEN_HYPER_ITC_I
- ;;
- (\pred_d)XEN_HYPER_ITC_D
- ;;
- .exitm
- .endif
-
- mov \clob = r8
- mov r8 = \reg
- ;;
- (\pred_i)XEN_HYPER_ITC_I
- ;;
- (\pred_d)XEN_HYPER_ITC_D
- ;;
- mov r8 = \clob
- ;;
-.endm
-#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
- __ITC_I_AND_D pred_i, pred_d, reg, clob
-
-.macro __THASH pred, reg0, reg1, clob
- .ifc "\reg0", "r8"
- (\pred) mov r8 = \reg1
- (\pred) XEN_HYPER_THASH
- .exitm
- .endc
- .ifc "\reg1", "r8"
- (\pred) XEN_HYPER_THASH
- ;;
- (\pred) mov \reg0 = r8
- ;;
- .exitm
- .endif
- .ifc "\clob", "r8"
- (\pred) mov r8 = \reg1
- (\pred) XEN_HYPER_THASH
- ;;
- (\pred) mov \reg0 = r8
- ;;
- .exitm
- .endif
-
- (\pred) mov \clob = r8
- (\pred) mov r8 = \reg1
- (\pred) XEN_HYPER_THASH
- ;;
- (\pred) mov \reg0 = r8
- (\pred) mov r8 = \clob
- ;;
-.endm
-#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob
-
-#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
- mov clob0 = 1; \
- movl clob1 = XSI_PSR_IC; \
- ;; \
- st4 [clob1] = clob0 \
- ;;
-
-#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
- ;; \
- srlz.d; \
- mov clob1 = 1; \
- movl clob0 = XSI_PSR_IC; \
- ;; \
- st4 [clob0] = clob1
-
-#define RSM_PSR_IC(clob) \
- movl clob = XSI_PSR_IC; \
- ;; \
- st4 [clob] = r0; \
- ;;
-
-/* pred will be clobbered */
-#define MASK_TO_PEND_OFS (-1)
-#define SSM_PSR_I(pred, pred_clob, clob) \
-(pred) movl clob = XSI_PSR_I_ADDR \
- ;; \
-(pred) ld8 clob = [clob] \
- ;; \
- /* if (pred) vpsr.i = 1 */ \
- /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \
-(pred) st1 [clob] = r0, MASK_TO_PEND_OFS \
- ;; \
- /* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \
-(pred) ld1 clob = [clob] \
- ;; \
-(pred) cmp.ne.unc pred_clob, p0 = clob, r0 \
- ;; \
-(pred_clob)XEN_HYPER_SSM_I /* do areal ssm psr.i */
-
-#define RSM_PSR_I(pred, clob0, clob1) \
- movl clob0 = XSI_PSR_I_ADDR; \
- mov clob1 = 1; \
- ;; \
- ld8 clob0 = [clob0]; \
- ;; \
-(pred) st1 [clob0] = clob1
-
-#define RSM_PSR_I_IC(clob0, clob1, clob2) \
- movl clob0 = XSI_PSR_I_ADDR; \
- movl clob1 = XSI_PSR_IC; \
- ;; \
- ld8 clob0 = [clob0]; \
- mov clob2 = 1; \
- ;; \
- /* note: clears both vpsr.i and vpsr.ic! */ \
- st1 [clob0] = clob2; \
- st4 [clob1] = r0; \
- ;;
-
-#define RSM_PSR_DT \
- XEN_HYPER_RSM_PSR_DT
-
-#define RSM_PSR_BE_I(clob0, clob1) \
- RSM_PSR_I(p0, clob0, clob1); \
- rum psr.be
-
-#define SSM_PSR_DT_AND_SRLZ_I \
- XEN_HYPER_SSM_PSR_DT
-
-#define BSW_0(clob0, clob1, clob2) \
- ;; \
- /* r16-r31 all now hold bank1 values */ \
- mov clob2 = ar.unat; \
- movl clob0 = XSI_BANK1_R16; \
- movl clob1 = XSI_BANK1_R16 + 8; \
- ;; \
-.mem.offset 0, 0; st8.spill [clob0] = r16, 16; \
-.mem.offset 8, 0; st8.spill [clob1] = r17, 16; \
- ;; \
-.mem.offset 0, 0; st8.spill [clob0] = r18, 16; \
-.mem.offset 8, 0; st8.spill [clob1] = r19, 16; \
- ;; \
-.mem.offset 0, 0; st8.spill [clob0] = r20, 16; \
-.mem.offset 8, 0; st8.spill [clob1] = r21, 16; \
- ;; \
-.mem.offset 0, 0; st8.spill [clob0] = r22, 16; \
-.mem.offset 8, 0; st8.spill [clob1] = r23, 16; \
- ;; \
-.mem.offset 0, 0; st8.spill [clob0] = r24, 16; \
-.mem.offset 8, 0; st8.spill [clob1] = r25, 16; \
- ;; \
-.mem.offset 0, 0; st8.spill [clob0] = r26, 16; \
-.mem.offset 8, 0; st8.spill [clob1] = r27, 16; \
- ;; \
-.mem.offset 0, 0; st8.spill [clob0] = r28, 16; \
-.mem.offset 8, 0; st8.spill [clob1] = r29, 16; \
- ;; \
-.mem.offset 0, 0; st8.spill [clob0] = r30, 16; \
-.mem.offset 8, 0; st8.spill [clob1] = r31, 16; \
- ;; \
- mov clob1 = ar.unat; \
- movl clob0 = XSI_B1NAT; \
- ;; \
- st8 [clob0] = clob1; \
- mov ar.unat = clob2; \
- movl clob0 = XSI_BANKNUM; \
- ;; \
- st4 [clob0] = r0
-
-
- /* FIXME: THIS CODE IS NOT NaT SAFE! */
-#define XEN_BSW_1(clob) \
- mov clob = ar.unat; \
- movl r30 = XSI_B1NAT; \
- ;; \
- ld8 r30 = [r30]; \
- mov r31 = 1; \
- ;; \
- mov ar.unat = r30; \
- movl r30 = XSI_BANKNUM; \
- ;; \
- st4 [r30] = r31; \
- movl r30 = XSI_BANK1_R16; \
- movl r31 = XSI_BANK1_R16+8; \
- ;; \
- ld8.fill r16 = [r30], 16; \
- ld8.fill r17 = [r31], 16; \
- ;; \
- ld8.fill r18 = [r30], 16; \
- ld8.fill r19 = [r31], 16; \
- ;; \
- ld8.fill r20 = [r30], 16; \
- ld8.fill r21 = [r31], 16; \
- ;; \
- ld8.fill r22 = [r30], 16; \
- ld8.fill r23 = [r31], 16; \
- ;; \
- ld8.fill r24 = [r30], 16; \
- ld8.fill r25 = [r31], 16; \
- ;; \
- ld8.fill r26 = [r30], 16; \
- ld8.fill r27 = [r31], 16; \
- ;; \
- ld8.fill r28 = [r30], 16; \
- ld8.fill r29 = [r31], 16; \
- ;; \
- ld8.fill r30 = [r30]; \
- ld8.fill r31 = [r31]; \
- ;; \
- mov ar.unat = clob
-
-#define BSW_1(clob0, clob1) XEN_BSW_1(clob1)
-
-
-#define COVER \
- XEN_HYPER_COVER
-
-#define RFI \
- XEN_HYPER_RFI; \
- dv_serialize_data
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
deleted file mode 100644
index e88c5de..0000000
--- a/arch/ia64/include/asm/xen/interface.h
+++ /dev/null
@@ -1,363 +0,0 @@
-/******************************************************************************
- * arch-ia64/hypervisor-if.h
- *
- * Guest OS interface to IA64 Xen.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Copyright by those who contributed. (in alphabetical order)
- *
- * Anthony Xu <anthony.xu@intel.com>
- * Eddie Dong <eddie.dong@intel.com>
- * Fred Yang <fred.yang@intel.com>
- * Kevin Tian <kevin.tian@intel.com>
- * Alex Williamson <alex.williamson@hp.com>
- * Chris Wright <chrisw@sous-sol.org>
- * Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
- * Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
- * Hollis Blanchard <hollisb@us.ibm.com>
- * Isaku Yamahata <yamahata@valinux.co.jp>
- * Jan Beulich <jbeulich@novell.com>
- * John Levon <john.levon@sun.com>
- * Kazuhiro Suzuki <kaz@jp.fujitsu.com>
- * Keir Fraser <keir.fraser@citrix.com>
- * Kouya Shimura <kouya@jp.fujitsu.com>
- * Masaki Kanno <kanno.masaki@jp.fujitsu.com>
- * Matt Chapman <matthewc@hp.com>
- * Matthew Chapman <matthewc@hp.com>
- * Samuel Thibault <samuel.thibault@eu.citrix.com>
- * Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
- * Tristan Gingold <tgingold@free.fr>
- * Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
- * Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com>
- * Zhang Xin <xing.z.zhang@intel.com>
- * Zhang xiantao <xiantao.zhang@intel.com>
- * dan.magenheimer@hp.com
- * ian.pratt@cl.cam.ac.uk
- * michael.fetterman@cl.cam.ac.uk
- */
-
-#ifndef _ASM_IA64_XEN_INTERFACE_H
-#define _ASM_IA64_XEN_INTERFACE_H
-
-#define __DEFINE_GUEST_HANDLE(name, type) \
- typedef struct { type *p; } __guest_handle_ ## name
-
-#define DEFINE_GUEST_HANDLE_STRUCT(name) \
- __DEFINE_GUEST_HANDLE(name, struct name)
-#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
-#define GUEST_HANDLE(name) __guest_handle_ ## name
-#define GUEST_HANDLE_64(name) GUEST_HANDLE(name)
-#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
-
-#ifndef __ASSEMBLY__
-/* Explicitly size integers that represent pfns in the public interface
- * with Xen so that we could have one ABI that works for 32 and 64 bit
- * guests. */
-typedef unsigned long xen_pfn_t;
-typedef unsigned long xen_ulong_t;
-/* Guest handles for primitive C types. */
-__DEFINE_GUEST_HANDLE(uchar, unsigned char);
-__DEFINE_GUEST_HANDLE(uint, unsigned int);
-__DEFINE_GUEST_HANDLE(ulong, unsigned long);
-
-DEFINE_GUEST_HANDLE(char);
-DEFINE_GUEST_HANDLE(int);
-DEFINE_GUEST_HANDLE(long);
-DEFINE_GUEST_HANDLE(void);
-DEFINE_GUEST_HANDLE(uint64_t);
-DEFINE_GUEST_HANDLE(uint32_t);
-
-DEFINE_GUEST_HANDLE(xen_pfn_t);
-#define PRI_xen_pfn "lx"
-#endif
-
-/* Arch specific VIRQs definition */
-#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */
-#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */
-#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */
-
-/* Maximum number of virtual CPUs in multi-processor guests. */
-/* keep sizeof(struct shared_page) <= PAGE_SIZE.
- * this is checked in arch/ia64/xen/hypervisor.c. */
-#define MAX_VIRT_CPUS 64
-
-#ifndef __ASSEMBLY__
-
-#define INVALID_MFN (~0UL)
-
-union vac {
- unsigned long value;
- struct {
- int a_int:1;
- int a_from_int_cr:1;
- int a_to_int_cr:1;
- int a_from_psr:1;
- int a_from_cpuid:1;
- int a_cover:1;
- int a_bsw:1;
- long reserved:57;
- };
-};
-
-union vdc {
- unsigned long value;
- struct {
- int d_vmsw:1;
- int d_extint:1;
- int d_ibr_dbr:1;
- int d_pmc:1;
- int d_to_pmd:1;
- int d_itm:1;
- long reserved:58;
- };
-};
-
-struct mapped_regs {
- union vac vac;
- union vdc vdc;
- unsigned long virt_env_vaddr;
- unsigned long reserved1[29];
- unsigned long vhpi;
- unsigned long reserved2[95];
- union {
- unsigned long vgr[16];
- unsigned long bank1_regs[16]; /* bank1 regs (r16-r31)
- when bank0 active */
- };
- union {
- unsigned long vbgr[16];
- unsigned long bank0_regs[16]; /* bank0 regs (r16-r31)
- when bank1 active */
- };
- unsigned long vnat;
- unsigned long vbnat;
- unsigned long vcpuid[5];
- unsigned long reserved3[11];
- unsigned long vpsr;
- unsigned long vpr;
- unsigned long reserved4[76];
- union {
- unsigned long vcr[128];
- struct {
- unsigned long dcr; /* CR0 */
- unsigned long itm;
- unsigned long iva;
- unsigned long rsv1[5];
- unsigned long pta; /* CR8 */
- unsigned long rsv2[7];
- unsigned long ipsr; /* CR16 */
- unsigned long isr;
- unsigned long rsv3;
- unsigned long iip;
- unsigned long ifa;
- unsigned long itir;
- unsigned long iipa;
- unsigned long ifs;
- unsigned long iim; /* CR24 */
- unsigned long iha;
- unsigned long rsv4[38];
- unsigned long lid; /* CR64 */
- unsigned long ivr;
- unsigned long tpr;
- unsigned long eoi;
- unsigned long irr[4];
- unsigned long itv; /* CR72 */
- unsigned long pmv;
- unsigned long cmcv;
- unsigned long rsv5[5];
- unsigned long lrr0; /* CR80 */
- unsigned long lrr1;
- unsigned long rsv6[46];
- };
- };
- union {
- unsigned long reserved5[128];
- struct {
- unsigned long precover_ifs;
- unsigned long unat; /* not sure if this is needed
- until NaT arch is done */
- int interrupt_collection_enabled; /* virtual psr.ic */
-
- /* virtual interrupt deliverable flag is
- * evtchn_upcall_mask in shared info area now.
- * interrupt_mask_addr is the address
- * of evtchn_upcall_mask for current vcpu
- */
- unsigned char *interrupt_mask_addr;
- int pending_interruption;
- unsigned char vpsr_pp;
- unsigned char vpsr_dfh;
- unsigned char hpsr_dfh;
- unsigned char hpsr_mfh;
- unsigned long reserved5_1[4];
- int metaphysical_mode; /* 1 = use metaphys mapping
- 0 = use virtual */
- int banknum; /* 0 or 1, which virtual
- register bank is active */
- unsigned long rrs[8]; /* region registers */
- unsigned long krs[8]; /* kernel registers */
- unsigned long tmp[16]; /* temp registers
- (e.g. for hyperprivops) */
-
- /* itc paravirtualization
- * vAR.ITC = mAR.ITC + itc_offset
- * itc_last is one which was lastly passed to
- * the guest OS in order to prevent it from
- * going backwords.
- */
- unsigned long itc_offset;
- unsigned long itc_last;
- };
- };
-};
-
-struct arch_vcpu_info {
- /* nothing */
-};
-
-/*
- * This structure is used for magic page in domain pseudo physical address
- * space and the result of XENMEM_machine_memory_map.
- * As the XENMEM_machine_memory_map result,
- * xen_memory_map::nr_entries indicates the size in bytes
- * including struct xen_ia64_memmap_info. Not the number of entries.
- */
-struct xen_ia64_memmap_info {
- uint64_t efi_memmap_size; /* size of EFI memory map */
- uint64_t efi_memdesc_size; /* size of an EFI memory map
- * descriptor */
- uint32_t efi_memdesc_version; /* memory descriptor version */
- void *memdesc[0]; /* array of efi_memory_desc_t */
-};
-
-struct arch_shared_info {
- /* PFN of the start_info page. */
- unsigned long start_info_pfn;
-
- /* Interrupt vector for event channel. */
- int evtchn_vector;
-
- /* PFN of memmap_info page */
- unsigned int memmap_info_num_pages; /* currently only = 1 case is
- supported. */
- unsigned long memmap_info_pfn;
-
- uint64_t pad[31];
-};
-
-struct xen_callback {
- unsigned long ip;
-};
-typedef struct xen_callback xen_callback_t;
-
-#endif /* !__ASSEMBLY__ */
-
-#include <asm/pvclock-abi.h>
-
-/* Size of the shared_info area (this is not related to page size). */
-#define XSI_SHIFT 14
-#define XSI_SIZE (1 << XSI_SHIFT)
-/* Log size of mapped_regs area (64 KB - only 4KB is used). */
-#define XMAPPEDREGS_SHIFT 12
-#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT)
-/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */
-#define XMAPPEDREGS_OFS XSI_SIZE
-
-/* Hyperprivops. */
-#define HYPERPRIVOP_START 0x1
-#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0)
-#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1)
-#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2)
-#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3)
-#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4)
-#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5)
-#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6)
-#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7)
-#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8)
-#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9)
-#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa)
-#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb)
-#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc)
-#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd)
-#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe)
-#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf)
-#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10)
-#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11)
-#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12)
-#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13)
-#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14)
-#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15)
-#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16)
-#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17)
-#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18)
-#define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19)
-#define HYPERPRIVOP_MAX (0x1a)
-
-/* Fast and light hypercalls. */
-#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1
-
-/* Xencomm macros. */
-#define XENCOMM_INLINE_MASK 0xf800000000000000UL
-#define XENCOMM_INLINE_FLAG 0x8000000000000000UL
-
-#ifndef __ASSEMBLY__
-
-/*
- * Optimization features.
- * The hypervisor may do some special optimizations for guests. This hypercall
- * can be used to switch on/of these special optimizations.
- */
-#define __HYPERVISOR_opt_feature 0x700UL
-
-#define XEN_IA64_OPTF_OFF 0x0
-#define XEN_IA64_OPTF_ON 0x1
-
-/*
- * If this feature is switched on, the hypervisor inserts the
- * tlb entries without calling the guests traphandler.
- * This is useful in guests using region 7 for identity mapping
- * like the linux kernel does.
- */
-#define XEN_IA64_OPTF_IDENT_MAP_REG7 1
-
-/* Identity mapping of region 4 addresses in HVM. */
-#define XEN_IA64_OPTF_IDENT_MAP_REG4 2
-
-/* Identity mapping of region 5 addresses in HVM. */
-#define XEN_IA64_OPTF_IDENT_MAP_REG5 3
-
-#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0)
-
-struct xen_ia64_opt_feature {
- unsigned long cmd; /* Which feature */
- unsigned char on; /* Switch feature on/off */
- union {
- struct {
- /* The page protection bit mask of the pte.
- * This will be or'ed with the pte. */
- unsigned long pgprot;
- unsigned long key; /* A protection key for itir.*/
- };
- };
-};
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_IA64_XEN_INTERFACE_H */
diff --git a/arch/ia64/include/asm/xen/irq.h b/arch/ia64/include/asm/xen/irq.h
deleted file mode 100644
index a904509..0000000
--- a/arch/ia64/include/asm/xen/irq.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/******************************************************************************
- * arch/ia64/include/asm/xen/irq.h
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef _ASM_IA64_XEN_IRQ_H
-#define _ASM_IA64_XEN_IRQ_H
-
-/*
- * The flat IRQ space is divided into two regions:
- * 1. A one-to-one mapping of real physical IRQs. This space is only used
- * if we have physical device-access privilege. This region is at the
- * start of the IRQ space so that existing device drivers do not need
- * to be modified to translate physical IRQ numbers into our IRQ space.
- * 3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
- * are bound using the provided bind/unbind functions.
- */
-
-#define XEN_PIRQ_BASE 0
-#define XEN_NR_PIRQS 256
-
-#define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS)
-#define XEN_NR_DYNIRQS (NR_CPUS * 8)
-
-#define XEN_NR_IRQS (XEN_NR_PIRQS + XEN_NR_DYNIRQS)
-
-#endif /* _ASM_IA64_XEN_IRQ_H */
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
deleted file mode 100644
index 00cf03e..0000000
--- a/arch/ia64/include/asm/xen/minstate.h
+++ /dev/null
@@ -1,143 +0,0 @@
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-/* read ar.itc in advance, and use it before leaving bank 0 */
-#define XEN_ACCOUNT_GET_STAMP \
- MOV_FROM_ITC(pUStk, p6, r20, r2);
-#else
-#define XEN_ACCOUNT_GET_STAMP
-#endif
-
-/*
- * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
- * the minimum state necessary that allows us to turn psr.ic back
- * on.
- *
- * Assumed state upon entry:
- * psr.ic: off
- * r31: contains saved predicates (pr)
- *
- * Upon exit, the state is as follows:
- * psr.ic: off
- * r2 = points to &pt_regs.r16
- * r8 = contents of ar.ccv
- * r9 = contents of ar.csd
- * r10 = contents of ar.ssd
- * r11 = FPSR_DEFAULT
- * r12 = kernel sp (kernel virtual address)
- * r13 = points to current task_struct (kernel virtual address)
- * p15 = TRUE if psr.i is set in cr.ipsr
- * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
- * preserved
- * CONFIG_XEN note: p6/p7 are not preserved
- *
- * Note that psr.ic is NOT turned on by this macro. This is so that
- * we can pass interruption state as arguments to a handler.
- */
-#define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \
- mov r16=IA64_KR(CURRENT); /* M */ \
- mov r27=ar.rsc; /* M */ \
- mov r20=r1; /* A */ \
- mov r25=ar.unat; /* M */ \
- MOV_FROM_IPSR(p0,r29); /* M */ \
- MOV_FROM_IIP(r28); /* M */ \
- mov r21=ar.fpsr; /* M */ \
- mov r26=ar.pfs; /* I */ \
- __COVER; /* B;; (or nothing) */ \
- adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
- ;; \
- ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
- st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
- adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
- /* switch from user to kernel RBS: */ \
- ;; \
- invala; /* M */ \
- /* SAVE_IFS;*/ /* see xen special handling below */ \
- cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
- ;; \
-(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
- ;; \
-(pUStk) mov.m r24=ar.rnat; \
-(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
-(pKStk) mov r1=sp; /* get sp */ \
- ;; \
-(pUStk) lfetch.fault.excl.nt1 [r22]; \
-(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
-(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
- ;; \
-(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
-(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
- ;; \
-(pUStk) mov r18=ar.bsp; \
-(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
- adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
- adds r16=PT(CR_IPSR),r1; \
- ;; \
- lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
- st8 [r16]=r29; /* save cr.ipsr */ \
- ;; \
- lfetch.fault.excl.nt1 [r17]; \
- tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
- mov r29=b0 \
- ;; \
- WORKAROUND; \
- adds r16=PT(R8),r1; /* initialize first base pointer */ \
- adds r17=PT(R9),r1; /* initialize second base pointer */ \
-(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r8,16; \
-.mem.offset 8,0; st8.spill [r17]=r9,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r10,24; \
- movl r8=XSI_PRECOVER_IFS; \
-.mem.offset 8,0; st8.spill [r17]=r11,24; \
- ;; \
- /* xen special handling for possibly lazy cover */ \
- /* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \
- ld8 r30=[r8]; \
-(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
- st8 [r16]=r28,16; /* save cr.iip */ \
- ;; \
- st8 [r17]=r30,16; /* save cr.ifs */ \
- mov r8=ar.ccv; \
- mov r9=ar.csd; \
- mov r10=ar.ssd; \
- movl r11=FPSR_DEFAULT; /* L-unit */ \
- ;; \
- st8 [r16]=r25,16; /* save ar.unat */ \
- st8 [r17]=r26,16; /* save ar.pfs */ \
- shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
- ;; \
- st8 [r16]=r27,16; /* save ar.rsc */ \
-(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
-(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
- ;; /* avoid RAW on r16 & r17 */ \
-(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
- st8 [r17]=r31,16; /* save predicates */ \
-(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
- ;; \
- st8 [r16]=r29,16; /* save b0 */ \
- st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
- cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
-.mem.offset 8,0; st8.spill [r17]=r12,16; \
- adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r13,16; \
-.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
- mov r13=IA64_KR(CURRENT); /* establish `current' */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r15,16; \
-.mem.offset 8,0; st8.spill [r17]=r14,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r2,16; \
-.mem.offset 8,0; st8.spill [r17]=r3,16; \
- XEN_ACCOUNT_GET_STAMP \
- adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
- ;; \
- EXTRA; \
- movl r1=__gp; /* establish kernel global pointer */ \
- ;; \
- ACCOUNT_SYS_ENTER \
- BSW_1(r3,r14); /* switch back to bank 1 (must be last in insn group) */ \
- ;;
diff --git a/arch/ia64/include/asm/xen/page-coherent.h b/arch/ia64/include/asm/xen/page-coherent.h
deleted file mode 100644
index 96e42f9..0000000
--- a/arch/ia64/include/asm/xen/page-coherent.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef _ASM_IA64_XEN_PAGE_COHERENT_H
-#define _ASM_IA64_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <linux/dma-attrs.h>
-#include <linux/dma-mapping.h>
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
-{
- void *vstart = (void*)__get_free_pages(flags, get_order(size));
- *dma_handle = virt_to_phys(vstart);
- return vstart;
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
-{
- free_pages((unsigned long) cpu_addr, get_order(size));
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs) { }
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs) { }
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
-
-#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */
diff --git a/arch/ia64/include/asm/xen/page.h b/arch/ia64/include/asm/xen/page.h
deleted file mode 100644
index 03441a78..0000000
--- a/arch/ia64/include/asm/xen/page.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/******************************************************************************
- * arch/ia64/include/asm/xen/page.h
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef _ASM_IA64_XEN_PAGE_H
-#define _ASM_IA64_XEN_PAGE_H
-
-#define INVALID_P2M_ENTRY (~0UL)
-
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
-{
- return mfn;
-}
-
-static inline unsigned long pfn_to_mfn(unsigned long pfn)
-{
- return pfn;
-}
-
-#define phys_to_machine_mapping_valid(_x) (1)
-
-static inline void *mfn_to_virt(unsigned long mfn)
-{
- return __va(mfn << PAGE_SHIFT);
-}
-
-static inline unsigned long virt_to_mfn(void *virt)
-{
- return __pa(virt) >> PAGE_SHIFT;
-}
-
-/* for tpmfront.c */
-static inline unsigned long virt_to_machine(void *virt)
-{
- return __pa(virt);
-}
-
-static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
- /* nothing */
-}
-
-#define pte_mfn(_x) pte_pfn(_x)
-#define mfn_pte(_x, _y) __pte_ma(0) /* unmodified use */
-#define __pte_ma(_x) ((pte_t) {(_x)}) /* unmodified use */
-
-#endif /* _ASM_IA64_XEN_PAGE_H */
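
The helpers in the header removed above are all identities because the old ia64 Xen port ran guests with an auto-translated physmap, so guest frame numbers and machine frame numbers coincide. A minimal user-space sketch of that convention, with illustrative names only (not the kernel's API):

#include <stdio.h>

/* Identity p2m sketch: illustrative only, mirroring the deleted header's
 * assumption that pfn and mfn are the same number on ia64 Xen guests. */
static unsigned long sim_pfn_to_mfn(unsigned long pfn) { return pfn; }
static unsigned long sim_mfn_to_pfn(unsigned long mfn) { return mfn; }

int main(void)
{
        unsigned long pfn = 0x1234;
        unsigned long mfn = sim_pfn_to_mfn(pfn);

        /* With an auto-translated physmap the round trip changes nothing. */
        printf("pfn 0x%lx <-> mfn 0x%lx -> pfn 0x%lx\n",
               pfn, mfn, sim_mfn_to_pfn(mfn));
        return 0;
}
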
diff --git a/arch/ia64/include/asm/xen/patchlist.h b/arch/ia64/include/asm/xen/patchlist.h
deleted file mode 100644
index eae944e..0000000
--- a/arch/ia64/include/asm/xen/patchlist.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/******************************************************************************
- * arch/ia64/include/asm/xen/patchlist.h
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#define __paravirt_start_gate_fsyscall_patchlist \
- __xen_start_gate_fsyscall_patchlist
-#define __paravirt_end_gate_fsyscall_patchlist \
- __xen_end_gate_fsyscall_patchlist
-#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \
- __xen_start_gate_brl_fsys_bubble_down_patchlist
-#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \
- __xen_end_gate_brl_fsys_bubble_down_patchlist
-#define __paravirt_start_gate_vtop_patchlist \
- __xen_start_gate_vtop_patchlist
-#define __paravirt_end_gate_vtop_patchlist \
- __xen_end_gate_vtop_patchlist
-#define __paravirt_start_gate_mckinley_e9_patchlist \
- __xen_start_gate_mckinley_e9_patchlist
-#define __paravirt_end_gate_mckinley_e9_patchlist \
- __xen_end_gate_mckinley_e9_patchlist
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h
deleted file mode 100644
index fb4ec5e..0000000
--- a/arch/ia64/include/asm/xen/privop.h
+++ /dev/null
@@ -1,135 +0,0 @@
-#ifndef _ASM_IA64_XEN_PRIVOP_H
-#define _ASM_IA64_XEN_PRIVOP_H
-
-/*
- * Copyright (C) 2005 Hewlett-Packard Co
- * Dan Magenheimer <dan.magenheimer@hp.com>
- *
- * Paravirtualizations of privileged operations for Xen/ia64
- *
- *
- * inline privop and paravirt_alt support
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- */
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h> /* arch-ia64.h requires uint64_t */
-#endif
-#include <asm/xen/interface.h>
-
-/* At 1 MB, before per-cpu space but still addressable using addl instead
- of movl. */
-#define XSI_BASE 0xfffffffffff00000
-
-/* Address of mapped regs. */
-#define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE)
-
-#ifdef __ASSEMBLY__
-#define XEN_HYPER_RFI break HYPERPRIVOP_RFI
-#define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT
-#define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT
-#define XEN_HYPER_COVER break HYPERPRIVOP_COVER
-#define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D
-#define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I
-#define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I
-#define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR
-#define XEN_HYPER_THASH break HYPERPRIVOP_THASH
-#define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D
-#define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR
-#define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR
-#define XEN_HYPER_SET_RR0_TO_RR4 break HYPERPRIVOP_SET_RR0_TO_RR4
-
-#define XSI_IFS (XSI_BASE + XSI_IFS_OFS)
-#define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS)
-#define XSI_IFA (XSI_BASE + XSI_IFA_OFS)
-#define XSI_ISR (XSI_BASE + XSI_ISR_OFS)
-#define XSI_IIM (XSI_BASE + XSI_IIM_OFS)
-#define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS)
-#define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
-#define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS)
-#define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS)
-#define XSI_IIP (XSI_BASE + XSI_IIP_OFS)
-#define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS)
-#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS)
-#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS)
-#define XSI_IHA (XSI_BASE + XSI_IHA_OFS)
-#define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS)
-#define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS)
-#endif
-
-#ifndef __ASSEMBLY__
-
-/************************************************/
-/* Instructions paravirtualized for correctness */
-/************************************************/
-
-/* "fc" and "thash" are privilege-sensitive instructions, meaning they
- * may have different semantics depending on whether they are executed
- * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
- * be allowed to execute directly, lest incorrect semantics result. */
-extern void xen_fc(void *addr);
-extern unsigned long xen_thash(unsigned long addr);
-
-/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
- * is not currently used (though it may be in a long-format VHPT system!)
- * and the semantics of cover only change if psr.ic is off which is very
- * rare (and currently non-existent outside of assembly code */
-
-/* There are also privilege-sensitive registers. These registers are
- * readable at any privilege level but only writable at PL0. */
-extern unsigned long xen_get_cpuid(int index);
-extern unsigned long xen_get_pmd(int index);
-
-#ifndef ASM_SUPPORTED
-extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */
-extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
-#endif
-
-/************************************************/
-/* Instructions paravirtualized for performance */
-/************************************************/
-
-/* Xen uses memory-mapped virtual privileged registers for access to many
- * performance-sensitive privileged registers. Some, like the processor
- * status register (psr), are broken up into multiple memory locations.
- * Others, like "pend", are abstractions based on privileged registers.
- * "Pend" is guaranteed to be set if reading cr.ivr would return a
- * (non-spurious) interrupt. */
-#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
-
-#define XSI_PSR_I \
- (*XEN_MAPPEDREGS->interrupt_mask_addr)
-#define xen_get_virtual_psr_i() \
- (!XSI_PSR_I)
-#define xen_set_virtual_psr_i(_val) \
- ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
-#define xen_set_virtual_psr_ic(_val) \
- ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
-#define xen_get_virtual_pend() \
- (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
-
-#ifndef ASM_SUPPORTED
-/* Although all privileged operations can be left to trap and will
- * be properly handled by Xen, some are frequent enough that we use
- * hyperprivops for performance. */
-extern unsigned long xen_get_psr(void);
-extern unsigned long xen_get_ivr(void);
-extern unsigned long xen_get_tpr(void);
-extern void xen_hyper_ssm_i(void);
-extern void xen_set_itm(unsigned long);
-extern void xen_set_tpr(unsigned long);
-extern void xen_eoi(unsigned long);
-extern unsigned long xen_get_rr(unsigned long index);
-extern void xen_set_rr(unsigned long index, unsigned long val);
-extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
- unsigned long val2, unsigned long val3,
- unsigned long val4);
-extern void xen_set_kr(unsigned long index, unsigned long val);
-extern void xen_ptcga(unsigned long addr, unsigned long size);
-#endif /* !ASM_SUPPORTED */
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_IA64_XEN_PRIVOP_H */
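
The XSI_PSR_I macros in the header removed above show how the port read and toggled the virtual psr.i bit through a shared interrupt-mask byte instead of executing privileged ssm/rsm instructions; the byte stores the mask, so psr.i is its logical negation. A small user-space sketch of that convention (names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mapped_regs_sim {
        uint8_t interrupt_mask;   /* 1: interrupts masked, 0: deliverable */
};

static bool get_virtual_psr_i(const struct mapped_regs_sim *r)
{
        return !r->interrupt_mask;        /* psr.i is the inverse of the mask */
}

static void set_virtual_psr_i(struct mapped_regs_sim *r, bool enable)
{
        r->interrupt_mask = enable ? 0 : 1;
}

int main(void)
{
        struct mapped_regs_sim r = { .interrupt_mask = 1 };

        set_virtual_psr_i(&r, true);
        printf("psr.i = %d\n", get_virtual_psr_i(&r));   /* prints 1 */
        return 0;
}
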
diff --git a/arch/ia64/include/asm/xen/xcom_hcall.h b/arch/ia64/include/asm/xen/xcom_hcall.h
deleted file mode 100644
index 20b2950..0000000
--- a/arch/ia64/include/asm/xen/xcom_hcall.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _ASM_IA64_XEN_XCOM_HCALL_H
-#define _ASM_IA64_XEN_XCOM_HCALL_H
-
-/* These function creates inline or mini descriptor for the parameters and
- calls the corresponding xencomm_arch_hypercall_X.
- Architectures should defines HYPERVISOR_xxx as xencomm_hypercall_xxx unless
- they want to use their own wrapper. */
-extern int xencomm_hypercall_console_io(int cmd, int count, char *str);
-
-extern int xencomm_hypercall_event_channel_op(int cmd, void *op);
-
-extern int xencomm_hypercall_xen_version(int cmd, void *arg);
-
-extern int xencomm_hypercall_physdev_op(int cmd, void *op);
-
-extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
- unsigned int count);
-
-extern int xencomm_hypercall_sched_op(int cmd, void *arg);
-
-extern int xencomm_hypercall_multicall(void *call_list, int nr_calls);
-
-extern int xencomm_hypercall_callback_op(int cmd, void *arg);
-
-extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg);
-
-extern int xencomm_hypercall_suspend(unsigned long srec);
-
-extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg);
-
-extern long xencomm_hypercall_opt_feature(void *arg);
-
-#endif /* _ASM_IA64_XEN_XCOM_HCALL_H */
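
The declarations removed above all follow one pattern: wrap each pointer argument in a small descriptor, then hand the descriptor to the raw hypercall. A hedged user-space sketch of that shape, with invented names standing in for the xencomm machinery:

#include <stddef.h>
#include <stdio.h>

struct desc_sim {
        const void *addr;    /* stands in for a physical-address handle */
        size_t      len;
};

/* Stand-in for the arch-level xencomm_arch_hypercall_X() call. */
static int raw_call_sim(int cmd, const struct desc_sim *d)
{
        printf("cmd %d: descriptor covers %zu bytes\n", cmd, d->len);
        return 0;
}

/* Stand-in for an xencomm_hypercall_X() wrapper: build the descriptor,
 * then issue the raw call with it instead of the virtual address. */
static int wrapped_call_sim(int cmd, const void *arg, size_t argsize)
{
        struct desc_sim d = { .addr = arg, .len = argsize };

        return raw_call_sim(cmd, &d);
}

int main(void)
{
        int payload = 42;

        return wrapped_call_sim(1, &payload, sizeof(payload));
}
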
diff --git a/arch/ia64/include/asm/xen/xencomm.h b/arch/ia64/include/asm/xen/xencomm.h
deleted file mode 100644
index cded677..0000000
--- a/arch/ia64/include/asm/xen/xencomm.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _ASM_IA64_XEN_XENCOMM_H
-#define _ASM_IA64_XEN_XENCOMM_H
-
-#include <xen/xencomm.h>
-#include <asm/pgtable.h>
-
-/* Must be called before any hypercall. */
-extern void xencomm_initialize(void);
-extern int xencomm_is_initialized(void);
-
-/* Check if virtual contiguity means physical contiguity
- * where the passed address is a pointer value in virtual address.
- * On ia64, identity mapping area in region 7 or the piece of region 5
- * that is mapped by itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL]
- */
-static inline int xencomm_is_phys_contiguous(unsigned long addr)
-{
- return (PAGE_OFFSET <= addr &&
- addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) ||
- (KERNEL_START <= addr &&
- addr < KERNEL_START + KERNEL_TR_PAGE_SIZE);
-}
-
-#endif /* _ASM_IA64_XEN_XENCOMM_H */
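
xencomm_is_phys_contiguous() above is just a two-range address test: the identity-mapped region or the kernel's translation-register mapping. A compilable sketch of that check with stand-in bounds (not the real ia64 constants):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bounds only; the real values come from ia64 headers. */
#define SIM_IDENT_BASE   0xe000000000000000ULL
#define SIM_IDENT_SPAN   (1ULL << 50)
#define SIM_KERNEL_BASE  0xa000000100000000ULL
#define SIM_KERNEL_SPAN  (64ULL << 20)

static bool is_phys_contiguous(unsigned long long addr)
{
        return (addr >= SIM_IDENT_BASE &&
                addr <  SIM_IDENT_BASE + SIM_IDENT_SPAN) ||
               (addr >= SIM_KERNEL_BASE &&
                addr <  SIM_KERNEL_BASE + SIM_KERNEL_SPAN);
}

int main(void)
{
        printf("%d %d\n",
               is_phys_contiguous(SIM_IDENT_BASE + 4096),   /* 1 */
               is_phys_contiguous(0x1000ULL));              /* 0 */
        return 0;
}
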
diff --git a/arch/ia64/include/uapi/asm/break.h b/arch/ia64/include/uapi/asm/break.h
index e90c40e..f034020 100644
--- a/arch/ia64/include/uapi/asm/break.h
+++ b/arch/ia64/include/uapi/asm/break.h
@@ -20,13 +20,4 @@
*/
#define __IA64_BREAK_SYSCALL 0x100000
-/*
- * Xen specific break numbers:
- */
-#define __IA64_XEN_HYPERCALL 0x1000
-/* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used
- for xen hyperprivops */
-#define __IA64_XEN_HYPERPRIVOP_START 0x1
-#define __IA64_XEN_HYPERPRIVOP_MAX 0x1a
-
#endif /* _ASM_IA64_BREAK_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 59d52e3..bfa1931 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -53,7 +53,6 @@
#include <asm/numa.h>
#include <asm/sal.h>
#include <asm/cyclone.h>
-#include <asm/xen/hypervisor.h>
#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
@@ -120,8 +119,6 @@ acpi_get_sysname(void)
return "uv";
else
return "sn2";
- } else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) {
- return "xen";
}
#ifdef CONFIG_INTEL_IOMMU
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 46c9e30..60ef83e 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -16,9 +16,6 @@
#include <asm/sigcontext.h>
#include <asm/mca.h>
-#include <asm/xen/interface.h>
-#include <asm/xen/hypervisor.h>
-
#include "../kernel/sigframe.h"
#include "../kernel/fsyscall_gtod_data.h"
@@ -290,33 +287,4 @@ void foo(void)
DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
offsetof (struct itc_jitter_data_t, itc_lastcycle));
-#ifdef CONFIG_XEN
- BLANK();
-
- DEFINE(XEN_NATIVE_ASM, XEN_NATIVE);
- DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN);
-
-#define DEFINE_MAPPED_REG_OFS(sym, field) \
- DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field)))
-
- DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
- DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
- DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
- DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
- DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
- DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
- DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
- DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
- DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
- DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
- DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
- DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
- DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
- DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
- DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
- DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
- DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
- DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset);
- DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last);
-#endif /* CONFIG_XEN */
}
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 991ca33..e6f80fc 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -416,8 +416,6 @@ start_ap:
default_setup_hook = 0 // Currently nothing needs to be done.
- .weak xen_setup_hook
-
.global hypervisor_type
hypervisor_type:
data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT
@@ -426,7 +424,6 @@ hypervisor_type:
hypervisor_setup_hooks:
data8 default_setup_hook
- data8 xen_setup_hook
num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8
.previous
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c
index ee56457..f6769cd 100644
--- a/arch/ia64/kernel/nr-irqs.c
+++ b/arch/ia64/kernel/nr-irqs.c
@@ -10,15 +10,11 @@
#include <linux/kbuild.h>
#include <linux/threads.h>
#include <asm/native/irq.h>
-#include <asm/xen/irq.h>
void foo(void)
{
union paravirt_nr_irqs_max {
char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS];
-#ifdef CONFIG_XEN
- char xen_nr_irqs[XEN_NR_IRQS];
-#endif
};
DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max));
diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h
index 64d6d81..1ad7512 100644
--- a/arch/ia64/kernel/paravirt_inst.h
+++ b/arch/ia64/kernel/paravirt_inst.h
@@ -22,9 +22,6 @@
#ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK
#include <asm/native/pvchk_inst.h>
-#elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN)
-#include <asm/xen/inst.h>
-#include <asm/xen/minstate.h>
#else
#include <asm/native/inst.h>
#endif
diff --git a/arch/ia64/kernel/paravirt_patchlist.h b/arch/ia64/kernel/paravirt_patchlist.h
index 0684aa6..67cffc36 100644
--- a/arch/ia64/kernel/paravirt_patchlist.h
+++ b/arch/ia64/kernel/paravirt_patchlist.h
@@ -20,9 +20,5 @@
*
*/
-#if defined(__IA64_GATE_PARAVIRTUALIZED_XEN)
-#include <asm/xen/patchlist.h>
-#else
#include <asm/native/patchlist.h>
-#endif
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0ccb28f..84f8a52 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -182,12 +182,6 @@ SECTIONS {
__start_gate_section = .;
*(.data..gate)
__stop_gate_section = .;
-#ifdef CONFIG_XEN
- . = ALIGN(PAGE_SIZE);
- __xen_start_gate_section = .;
- *(.data..gate.xen)
- __xen_stop_gate_section = .;
-#endif
}
/*
* make sure the gate page doesn't expose
diff --git a/arch/ia64/xen/Kconfig b/arch/ia64/xen/Kconfig
deleted file mode 100644
index 5d8a06b..0000000
--- a/arch/ia64/xen/Kconfig
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# This Kconfig describes xen/ia64 options
-#
-
-config XEN
- bool "Xen hypervisor support"
- default y
- depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB
- select XEN_XENCOMM
- select NO_IDLE_HZ
- # followings are required to save/restore.
- select ARCH_SUSPEND_POSSIBLE
- select SUSPEND
- select PM_SLEEP
- help
- Enable Xen hypervisor support. Resulting kernel runs
- both as a guest OS on Xen and natively on hardware.
-
-config XEN_XENCOMM
- depends on XEN
- bool
-
-config NO_IDLE_HZ
- depends on XEN
- bool
diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile
deleted file mode 100644
index e6f4a0a..0000000
--- a/arch/ia64/xen/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-# Makefile for Xen components
-#
-
-obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \
- hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \
- gate-data.o
-
-obj-$(CONFIG_IA64_GENERIC) += machvec.o
-
-# The gate DSO image is built using a special linker script.
-include $(srctree)/arch/ia64/kernel/Makefile.gate
-
-# tell compiled for xen
-CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN
-AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN
-
-# use same file of native.
-$(obj)/gate.o: $(src)/../kernel/gate.S FORCE
- $(call if_changed_dep,as_o_S)
-$(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE
- $(call if_changed_dep,cpp_lds_S)
-
-
-AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN
-
-# xen multi compile
-ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S
-ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o))
-obj-y += $(ASM_PARAVIRT_OBJS)
-define paravirtualized_xen
-AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_XEN
-endef
-$(foreach o,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_xen,$(o))))
-
-$(obj)/xen-%.o: $(src)/../kernel/%.S FORCE
- $(call if_changed_dep,as_o_S)
diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S
deleted file mode 100644
index 6f95b6b..0000000
--- a/arch/ia64/xen/gate-data.S
+++ /dev/null
@@ -1,3 +0,0 @@
- .section .data..gate.xen, "aw"
-
- .incbin "arch/ia64/xen/gate.so"
diff --git a/arch/ia64/xen/grant-table.c b/arch/ia64/xen/grant-table.c
deleted file mode 100644
index c182813..0000000
--- a/arch/ia64/xen/grant-table.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/******************************************************************************
- * arch/ia64/xen/grant-table.c
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-
-#include <xen/interface/xen.h>
-#include <xen/interface/memory.h>
-#include <xen/grant_table.h>
-
-#include <asm/xen/hypervisor.h>
-
-/****************************************************************************
- * grant table hack
- * cmd: GNTTABOP_xxx
- */
-
-int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
- unsigned long max_nr_gframes,
- struct grant_entry **__shared)
-{
- *__shared = __va(frames[0] << PAGE_SHIFT);
- return 0;
-}
-
-void arch_gnttab_unmap_shared(struct grant_entry *shared,
- unsigned long nr_gframes)
-{
- /* nothing */
-}
-
-static void
-gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
-{
- uint32_t flags;
-
- flags = uop->flags;
-
- if (flags & GNTMAP_host_map) {
- if (flags & GNTMAP_application_map) {
- printk(KERN_DEBUG
- "GNTMAP_application_map is not supported yet: "
- "flags 0x%x\n", flags);
- BUG();
- }
- if (flags & GNTMAP_contains_pte) {
- printk(KERN_DEBUG
- "GNTMAP_contains_pte is not supported yet: "
- "flags 0x%x\n", flags);
- BUG();
- }
- } else if (flags & GNTMAP_device_map) {
- printk("GNTMAP_device_map is not supported yet 0x%x\n", flags);
- BUG(); /* not yet. actually this flag is not used. */
- } else {
- BUG();
- }
-}
-
-int
-HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
-{
- if (cmd == GNTTABOP_map_grant_ref) {
- unsigned int i;
- for (i = 0; i < count; i++) {
- gnttab_map_grant_ref_pre(
- (struct gnttab_map_grant_ref *)uop + i);
- }
- }
- return xencomm_hypercall_grant_table_op(cmd, uop, count);
-}
-
-EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
deleted file mode 100644
index 08847aa..0000000
--- a/arch/ia64/xen/hypercall.S
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Support routines for Xen hypercalls
- *
- * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
- * Copyright (C) 2008 Yaozu (Eddie) Dong <eddie.dong@intel.com>
- */
-
-#include <asm/asmmacro.h>
-#include <asm/intrinsics.h>
-#include <asm/xen/privop.h>
-
-#ifdef __INTEL_COMPILER
-/*
- * Hypercalls without parameter.
- */
-#define __HCALL0(name,hcall) \
- GLOBAL_ENTRY(name); \
- break hcall; \
- br.ret.sptk.many rp; \
- END(name)
-
-/*
- * Hypercalls with 1 parameter.
- */
-#define __HCALL1(name,hcall) \
- GLOBAL_ENTRY(name); \
- mov r8=r32; \
- break hcall; \
- br.ret.sptk.many rp; \
- END(name)
-
-/*
- * Hypercalls with 2 parameters.
- */
-#define __HCALL2(name,hcall) \
- GLOBAL_ENTRY(name); \
- mov r8=r32; \
- mov r9=r33; \
- break hcall; \
- br.ret.sptk.many rp; \
- END(name)
-
-__HCALL0(xen_get_psr, HYPERPRIVOP_GET_PSR)
-__HCALL0(xen_get_ivr, HYPERPRIVOP_GET_IVR)
-__HCALL0(xen_get_tpr, HYPERPRIVOP_GET_TPR)
-__HCALL0(xen_hyper_ssm_i, HYPERPRIVOP_SSM_I)
-
-__HCALL1(xen_set_tpr, HYPERPRIVOP_SET_TPR)
-__HCALL1(xen_eoi, HYPERPRIVOP_EOI)
-__HCALL1(xen_thash, HYPERPRIVOP_THASH)
-__HCALL1(xen_set_itm, HYPERPRIVOP_SET_ITM)
-__HCALL1(xen_get_rr, HYPERPRIVOP_GET_RR)
-__HCALL1(xen_fc, HYPERPRIVOP_FC)
-__HCALL1(xen_get_cpuid, HYPERPRIVOP_GET_CPUID)
-__HCALL1(xen_get_pmd, HYPERPRIVOP_GET_PMD)
-
-__HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA)
-__HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR)
-__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR)
-
-GLOBAL_ENTRY(xen_set_rr0_to_rr4)
- mov r8=r32
- mov r9=r33
- mov r10=r34
- mov r11=r35
- mov r14=r36
- XEN_HYPER_SET_RR0_TO_RR4
- br.ret.sptk.many rp
- ;;
-END(xen_set_rr0_to_rr4)
-#endif
-
-GLOBAL_ENTRY(xen_send_ipi)
- mov r14=r32
- mov r15=r33
- mov r2=0x400
- break 0x1000
- ;;
- br.ret.sptk.many rp
- ;;
-END(xen_send_ipi)
-
-GLOBAL_ENTRY(__hypercall)
- mov r2=r37
- break 0x1000
- br.ret.sptk.many b0
- ;;
-END(__hypercall)
diff --git a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c
deleted file mode 100644
index fab6252..0000000
--- a/arch/ia64/xen/hypervisor.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/******************************************************************************
- * arch/ia64/xen/hypervisor.c
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/efi.h>
-#include <linux/export.h>
-#include <asm/xen/hypervisor.h>
-#include <asm/xen/privop.h>
-
-#include "irq_xen.h"
-
-struct shared_info *HYPERVISOR_shared_info __read_mostly =
- (struct shared_info *)XSI_BASE;
-EXPORT_SYMBOL(HYPERVISOR_shared_info);
-
-DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
-
-struct start_info *xen_start_info;
-EXPORT_SYMBOL(xen_start_info);
-
-EXPORT_SYMBOL(xen_domain_type);
-
-EXPORT_SYMBOL(__hypercall);
-
-/* Stolen from arch/x86/xen/enlighten.c */
-/*
- * Flag to determine whether vcpu info placement is available on all
- * VCPUs. We assume it is to start with, and then set it to zero on
- * the first failure. This is because it can succeed on some VCPUs
- * and not others, since it can involve hypervisor memory allocation,
- * or because the guest failed to guarantee all the appropriate
- * constraints on all VCPUs (ie buffer can't cross a page boundary).
- *
- * Note that any particular CPU may be using a placed vcpu structure,
- * but we can only optimise if the all are.
- *
- * 0: not available, 1: available
- */
-
-static void __init xen_vcpu_setup(int cpu)
-{
- /*
- * WARNING:
- * before changing MAX_VIRT_CPUS,
- * check that shared_info fits on a page
- */
- BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE);
- per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
-}
-
-void __init xen_setup_vcpu_info_placement(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu)
- xen_vcpu_setup(cpu);
-}
-
-void
-xen_cpu_init(void)
-{
- xen_smp_intr_init();
-}
-
-/**************************************************************************
- * opt feature
- */
-void
-xen_ia64_enable_opt_feature(void)
-{
- /* Enable region 7 identity map optimizations in Xen */
- struct xen_ia64_opt_feature optf;
-
- optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7;
- optf.on = XEN_IA64_OPTF_ON;
- optf.pgprot = pgprot_val(PAGE_KERNEL);
- optf.key = 0; /* No key on linux. */
- HYPERVISOR_opt_feature(&optf);
-}
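
xen_vcpu_setup() above points each CPU at its slot inside a single shared page and guards the layout with a build-time size check. A user-space sketch of the same idea, with illustrative sizes and names:

#include <assert.h>

#define SIM_PAGE_SIZE 16384   /* 16KB, as the old ia64 Xen port assumed */
#define SIM_MAX_CPUS  64

struct vcpu_info_sim   { unsigned long pending; };
struct shared_info_sim { struct vcpu_info_sim vcpu_info[SIM_MAX_CPUS]; };

/* Plays the role of the BUILD_BUG_ON() in the removed code. */
_Static_assert(sizeof(struct shared_info_sim) <= SIM_PAGE_SIZE,
               "shared info must fit in one page");

static struct shared_info_sim shared;
static struct vcpu_info_sim *per_cpu_vcpu[SIM_MAX_CPUS];

/* Each CPU caches a direct pointer into its slot of the shared page. */
static void vcpu_setup(int cpu)
{
        per_cpu_vcpu[cpu] = &shared.vcpu_info[cpu];
}

int main(void)
{
        for (int cpu = 0; cpu < SIM_MAX_CPUS; cpu++)
                vcpu_setup(cpu);
        assert(per_cpu_vcpu[3] == &shared.vcpu_info[3]);
        return 0;
}
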
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
deleted file mode 100644
index efb74da..0000000
--- a/arch/ia64/xen/irq_xen.c
+++ /dev/null
@@ -1,443 +0,0 @@
-/******************************************************************************
- * arch/ia64/xen/irq_xen.c
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/cpu.h>
-
-#include <xen/interface/xen.h>
-#include <xen/interface/callback.h>
-#include <xen/events.h>
-
-#include <asm/xen/privop.h>
-
-#include "irq_xen.h"
-
-/***************************************************************************
- * pv_irq_ops
- * irq operations
- */
-
-static int
-xen_assign_irq_vector(int irq)
-{
- struct physdev_irq irq_op;
-
- irq_op.irq = irq;
- if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
- return -ENOSPC;
-
- return irq_op.vector;
-}
-
-static void
-xen_free_irq_vector(int vector)
-{
- struct physdev_irq irq_op;
-
- if (vector < IA64_FIRST_DEVICE_VECTOR ||
- vector > IA64_LAST_DEVICE_VECTOR)
- return;
-
- irq_op.vector = vector;
- if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
- printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n",
- __func__, vector);
-}
-
-
-static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
-static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
-static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
-static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
-static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
-static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
-#define NAME_SIZE 15
-static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
-#undef NAME_SIZE
-
-struct saved_irq {
- unsigned int irq;
- struct irqaction *action;
-};
-/* 16 should be far optimistic value, since only several percpu irqs
- * are registered early.
- */
-#define MAX_LATE_IRQ 16
-static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
-static unsigned short late_irq_cnt;
-static unsigned short saved_irq_cnt;
-static int xen_slab_ready;
-
-#ifdef CONFIG_SMP
-#include <linux/sched.h>
-
-/* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
- * it ends up to issue several memory accesses upon percpu data and
- * thus adds unnecessary traffic to other paths.
- */
-static irqreturn_t
-xen_dummy_handler(int irq, void *dev_id)
-{
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
-xen_resched_handler(int irq, void *dev_id)
-{
- scheduler_ipi();
- return IRQ_HANDLED;
-}
-
-static struct irqaction xen_ipi_irqaction = {
- .handler = handle_IPI,
- .flags = IRQF_DISABLED,
- .name = "IPI"
-};
-
-static struct irqaction xen_resched_irqaction = {
- .handler = xen_resched_handler,
- .flags = IRQF_DISABLED,
- .name = "resched"
-};
-
-static struct irqaction xen_tlb_irqaction = {
- .handler = xen_dummy_handler,
- .flags = IRQF_DISABLED,
- .name = "tlb_flush"
-};
-#endif
-
-/*
- * This is xen version percpu irq registration, which needs bind
- * to xen specific evtchn sub-system. One trick here is that xen
- * evtchn binding interface depends on kmalloc because related
- * port needs to be freed at device/cpu down. So we cache the
- * registration on BSP before slab is ready and then deal them
- * at later point. For rest instances happening after slab ready,
- * we hook them to xen evtchn immediately.
- *
- * FIXME: MCA is not supported by far, and thus "nomca" boot param is
- * required.
- */
-static void
-__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
- struct irqaction *action, int save)
-{
- int irq = 0;
-
- if (xen_slab_ready) {
- switch (vec) {
- case IA64_TIMER_VECTOR:
- snprintf(per_cpu(xen_timer_name, cpu),
- sizeof(per_cpu(xen_timer_name, cpu)),
- "%s%d", action->name, cpu);
- irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
- action->handler, action->flags,
- per_cpu(xen_timer_name, cpu), action->dev_id);
- per_cpu(xen_timer_irq, cpu) = irq;
- break;
- case IA64_IPI_RESCHEDULE:
- snprintf(per_cpu(xen_resched_name, cpu),
- sizeof(per_cpu(xen_resched_name, cpu)),
- "%s%d", action->name, cpu);
- irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
- action->handler, action->flags,
- per_cpu(xen_resched_name, cpu), action->dev_id);
- per_cpu(xen_resched_irq, cpu) = irq;
- break;
- case IA64_IPI_VECTOR:
- snprintf(per_cpu(xen_ipi_name, cpu),
- sizeof(per_cpu(xen_ipi_name, cpu)),
- "%s%d", action->name, cpu);
- irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
- action->handler, action->flags,
- per_cpu(xen_ipi_name, cpu), action->dev_id);
- per_cpu(xen_ipi_irq, cpu) = irq;
- break;
- case IA64_CMC_VECTOR:
- snprintf(per_cpu(xen_cmc_name, cpu),
- sizeof(per_cpu(xen_cmc_name, cpu)),
- "%s%d", action->name, cpu);
- irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
- action->handler,
- action->flags,
- per_cpu(xen_cmc_name, cpu),
- action->dev_id);
- per_cpu(xen_cmc_irq, cpu) = irq;
- break;
- case IA64_CMCP_VECTOR:
- snprintf(per_cpu(xen_cmcp_name, cpu),
- sizeof(per_cpu(xen_cmcp_name, cpu)),
- "%s%d", action->name, cpu);
- irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
- action->handler,
- action->flags,
- per_cpu(xen_cmcp_name, cpu),
- action->dev_id);
- per_cpu(xen_cmcp_irq, cpu) = irq;
- break;
- case IA64_CPEP_VECTOR:
- snprintf(per_cpu(xen_cpep_name, cpu),
- sizeof(per_cpu(xen_cpep_name, cpu)),
- "%s%d", action->name, cpu);
- irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
- action->handler,
- action->flags,
- per_cpu(xen_cpep_name, cpu),
- action->dev_id);
- per_cpu(xen_cpep_irq, cpu) = irq;
- break;
- case IA64_CPE_VECTOR:
- case IA64_MCA_RENDEZ_VECTOR:
- case IA64_PERFMON_VECTOR:
- case IA64_MCA_WAKEUP_VECTOR:
- case IA64_SPURIOUS_INT_VECTOR:
- /* No need to complain, these aren't supported. */
- break;
- default:
- printk(KERN_WARNING "Percpu irq %d is unsupported "
- "by xen!\n", vec);
- break;
- }
- BUG_ON(irq < 0);
-
- if (irq > 0) {
- /*
- * Mark percpu. Without this, migrate_irqs() will
- * mark the interrupt for migrations and trigger it
- * on cpu hotplug.
- */
- irq_set_status_flags(irq, IRQ_PER_CPU);
- }
- }
-
- /* For BSP, we cache registered percpu irqs, and then re-walk
- * them when initializing APs
- */
- if (!cpu && save) {
- BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
- saved_percpu_irqs[saved_irq_cnt].irq = vec;
- saved_percpu_irqs[saved_irq_cnt].action = action;
- saved_irq_cnt++;
- if (!xen_slab_ready)
- late_irq_cnt++;
- }
-}
-
-static void
-xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
-{
- __xen_register_percpu_irq(smp_processor_id(), vec, action, 1);
-}
-
-static void
-xen_bind_early_percpu_irq(void)
-{
- int i;
-
- xen_slab_ready = 1;
- /* There's no race when accessing this cached array, since only
- * BSP will face with such step shortly
- */
- for (i = 0; i < late_irq_cnt; i++)
- __xen_register_percpu_irq(smp_processor_id(),
- saved_percpu_irqs[i].irq,
- saved_percpu_irqs[i].action, 0);
-}
-
-/* FIXME: There's no obvious point to check whether slab is ready. So
- * a hack is used here by utilizing a late time hook.
- */
-
-#ifdef CONFIG_HOTPLUG_CPU
-static int unbind_evtchn_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- if (action == CPU_DEAD) {
- /* Unregister evtchn. */
- if (per_cpu(xen_cpep_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
- NULL);
- per_cpu(xen_cpep_irq, cpu) = -1;
- }
- if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
- NULL);
- per_cpu(xen_cmcp_irq, cpu) = -1;
- }
- if (per_cpu(xen_cmc_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
- per_cpu(xen_cmc_irq, cpu) = -1;
- }
- if (per_cpu(xen_ipi_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
- per_cpu(xen_ipi_irq, cpu) = -1;
- }
- if (per_cpu(xen_resched_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
- NULL);
- per_cpu(xen_resched_irq, cpu) = -1;
- }
- if (per_cpu(xen_timer_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
- NULL);
- per_cpu(xen_timer_irq, cpu) = -1;
- }
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block unbind_evtchn_notifier = {
- .notifier_call = unbind_evtchn_callback,
- .priority = 0
-};
-#endif
-
-void xen_smp_intr_init_early(unsigned int cpu)
-{
-#ifdef CONFIG_SMP
- unsigned int i;
-
- for (i = 0; i < saved_irq_cnt; i++)
- __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
- saved_percpu_irqs[i].action, 0);
-#endif
-}
-
-void xen_smp_intr_init(void)
-{
-#ifdef CONFIG_SMP
- unsigned int cpu = smp_processor_id();
- struct callback_register event = {
- .type = CALLBACKTYPE_event,
- .address = { .ip = (unsigned long)&xen_event_callback },
- };
-
- if (cpu == 0) {
- /* Initialization was already done for boot cpu. */
-#ifdef CONFIG_HOTPLUG_CPU
- /* Register the notifier only once. */
- register_cpu_notifier(&unbind_evtchn_notifier);
-#endif
- return;
- }
-
- /* This should be piggyback when setup vcpu guest context */
- BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
-#endif /* CONFIG_SMP */
-}
-
-void __init
-xen_irq_init(void)
-{
- struct callback_register event = {
- .type = CALLBACKTYPE_event,
- .address = { .ip = (unsigned long)&xen_event_callback },
- };
-
- xen_init_IRQ();
- BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
- late_time_init = xen_bind_early_percpu_irq;
-}
-
-void
-xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
-{
-#ifdef CONFIG_SMP
- /* TODO: we need to call vcpu_up here */
- if (unlikely(vector == ap_wakeup_vector)) {
- /* XXX
- * This should be in __cpu_up(cpu) in ia64 smpboot.c
- * like x86. But don't want to modify it,
- * keep it untouched.
- */
- xen_smp_intr_init_early(cpu);
-
- xen_send_ipi(cpu, vector);
- /* vcpu_prepare_and_up(cpu); */
- return;
- }
-#endif
-
- switch (vector) {
- case IA64_IPI_VECTOR:
- xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
- break;
- case IA64_IPI_RESCHEDULE:
- xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
- break;
- case IA64_CMCP_VECTOR:
- xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
- break;
- case IA64_CPEP_VECTOR:
- xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
- break;
- case IA64_TIMER_VECTOR: {
- /* this is used only once by check_sal_cache_flush()
- at boot time */
- static int used = 0;
- if (!used) {
- xen_send_ipi(cpu, IA64_TIMER_VECTOR);
- used = 1;
- break;
- }
- /* fallthrough */
- }
- default:
- printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
- vector);
- notify_remote_via_irq(0); /* defaults to 0 irq */
- break;
- }
-}
-
-static void __init
-xen_register_ipi(void)
-{
-#ifdef CONFIG_SMP
- register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction);
- register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction);
- register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction);
-#endif
-}
-
-static void
-xen_resend_irq(unsigned int vector)
-{
- (void)resend_irq_on_evtchn(vector);
-}
-
-const struct pv_irq_ops xen_irq_ops __initconst = {
- .register_ipi = xen_register_ipi,
-
- .assign_irq_vector = xen_assign_irq_vector,
- .free_irq_vector = xen_free_irq_vector,
- .register_percpu_irq = xen_register_percpu_irq,
-
- .resend_irq = xen_resend_irq,
-};
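
__xen_register_percpu_irq() above caches registrations made before the event-channel layer (and kmalloc) is usable and replays them from a late-init hook. The deferral pattern, reduced to a user-space sketch with invented names:

#include <stdio.h>

#define MAX_EARLY 16

static int early_vecs[MAX_EARLY];
static int early_cnt;
static int subsystem_ready;

static void do_bind(int vec)
{
        printf("bound vector %d\n", vec);
}

static void register_vec(int vec)
{
        if (subsystem_ready) {
                do_bind(vec);                    /* normal path */
                return;
        }
        if (early_cnt < MAX_EARLY)
                early_vecs[early_cnt++] = vec;   /* defer until ready */
}

/* Plays the role of the late_time_init hook in the removed code. */
static void late_init_hook(void)
{
        subsystem_ready = 1;
        for (int i = 0; i < early_cnt; i++)
                do_bind(early_vecs[i]);          /* replay cached requests */
}

int main(void)
{
        register_vec(3);
        register_vec(7);
        late_init_hook();
        register_vec(9);
        return 0;
}
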
diff --git a/arch/ia64/xen/irq_xen.h b/arch/ia64/xen/irq_xen.h
deleted file mode 100644
index 1778517..0000000
--- a/arch/ia64/xen/irq_xen.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/******************************************************************************
- * arch/ia64/xen/irq_xen.h
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef IRQ_XEN_H
-#define IRQ_XEN_H
-
-extern void (*late_time_init)(void);
-extern char xen_event_callback;
-void __init xen_init_IRQ(void);
-
-extern const struct pv_irq_ops xen_irq_ops __initconst;
-extern void xen_smp_intr_init(void);
-extern void xen_send_ipi(int cpu, int vec);
-
-#endif /* IRQ_XEN_H */
diff --git a/arch/ia64/xen/machvec.c b/arch/ia64/xen/machvec.c
deleted file mode 100644
index 4ad588a..0000000
--- a/arch/ia64/xen/machvec.c
+++ /dev/null
@@ -1,4 +0,0 @@
-#define MACHVEC_PLATFORM_NAME xen
-#define MACHVEC_PLATFORM_HEADER <asm/machvec_xen.h>
-#include <asm/machvec_init.h>
-
diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c
deleted file mode 100644
index 419c862..0000000
--- a/arch/ia64/xen/suspend.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/******************************************************************************
- * arch/ia64/xen/suspend.c
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * suspend/resume
- */
-
-#include <xen/xen-ops.h>
-#include <asm/xen/hypervisor.h>
-#include "time.h"
-
-void
-xen_mm_pin_all(void)
-{
- /* nothing */
-}
-
-void
-xen_mm_unpin_all(void)
-{
- /* nothing */
-}
-
-void
-xen_arch_pre_suspend()
-{
- /* nothing */
-}
-
-void
-xen_arch_post_suspend(int suspend_cancelled)
-{
- if (suspend_cancelled)
- return;
-
- xen_ia64_enable_opt_feature();
- /* add more if necessary */
-}
-
-void xen_arch_resume(void)
-{
- xen_timer_resume_on_aps();
-}
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
deleted file mode 100644
index 1f8244a..0000000
--- a/arch/ia64/xen/time.c
+++ /dev/null
@@ -1,257 +0,0 @@
-/******************************************************************************
- * arch/ia64/xen/time.c
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/delay.h>
-#include <linux/kernel_stat.h>
-#include <linux/posix-timers.h>
-#include <linux/irq.h>
-#include <linux/clocksource.h>
-
-#include <asm/timex.h>
-
-#include <asm/xen/hypervisor.h>
-
-#include <xen/interface/vcpu.h>
-
-#include "../kernel/fsyscall_gtod_data.h"
-
-static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
-static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
-static DEFINE_PER_CPU(unsigned long, xen_blocked_time);
-
-/* taken from i386/kernel/time-xen.c */
-static void xen_init_missing_ticks_accounting(int cpu)
-{
- struct vcpu_register_runstate_memory_area area;
- struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
- int rc;
-
- memset(runstate, 0, sizeof(*runstate));
-
- area.addr.v = runstate;
- rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu,
- &area);
- WARN_ON(rc && rc != -ENOSYS);
-
- per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
- per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
- + runstate->time[RUNSTATE_offline];
-}
-
-/*
- * Runstate accounting
- */
-/* stolen from arch/x86/xen/time.c */
-static void get_runstate_snapshot(struct vcpu_runstate_info *res)
-{
- u64 state_time;
- struct vcpu_runstate_info *state;
-
- BUG_ON(preemptible());
-
- state = &__get_cpu_var(xen_runstate);
-
- /*
- * The runstate info is always updated by the hypervisor on
- * the current CPU, so there's no need to use anything
- * stronger than a compiler barrier when fetching it.
- */
- do {
- state_time = state->state_entry_time;
- rmb();
- *res = *state;
- rmb();
- } while (state->state_entry_time != state_time);
-}
-
-#define NS_PER_TICK (1000000000LL/HZ)
-
-static unsigned long
-consider_steal_time(unsigned long new_itm)
-{
- unsigned long stolen, blocked;
- unsigned long delta_itm = 0, stolentick = 0;
- int cpu = smp_processor_id();
- struct vcpu_runstate_info runstate;
- struct task_struct *p = current;
-
- get_runstate_snapshot(&runstate);
-
- /*
- * Check for vcpu migration effect
- * In this case, itc value is reversed.
- * This causes huge stolen value.
- * This function just checks and reject this effect.
- */
- if (!time_after_eq(runstate.time[RUNSTATE_blocked],
- per_cpu(xen_blocked_time, cpu)))
- blocked = 0;
-
- if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
- runstate.time[RUNSTATE_offline],
- per_cpu(xen_stolen_time, cpu)))
- stolen = 0;
-
- if (!time_after(delta_itm + new_itm, ia64_get_itc()))
- stolentick = ia64_get_itc() - new_itm;
-
- do_div(stolentick, NS_PER_TICK);
- stolentick++;
-
- do_div(stolen, NS_PER_TICK);
-
- if (stolen > stolentick)
- stolen = stolentick;
-
- stolentick -= stolen;
- do_div(blocked, NS_PER_TICK);
-
- if (blocked > stolentick)
- blocked = stolentick;
-
- if (stolen > 0 || blocked > 0) {
- account_steal_ticks(stolen);
- account_idle_ticks(blocked);
- run_local_timers();
-
- rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
-
- scheduler_tick();
- run_posix_cpu_timers(p);
- delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
-
- if (cpu == time_keeper_id)
- xtime_update(stolen + blocked);
-
- local_cpu_data->itm_next = delta_itm + new_itm;
-
- per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
- per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
- }
- return delta_itm;
-}
-
-static int xen_do_steal_accounting(unsigned long *new_itm)
-{
- unsigned long delta_itm;
- delta_itm = consider_steal_time(*new_itm);
- *new_itm += delta_itm;
- if (time_after(*new_itm, ia64_get_itc()) && delta_itm)
- return 1;
-
- return 0;
-}
-
-static void xen_itc_jitter_data_reset(void)
-{
- u64 lcycle, ret;
-
- do {
- lcycle = itc_jitter_data.itc_lastcycle;
- ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0);
- } while (unlikely(ret != lcycle));
-}
-
-/* based on xen_sched_clock() in arch/x86/xen/time.c. */
-/*
- * This relies on HAVE_UNSTABLE_SCHED_CLOCK. If it can't be defined,
- * something similar logic should be implemented here.
- */
-/*
- * Xen sched_clock implementation. Returns the number of unstolen
- * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
- * states.
- */
-static unsigned long long xen_sched_clock(void)
-{
- struct vcpu_runstate_info runstate;
-
- unsigned long long now;
- unsigned long long offset;
- unsigned long long ret;
-
- /*
- * Ideally sched_clock should be called on a per-cpu basis
- * anyway, so preempt should already be disabled, but that's
- * not current practice at the moment.
- */
- preempt_disable();
-
- /*
- * both ia64_native_sched_clock() and xen's runstate are
- * based on mAR.ITC. So difference of them makes sense.
- */
- now = ia64_native_sched_clock();
-
- get_runstate_snapshot(&runstate);
-
- WARN_ON(runstate.state != RUNSTATE_running);
-
- offset = 0;
- if (now > runstate.state_entry_time)
- offset = now - runstate.state_entry_time;
- ret = runstate.time[RUNSTATE_blocked] +
- runstate.time[RUNSTATE_running] +
- offset;
-
- preempt_enable();
-
- return ret;
-}
-
-struct pv_time_ops xen_time_ops __initdata = {
- .init_missing_ticks_accounting = xen_init_missing_ticks_accounting,
- .do_steal_accounting = xen_do_steal_accounting,
- .clocksource_resume = xen_itc_jitter_data_reset,
- .sched_clock = xen_sched_clock,
-};
-
-/* Called after suspend, to resume time. */
-static void xen_local_tick_resume(void)
-{
- /* Just trigger a tick. */
- ia64_cpu_local_tick();
- touch_softlockup_watchdog();
-}
-
-void
-xen_timer_resume(void)
-{
- unsigned int cpu;
-
- xen_local_tick_resume();
-
- for_each_online_cpu(cpu)
- xen_init_missing_ticks_accounting(cpu);
-}
-
-static void ia64_cpu_local_tick_fn(void *unused)
-{
- xen_local_tick_resume();
- xen_init_missing_ticks_accounting(smp_processor_id());
-}
-
-void
-xen_timer_resume_on_aps(void)
-{
- smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1);
-}
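
get_runstate_snapshot() above is a lock-free, version-stamped copy: read the entry time, copy the record, then re-read the stamp and retry if a concurrent update slipped in. A C11 model of that loop (not the kernel helper; field names are illustrative):

#include <stdatomic.h>
#include <string.h>

struct runstate_sim {
        _Atomic unsigned long entry_time;   /* doubles as the version stamp */
        unsigned long time[4];
};

static void snapshot(struct runstate_sim *src, struct runstate_sim *dst)
{
        unsigned long stamp;

        do {
                /* Acquire load: the copy below cannot be hoisted above it. */
                stamp = atomic_load_explicit(&src->entry_time,
                                             memory_order_acquire);
                memcpy(dst->time, src->time, sizeof(dst->time));
                /* Keep the copy ordered before the re-check (plays the role
                 * of the second rmb() in the removed code). */
                atomic_thread_fence(memory_order_acquire);
        } while (atomic_load_explicit(&src->entry_time,
                                      memory_order_relaxed) != stamp);

        dst->entry_time = stamp;
}

int main(void)
{
        struct runstate_sim a = { .entry_time = 1, .time = {10, 20, 30, 40} };
        struct runstate_sim b = { 0 };

        snapshot(&a, &b);
        return b.time[2] == 30 ? 0 : 1;
}
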
diff --git a/arch/ia64/xen/time.h b/arch/ia64/xen/time.h
deleted file mode 100644
index f98d7e1..0000000
--- a/arch/ia64/xen/time.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/******************************************************************************
- * arch/ia64/xen/time.h
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-extern struct pv_time_ops xen_time_ops __initdata;
-void xen_timer_resume_on_aps(void);
diff --git a/arch/ia64/xen/xcom_hcall.c b/arch/ia64/xen/xcom_hcall.c
deleted file mode 100644
index ccaf743..0000000
--- a/arch/ia64/xen/xcom_hcall.c
+++ /dev/null
@@ -1,441 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- * Tristan Gingold <tristan.gingold@bull.net>
- *
- * Copyright (c) 2007
- * Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- * consolidate mini and inline version.
- */
-
-#include <linux/module.h>
-#include <xen/interface/xen.h>
-#include <xen/interface/memory.h>
-#include <xen/interface/grant_table.h>
-#include <xen/interface/callback.h>
-#include <xen/interface/vcpu.h>
-#include <asm/xen/hypervisor.h>
-#include <asm/xen/xencomm.h>
-
-/* Xencomm notes:
- * This file defines hypercalls to be used by xencomm. The hypercalls simply
- * create inlines or mini descriptors for pointers and then call the raw arch
- * hypercall xencomm_arch_hypercall_XXX
- *
- * If the arch wants to directly use these hypercalls, simply define macros
- * in asm/xen/hypercall.h, eg:
- * #define HYPERVISOR_sched_op xencomm_hypercall_sched_op
- *
- * The arch may also define HYPERVISOR_xxx as a function and do more operations
- * before/after doing the hypercall.
- *
- * Note: because only inline or mini descriptors are created these functions
- * must only be called with in kernel memory parameters.
- */
-
-int
-xencomm_hypercall_console_io(int cmd, int count, char *str)
-{
- /* xen early printk uses console io hypercall before
- * xencomm initialization. In that case, we just ignore it.
- */
- if (!xencomm_is_initialized())
- return 0;
-
- return xencomm_arch_hypercall_console_io
- (cmd, count, xencomm_map_no_alloc(str, count));
-}
-EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io);
-
-int
-xencomm_hypercall_event_channel_op(int cmd, void *op)
-{
- struct xencomm_handle *desc;
- desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op));
- if (desc == NULL)
- return -EINVAL;
-
- return xencomm_arch_hypercall_event_channel_op(cmd, desc);
-}
-EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op);
-
-int
-xencomm_hypercall_xen_version(int cmd, void *arg)
-{
- struct xencomm_handle *desc;
- unsigned int argsize;
-
- switch (cmd) {
- case XENVER_version:
- /* do not actually pass an argument */
- return xencomm_arch_hypercall_xen_version(cmd, 0);
- case XENVER_extraversion:
- argsize = sizeof(struct xen_extraversion);
- break;
- case XENVER_compile_info:
- argsize = sizeof(struct xen_compile_info);
- break;
- case XENVER_capabilities:
- argsize = sizeof(struct xen_capabilities_info);
- break;
- case XENVER_changeset:
- argsize = sizeof(struct xen_changeset_info);
- break;
- case XENVER_platform_parameters:
- argsize = sizeof(struct xen_platform_parameters);
- break;
- case XENVER_get_features:
- argsize = (arg == NULL) ? 0 : sizeof(struct xen_feature_info);
- break;
-
- default:
- printk(KERN_DEBUG
- "%s: unknown version op %d\n", __func__, cmd);
- return -ENOSYS;
- }
-
- desc = xencomm_map_no_alloc(arg, argsize);
- if (desc == NULL)
- return -EINVAL;
-
- return xencomm_arch_hypercall_xen_version(cmd, desc);
-}
-EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version);
-
-int
-xencomm_hypercall_physdev_op(int cmd, void *op)
-{
- unsigned int argsize;
-
- switch (cmd) {
- case PHYSDEVOP_apic_read:
- case PHYSDEVOP_apic_write:
- argsize = sizeof(struct physdev_apic);
- break;
- case PHYSDEVOP_alloc_irq_vector:
- case PHYSDEVOP_free_irq_vector:
- argsize = sizeof(struct physdev_irq);
- break;
- case PHYSDEVOP_irq_status_query:
- argsize = sizeof(struct physdev_irq_status_query);
- break;
-
- default:
- printk(KERN_DEBUG
- "%s: unknown physdev op %d\n", __func__, cmd);
- return -ENOSYS;
- }
-
- return xencomm_arch_hypercall_physdev_op
- (cmd, xencomm_map_no_alloc(op, argsize));
-}
-
-static int
-xencommize_grant_table_op(struct xencomm_mini **xc_area,
- unsigned int cmd, void *op, unsigned int count,
- struct xencomm_handle **desc)
-{
- struct xencomm_handle *desc1;
- unsigned int argsize;
-
- switch (cmd) {
- case GNTTABOP_map_grant_ref:
- argsize = sizeof(struct gnttab_map_grant_ref);
- break;
- case GNTTABOP_unmap_grant_ref:
- argsize = sizeof(struct gnttab_unmap_grant_ref);
- break;
- case GNTTABOP_setup_table:
- {
- struct gnttab_setup_table *setup = op;
-
- argsize = sizeof(*setup);
-
- if (count != 1)
- return -EINVAL;
- desc1 = __xencomm_map_no_alloc
- (xen_guest_handle(setup->frame_list),
- setup->nr_frames *
- sizeof(*xen_guest_handle(setup->frame_list)),
- *xc_area);
- if (desc1 == NULL)
- return -EINVAL;
- (*xc_area)++;
- set_xen_guest_handle(setup->frame_list, (void *)desc1);
- break;
- }
- case GNTTABOP_dump_table:
- argsize = sizeof(struct gnttab_dump_table);
- break;
- case GNTTABOP_transfer:
- argsize = sizeof(struct gnttab_transfer);
- break;
- case GNTTABOP_copy:
- argsize = sizeof(struct gnttab_copy);
- break;
- case GNTTABOP_query_size:
- argsize = sizeof(struct gnttab_query_size);
- break;
- default:
- printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n",
- __func__, cmd);
- BUG();
- }
-
- *desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area);
- if (*desc == NULL)
- return -EINVAL;
- (*xc_area)++;
-
- return 0;
-}
-
-int
-xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
- unsigned int count)
-{
- int rc;
- struct xencomm_handle *desc;
- XENCOMM_MINI_ALIGNED(xc_area, 2);
-
- rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc);
- if (rc)
- return rc;
-
- return xencomm_arch_hypercall_grant_table_op(cmd, desc, count);
-}
-EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op);
-
-int
-xencomm_hypercall_sched_op(int cmd, void *arg)
-{
- struct xencomm_handle *desc;
- unsigned int argsize;
-
- switch (cmd) {
- case SCHEDOP_yield:
- case SCHEDOP_block:
- argsize = 0;
- break;
- case SCHEDOP_shutdown:
- argsize = sizeof(struct sched_shutdown);
- break;
- case SCHEDOP_poll:
- {
- struct sched_poll *poll = arg;
- struct xencomm_handle *ports;
-
- argsize = sizeof(struct sched_poll);
- ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports),
- sizeof(*xen_guest_handle(poll->ports)));
-
- set_xen_guest_handle(poll->ports, (void *)ports);
- break;
- }
- default:
- printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd);
- return -ENOSYS;
- }
-
- desc = xencomm_map_no_alloc(arg, argsize);
- if (desc == NULL)
- return -EINVAL;
-
- return xencomm_arch_hypercall_sched_op(cmd, desc);
-}
-EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op);
-
-int
-xencomm_hypercall_multicall(void *call_list, int nr_calls)
-{
- int rc;
- int i;
- struct multicall_entry *mce;
- struct xencomm_handle *desc;
- XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2);
-
- for (i = 0; i < nr_calls; i++) {
- mce = (struct multicall_entry *)call_list + i;
-
- switch (mce->op) {
- case __HYPERVISOR_update_va_mapping:
- case __HYPERVISOR_mmu_update:
- /* No-op on ia64. */
- break;
- case __HYPERVISOR_grant_table_op:
- rc = xencommize_grant_table_op
- (&xc_area,
- mce->args[0], (void *)mce->args[1],
- mce->args[2], &desc);
- if (rc)
- return rc;
- mce->args[1] = (unsigned long)desc;
- break;
- case __HYPERVISOR_memory_op:
- default:
- printk(KERN_DEBUG
- "%s: unhandled multicall op entry op %lu\n",
- __func__, mce->op);
- return -ENOSYS;
- }
- }
-
- desc = xencomm_map_no_alloc(call_list,
- nr_calls * sizeof(struct multicall_entry));
- if (desc == NULL)
- return -EINVAL;
-
- return xencomm_arch_hypercall_multicall(desc, nr_calls);
-}
-EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall);
-
-int
-xencomm_hypercall_callback_op(int cmd, void *arg)
-{
- unsigned int argsize;
- switch (cmd) {
- case CALLBACKOP_register:
- argsize = sizeof(struct callback_register);
- break;
- case CALLBACKOP_unregister:
- argsize = sizeof(struct callback_unregister);
- break;
- default:
- printk(KERN_DEBUG
- "%s: unknown callback op %d\n", __func__, cmd);
- return -ENOSYS;
- }
-
- return xencomm_arch_hypercall_callback_op
- (cmd, xencomm_map_no_alloc(arg, argsize));
-}
-
-static int
-xencommize_memory_reservation(struct xencomm_mini *xc_area,
- struct xen_memory_reservation *mop)
-{
- struct xencomm_handle *desc;
-
- desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start),
- mop->nr_extents *
- sizeof(*xen_guest_handle(mop->extent_start)),
- xc_area);
- if (desc == NULL)
- return -EINVAL;
-
- set_xen_guest_handle(mop->extent_start, (void *)desc);
- return 0;
-}
-
-int
-xencomm_hypercall_memory_op(unsigned int cmd, void *arg)
-{
- GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} };
- struct xen_memory_reservation *xmr = NULL;
- int rc;
- struct xencomm_handle *desc;
- unsigned int argsize;
- XENCOMM_MINI_ALIGNED(xc_area, 2);
-
- switch (cmd) {
- case XENMEM_increase_reservation:
- case XENMEM_decrease_reservation:
- case XENMEM_populate_physmap:
- xmr = (struct xen_memory_reservation *)arg;
- set_xen_guest_handle(extent_start_va[0],
- xen_guest_handle(xmr->extent_start));
-
- argsize = sizeof(*xmr);
- rc = xencommize_memory_reservation(xc_area, xmr);
- if (rc)
- return rc;
- xc_area++;
- break;
-
- case XENMEM_maximum_ram_page:
- argsize = 0;
- break;
-
- case XENMEM_add_to_physmap:
- argsize = sizeof(struct xen_add_to_physmap);
- break;
-
- default:
- printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd);
- return -ENOSYS;
- }
-
- desc = xencomm_map_no_alloc(arg, argsize);
- if (desc == NULL)
- return -EINVAL;
-
- rc = xencomm_arch_hypercall_memory_op(cmd, desc);
-
- switch (cmd) {
- case XENMEM_increase_reservation:
- case XENMEM_decrease_reservation:
- case XENMEM_populate_physmap:
- set_xen_guest_handle(xmr->extent_start,
- xen_guest_handle(extent_start_va[0]));
- break;
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op);
-
-int
-xencomm_hypercall_suspend(unsigned long srec)
-{
- struct sched_shutdown arg;
-
- arg.reason = SHUTDOWN_suspend;
-
- return xencomm_arch_hypercall_sched_op(
- SCHEDOP_shutdown, xencomm_map_no_alloc(&arg, sizeof(arg)));
-}
-
-long
-xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg)
-{
- unsigned int argsize;
- switch (cmd) {
- case VCPUOP_register_runstate_memory_area: {
- struct vcpu_register_runstate_memory_area *area =
- (struct vcpu_register_runstate_memory_area *)arg;
- argsize = sizeof(*arg);
- set_xen_guest_handle(area->addr.h,
- (void *)xencomm_map_no_alloc(area->addr.v,
- sizeof(area->addr.v)));
- break;
- }
-
- default:
- printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd);
- return -ENOSYS;
- }
-
- return xencomm_arch_hypercall_vcpu_op(cmd, cpu,
- xencomm_map_no_alloc(arg, argsize));
-}
-
-long
-xencomm_hypercall_opt_feature(void *arg)
-{
- return xencomm_arch_hypercall_opt_feature(
- xencomm_map_no_alloc(arg,
- sizeof(struct xen_ia64_opt_feature)));
-}
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
deleted file mode 100644
index 3e8d350..0000000
--- a/arch/ia64/xen/xen_pv_ops.c
+++ /dev/null
@@ -1,1141 +0,0 @@
-/******************************************************************************
- * arch/ia64/xen/xen_pv_ops.c
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/console.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/pm.h>
-#include <linux/unistd.h>
-
-#include <asm/xen/hypervisor.h>
-#include <asm/xen/xencomm.h>
-#include <asm/xen/privop.h>
-
-#include "irq_xen.h"
-#include "time.h"
-
-/***************************************************************************
- * general info
- */
-static struct pv_info xen_info __initdata = {
- .kernel_rpl = 2, /* or 1: determine at runtime */
- .paravirt_enabled = 1,
- .name = "Xen/ia64",
-};
-
-#define IA64_RSC_PL_SHIFT 2
-#define IA64_RSC_PL_BIT_SIZE 2
-#define IA64_RSC_PL_MASK \
- (((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT)
-
-static void __init
-xen_info_init(void)
-{
- /* Xenified Linux/ia64 may run on pl = 1 or 2.
- * determine at run time. */
- unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC);
- unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT;
- xen_info.kernel_rpl = rpl;
-}
-
-/***************************************************************************
- * pv_init_ops
- * initialization hooks.
- */
-
-static void
-xen_panic_hypercall(struct unw_frame_info *info, void *arg)
-{
- current->thread.ksp = (__u64)info->sw - 16;
- HYPERVISOR_shutdown(SHUTDOWN_crash);
- /* we're never actually going to get here... */
-}
-
-static int
-xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
- unw_init_running(xen_panic_hypercall, NULL);
- /* we're never actually going to get here... */
- return NOTIFY_DONE;
-}
-
-static struct notifier_block xen_panic_block = {
- xen_panic_event, NULL, 0 /* try to go last */
-};
-
-static void xen_pm_power_off(void)
-{
- local_irq_disable();
- HYPERVISOR_shutdown(SHUTDOWN_poweroff);
-}
-
-static void __init
-xen_banner(void)
-{
- printk(KERN_INFO
- "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld "
- "flags=0x%x\n",
- xen_info.kernel_rpl,
- HYPERVISOR_shared_info->arch.start_info_pfn,
- xen_start_info->nr_pages, xen_start_info->flags);
-}
-
-static int __init
-xen_reserve_memory(struct rsvd_region *region)
-{
- region->start = (unsigned long)__va(
- (HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
- region->end = region->start + PAGE_SIZE;
- return 1;
-}
-
-static void __init
-xen_arch_setup_early(void)
-{
- struct shared_info *s;
- BUG_ON(!xen_pv_domain());
-
- s = HYPERVISOR_shared_info;
- xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
-
- /* Must be done before any hypercall. */
- xencomm_initialize();
-
- xen_setup_features();
- /* Register a call for panic conditions. */
- atomic_notifier_chain_register(&panic_notifier_list,
- &xen_panic_block);
- pm_power_off = xen_pm_power_off;
-
- xen_ia64_enable_opt_feature();
-}
-
-static void __init
-xen_arch_setup_console(char **cmdline_p)
-{
- add_preferred_console("xenboot", 0, NULL);
- add_preferred_console("tty", 0, NULL);
- /* use hvc_xen */
- add_preferred_console("hvc", 0, NULL);
-
-#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
- conswitchp = NULL;
-#endif
-}
-
-static int __init
-xen_arch_setup_nomca(void)
-{
- return 1;
-}
-
-static void __init
-xen_post_smp_prepare_boot_cpu(void)
-{
- xen_setup_vcpu_info_placement();
-}
-
-#ifdef ASM_SUPPORTED
-static unsigned long __init_or_module
-xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
-#endif
-static void __init
-xen_patch_branch(unsigned long tag, unsigned long type);
-
-static const struct pv_init_ops xen_init_ops __initconst = {
- .banner = xen_banner,
-
- .reserve_memory = xen_reserve_memory,
-
- .arch_setup_early = xen_arch_setup_early,
- .arch_setup_console = xen_arch_setup_console,
- .arch_setup_nomca = xen_arch_setup_nomca,
-
- .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
-#ifdef ASM_SUPPORTED
- .patch_bundle = xen_patch_bundle,
-#endif
- .patch_branch = xen_patch_branch,
-};
-
-/***************************************************************************
- * pv_fsys_data
- * addresses for fsys
- */
-
-extern unsigned long xen_fsyscall_table[NR_syscalls];
-extern char xen_fsys_bubble_down[];
-struct pv_fsys_data xen_fsys_data __initdata = {
- .fsyscall_table = (unsigned long *)xen_fsyscall_table,
- .fsys_bubble_down = (void *)xen_fsys_bubble_down,
-};
-
-/***************************************************************************
- * pv_patchdata
- * patchdata addresses
- */
-
-#define DECLARE(name) \
- extern unsigned long __xen_start_gate_##name##_patchlist[]; \
- extern unsigned long __xen_end_gate_##name##_patchlist[]
-
-DECLARE(fsyscall);
-DECLARE(brl_fsys_bubble_down);
-DECLARE(vtop);
-DECLARE(mckinley_e9);
-
-extern unsigned long __xen_start_gate_section[];
-
-#define ASSIGN(name) \
- .start_##name##_patchlist = \
- (unsigned long)__xen_start_gate_##name##_patchlist, \
- .end_##name##_patchlist = \
- (unsigned long)__xen_end_gate_##name##_patchlist
-
-static struct pv_patchdata xen_patchdata __initdata = {
- ASSIGN(fsyscall),
- ASSIGN(brl_fsys_bubble_down),
- ASSIGN(vtop),
- ASSIGN(mckinley_e9),
-
- .gate_section = (void*)__xen_start_gate_section,
-};
-
-/***************************************************************************
- * pv_cpu_ops
- * intrinsics hooks.
- */
-
-#ifndef ASM_SUPPORTED
-static void
-xen_set_itm_with_offset(unsigned long val)
-{
- /* ia64_cpu_local_tick() calls this with interrupt enabled. */
- /* WARN_ON(!irqs_disabled()); */
- xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
-}
-
-static unsigned long
-xen_get_itm_with_offset(void)
-{
- /* unused at this moment */
- printk(KERN_DEBUG "%s is called.\n", __func__);
-
- WARN_ON(!irqs_disabled());
- return ia64_native_getreg(_IA64_REG_CR_ITM) +
- XEN_MAPPEDREGS->itc_offset;
-}
-
-/* ia64_set_itc() is only called by
- * cpu_init() with ia64_set_itc(0) and ia64_sync_itc().
- * So XEN_MAPPEDREGS->itc_offset can be considered as almost constant.
- */
-static void
-xen_set_itc(unsigned long val)
-{
- unsigned long mitc;
-
- WARN_ON(!irqs_disabled());
- mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
- XEN_MAPPEDREGS->itc_offset = val - mitc;
- XEN_MAPPEDREGS->itc_last = val;
-}
-
-static unsigned long
-xen_get_itc(void)
-{
- unsigned long res;
- unsigned long itc_offset;
- unsigned long itc_last;
- unsigned long ret_itc_last;
-
- itc_offset = XEN_MAPPEDREGS->itc_offset;
- do {
- itc_last = XEN_MAPPEDREGS->itc_last;
- res = ia64_native_getreg(_IA64_REG_AR_ITC);
- res += itc_offset;
- if (itc_last >= res)
- res = itc_last + 1;
- ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
- itc_last, res);
- } while (unlikely(ret_itc_last != itc_last));
- return res;
-
-#if 0
- /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled.
- Should it be paravirtualized instead? */
- WARN_ON(!irqs_disabled());
- itc_offset = XEN_MAPPEDREGS->itc_offset;
- itc_last = XEN_MAPPEDREGS->itc_last;
- res = ia64_native_getreg(_IA64_REG_AR_ITC);
- res += itc_offset;
- if (itc_last >= res)
- res = itc_last + 1;
- XEN_MAPPEDREGS->itc_last = res;
- return res;
-#endif
-}
-
-static void xen_setreg(int regnum, unsigned long val)
-{
- switch (regnum) {
- case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
- xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
- break;
- case _IA64_REG_AR_ITC:
- xen_set_itc(val);
- break;
- case _IA64_REG_CR_TPR:
- xen_set_tpr(val);
- break;
- case _IA64_REG_CR_ITM:
- xen_set_itm_with_offset(val);
- break;
- case _IA64_REG_CR_EOI:
- xen_eoi(val);
- break;
- default:
- ia64_native_setreg_func(regnum, val);
- break;
- }
-}
-
-static unsigned long xen_getreg(int regnum)
-{
- unsigned long res;
-
- switch (regnum) {
- case _IA64_REG_PSR:
- res = xen_get_psr();
- break;
- case _IA64_REG_AR_ITC:
- res = xen_get_itc();
- break;
- case _IA64_REG_CR_ITM:
- res = xen_get_itm_with_offset();
- break;
- case _IA64_REG_CR_IVR:
- res = xen_get_ivr();
- break;
- case _IA64_REG_CR_TPR:
- res = xen_get_tpr();
- break;
- default:
- res = ia64_native_getreg_func(regnum);
- break;
- }
- return res;
-}
-
-/* turning on interrupts is a bit more complicated.. write to the
- * memory-mapped virtual psr.i bit first (to avoid race condition),
- * then if any interrupts were pending, we have to execute a hyperprivop
- * to ensure the pending interrupt gets delivered; else we're done! */
-static void
-xen_ssm_i(void)
-{
- int old = xen_get_virtual_psr_i();
- xen_set_virtual_psr_i(1);
- barrier();
- if (!old && xen_get_virtual_pend())
- xen_hyper_ssm_i();
-}
-
-/* turning off interrupts can be paravirtualized simply by writing
- * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
-static void
-xen_rsm_i(void)
-{
- xen_set_virtual_psr_i(0);
- barrier();
-}
-
-static unsigned long
-xen_get_psr_i(void)
-{
- return xen_get_virtual_psr_i() ? IA64_PSR_I : 0;
-}
-
-static void
-xen_intrin_local_irq_restore(unsigned long mask)
-{
- if (mask & IA64_PSR_I)
- xen_ssm_i();
- else
- xen_rsm_i();
-}
-#else
-#define __DEFINE_FUNC(name, code) \
- extern const char xen_ ## name ## _direct_start[]; \
- extern const char xen_ ## name ## _direct_end[]; \
- asm (".align 32\n" \
- ".proc xen_" #name "\n" \
- "xen_" #name ":\n" \
- "xen_" #name "_direct_start:\n" \
- code \
- "xen_" #name "_direct_end:\n" \
- "br.cond.sptk.many b6\n" \
- ".endp xen_" #name "\n")
-
-#define DEFINE_VOID_FUNC0(name, code) \
- extern void \
- xen_ ## name (void); \
- __DEFINE_FUNC(name, code)
-
-#define DEFINE_VOID_FUNC1(name, code) \
- extern void \
- xen_ ## name (unsigned long arg); \
- __DEFINE_FUNC(name, code)
-
-#define DEFINE_VOID_FUNC1_VOID(name, code) \
- extern void \
- xen_ ## name (void *arg); \
- __DEFINE_FUNC(name, code)
-
-#define DEFINE_VOID_FUNC2(name, code) \
- extern void \
- xen_ ## name (unsigned long arg0, \
- unsigned long arg1); \
- __DEFINE_FUNC(name, code)
-
-#define DEFINE_FUNC0(name, code) \
- extern unsigned long \
- xen_ ## name (void); \
- __DEFINE_FUNC(name, code)
-
-#define DEFINE_FUNC1(name, type, code) \
- extern unsigned long \
- xen_ ## name (type arg); \
- __DEFINE_FUNC(name, code)
-
-#define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
-
-/*
- * static void xen_set_itm_with_offset(unsigned long val)
- * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
- */
-/* 2 bundles */
-DEFINE_VOID_FUNC1(set_itm_with_offset,
- "mov r2 = " __stringify(XSI_BASE) " + "
- __stringify(XSI_ITC_OFFSET_OFS) "\n"
- ";;\n"
- "ld8 r3 = [r2]\n"
- ";;\n"
- "sub r8 = r8, r3\n"
- "break " __stringify(HYPERPRIVOP_SET_ITM) "\n");
-
-/*
- * static unsigned long xen_get_itm_with_offset(void)
- * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset;
- */
-/* 2 bundles */
-DEFINE_FUNC0(get_itm_with_offset,
- "mov r2 = " __stringify(XSI_BASE) " + "
- __stringify(XSI_ITC_OFFSET_OFS) "\n"
- ";;\n"
- "ld8 r3 = [r2]\n"
- "mov r8 = cr.itm\n"
- ";;\n"
- "add r8 = r8, r2\n");
-
-/*
- * static void xen_set_itc(unsigned long val)
- * unsigned long mitc;
- *
- * WARN_ON(!irqs_disabled());
- * mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
- * XEN_MAPPEDREGS->itc_offset = val - mitc;
- * XEN_MAPPEDREGS->itc_last = val;
- */
-/* 2 bundles */
-DEFINE_VOID_FUNC1(set_itc,
- "mov r2 = " __stringify(XSI_BASE) " + "
- __stringify(XSI_ITC_LAST_OFS) "\n"
- "mov r3 = ar.itc\n"
- ";;\n"
- "sub r3 = r8, r3\n"
- "st8 [r2] = r8, "
- __stringify(XSI_ITC_LAST_OFS) " - "
- __stringify(XSI_ITC_OFFSET_OFS) "\n"
- ";;\n"
- "st8 [r2] = r3\n");
-
-/*
- * static unsigned long xen_get_itc(void)
- * unsigned long res;
- * unsigned long itc_offset;
- * unsigned long itc_last;
- * unsigned long ret_itc_last;
- *
- * itc_offset = XEN_MAPPEDREGS->itc_offset;
- * do {
- * itc_last = XEN_MAPPEDREGS->itc_last;
- * res = ia64_native_getreg(_IA64_REG_AR_ITC);
- * res += itc_offset;
- * if (itc_last >= res)
- * res = itc_last + 1;
- * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
- * itc_last, res);
- * } while (unlikely(ret_itc_last != itc_last));
- * return res;
- */
-/* 5 bundles */
-DEFINE_FUNC0(get_itc,
- "mov r2 = " __stringify(XSI_BASE) " + "
- __stringify(XSI_ITC_OFFSET_OFS) "\n"
- ";;\n"
- "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - "
- __stringify(XSI_ITC_OFFSET_OFS) "\n"
- /* r9 = itc_offset */
- /* r2 = XSI_ITC_OFFSET */
- "888:\n"
- "mov r8 = ar.itc\n" /* res = ar.itc */
- ";;\n"
- "ld8 r3 = [r2]\n" /* r3 = itc_last */
- "add r8 = r8, r9\n" /* res = ar.itc + itc_offset */
- ";;\n"
- "cmp.gtu p6, p0 = r3, r8\n"
- ";;\n"
- "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */
- ";;\n"
- "mov ar.ccv = r8\n"
- ";;\n"
- "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n"
- ";;\n"
- "cmp.ne p6, p0 = r10, r3\n"
- "(p6) hint @pause\n"
- "(p6) br.cond.spnt 888b\n");
-
-DEFINE_VOID_FUNC1_VOID(fc,
- "break " __stringify(HYPERPRIVOP_FC) "\n");
-
-/*
- * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR
- * masked_addr = *psr_i_addr_addr
- * pending_intr_addr = masked_addr - 1
- * if (val & IA64_PSR_I) {
- * masked = *masked_addr
- * *masked_addr = 0:xen_set_virtual_psr_i(1)
- * compiler barrier
- * if (masked) {
- * uint8_t pending = *pending_intr_addr;
- * if (pending)
- * XEN_HYPER_SSM_I
- * }
- * } else {
- * *masked_addr = 1:xen_set_virtual_psr_i(0)
- * }
- */
-/* 6 bundles */
-DEFINE_VOID_FUNC1(intrin_local_irq_restore,
- /* r8 = input value: 0 or IA64_PSR_I
- * p6 = (flags & IA64_PSR_I)
- * = if clause
- * p7 = !(flags & IA64_PSR_I)
- * = else clause
- */
- "cmp.ne p6, p7 = r8, r0\n"
- "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
- ";;\n"
- /* r9 = XEN_PSR_I_ADDR */
- "ld8 r9 = [r9]\n"
- ";;\n"
-
- /* r10 = masked previous value */
- "(p6) ld1.acq r10 = [r9]\n"
- ";;\n"
-
- /* p8 = !masked interrupt masked previously? */
- "(p6) cmp.ne.unc p8, p0 = r10, r0\n"
-
- /* p7 = else clause */
- "(p7) mov r11 = 1\n"
- ";;\n"
- /* masked = 1 */
- "(p7) st1.rel [r9] = r11\n"
-
- /* p6 = if clause */
- /* masked = 0
- * r9 = masked_addr - 1
- * = pending_intr_addr
- */
- "(p8) st1.rel [r9] = r0, -1\n"
- ";;\n"
- /* r8 = pending_intr */
- "(p8) ld1.acq r11 = [r9]\n"
- ";;\n"
- /* p9 = interrupt pending? */
- "(p8) cmp.ne.unc p9, p10 = r11, r0\n"
- ";;\n"
- "(p10) mf\n"
- /* issue hypercall to trigger interrupt */
- "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n");
-
-DEFINE_VOID_FUNC2(ptcga,
- "break " __stringify(HYPERPRIVOP_PTC_GA) "\n");
-DEFINE_VOID_FUNC2(set_rr,
- "break " __stringify(HYPERPRIVOP_SET_RR) "\n");
-
-/*
- * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR;
- * tmp = *tmp
- * tmp = *tmp;
- * psr_i = tmp? 0: IA64_PSR_I;
- */
-/* 4 bundles */
-DEFINE_FUNC0(get_psr_i,
- "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
- ";;\n"
- "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */
- "mov r8 = 0\n" /* psr_i = 0 */
- ";;\n"
- "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */
- ";;\n"
- "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */
- ";;\n"
- "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n");
-
-DEFINE_FUNC1(thash, unsigned long,
- "break " __stringify(HYPERPRIVOP_THASH) "\n");
-DEFINE_FUNC1(get_cpuid, int,
- "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n");
-DEFINE_FUNC1(get_pmd, int,
- "break " __stringify(HYPERPRIVOP_GET_PMD) "\n");
-DEFINE_FUNC1(get_rr, unsigned long,
- "break " __stringify(HYPERPRIVOP_GET_RR) "\n");
-
-/*
- * void xen_privop_ssm_i(void)
- *
- * int masked = !xen_get_virtual_psr_i();
- * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr)
- * xen_set_virtual_psr_i(1)
- * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0
- * // compiler barrier
- * if (masked) {
- * uint8_t* pend_int_addr =
- * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1;
- * uint8_t pending = *pend_int_addr;
- * if (pending)
- * XEN_HYPER_SSM_I
- * }
- */
-/* 4 bundles */
-DEFINE_VOID_FUNC0(ssm_i,
- "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
- ";;\n"
- "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */
- ";;\n"
- "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */
- ";;\n"
- "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt
- * r8 = XEN_PSR_I_ADDR - 1
- * = pend_int_addr
- */
- "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I
- * previously interrupt
- * masked?
- */
- ";;\n"
- "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */
- ";;\n"
- "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/
- ";;\n"
- /* issue hypercall to get interrupt */
- "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
- ";;\n");
-
-/*
- * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr
- * = XEN_PSR_I_ADDR_ADDR;
- * psr_i_addr = *psr_i_addr_addr;
- * *psr_i_addr = 1;
- */
-/* 2 bundles */
-DEFINE_VOID_FUNC0(rsm_i,
- "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
- /* r8 = XEN_PSR_I_ADDR */
- "mov r9 = 1\n"
- ";;\n"
- "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */
- ";;\n"
- "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */
-
-extern void
-xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
- unsigned long val2, unsigned long val3,
- unsigned long val4);
-__DEFINE_FUNC(set_rr0_to_rr4,
- "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n");
-
-
-extern unsigned long xen_getreg(int regnum);
-#define __DEFINE_GET_REG(id, privop) \
- "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
- ";;\n" \
- "cmp.eq p6, p0 = r2, r8\n" \
- ";;\n" \
- "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \
- "(p6) br.cond.sptk.many b6\n" \
- ";;\n"
-
-__DEFINE_FUNC(getreg,
- __DEFINE_GET_REG(PSR, PSR)
-
- /* get_itc */
- "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
- ";;\n"
- "cmp.eq p6, p0 = r2, r8\n"
- ";;\n"
- "(p6) br.cond.spnt xen_get_itc\n"
- ";;\n"
-
- /* get itm */
- "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
- ";;\n"
- "cmp.eq p6, p0 = r2, r8\n"
- ";;\n"
- "(p6) br.cond.spnt xen_get_itm_with_offset\n"
- ";;\n"
-
- __DEFINE_GET_REG(CR_IVR, IVR)
- __DEFINE_GET_REG(CR_TPR, TPR)
-
- /* fall back */
- "movl r2 = ia64_native_getreg_func\n"
- ";;\n"
- "mov b7 = r2\n"
- ";;\n"
- "br.cond.sptk.many b7\n");
-
-extern void xen_setreg(int regnum, unsigned long val);
-#define __DEFINE_SET_REG(id, privop) \
- "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
- ";;\n" \
- "cmp.eq p6, p0 = r2, r9\n" \
- ";;\n" \
- "(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \
- "(p6) br.cond.sptk.many b6\n" \
- ";;\n"
-
-__DEFINE_FUNC(setreg,
- /* kr0 .. kr 7*/
- /*
- * if (_IA64_REG_AR_KR0 <= regnum &&
- * regnum <= _IA64_REG_AR_KR7) {
- * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0
- * register __val asm ("r9") = val
- * "break HYPERPRIVOP_SET_KR"
- * }
- */
- "mov r17 = r9\n"
- "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n"
- ";;\n"
- "cmp.ge p6, p0 = r9, r2\n"
- "sub r17 = r17, r2\n"
- ";;\n"
- "(p6) cmp.ge.unc p7, p0 = "
- __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0)
- ", r17\n"
- ";;\n"
- "(p7) mov r9 = r8\n"
- ";;\n"
- "(p7) mov r8 = r17\n"
- "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n"
-
- /* set itm */
- "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
- ";;\n"
- "cmp.eq p6, p0 = r2, r8\n"
- ";;\n"
- "(p6) br.cond.spnt xen_set_itm_with_offset\n"
-
- /* set itc */
- "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
- ";;\n"
- "cmp.eq p6, p0 = r2, r8\n"
- ";;\n"
- "(p6) br.cond.spnt xen_set_itc\n"
-
- __DEFINE_SET_REG(CR_TPR, SET_TPR)
- __DEFINE_SET_REG(CR_EOI, EOI)
-
- /* fall back */
- "movl r2 = ia64_native_setreg_func\n"
- ";;\n"
- "mov b7 = r2\n"
- ";;\n"
- "br.cond.sptk.many b7\n");
-#endif
-
-static const struct pv_cpu_ops xen_cpu_ops __initconst = {
- .fc = xen_fc,
- .thash = xen_thash,
- .get_cpuid = xen_get_cpuid,
- .get_pmd = xen_get_pmd,
- .getreg = xen_getreg,
- .setreg = xen_setreg,
- .ptcga = xen_ptcga,
- .get_rr = xen_get_rr,
- .set_rr = xen_set_rr,
- .set_rr0_to_rr4 = xen_set_rr0_to_rr4,
- .ssm_i = xen_ssm_i,
- .rsm_i = xen_rsm_i,
- .get_psr_i = xen_get_psr_i,
- .intrin_local_irq_restore
- = xen_intrin_local_irq_restore,
-};
-
-/******************************************************************************
- * replacement of hand-written assembly code.
- */
-
-extern char xen_switch_to;
-extern char xen_leave_syscall;
-extern char xen_work_processed_syscall;
-extern char xen_leave_kernel;
-
-const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
- .switch_to = (unsigned long)&xen_switch_to,
- .leave_syscall = (unsigned long)&xen_leave_syscall,
- .work_processed_syscall = (unsigned long)&xen_work_processed_syscall,
- .leave_kernel = (unsigned long)&xen_leave_kernel,
-};
-
-/***************************************************************************
- * pv_iosapic_ops
- * iosapic read/write hooks.
- */
-static void
-xen_pcat_compat_init(void)
-{
- /* nothing */
-}
-
-static struct irq_chip*
-xen_iosapic_get_irq_chip(unsigned long trigger)
-{
- return NULL;
-}
-
-static unsigned int
-xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
-{
- struct physdev_apic apic_op;
- int ret;
-
- apic_op.apic_physbase = (unsigned long)iosapic -
- __IA64_UNCACHED_OFFSET;
- apic_op.reg = reg;
- ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
- if (ret)
- return ret;
- return apic_op.value;
-}
-
-static void
-xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
-{
- struct physdev_apic apic_op;
-
- apic_op.apic_physbase = (unsigned long)iosapic -
- __IA64_UNCACHED_OFFSET;
- apic_op.reg = reg;
- apic_op.value = val;
- HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
-}
-
-static struct pv_iosapic_ops xen_iosapic_ops __initdata = {
- .pcat_compat_init = xen_pcat_compat_init,
- .__get_irq_chip = xen_iosapic_get_irq_chip,
-
- .__read = xen_iosapic_read,
- .__write = xen_iosapic_write,
-};
-
-/***************************************************************************
- * pv_ops initialization
- */
-
-void __init
-xen_setup_pv_ops(void)
-{
- xen_info_init();
- pv_info = xen_info;
- pv_init_ops = xen_init_ops;
- pv_fsys_data = xen_fsys_data;
- pv_patchdata = xen_patchdata;
- pv_cpu_ops = xen_cpu_ops;
- pv_iosapic_ops = xen_iosapic_ops;
- pv_irq_ops = xen_irq_ops;
- pv_time_ops = xen_time_ops;
-
- paravirt_cpu_asm_init(&xen_cpu_asm_switch);
-}
-
-#ifdef ASM_SUPPORTED
-/***************************************************************************
- * binary patching
- * pv_init_ops.patch_bundle
- */
-
-#define DEFINE_FUNC_GETREG(name, privop) \
- DEFINE_FUNC0(get_ ## name, \
- "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n")
-
-DEFINE_FUNC_GETREG(psr, PSR);
-DEFINE_FUNC_GETREG(eflag, EFLAG);
-DEFINE_FUNC_GETREG(ivr, IVR);
-DEFINE_FUNC_GETREG(tpr, TPR);
-
-#define DEFINE_FUNC_SET_KR(n) \
- DEFINE_VOID_FUNC0(set_kr ## n, \
- ";;\n" \
- "mov r9 = r8\n" \
- "mov r8 = " #n "\n" \
- "break " __stringify(HYPERPRIVOP_SET_KR) "\n")
-
-DEFINE_FUNC_SET_KR(0);
-DEFINE_FUNC_SET_KR(1);
-DEFINE_FUNC_SET_KR(2);
-DEFINE_FUNC_SET_KR(3);
-DEFINE_FUNC_SET_KR(4);
-DEFINE_FUNC_SET_KR(5);
-DEFINE_FUNC_SET_KR(6);
-DEFINE_FUNC_SET_KR(7);
-
-#define __DEFINE_FUNC_SETREG(name, privop) \
- DEFINE_VOID_FUNC0(name, \
- "break "__stringify(HYPERPRIVOP_ ## privop) "\n")
-
-#define DEFINE_FUNC_SETREG(name, privop) \
- __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop)
-
-DEFINE_FUNC_SETREG(eflag, EFLAG);
-DEFINE_FUNC_SETREG(tpr, TPR);
-__DEFINE_FUNC_SETREG(eoi, EOI);
-
-extern const char xen_check_events[];
-extern const char __xen_intrin_local_irq_restore_direct_start[];
-extern const char __xen_intrin_local_irq_restore_direct_end[];
-extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc;
-
-asm (
- ".align 32\n"
- ".proc xen_check_events\n"
- "xen_check_events:\n"
- /* masked = 0
- * r9 = masked_addr - 1
- * = pending_intr_addr
- */
- "st1.rel [r9] = r0, -1\n"
- ";;\n"
- /* r8 = pending_intr */
- "ld1.acq r11 = [r9]\n"
- ";;\n"
- /* p9 = interrupt pending? */
- "cmp.ne p9, p10 = r11, r0\n"
- ";;\n"
- "(p10) mf\n"
- /* issue hypercall to trigger interrupt */
- "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
- "br.cond.sptk.many b6\n"
- ".endp xen_check_events\n"
- "\n"
- ".align 32\n"
- ".proc __xen_intrin_local_irq_restore_direct\n"
- "__xen_intrin_local_irq_restore_direct:\n"
- "__xen_intrin_local_irq_restore_direct_start:\n"
- "1:\n"
- "{\n"
- "cmp.ne p6, p7 = r8, r0\n"
- "mov r17 = ip\n" /* get ip to calc return address */
- "mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n"
- ";;\n"
- "}\n"
- "{\n"
- /* r9 = XEN_PSR_I_ADDR */
- "ld8 r9 = [r9]\n"
- ";;\n"
- /* r10 = masked previous value */
- "(p6) ld1.acq r10 = [r9]\n"
- "adds r17 = 1f - 1b, r17\n" /* calculate return address */
- ";;\n"
- "}\n"
- "{\n"
- /* p8 = !masked interrupt masked previously? */
- "(p6) cmp.ne.unc p8, p0 = r10, r0\n"
- "\n"
- /* p7 = else clause */
- "(p7) mov r11 = 1\n"
- ";;\n"
- "(p8) mov b6 = r17\n" /* set return address */
- "}\n"
- "{\n"
- /* masked = 1 */
- "(p7) st1.rel [r9] = r11\n"
- "\n"
- "[99:]\n"
- "(p8) brl.cond.dptk.few xen_check_events\n"
- "}\n"
- /* pv calling stub is 5 bundles. fill nop to adjust return address */
- "{\n"
- "nop 0\n"
- "nop 0\n"
- "nop 0\n"
- "}\n"
- "1:\n"
- "__xen_intrin_local_irq_restore_direct_end:\n"
- ".endp __xen_intrin_local_irq_restore_direct\n"
- "\n"
- ".align 8\n"
- "__xen_intrin_local_irq_restore_direct_reloc:\n"
- "data8 99b\n"
-);
-
-static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[]
-__initdata_or_module =
-{
-#define XEN_PATCH_BUNDLE_ELEM(name, type) \
- { \
- (void*)xen_ ## name ## _direct_start, \
- (void*)xen_ ## name ## _direct_end, \
- PARAVIRT_PATCH_TYPE_ ## type, \
- }
-
- XEN_PATCH_BUNDLE_ELEM(fc, FC),
- XEN_PATCH_BUNDLE_ELEM(thash, THASH),
- XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
- XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
- XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
- XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
- XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
- XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
- XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
- XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
- XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
- {
- (void*)__xen_intrin_local_irq_restore_direct_start,
- (void*)__xen_intrin_local_irq_restore_direct_end,
- PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE,
- },
-
-#define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \
- { \
- xen_get_ ## name ## _direct_start, \
- xen_get_ ## name ## _direct_end, \
- PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \
- }
-
- XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
- XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG),
-
- XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR),
- XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR),
-
- XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC),
- XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM),
-
-
-#define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
- { \
- xen_ ## name ## _direct_start, \
- xen_ ## name ## _direct_end, \
- PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \
- }
-
-#define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
- __XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg)
-
- XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0),
- XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1),
- XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2),
- XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3),
- XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4),
- XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5),
- XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6),
- XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7),
-
- XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG),
- XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR),
- __XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI),
-
- XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC),
- XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM),
-};
-
-static unsigned long __init_or_module
-xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
-{
- const unsigned long nelems = sizeof(xen_patch_bundle_elems) /
- sizeof(xen_patch_bundle_elems[0]);
- unsigned long used;
- const struct paravirt_patch_bundle_elem *found;
-
- used = __paravirt_patch_apply_bundle(sbundle, ebundle, type,
- xen_patch_bundle_elems, nelems,
- &found);
-
- if (found == NULL)
- /* fallback */
- return ia64_native_patch_bundle(sbundle, ebundle, type);
- if (used == 0)
- return used;
-
- /* relocation */
- switch (type) {
- case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: {
- unsigned long reloc =
- __xen_intrin_local_irq_restore_direct_reloc;
- unsigned long reloc_offset = reloc - (unsigned long)
- __xen_intrin_local_irq_restore_direct_start;
- unsigned long tag = (unsigned long)sbundle + reloc_offset;
- paravirt_patch_reloc_brl(tag, xen_check_events);
- break;
- }
- default:
- /* nothing */
- break;
- }
- return used;
-}
-#endif /* ASM_SUPPORTED */
-
-const struct paravirt_patch_branch_target xen_branch_target[]
-__initconst = {
-#define PARAVIRT_BR_TARGET(name, type) \
- { \
- &xen_ ## name, \
- PARAVIRT_PATCH_TYPE_BR_ ## type, \
- }
- PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
- PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
- PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
- PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
-};
-
-static void __init
-xen_patch_branch(unsigned long tag, unsigned long type)
-{
- __paravirt_patch_apply_branch(tag, type, xen_branch_target,
- ARRAY_SIZE(xen_branch_target));
-}
diff --git a/arch/ia64/xen/xencomm.c b/arch/ia64/xen/xencomm.c
deleted file mode 100644
index 73d903c..0000000
--- a/arch/ia64/xen/xencomm.c
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/mm.h>
-#include <linux/err.h>
-
-static unsigned long kernel_virtual_offset;
-static int is_xencomm_initialized;
-
-/* for xen early printk. It uses console io hypercall which uses xencomm.
- * However early printk may use it before xencomm initialization.
- */
-int
-xencomm_is_initialized(void)
-{
- return is_xencomm_initialized;
-}
-
-void
-xencomm_initialize(void)
-{
- kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START);
- is_xencomm_initialized = 1;
-}
-
-/* Translate virtual address to physical address. */
-unsigned long
-xencomm_vtop(unsigned long vaddr)
-{
- struct page *page;
- struct vm_area_struct *vma;
-
- if (vaddr == 0)
- return 0UL;
-
- if (REGION_NUMBER(vaddr) == 5) {
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ptep;
-
- /* On ia64, TASK_SIZE refers to current. It is not initialized
- during boot.
- Furthermore the kernel is relocatable and __pa() doesn't
- work on addresses. */
- if (vaddr >= KERNEL_START
- && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE))
- return vaddr - kernel_virtual_offset;
-
- /* In kernel area -- virtually mapped. */
- pgd = pgd_offset_k(vaddr);
- if (pgd_none(*pgd) || pgd_bad(*pgd))
- return ~0UL;
-
- pud = pud_offset(pgd, vaddr);
- if (pud_none(*pud) || pud_bad(*pud))
- return ~0UL;
-
- pmd = pmd_offset(pud, vaddr);
- if (pmd_none(*pmd) || pmd_bad(*pmd))
- return ~0UL;
-
- ptep = pte_offset_kernel(pmd, vaddr);
- if (!ptep)
- return ~0UL;
-
- return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
- }
-
- if (vaddr > TASK_SIZE) {
- /* percpu variables */
- if (REGION_NUMBER(vaddr) == 7 &&
- REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS))
- return ia64_tpa(vaddr);
-
- /* kernel address */
- return __pa(vaddr);
- }
-
- /* XXX double-check (lack of) locking */
- vma = find_extend_vma(current->mm, vaddr);
- if (!vma)
- return ~0UL;
-
- /* We assume the page is modified. */
- page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
- if (IS_ERR_OR_NULL(page))
- return ~0UL;
-
- return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
-}
diff --git a/arch/ia64/xen/xenivt.S b/arch/ia64/xen/xenivt.S
deleted file mode 100644
index 3e71d50..0000000
--- a/arch/ia64/xen/xenivt.S
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * arch/ia64/xen/ivt.S
- *
- * Copyright (C) 2005 Hewlett-Packard Co
- * Dan Magenheimer <dan.magenheimer@hp.com>
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- * pv_ops.
- */
-
-#include <asm/asmmacro.h>
-#include <asm/kregs.h>
-#include <asm/pgtable.h>
-
-#include "../kernel/minstate.h"
-
- .section .text,"ax"
-GLOBAL_ENTRY(xen_event_callback)
- mov r31=pr // prepare to save predicates
- ;;
- SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
- ;;
- movl r3=XSI_PSR_IC
- mov r14=1
- ;;
- st4 [r3]=r14
- ;;
- adds r3=8,r2 // set up second base pointer for SAVE_REST
- srlz.i // ensure everybody knows psr.ic is back on
- ;;
- SAVE_REST
- ;;
-1:
- alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
- add out0=16,sp // pass pointer to pt_regs as first arg
- ;;
- br.call.sptk.many b0=xen_evtchn_do_upcall
- ;;
- movl r20=XSI_PSR_I_ADDR
- ;;
- ld8 r20=[r20]
- ;;
- adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending
- ;;
- ld1 r20=[r20]
- ;;
- cmp.ne p6,p0=r20,r0 // if there are pending events,
- (p6) br.spnt.few 1b // call evtchn_do_upcall again.
- br.sptk.many xen_leave_kernel // we know ia64_leave_kernel is
- // paravirtualized as xen_leave_kernel
-END(xen_event_callback)
diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S
deleted file mode 100644
index e29519e..0000000
--- a/arch/ia64/xen/xensetup.S
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Support routines for Xen
- *
- * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
- */
-
-#include <asm/processor.h>
-#include <asm/asmmacro.h>
-#include <asm/pgtable.h>
-#include <asm/paravirt.h>
-#include <asm/xen/privop.h>
-#include <linux/elfnote.h>
-#include <linux/init.h>
-#include <xen/interface/elfnote.h>
-
- .section .data..read_mostly
- .align 8
- .global xen_domain_type
-xen_domain_type:
- data4 XEN_NATIVE_ASM
- .previous
-
- __INIT
-ENTRY(startup_xen)
- // Calculate load offset.
- // The constant, LOAD_OFFSET, can't be used because the boot
- // loader doesn't always load to the LMA specified by the vmlinux.lds.
- mov r9=ip // must be the first instruction to make sure
- // that r9 = the physical address of startup_xen.
- // Usually r9 = startup_xen - LOAD_OFFSET
- movl r8=startup_xen
- ;;
- sub r9=r9,r8 // Usually r9 = -LOAD_OFFSET.
-
- mov r10=PARAVIRT_HYPERVISOR_TYPE_XEN
- movl r11=_start
- ;;
- add r11=r11,r9
- movl r8=hypervisor_type
- ;;
- add r8=r8,r9
- mov b0=r11
- ;;
- st8 [r8]=r10
- br.cond.sptk.many b0
- ;;
-END(startup_xen)
-
- ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
- ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6")
- ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0")
- ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, data8.ua startup_xen - LOAD_OFFSET)
-
-#define isBP p3 // are we the Bootstrap Processor?
-
-GLOBAL_ENTRY(xen_setup_hook)
- mov r8=XEN_PV_DOMAIN_ASM
-(isBP) movl r9=xen_domain_type;;
-(isBP) st4 [r9]=r8
- movl r10=xen_ivt;;
-
- mov cr.iva=r10
-
- /* Set xsi base. */
-#define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600
-(isBP) mov r2=FW_HYPERCALL_SET_SHARED_INFO_VA
-(isBP) movl r28=XSI_BASE;;
-(isBP) break 0x1000;;
-
- /* setup pv_ops */
-(isBP) mov r4=rp
- ;;
-(isBP) br.call.sptk.many rp=xen_setup_pv_ops
- ;;
-(isBP) mov rp=r4
- ;;
-
- br.ret.sptk.many rp
- ;;
-END(xen_setup_hook)
diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h
index 6976621..1a40265 100644
--- a/arch/m32r/include/asm/barrier.h
+++ b/arch/m32r/include/asm/barrier.h
@@ -11,84 +11,6 @@
#define nop() __asm__ __volatile__ ("nop" : : )
-/*
- * Memory barrier.
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- */
-#define mb() barrier()
-#define rmb() mb()
-#define wmb() mb()
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequent reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data returned by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * b = 2;
- * memory_barrier();
- * p = &b; q = p;
- * read_barrier_depends();
- * d = *q;
- * </programlisting>
- *
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * a = 2;
- * memory_barrier();
- * b = 3; y = b;
- * read_barrier_depends();
- * x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends() do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
+#include <asm-generic/barrier.h>
#endif /* _ASM_M32R_BARRIER_H */
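
The long comment removed above documents read_barrier_depends() with a two-CPU pointer-publication example; the m32r definitions are now taken from asm-generic/barrier.h instead. A small sketch of that example in kernel C, using only primitives defined in the removed lines (smp_wmb() on the producer side is the usual write-side pairing); the function names are illustrative and the sketch assumes the comment's initial values (a == 0, b == 1, p == &a):

#include <asm/barrier.h>

static int a = 0, b = 1;
static int *p = &a;

static void producer(void)		/* runs on CPU 0 */
{
	b = 2;
	smp_wmb();			/* order the store to b before the store to p */
	p = &b;
}

static void consumer(void)		/* runs on CPU 1 */
{
	int *q, d;

	q = p;
	smp_read_barrier_depends();	/* only does real work on Alpha-class CPUs */
	d = *q;				/* sees 2 whenever q observes &b */
	(void)d;
}

The removed comment's second example (no data dependency between the two reads) is not ordered by this primitive, which is why it calls for rmb() instead.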
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 75f25a8..dbdd223 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -87,6 +87,30 @@ config MMU_SUN3
bool
depends on MMU && !MMU_MOTOROLA && !MMU_COLDFIRE
+config KEXEC
+ bool "kexec system call"
+ depends on M68KCLASSIC
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+ but it is independent of the system firmware. And like a reboot
+ you can start any kernel with it, not just Linux.
+
+ The name comes from the similarity to the exec system call.
+
+ It is an ongoing process to be certain the hardware in a machine
+ is properly shutdown, so do not be surprised if this code does not
+ initially work for you. As of this writing the exact hardware
+ interface is strongly in flux, so no good recommendation can be
+ made.
+
+config BOOTINFO_PROC
+ bool "Export bootinfo in procfs"
+ depends on KEXEC && M68KCLASSIC
+ help
+ Say Y to export the bootinfo used to boot the kernel in a
+ "bootinfo" file in procfs. This is useful with kexec.
+
menu "Platform setup"
source arch/m68k/Kconfig.cpu
diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c
index 99449fb..ba03cec 100644
--- a/arch/m68k/amiga/chipram.c
+++ b/arch/m68k/amiga/chipram.c
@@ -87,7 +87,7 @@ void *amiga_chip_alloc_res(unsigned long size, struct resource *res)
atomic_sub(size, &chipavail);
pr_debug("amiga_chip_alloc_res: returning %pR\n", res);
- return (void *)ZTWO_VADDR(res->start);
+ return ZTWO_VADDR(res->start);
}
void amiga_chip_free(void *ptr)
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index b819390..9625b71 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -28,6 +28,8 @@
#include <linux/keyboard.h>
#include <asm/bootinfo.h>
+#include <asm/bootinfo-amiga.h>
+#include <asm/byteorder.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/amigahw.h>
@@ -140,46 +142,46 @@ static struct resource ram_resource[NUM_MEMINFO];
* Parse an Amiga-specific record in the bootinfo
*/
-int amiga_parse_bootinfo(const struct bi_record *record)
+int __init amiga_parse_bootinfo(const struct bi_record *record)
{
int unknown = 0;
- const unsigned long *data = record->data;
+ const void *data = record->data;
- switch (record->tag) {
+ switch (be16_to_cpu(record->tag)) {
case BI_AMIGA_MODEL:
- amiga_model = *data;
+ amiga_model = be32_to_cpup(data);
break;
case BI_AMIGA_ECLOCK:
- amiga_eclock = *data;
+ amiga_eclock = be32_to_cpup(data);
break;
case BI_AMIGA_CHIPSET:
- amiga_chipset = *data;
+ amiga_chipset = be32_to_cpup(data);
break;
case BI_AMIGA_CHIP_SIZE:
- amiga_chip_size = *(const int *)data;
+ amiga_chip_size = be32_to_cpup(data);
break;
case BI_AMIGA_VBLANK:
- amiga_vblank = *(const unsigned char *)data;
+ amiga_vblank = *(const __u8 *)data;
break;
case BI_AMIGA_PSFREQ:
- amiga_psfreq = *(const unsigned char *)data;
+ amiga_psfreq = *(const __u8 *)data;
break;
case BI_AMIGA_AUTOCON:
#ifdef CONFIG_ZORRO
if (zorro_num_autocon < ZORRO_NUM_AUTO) {
- const struct ConfigDev *cd = (struct ConfigDev *)data;
- struct zorro_dev *dev = &zorro_autocon[zorro_num_autocon++];
+ const struct ConfigDev *cd = data;
+ struct zorro_dev_init *dev = &zorro_autocon_init[zorro_num_autocon++];
dev->rom = cd->cd_Rom;
- dev->slotaddr = cd->cd_SlotAddr;
- dev->slotsize = cd->cd_SlotSize;
- dev->resource.start = (unsigned long)cd->cd_BoardAddr;
- dev->resource.end = dev->resource.start + cd->cd_BoardSize - 1;
+ dev->slotaddr = be16_to_cpu(cd->cd_SlotAddr);
+ dev->slotsize = be16_to_cpu(cd->cd_SlotSize);
+ dev->boardaddr = be32_to_cpu(cd->cd_BoardAddr);
+ dev->boardsize = be32_to_cpu(cd->cd_BoardSize);
} else
printk("amiga_parse_bootinfo: too many AutoConfig devices\n");
#endif /* CONFIG_ZORRO */
@@ -358,6 +360,14 @@ static void __init amiga_identify(void)
#undef AMIGAHW_ANNOUNCE
}
+
+static unsigned long amiga_random_get_entropy(void)
+{
+ /* VPOSR/VHPOSR provide at least 17 bits of data changing at 1.79 MHz */
+ return *(unsigned long *)&amiga_custom.vposr;
+}
+
+
/*
* Setup the Amiga configuration info
*/
@@ -395,6 +405,8 @@ void __init config_amiga(void)
mach_heartbeat = amiga_heartbeat;
#endif
+ mach_random_get_entropy = amiga_random_get_entropy;
+
/* Fill in the clock value (based on the 700 kHz E-Clock) */
amiga_colorclock = 5*amiga_eclock; /* 3.5 MHz */
@@ -608,6 +620,8 @@ static void amiga_mem_console_write(struct console *co, const char *s,
static int __init amiga_savekmsg_setup(char *arg)
{
+ bool registered;
+
if (!MACH_IS_AMIGA || strcmp(arg, "mem"))
return 0;
@@ -618,14 +632,16 @@ static int __init amiga_savekmsg_setup(char *arg)
/* Just steal the block, the chipram allocator isn't functional yet */
amiga_chip_size -= SAVEKMSG_MAXMEM;
- savekmsg = (void *)ZTWO_VADDR(CHIP_PHYSADDR + amiga_chip_size);
+ savekmsg = ZTWO_VADDR(CHIP_PHYSADDR + amiga_chip_size);
savekmsg->magic1 = SAVEKMSG_MAGIC1;
savekmsg->magic2 = SAVEKMSG_MAGIC2;
savekmsg->magicptr = ZTWO_PADDR(savekmsg);
savekmsg->size = 0;
+ registered = !!amiga_console_driver.write;
amiga_console_driver.write = amiga_mem_console_write;
- register_console(&amiga_console_driver);
+ if (!registered)
+ register_console(&amiga_console_driver);
return 0;
}
@@ -707,11 +723,16 @@ void amiga_serial_gets(struct console *co, char *s, int len)
static int __init amiga_debug_setup(char *arg)
{
- if (MACH_IS_AMIGA && !strcmp(arg, "ser")) {
- /* no initialization required (?) */
- amiga_console_driver.write = amiga_serial_console_write;
+ bool registered;
+
+ if (!MACH_IS_AMIGA || strcmp(arg, "ser"))
+ return 0;
+
+ /* no initialization required (?) */
+ registered = !!amiga_console_driver.write;
+ amiga_console_driver.write = amiga_serial_console_write;
+ if (!registered)
register_console(&amiga_console_driver);
- }
return 0;
}
diff --git a/arch/m68k/amiga/platform.c b/arch/m68k/amiga/platform.c
index dacd9f9..d34029d 100644
--- a/arch/m68k/amiga/platform.c
+++ b/arch/m68k/amiga/platform.c
@@ -13,6 +13,7 @@
#include <asm/amigahw.h>
#include <asm/amigayle.h>
+#include <asm/byteorder.h>
#ifdef CONFIG_ZORRO
@@ -66,10 +67,12 @@ static int __init z_dev_present(zorro_id id)
{
unsigned int i;
- for (i = 0; i < zorro_num_autocon; i++)
- if (zorro_autocon[i].rom.er_Manufacturer == ZORRO_MANUF(id) &&
- zorro_autocon[i].rom.er_Product == ZORRO_PROD(id))
+ for (i = 0; i < zorro_num_autocon; i++) {
+ const struct ExpansionRom *rom = &zorro_autocon_init[i].rom;
+ if (be16_to_cpu(rom->er_Manufacturer) == ZORRO_MANUF(id) &&
+ rom->er_Product == ZORRO_PROD(id))
return 1;
+ }
return 0;
}
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index 3ea56b9..9268c0f9 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -1,3 +1,4 @@
+#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -9,6 +10,8 @@
#include <asm/setup.h>
#include <asm/bootinfo.h>
+#include <asm/bootinfo-apollo.h>
+#include <asm/byteorder.h>
#include <asm/pgtable.h>
#include <asm/apollohw.h>
#include <asm/irq.h>
@@ -43,26 +46,25 @@ static const char *apollo_models[] = {
[APOLLO_DN4500-APOLLO_DN3000] = "DN4500 (Roadrunner)"
};
-int apollo_parse_bootinfo(const struct bi_record *record) {
-
+int __init apollo_parse_bootinfo(const struct bi_record *record)
+{
int unknown = 0;
- const unsigned long *data = record->data;
+ const void *data = record->data;
- switch(record->tag) {
- case BI_APOLLO_MODEL:
- apollo_model=*data;
- break;
+ switch (be16_to_cpu(record->tag)) {
+ case BI_APOLLO_MODEL:
+ apollo_model = be32_to_cpup(data);
+ break;
- default:
- unknown=1;
+ default:
+ unknown=1;
}
return unknown;
}
-void dn_setup_model(void) {
-
-
+static void __init dn_setup_model(void)
+{
printk("Apollo hardware found: ");
printk("[%s]\n", apollo_models[apollo_model - APOLLO_DN3000]);
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 20cde4e..3e73a63 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -333,6 +333,9 @@ void __init atari_init_IRQ(void)
m68k_setup_irq_controller(&atari_mfptimer_chip, handle_simple_irq,
IRQ_MFP_TIMER1, 8);
+ irq_set_status_flags(IRQ_MFP_TIMER1, IRQ_IS_POLLED);
+ irq_set_status_flags(IRQ_MFP_TIMER2, IRQ_IS_POLLED);
+
/* prepare timer D data for use as poll interrupt */
/* set Timer D data Register - needs to be > 0 */
st_mfp.tim_dt_d = 254; /* < 100 Hz */
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index fb2d0bd..01a6216 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -37,6 +37,8 @@
#include <linux/module.h>
#include <asm/bootinfo.h>
+#include <asm/bootinfo-atari.h>
+#include <asm/byteorder.h>
#include <asm/setup.h>
#include <asm/atarihw.h>
#include <asm/atariints.h>
@@ -129,14 +131,14 @@ static int __init scc_test(volatile char *ctla)
int __init atari_parse_bootinfo(const struct bi_record *record)
{
int unknown = 0;
- const u_long *data = record->data;
+ const void *data = record->data;
- switch (record->tag) {
+ switch (be16_to_cpu(record->tag)) {
case BI_ATARI_MCH_COOKIE:
- atari_mch_cookie = *data;
+ atari_mch_cookie = be32_to_cpup(data);
break;
case BI_ATARI_MCH_TYPE:
- atari_mch_type = *data;
+ atari_mch_type = be32_to_cpup(data);
break;
default:
unknown = 1;
diff --git a/arch/m68k/atari/debug.c b/arch/m68k/atari/debug.c
index a547ba9..03cb5e0 100644
--- a/arch/m68k/atari/debug.c
+++ b/arch/m68k/atari/debug.c
@@ -287,6 +287,8 @@ static void __init atari_init_midi_port(int cflag)
static int __init atari_debug_setup(char *arg)
{
+ bool registered;
+
if (!MACH_IS_ATARI)
return 0;
@@ -294,6 +296,7 @@ static int __init atari_debug_setup(char *arg)
/* defaults to ser2 for a Falcon and ser1 otherwise */
arg = MACH_IS_FALCON ? "ser2" : "ser1";
+ registered = !!atari_console_driver.write;
if (!strcmp(arg, "ser1")) {
/* ST-MFP Modem1 serial port */
atari_init_mfp_port(B9600|CS8);
@@ -317,7 +320,7 @@ static int __init atari_debug_setup(char *arg)
sound_ym.wd_data = sound_ym.rd_data_reg_sel | 0x20; /* strobe H */
atari_console_driver.write = atari_par_console_write;
}
- if (atari_console_driver.write)
+ if (atari_console_driver.write && !registered)
register_console(&atari_console_driver);
return 0;
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 8943aa4..478623d 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -28,6 +28,8 @@
#include <linux/bcd.h>
#include <asm/bootinfo.h>
+#include <asm/bootinfo-vme.h>
+#include <asm/byteorder.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/irq.h>
@@ -50,9 +52,9 @@ void bvme6000_set_vectors (void);
static irq_handler_t tick_handler;
-int bvme6000_parse_bootinfo(const struct bi_record *bi)
+int __init bvme6000_parse_bootinfo(const struct bi_record *bi)
{
- if (bi->tag == BI_VME_TYPE)
+ if (be16_to_cpu(bi->tag) == BI_VME_TYPE)
return 0;
else
return 1;
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 19325e1..559ff3a 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -52,7 +52,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -63,11 +62,11 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -85,6 +84,17 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -98,6 +108,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
@@ -130,6 +141,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -144,11 +156,18 @@ CONFIG_IP_SET_HASH_IP=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -156,6 +175,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
@@ -170,6 +190,9 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -183,11 +206,13 @@ CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
@@ -195,10 +220,13 @@ CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_FW_LOADER_USER_HELPER is not set
@@ -216,6 +244,7 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
+CONFIG_DUMMY_IRQ=m
CONFIG_IDE=y
CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
@@ -262,6 +291,7 @@ CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_VXLAN=m
@@ -271,10 +301,10 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_3COM is not set
CONFIG_A2065=y
CONFIG_ARIADNE=y
+# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_CIRRUS is not set
-# CONFIG_NET_VENDOR_FUJITSU is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -285,6 +315,7 @@ CONFIG_ZORRO8390=y
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -311,7 +342,6 @@ CONFIG_JOYSTICK_AMIGA=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_M68K_BEEP=m
# CONFIG_SERIO is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_PRINTER=m
@@ -345,10 +375,6 @@ CONFIG_HEARTBEAT=y
CONFIG_PROC_HARDWARE=y
CONFIG_AMIGA_BUILTIN_SERIAL=y
CONFIG_SERIAL_CONSOLE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -385,7 +411,7 @@ CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
@@ -444,10 +470,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_STRING_HELPERS=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
@@ -480,6 +506,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 14dc6cc..cb1f55d 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -50,7 +50,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -61,11 +60,11 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -83,6 +82,17 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -96,6 +106,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
@@ -128,6 +139,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -142,11 +154,18 @@ CONFIG_IP_SET_HASH_IP=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -154,6 +173,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
@@ -168,6 +188,9 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -181,11 +204,13 @@ CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
@@ -193,10 +218,13 @@ CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_FW_LOADER_USER_HELPER is not set
@@ -208,6 +236,7 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
+CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_SCSI_TGT=m
@@ -244,12 +273,14 @@ CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
+# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -258,6 +289,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -279,7 +311,6 @@ CONFIG_INPUT_EVDEV=m
# CONFIG_MOUSE_PS2 is not set
CONFIG_MOUSE_SERIAL=m
CONFIG_SERIO=m
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
@@ -302,10 +333,6 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_HEARTBEAT=y
CONFIG_PROC_HARDWARE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -342,7 +369,7 @@ CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
@@ -401,10 +428,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_STRING_HELPERS=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
@@ -437,6 +464,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 6d5370c..e880cfb 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -49,7 +49,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -60,11 +59,11 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -82,6 +81,17 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -95,6 +105,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
@@ -127,6 +138,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -141,11 +153,18 @@ CONFIG_IP_SET_HASH_IP=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -153,6 +172,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
@@ -167,6 +187,9 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -180,11 +203,13 @@ CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
@@ -192,10 +217,13 @@ CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_FW_LOADER_USER_HELPER is not set
@@ -211,6 +239,7 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
+CONFIG_DUMMY_IRQ=m
CONFIG_IDE=y
CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
@@ -249,10 +278,10 @@ CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
-CONFIG_MII=y
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_VXLAN=m
@@ -260,6 +289,7 @@ CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
CONFIG_ATARILANCE=y
+# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -267,6 +297,7 @@ CONFIG_ATARILANCE=y
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -291,7 +322,6 @@ CONFIG_MOUSE_ATARI=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_M68K_BEEP=m
# CONFIG_SERIO is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_PRINTER=m
@@ -320,10 +350,6 @@ CONFIG_NFBLOCK=y
CONFIG_NFCON=y
CONFIG_NFETH=y
CONFIG_ATARI_DSP56K=m
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -360,7 +386,7 @@ CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
@@ -419,10 +445,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_STRING_HELPERS=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
@@ -455,6 +481,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index c015ddb..4aa4f45 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
@@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
@@ -166,6 +186,9 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
@@ -191,10 +216,13 @@ CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_FW_LOADER_USER_HELPER is not set
@@ -206,6 +234,7 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
+CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_SCSI_TGT=m
@@ -243,12 +272,14 @@ CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
+# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
CONFIG_BVME6000_NET=y
@@ -257,6 +288,7 @@ CONFIG_BVME6000_NET=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PROC_HARDWARE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
@@ -393,10 +421,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_STRING_HELPERS=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
@@ -429,6 +457,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index ec7382d..7cd9d9f 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -50,7 +50,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -61,11 +60,11 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -83,6 +82,17 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -96,6 +106,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
@@ -128,6 +139,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -142,11 +154,18 @@ CONFIG_IP_SET_HASH_IP=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -154,6 +173,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
@@ -168,6 +188,9 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -181,11 +204,13 @@ CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
@@ -193,10 +218,13 @@ CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_FW_LOADER_USER_HELPER is not set
@@ -208,6 +236,7 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
+CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_SCSI_TGT=m
@@ -244,6 +273,7 @@ CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_VXLAN=m
@@ -251,6 +281,7 @@ CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
CONFIG_HPLANCE=y
+# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -259,6 +290,7 @@ CONFIG_HPLANCE=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -282,7 +314,6 @@ CONFIG_MOUSE_SERIAL=m
CONFIG_INPUT_MISC=y
CONFIG_HP_SDC_RTC=m
CONFIG_SERIO_SERPORT=m
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
@@ -304,10 +335,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PROC_HARDWARE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -344,7 +371,7 @@ CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
@@ -403,10 +430,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_STRING_HELPERS=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
@@ -439,6 +466,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 7d46fbe..31f5bd0 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -49,7 +49,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -60,11 +59,11 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -82,6 +81,17 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -95,6 +105,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
@@ -127,6 +138,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -141,11 +153,18 @@ CONFIG_IP_SET_HASH_IP=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -153,6 +172,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
@@ -167,6 +187,9 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -180,11 +203,13 @@ CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
@@ -195,11 +220,13 @@ CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
+CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_FW_LOADER_USER_HELPER is not set
@@ -212,6 +239,7 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
+CONFIG_DUMMY_IRQ=m
CONFIG_IDE=y
CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
@@ -261,6 +289,7 @@ CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_VXLAN=m
@@ -268,6 +297,7 @@ CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
CONFIG_MAC89x0=y
@@ -279,6 +309,7 @@ CONFIG_MAC8390=y
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -302,7 +333,6 @@ CONFIG_MOUSE_SERIAL=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_M68K_BEEP=m
CONFIG_SERIO=m
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_PMACZILOG=y
@@ -327,10 +357,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PROC_HARDWARE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -367,7 +393,7 @@ CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
@@ -426,10 +452,11 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
@@ -462,6 +489,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index b17a883..4e5adff 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -58,7 +58,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -69,11 +68,11 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -91,6 +90,17 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -104,6 +114,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
@@ -136,6 +147,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -150,11 +162,18 @@ CONFIG_IP_SET_HASH_IP=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -162,6 +181,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
@@ -176,6 +196,9 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -189,11 +212,13 @@ CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
@@ -204,11 +229,13 @@ CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
+CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_FW_LOADER_USER_HELPER is not set
@@ -230,6 +257,7 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
+CONFIG_DUMMY_IRQ=m
CONFIG_IDE=y
CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
@@ -290,10 +318,10 @@ CONFIG_MAC_EMUMOUSEBTN=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
-CONFIG_MII=y
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_VXLAN=m
@@ -308,10 +336,10 @@ CONFIG_HPLANCE=y
CONFIG_MVME147_NET=y
CONFIG_SUN3LANCE=y
CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
CONFIG_MAC89x0=y
-# CONFIG_NET_VENDOR_FUJITSU is not set
# CONFIG_NET_VENDOR_HP is not set
CONFIG_BVME6000_NET=y
CONFIG_MVME16x_NET=y
@@ -325,6 +353,7 @@ CONFIG_APNE=y
CONFIG_ZORRO8390=y
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PLIP=m
CONFIG_PPP=m
@@ -357,7 +386,6 @@ CONFIG_INPUT_MISC=y
CONFIG_INPUT_M68K_BEEP=m
CONFIG_HP_SDC_RTC=m
CONFIG_SERIO_Q40KBD=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_PMACZILOG=y
@@ -405,10 +433,6 @@ CONFIG_NFETH=y
CONFIG_ATARI_DSP56K=m
CONFIG_AMIGA_BUILTIN_SERIAL=y
CONFIG_SERIAL_CONSOLE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -445,7 +469,7 @@ CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
@@ -504,10 +528,11 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
@@ -540,6 +565,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 5586c65..02cdbac 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -47,7 +47,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -58,11 +57,11 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -80,6 +79,17 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -93,6 +103,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
@@ -125,6 +136,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -139,11 +151,18 @@ CONFIG_IP_SET_HASH_IP=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -151,6 +170,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
@@ -165,6 +185,9 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -178,11 +201,13 @@ CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
@@ -190,10 +215,13 @@ CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_FW_LOADER_USER_HELPER is not set
@@ -205,6 +233,7 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
+CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_SCSI_TGT=m
@@ -242,6 +271,7 @@ CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_VXLAN=m
@@ -249,6 +279,7 @@ CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
CONFIG_MVME147_NET=y
+# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -257,6 +288,7 @@ CONFIG_MVME147_NET=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PROC_HARDWARE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
@@ -393,10 +421,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_STRING_HELPERS=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
@@ -429,6 +457,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index e5e8262..05a990a 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
@@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
@@ -166,6 +186,9 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
@@ -191,10 +216,13 @@ CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_FW_LOADER_USER_HELPER is not set
@@ -206,6 +234,7 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
+CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_SCSI_TGT=m
@@ -243,12 +272,14 @@ CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
+# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
CONFIG_MVME16x_NET=y
@@ -257,6 +288,7 @@ CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PROC_HARDWARE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
@@ -393,10 +421,11 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
@@ -429,6 +458,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index be1496e..568e2a9 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
@@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
@@ -166,6 +186,9 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
@@ -191,10 +216,13 @@ CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_FW_LOADER_USER_HELPER is not set
@@ -209,6 +237,7 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
+CONFIG_DUMMY_IRQ=m
CONFIG_IDE=y
CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
@@ -249,6 +278,7 @@ CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_VXLAN=m
@@ -257,10 +287,10 @@ CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_CIRRUS is not set
-# CONFIG_NET_VENDOR_FUJITSU is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -269,6 +299,7 @@ CONFIG_NE2000=m
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PLIP=m
CONFIG_PPP=m
@@ -293,7 +324,6 @@ CONFIG_MOUSE_SERIAL=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_M68K_BEEP=m
CONFIG_SERIO_Q40KBD=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_PRINTER=m
@@ -318,10 +348,6 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_HEARTBEAT=y
CONFIG_PROC_HARDWARE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -358,7 +384,7 @@ CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
@@ -417,10 +443,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_STRING_HELPERS=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
@@ -453,6 +479,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 54674d6..60b0aea 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -45,7 +45,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -56,11 +55,11 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -78,6 +77,17 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -91,6 +101,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
@@ -123,6 +134,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -137,11 +149,18 @@ CONFIG_IP_SET_HASH_IP=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -149,6 +168,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
@@ -163,6 +183,9 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -176,11 +199,13 @@ CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
@@ -188,10 +213,13 @@ CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_FW_LOADER_USER_HELPER is not set
@@ -203,6 +231,7 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
+CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_SCSI_TGT=m
@@ -240,6 +269,7 @@ CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_VXLAN=m
@@ -247,6 +277,7 @@ CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
CONFIG_SUN3LANCE=y
+# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
CONFIG_SUN3_82586=y
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -255,6 +286,7 @@ CONFIG_SUN3_82586=y
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -276,7 +308,6 @@ CONFIG_INPUT_EVDEV=m
CONFIG_KEYBOARD_SUNKBD=y
# CONFIG_MOUSE_PS2 is not set
CONFIG_MOUSE_SERIAL=m
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
@@ -296,10 +327,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PROC_HARDWARE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -336,7 +363,7 @@ CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
@@ -395,10 +422,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_STRING_HELPERS=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
@@ -431,6 +458,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 832d953..21bda33 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -45,7 +45,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -56,11 +55,11 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -78,6 +77,17 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -91,6 +101,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
@@ -123,6 +134,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -137,11 +149,18 @@ CONFIG_IP_SET_HASH_IP=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -149,6 +168,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
@@ -163,6 +183,9 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -176,11 +199,13 @@ CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
@@ -188,10 +213,13 @@ CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_FW_LOADER_USER_HELPER is not set
@@ -203,6 +231,7 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
+CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_SCSI_TGT=m
@@ -240,6 +269,7 @@ CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_VXLAN=m
@@ -247,6 +277,7 @@ CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
CONFIG_SUN3LANCE=y
+# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -255,6 +286,7 @@ CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -276,7 +308,6 @@ CONFIG_INPUT_EVDEV=m
CONFIG_KEYBOARD_SUNKBD=y
# CONFIG_MOUSE_PS2 is not set
CONFIG_MOUSE_SERIAL=m
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
@@ -296,10 +327,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PROC_HARDWARE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -336,7 +363,7 @@ CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
@@ -395,10 +422,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_STRING_HELPERS=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
@@ -431,6 +458,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c
index 121a666..71b78ec 100644
--- a/arch/m68k/emu/natfeat.c
+++ b/arch/m68k/emu/natfeat.c
@@ -9,6 +9,7 @@
* the GNU General Public License (GPL), incorporated herein by reference.
*/
+#include <linux/init.h>
#include <linux/types.h>
#include <linux/console.h>
#include <linux/string.h>
@@ -70,7 +71,7 @@ static void nf_poweroff(void)
nf_call(id);
}
-void nf_init(void)
+void __init nf_init(void)
{
unsigned long id, version;
char buf[256];
diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c
index b7609f7..2e5a787 100644
--- a/arch/m68k/hp300/config.c
+++ b/arch/m68k/hp300/config.c
@@ -14,6 +14,8 @@
#include <linux/console.h>
#include <asm/bootinfo.h>
+#include <asm/bootinfo-hp300.h>
+#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/blinken.h>
#include <asm/io.h> /* readb() and writeb() */
@@ -70,15 +72,15 @@ extern int hp300_setup_serial_console(void) __init;
int __init hp300_parse_bootinfo(const struct bi_record *record)
{
int unknown = 0;
- const unsigned long *data = record->data;
+ const void *data = record->data;
- switch (record->tag) {
+ switch (be16_to_cpu(record->tag)) {
case BI_HP300_MODEL:
- hp300_model = *data;
+ hp300_model = be32_to_cpup(data);
break;
case BI_HP300_UART_SCODE:
- hp300_uart_scode = *data;
+ hp300_uart_scode = be32_to_cpup(data);
break;
case BI_HP300_UART_ADDR:
diff --git a/arch/m68k/include/asm/amigahw.h b/arch/m68k/include/asm/amigahw.h
index 7a19b56..5ad5681 100644
--- a/arch/m68k/include/asm/amigahw.h
+++ b/arch/m68k/include/asm/amigahw.h
@@ -18,26 +18,7 @@
#include <linux/ioport.h>
- /*
- * Different Amiga models
- */
-
-#define AMI_UNKNOWN (0)
-#define AMI_500 (1)
-#define AMI_500PLUS (2)
-#define AMI_600 (3)
-#define AMI_1000 (4)
-#define AMI_1200 (5)
-#define AMI_2000 (6)
-#define AMI_2500 (7)
-#define AMI_3000 (8)
-#define AMI_3000T (9)
-#define AMI_3000PLUS (10)
-#define AMI_4000 (11)
-#define AMI_4000T (12)
-#define AMI_CDTV (13)
-#define AMI_CD32 (14)
-#define AMI_DRACO (15)
+#include <asm/bootinfo-amiga.h>
/*
@@ -46,11 +27,6 @@
extern unsigned long amiga_chipset;
-#define CS_STONEAGE (0)
-#define CS_OCS (1)
-#define CS_ECS (2)
-#define CS_AGA (3)
-
/*
* Miscellaneous
@@ -266,7 +242,7 @@ struct CIA {
#define zTwoBase (0x80000000)
#define ZTWO_PADDR(x) (((unsigned long)(x))-zTwoBase)
-#define ZTWO_VADDR(x) (((unsigned long)(x))+zTwoBase)
+#define ZTWO_VADDR(x) ((void __iomem *)(((unsigned long)(x))+zTwoBase))
#define CUSTOM_PHYSADDR (0xdff000)
#define amiga_custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR)))
diff --git a/arch/m68k/include/asm/apollohw.h b/arch/m68k/include/asm/apollohw.h
index 6c19e0c..87fc899 100644
--- a/arch/m68k/include/asm/apollohw.h
+++ b/arch/m68k/include/asm/apollohw.h
@@ -5,18 +5,11 @@
#include <linux/types.h>
-/*
- apollo models
-*/
+#include <asm/bootinfo-apollo.h>
+
extern u_long apollo_model;
-#define APOLLO_UNKNOWN (0)
-#define APOLLO_DN3000 (1)
-#define APOLLO_DN3010 (2)
-#define APOLLO_DN3500 (3)
-#define APOLLO_DN4000 (4)
-#define APOLLO_DN4500 (5)
/*
see scn2681 data sheet for more info.
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index d887050..972c8f3 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -21,7 +21,7 @@
#define _LINUX_ATARIHW_H_
#include <linux/types.h>
-#include <asm/bootinfo.h>
+#include <asm/bootinfo-atari.h>
#include <asm/raw_io.h>
extern u_long atari_mch_cookie;
diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h
index 445ce22..15c5f77 100644
--- a/arch/m68k/include/asm/barrier.h
+++ b/arch/m68k/include/asm/barrier.h
@@ -1,20 +1,8 @@
#ifndef _M68K_BARRIER_H
#define _M68K_BARRIER_H
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
#define nop() do { asm volatile ("nop"); barrier(); } while (0)
-#define mb() barrier()
-#define rmb() barrier()
-#define wmb() barrier()
-#define read_barrier_depends() ((void)0)
-#define set_mb(var, value) ({ (var) = (value); wmb(); })
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() ((void)0)
+#include <asm-generic/barrier.h>
#endif /* _M68K_BARRIER_H */
diff --git a/arch/m68k/include/asm/bootinfo.h b/arch/m68k/include/asm/bootinfo.h
index 67e7a78..8e21326 100644
--- a/arch/m68k/include/asm/bootinfo.h
+++ b/arch/m68k/include/asm/bootinfo.h
@@ -6,373 +6,23 @@
** This file is subject to the terms and conditions of the GNU General Public
** License. See the file COPYING in the main directory of this archive
** for more details.
-**
-** Created 09/29/92 by Greg Harp
-**
-** 5/2/94 Roman Hodek:
-** Added bi_atari part of the machine dependent union bi_un; for now it
-** contains just a model field to distinguish between TT and Falcon.
-** 26/7/96 Roman Zippel:
-** Renamed to setup.h; added some useful macros to allow gcc some
-** optimizations if possible.
-** 5/10/96 Geert Uytterhoeven:
-** Redesign of the boot information structure; renamed to bootinfo.h again
-** 27/11/96 Geert Uytterhoeven:
-** Backwards compatibility with bootinfo interface version 1.0
*/
#ifndef _M68K_BOOTINFO_H
#define _M68K_BOOTINFO_H
+#include <uapi/asm/bootinfo.h>
- /*
- * Bootinfo definitions
- *
- * This is an easily parsable and extendable structure containing all
- * information to be passed from the bootstrap to the kernel.
- *
- * This way I hope to keep all future changes back/forewards compatible.
- * Thus, keep your fingers crossed...
- *
- * This structure is copied right after the kernel bss by the bootstrap
- * routine.
- */
#ifndef __ASSEMBLY__
-struct bi_record {
- unsigned short tag; /* tag ID */
- unsigned short size; /* size of record (in bytes) */
- unsigned long data[0]; /* data */
-};
-
-#endif /* __ASSEMBLY__ */
-
-
- /*
- * Tag Definitions
- *
- * Machine independent tags start counting from 0x0000
- * Machine dependent tags start counting from 0x8000
- */
-
-#define BI_LAST 0x0000 /* last record (sentinel) */
-#define BI_MACHTYPE 0x0001 /* machine type (u_long) */
-#define BI_CPUTYPE 0x0002 /* cpu type (u_long) */
-#define BI_FPUTYPE 0x0003 /* fpu type (u_long) */
-#define BI_MMUTYPE 0x0004 /* mmu type (u_long) */
-#define BI_MEMCHUNK 0x0005 /* memory chunk address and size */
- /* (struct mem_info) */
-#define BI_RAMDISK 0x0006 /* ramdisk address and size */
- /* (struct mem_info) */
-#define BI_COMMAND_LINE 0x0007 /* kernel command line parameters */
- /* (string) */
-
- /*
- * Amiga-specific tags
- */
-
-#define BI_AMIGA_MODEL 0x8000 /* model (u_long) */
-#define BI_AMIGA_AUTOCON 0x8001 /* AutoConfig device */
- /* (struct ConfigDev) */
-#define BI_AMIGA_CHIP_SIZE 0x8002 /* size of Chip RAM (u_long) */
-#define BI_AMIGA_VBLANK 0x8003 /* VBLANK frequency (u_char) */
-#define BI_AMIGA_PSFREQ 0x8004 /* power supply frequency (u_char) */
-#define BI_AMIGA_ECLOCK 0x8005 /* EClock frequency (u_long) */
-#define BI_AMIGA_CHIPSET 0x8006 /* native chipset present (u_long) */
-#define BI_AMIGA_SERPER 0x8007 /* serial port period (u_short) */
-
- /*
- * Atari-specific tags
- */
-
-#define BI_ATARI_MCH_COOKIE 0x8000 /* _MCH cookie from TOS (u_long) */
-#define BI_ATARI_MCH_TYPE 0x8001 /* special machine type (u_long) */
- /* (values are ATARI_MACH_* defines */
-
-/* mch_cookie values (upper word) */
-#define ATARI_MCH_ST 0
-#define ATARI_MCH_STE 1
-#define ATARI_MCH_TT 2
-#define ATARI_MCH_FALCON 3
-
-/* mch_type values */
-#define ATARI_MACH_NORMAL 0 /* no special machine type */
-#define ATARI_MACH_MEDUSA 1 /* Medusa 040 */
-#define ATARI_MACH_HADES 2 /* Hades 040 or 060 */
-#define ATARI_MACH_AB40 3 /* Afterburner040 on Falcon */
-
- /*
- * VME-specific tags
- */
-
-#define BI_VME_TYPE 0x8000 /* VME sub-architecture (u_long) */
-#define BI_VME_BRDINFO 0x8001 /* VME board information (struct) */
-
-/* BI_VME_TYPE codes */
-#define VME_TYPE_TP34V 0x0034 /* Tadpole TP34V */
-#define VME_TYPE_MVME147 0x0147 /* Motorola MVME147 */
-#define VME_TYPE_MVME162 0x0162 /* Motorola MVME162 */
-#define VME_TYPE_MVME166 0x0166 /* Motorola MVME166 */
-#define VME_TYPE_MVME167 0x0167 /* Motorola MVME167 */
-#define VME_TYPE_MVME172 0x0172 /* Motorola MVME172 */
-#define VME_TYPE_MVME177 0x0177 /* Motorola MVME177 */
-#define VME_TYPE_BVME4000 0x4000 /* BVM Ltd. BVME4000 */
-#define VME_TYPE_BVME6000 0x6000 /* BVM Ltd. BVME6000 */
-
-/* BI_VME_BRDINFO is a 32 byte struct as returned by the Bug code on
- * Motorola VME boards. Contains board number, Bug version, board
- * configuration options, etc. See include/asm/mvme16xhw.h for details.
- */
-
-
- /*
- * Macintosh-specific tags (all u_long)
- */
-
-#define BI_MAC_MODEL 0x8000 /* Mac Gestalt ID (model type) */
-#define BI_MAC_VADDR 0x8001 /* Mac video base address */
-#define BI_MAC_VDEPTH 0x8002 /* Mac video depth */
-#define BI_MAC_VROW 0x8003 /* Mac video rowbytes */
-#define BI_MAC_VDIM 0x8004 /* Mac video dimensions */
-#define BI_MAC_VLOGICAL 0x8005 /* Mac video logical base */
-#define BI_MAC_SCCBASE 0x8006 /* Mac SCC base address */
-#define BI_MAC_BTIME 0x8007 /* Mac boot time */
-#define BI_MAC_GMTBIAS 0x8008 /* Mac GMT timezone offset */
-#define BI_MAC_MEMSIZE 0x8009 /* Mac RAM size (sanity check) */
-#define BI_MAC_CPUID 0x800a /* Mac CPU type (sanity check) */
-#define BI_MAC_ROMBASE 0x800b /* Mac system ROM base address */
-
- /*
- * Macintosh hardware profile data - unused, see macintosh.h for
- * reasonable type values
- */
-
-#define BI_MAC_VIA1BASE 0x8010 /* Mac VIA1 base address (always present) */
-#define BI_MAC_VIA2BASE 0x8011 /* Mac VIA2 base address (type varies) */
-#define BI_MAC_VIA2TYPE 0x8012 /* Mac VIA2 type (VIA, RBV, OSS) */
-#define BI_MAC_ADBTYPE 0x8013 /* Mac ADB interface type */
-#define BI_MAC_ASCBASE 0x8014 /* Mac Apple Sound Chip base address */
-#define BI_MAC_SCSI5380 0x8015 /* Mac NCR 5380 SCSI (base address, multi) */
-#define BI_MAC_SCSIDMA 0x8016 /* Mac SCSI DMA (base address) */
-#define BI_MAC_SCSI5396 0x8017 /* Mac NCR 53C96 SCSI (base address, multi) */
-#define BI_MAC_IDETYPE 0x8018 /* Mac IDE interface type */
-#define BI_MAC_IDEBASE 0x8019 /* Mac IDE interface base address */
-#define BI_MAC_NUBUS 0x801a /* Mac Nubus type (none, regular, pseudo) */
-#define BI_MAC_SLOTMASK 0x801b /* Mac Nubus slots present */
-#define BI_MAC_SCCTYPE 0x801c /* Mac SCC serial type (normal, IOP) */
-#define BI_MAC_ETHTYPE 0x801d /* Mac builtin ethernet type (Sonic, MACE */
-#define BI_MAC_ETHBASE 0x801e /* Mac builtin ethernet base address */
-#define BI_MAC_PMU 0x801f /* Mac power management / poweroff hardware */
-#define BI_MAC_IOP_SWIM 0x8020 /* Mac SWIM floppy IOP */
-#define BI_MAC_IOP_ADB 0x8021 /* Mac ADB IOP */
-
- /*
- * Mac: compatibility with old booter data format (temporarily)
- * Fields unused with the new bootinfo can be deleted now; instead of
- * adding new fields the struct might be splitted into a hardware address
- * part and a hardware type part
- */
-
-#ifndef __ASSEMBLY__
-
-struct mac_booter_data
-{
- unsigned long videoaddr;
- unsigned long videorow;
- unsigned long videodepth;
- unsigned long dimensions;
- unsigned long args;
- unsigned long boottime;
- unsigned long gmtbias;
- unsigned long bootver;
- unsigned long videological;
- unsigned long sccbase;
- unsigned long id;
- unsigned long memsize;
- unsigned long serialmf;
- unsigned long serialhsk;
- unsigned long serialgpi;
- unsigned long printmf;
- unsigned long printhsk;
- unsigned long printgpi;
- unsigned long cpuid;
- unsigned long rombase;
- unsigned long adbdelay;
- unsigned long timedbra;
-};
-
-extern struct mac_booter_data
- mac_bi_data;
-
+#ifdef CONFIG_BOOTINFO_PROC
+extern void save_bootinfo(const struct bi_record *bi);
+#else
+static inline void save_bootinfo(const struct bi_record *bi) {}
#endif
- /*
- * Apollo-specific tags
- */
-
-#define BI_APOLLO_MODEL 0x8000 /* model (u_long) */
-
- /*
- * HP300-specific tags
- */
-
-#define BI_HP300_MODEL 0x8000 /* model (u_long) */
-#define BI_HP300_UART_SCODE 0x8001 /* UART select code (u_long) */
-#define BI_HP300_UART_ADDR 0x8002 /* phys. addr of UART (u_long) */
-
- /*
- * Stuff for bootinfo interface versioning
- *
- * At the start of kernel code, a 'struct bootversion' is located.
- * bootstrap checks for a matching version of the interface before booting
- * a kernel, to avoid user confusion if kernel and bootstrap don't work
- * together :-)
- *
- * If incompatible changes are made to the bootinfo interface, the major
- * number below should be stepped (and the minor reset to 0) for the
- * appropriate machine. If a change is backward-compatible, the minor
- * should be stepped. "Backwards-compatible" means that booting will work,
- * but certain features may not.
- */
-
-#define BOOTINFOV_MAGIC 0x4249561A /* 'BIV^Z' */
-#define MK_BI_VERSION(major,minor) (((major)<<16)+(minor))
-#define BI_VERSION_MAJOR(v) (((v) >> 16) & 0xffff)
-#define BI_VERSION_MINOR(v) ((v) & 0xffff)
-
-#ifndef __ASSEMBLY__
-
-struct bootversion {
- unsigned short branch;
- unsigned long magic;
- struct {
- unsigned long machtype;
- unsigned long version;
- } machversions[0];
-};
-
#endif /* __ASSEMBLY__ */
-#define AMIGA_BOOTI_VERSION MK_BI_VERSION( 2, 0 )
-#define ATARI_BOOTI_VERSION MK_BI_VERSION( 2, 1 )
-#define MAC_BOOTI_VERSION MK_BI_VERSION( 2, 0 )
-#define MVME147_BOOTI_VERSION MK_BI_VERSION( 2, 0 )
-#define MVME16x_BOOTI_VERSION MK_BI_VERSION( 2, 0 )
-#define BVME6000_BOOTI_VERSION MK_BI_VERSION( 2, 0 )
-#define Q40_BOOTI_VERSION MK_BI_VERSION( 2, 0 )
-#define HP300_BOOTI_VERSION MK_BI_VERSION( 2, 0 )
-
-#ifdef BOOTINFO_COMPAT_1_0
-
- /*
- * Backwards compatibility with bootinfo interface version 1.0
- */
-
-#define COMPAT_AMIGA_BOOTI_VERSION MK_BI_VERSION( 1, 0 )
-#define COMPAT_ATARI_BOOTI_VERSION MK_BI_VERSION( 1, 0 )
-#define COMPAT_MAC_BOOTI_VERSION MK_BI_VERSION( 1, 0 )
-
-#include <linux/zorro.h>
-
-#define COMPAT_NUM_AUTO 16
-
-struct compat_bi_Amiga {
- int model;
- int num_autocon;
- struct ConfigDev autocon[COMPAT_NUM_AUTO];
- unsigned long chip_size;
- unsigned char vblank;
- unsigned char psfreq;
- unsigned long eclock;
- unsigned long chipset;
- unsigned long hw_present;
-};
-
-struct compat_bi_Atari {
- unsigned long hw_present;
- unsigned long mch_cookie;
-};
-
-#ifndef __ASSEMBLY__
-
-struct compat_bi_Macintosh
-{
- unsigned long videoaddr;
- unsigned long videorow;
- unsigned long videodepth;
- unsigned long dimensions;
- unsigned long args;
- unsigned long boottime;
- unsigned long gmtbias;
- unsigned long bootver;
- unsigned long videological;
- unsigned long sccbase;
- unsigned long id;
- unsigned long memsize;
- unsigned long serialmf;
- unsigned long serialhsk;
- unsigned long serialgpi;
- unsigned long printmf;
- unsigned long printhsk;
- unsigned long printgpi;
- unsigned long cpuid;
- unsigned long rombase;
- unsigned long adbdelay;
- unsigned long timedbra;
-};
-
-#endif
-
-struct compat_mem_info {
- unsigned long addr;
- unsigned long size;
-};
-
-#define COMPAT_NUM_MEMINFO 4
-
-#define COMPAT_CPUB_68020 0
-#define COMPAT_CPUB_68030 1
-#define COMPAT_CPUB_68040 2
-#define COMPAT_CPUB_68060 3
-#define COMPAT_FPUB_68881 5
-#define COMPAT_FPUB_68882 6
-#define COMPAT_FPUB_68040 7
-#define COMPAT_FPUB_68060 8
-
-#define COMPAT_CPU_68020 (1<<COMPAT_CPUB_68020)
-#define COMPAT_CPU_68030 (1<<COMPAT_CPUB_68030)
-#define COMPAT_CPU_68040 (1<<COMPAT_CPUB_68040)
-#define COMPAT_CPU_68060 (1<<COMPAT_CPUB_68060)
-#define COMPAT_CPU_MASK (31)
-#define COMPAT_FPU_68881 (1<<COMPAT_FPUB_68881)
-#define COMPAT_FPU_68882 (1<<COMPAT_FPUB_68882)
-#define COMPAT_FPU_68040 (1<<COMPAT_FPUB_68040)
-#define COMPAT_FPU_68060 (1<<COMPAT_FPUB_68060)
-#define COMPAT_FPU_MASK (0xfe0)
-
-#define COMPAT_CL_SIZE (256)
-
-struct compat_bootinfo {
- unsigned long machtype;
- unsigned long cputype;
- struct compat_mem_info memory[COMPAT_NUM_MEMINFO];
- int num_memory;
- unsigned long ramdisk_size;
- unsigned long ramdisk_addr;
- char command_line[COMPAT_CL_SIZE];
- union {
- struct compat_bi_Amiga bi_ami;
- struct compat_bi_Atari bi_ata;
- struct compat_bi_Macintosh bi_mac;
- } bi_un;
-};
-
-#define bi_amiga bi_un.bi_ami
-#define bi_atari bi_un.bi_ata
-#define bi_mac bi_un.bi_mac
-
-#endif /* BOOTINFO_COMPAT_1_0 */
-
#endif /* _M68K_BOOTINFO_H */
diff --git a/arch/m68k/include/asm/hp300hw.h b/arch/m68k/include/asm/hp300hw.h
index d998ea6..64f5271 100644
--- a/arch/m68k/include/asm/hp300hw.h
+++ b/arch/m68k/include/asm/hp300hw.h
@@ -1,25 +1,9 @@
#ifndef _M68K_HP300HW_H
#define _M68K_HP300HW_H
-extern unsigned long hp300_model;
+#include <asm/bootinfo-hp300.h>
-/* This information was taken from NetBSD */
-#define HP_320 (0) /* 16MHz 68020+HP MMU+16K external cache */
-#define HP_330 (1) /* 16MHz 68020+68851 MMU */
-#define HP_340 (2) /* 16MHz 68030 */
-#define HP_345 (3) /* 50MHz 68030+32K external cache */
-#define HP_350 (4) /* 25MHz 68020+HP MMU+32K external cache */
-#define HP_360 (5) /* 25MHz 68030 */
-#define HP_370 (6) /* 33MHz 68030+64K external cache */
-#define HP_375 (7) /* 50MHz 68030+32K external cache */
-#define HP_380 (8) /* 25MHz 68040 */
-#define HP_385 (9) /* 33MHz 68040 */
-#define HP_400 (10) /* 50MHz 68030+32K external cache */
-#define HP_425T (11) /* 25MHz 68040 - model 425t */
-#define HP_425S (12) /* 25MHz 68040 - model 425s */
-#define HP_425E (13) /* 25MHz 68040 - model 425e */
-#define HP_433T (14) /* 33MHz 68040 - model 433t */
-#define HP_433S (15) /* 33MHz 68040 - model 433s */
+extern unsigned long hp300_model;
#endif /* _M68K_HP300HW_H */
diff --git a/arch/m68k/include/asm/kexec.h b/arch/m68k/include/asm/kexec.h
new file mode 100644
index 0000000..3df97ab
--- /dev/null
+++ b/arch/m68k/include/asm/kexec.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_M68K_KEXEC_H
+#define _ASM_M68K_KEXEC_H
+
+#ifdef CONFIG_KEXEC
+
+/* Maximum physical address we can use pages from */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+/* Maximum address we can reach in physical address mode */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+/* Maximum address we can use for the control code buffer */
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+
+#define KEXEC_CONTROL_PAGE_SIZE 4096
+
+#define KEXEC_ARCH KEXEC_ARCH_68K
+
+#ifndef __ASSEMBLY__
+
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ /* Dummy implementation for now */
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_KEXEC */
+
+#endif /* _ASM_M68K_KEXEC_H */
diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h
index aeeedf8..fe3fc9a 100644
--- a/arch/m68k/include/asm/mac_via.h
+++ b/arch/m68k/include/asm/mac_via.h
@@ -254,6 +254,8 @@
extern volatile __u8 *via1,*via2;
extern int rbv_present,via_alt_mapping;
+struct irq_desc;
+
extern void via_register_interrupts(void);
extern void via_irq_enable(int);
extern void via_irq_disable(int);
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 682a1a2..d323b2c 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -4,6 +4,9 @@
#include <linux/seq_file.h>
#include <linux/interrupt.h>
+#include <asm/bootinfo-mac.h>
+
+
/*
* Apple Macintoshisms
*/
@@ -74,65 +77,29 @@ struct mac_model
#define MAC_FLOPPY_SWIM_IOP 3
#define MAC_FLOPPY_AV 4
-/*
- * Gestalt numbers
- */
+extern struct mac_model *macintosh_config;
-#define MAC_MODEL_II 6
-#define MAC_MODEL_IIX 7
-#define MAC_MODEL_IICX 8
-#define MAC_MODEL_SE30 9
-#define MAC_MODEL_IICI 11
-#define MAC_MODEL_IIFX 13 /* And well numbered it is too */
-#define MAC_MODEL_IISI 18
-#define MAC_MODEL_LC 19
-#define MAC_MODEL_Q900 20
-#define MAC_MODEL_PB170 21
-#define MAC_MODEL_Q700 22
-#define MAC_MODEL_CLII 23 /* aka: P200 */
-#define MAC_MODEL_PB140 25
-#define MAC_MODEL_Q950 26 /* aka: WGS95 */
-#define MAC_MODEL_LCIII 27 /* aka: P450 */
-#define MAC_MODEL_PB210 29
-#define MAC_MODEL_C650 30
-#define MAC_MODEL_PB230 32
-#define MAC_MODEL_PB180 33
-#define MAC_MODEL_PB160 34
-#define MAC_MODEL_Q800 35 /* aka: WGS80 */
-#define MAC_MODEL_Q650 36
-#define MAC_MODEL_LCII 37 /* aka: P400/405/410/430 */
-#define MAC_MODEL_PB250 38
-#define MAC_MODEL_IIVI 44
-#define MAC_MODEL_P600 45 /* aka: P600CD */
-#define MAC_MODEL_IIVX 48
-#define MAC_MODEL_CCL 49 /* aka: P250 */
-#define MAC_MODEL_PB165C 50
-#define MAC_MODEL_C610 52 /* aka: WGS60 */
-#define MAC_MODEL_Q610 53
-#define MAC_MODEL_PB145 54 /* aka: PB145B */
-#define MAC_MODEL_P520 56 /* aka: LC520 */
-#define MAC_MODEL_C660 60
-#define MAC_MODEL_P460 62 /* aka: LCIII+, P466/P467 */
-#define MAC_MODEL_PB180C 71
-#define MAC_MODEL_PB520 72 /* aka: PB520C, PB540, PB540C, PB550C */
-#define MAC_MODEL_PB270C 77
-#define MAC_MODEL_Q840 78
-#define MAC_MODEL_P550 80 /* aka: LC550, P560 */
-#define MAC_MODEL_CCLII 83 /* aka: P275 */
-#define MAC_MODEL_PB165 84
-#define MAC_MODEL_PB190 85 /* aka: PB190CS */
-#define MAC_MODEL_TV 88
-#define MAC_MODEL_P475 89 /* aka: LC475, P476 */
-#define MAC_MODEL_P475F 90 /* aka: P475 w/ FPU (no LC040) */
-#define MAC_MODEL_P575 92 /* aka: LC575, P577/P578 */
-#define MAC_MODEL_Q605 94
-#define MAC_MODEL_Q605_ACC 95 /* Q605 accelerated to 33 MHz */
-#define MAC_MODEL_Q630 98 /* aka: LC630, P630/631/635/636/637/638/640 */
-#define MAC_MODEL_P588 99 /* aka: LC580, P580 */
-#define MAC_MODEL_PB280 102
-#define MAC_MODEL_PB280C 103
-#define MAC_MODEL_PB150 115
-extern struct mac_model *macintosh_config;
+ /*
+ * Internal representation of the Mac hardware, filled in from bootinfo
+ */
+
+struct mac_booter_data
+{
+ unsigned long videoaddr;
+ unsigned long videorow;
+ unsigned long videodepth;
+ unsigned long dimensions;
+ unsigned long boottime;
+ unsigned long gmtbias;
+ unsigned long videological;
+ unsigned long sccbase;
+ unsigned long id;
+ unsigned long memsize;
+ unsigned long cpuid;
+ unsigned long rombase;
+};
+
+extern struct mac_booter_data mac_bi_data;
#endif
diff --git a/arch/m68k/include/asm/mc146818rtc.h b/arch/m68k/include/asm/mc146818rtc.h
index 9f70a01..05b43bf 100644
--- a/arch/m68k/include/asm/mc146818rtc.h
+++ b/arch/m68k/include/asm/mc146818rtc.h
@@ -10,16 +10,16 @@
#include <asm/atarihw.h>
-#define RTC_PORT(x) (TT_RTC_BAS + 2*(x))
+#define ATARI_RTC_PORT(x) (TT_RTC_BAS + 2*(x))
#define RTC_ALWAYS_BCD 0
#define CMOS_READ(addr) ({ \
-atari_outb_p((addr),RTC_PORT(0)); \
-atari_inb_p(RTC_PORT(1)); \
+atari_outb_p((addr), ATARI_RTC_PORT(0)); \
+atari_inb_p(ATARI_RTC_PORT(1)); \
})
#define CMOS_WRITE(val, addr) ({ \
-atari_outb_p((addr),RTC_PORT(0)); \
-atari_outb_p((val),RTC_PORT(1)); \
+atari_outb_p((addr), ATARI_RTC_PORT(0)); \
+atari_outb_p((val), ATARI_RTC_PORT(1)); \
})
#endif /* CONFIG_ATARI */
diff --git a/arch/m68k/include/asm/mvme16xhw.h b/arch/m68k/include/asm/mvme16xhw.h
index 6117f56..1eb89de 100644
--- a/arch/m68k/include/asm/mvme16xhw.h
+++ b/arch/m68k/include/asm/mvme16xhw.h
@@ -3,23 +3,6 @@
#include <asm/irq.h>
-/* Board ID data structure - pointer to this retrieved from Bug by head.S */
-
-/* Note, bytes 12 and 13 are board no in BCD (0162,0166,0167,0177,etc) */
-
-extern long mvme_bdid_ptr;
-
-typedef struct {
- char bdid[4];
- u_char rev, mth, day, yr;
- u_short size, reserved;
- u_short brdno;
- char brdsuffix[2];
- u_long options;
- u_short clun, dlun, ctype, dnum;
- u_long option2;
-} t_bdid, *p_bdid;
-
typedef struct {
u_char ack_icr,
diff --git a/arch/m68k/include/asm/setup.h b/arch/m68k/include/asm/setup.h
index 65e78a2d..8f2023f 100644
--- a/arch/m68k/include/asm/setup.h
+++ b/arch/m68k/include/asm/setup.h
@@ -22,6 +22,7 @@
#ifndef _M68K_SETUP_H
#define _M68K_SETUP_H
+#include <uapi/asm/bootinfo.h>
#include <uapi/asm/setup.h>
@@ -297,14 +298,14 @@ extern int m68k_is040or060;
#define NUM_MEMINFO 4
#ifndef __ASSEMBLY__
-struct mem_info {
+struct m68k_mem_info {
unsigned long addr; /* physical address of memory chunk */
unsigned long size; /* length of memory chunk (in bytes) */
};
extern int m68k_num_memory; /* # of memory blocks found (and used) */
extern int m68k_realnum_memory; /* real # of memory blocks found */
-extern struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */
+extern struct m68k_mem_info m68k_memory[NUM_MEMINFO];/* memory description */
#endif
#endif /* _M68K_SETUP_H */
diff --git a/arch/m68k/include/asm/timex.h b/arch/m68k/include/asm/timex.h
index 6759dad..efc1f48 100644
--- a/arch/m68k/include/asm/timex.h
+++ b/arch/m68k/include/asm/timex.h
@@ -28,4 +28,14 @@ static inline cycles_t get_cycles(void)
return 0;
}
+extern unsigned long (*mach_random_get_entropy)(void);
+
+static inline unsigned long random_get_entropy(void)
+{
+ if (mach_random_get_entropy)
+ return mach_random_get_entropy();
+ return 0;
+}
+#define random_get_entropy random_get_entropy
+
#endif
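
The timex.h hunk above adds an optional, machine-specific entropy hook: random_get_entropy() returns 0 unless a platform assigns mach_random_get_entropy. A minimal sketch of how a platform's config code might wire this up (illustrative only; the platform name, register address, and helper below are hypothetical and not part of this patch):

	/* Hypothetical example: feed a free-running hardware counter into
	 * random_get_entropy().  FOO_TIMER_REG is a made-up memory-mapped
	 * counter address; config_foo() stands in for a board's setup code.
	 */
	#include <linux/init.h>
	#include <asm/timex.h>

	#define FOO_TIMER_REG	0xfff00000UL	/* hypothetical counter */

	static unsigned long foo_random_get_entropy(void)
	{
		return *(volatile unsigned long *)FOO_TIMER_REG;
	}

	void __init config_foo(void)
	{
		mach_random_get_entropy = foo_random_get_entropy;
	}
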
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index 1fef45a..6a2d257 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -11,6 +11,14 @@ generic-y += termbits.h
generic-y += termios.h
header-y += a.out.h
+header-y += bootinfo.h
+header-y += bootinfo-amiga.h
+header-y += bootinfo-apollo.h
+header-y += bootinfo-atari.h
+header-y += bootinfo-hp300.h
+header-y += bootinfo-mac.h
+header-y += bootinfo-q40.h
+header-y += bootinfo-vme.h
header-y += byteorder.h
header-y += cachectl.h
header-y += fcntl.h
diff --git a/arch/m68k/include/uapi/asm/bootinfo-amiga.h b/arch/m68k/include/uapi/asm/bootinfo-amiga.h
new file mode 100644
index 0000000..daad3c5
--- /dev/null
+++ b/arch/m68k/include/uapi/asm/bootinfo-amiga.h
@@ -0,0 +1,63 @@
+/*
+** asm/bootinfo-amiga.h -- Amiga-specific boot information definitions
+*/
+
+#ifndef _UAPI_ASM_M68K_BOOTINFO_AMIGA_H
+#define _UAPI_ASM_M68K_BOOTINFO_AMIGA_H
+
+
+ /*
+ * Amiga-specific tags
+ */
+
+#define BI_AMIGA_MODEL 0x8000 /* model (__be32) */
+#define BI_AMIGA_AUTOCON 0x8001 /* AutoConfig device */
+ /* (AmigaOS struct ConfigDev) */
+#define BI_AMIGA_CHIP_SIZE 0x8002 /* size of Chip RAM (__be32) */
+#define BI_AMIGA_VBLANK 0x8003 /* VBLANK frequency (__u8) */
+#define BI_AMIGA_PSFREQ 0x8004 /* power supply frequency (__u8) */
+#define BI_AMIGA_ECLOCK 0x8005 /* EClock frequency (__be32) */
+#define BI_AMIGA_CHIPSET 0x8006 /* native chipset present (__be32) */
+#define BI_AMIGA_SERPER 0x8007 /* serial port period (__be16) */
+
+
+ /*
+ * Amiga models (BI_AMIGA_MODEL)
+ */
+
+#define AMI_UNKNOWN 0
+#define AMI_500 1
+#define AMI_500PLUS 2
+#define AMI_600 3
+#define AMI_1000 4
+#define AMI_1200 5
+#define AMI_2000 6
+#define AMI_2500 7
+#define AMI_3000 8
+#define AMI_3000T 9
+#define AMI_3000PLUS 10
+#define AMI_4000 11
+#define AMI_4000T 12
+#define AMI_CDTV 13
+#define AMI_CD32 14
+#define AMI_DRACO 15
+
+
+ /*
+ * Amiga chipsets (BI_AMIGA_CHIPSET)
+ */
+
+#define CS_STONEAGE 0
+#define CS_OCS 1
+#define CS_ECS 2
+#define CS_AGA 3
+
+
+ /*
+ * Latest Amiga bootinfo version
+ */
+
+#define AMIGA_BOOTI_VERSION MK_BI_VERSION(2, 0)
+
+
+#endif /* _UAPI_ASM_M68K_BOOTINFO_AMIGA_H */
diff --git a/arch/m68k/include/uapi/asm/bootinfo-apollo.h b/arch/m68k/include/uapi/asm/bootinfo-apollo.h
new file mode 100644
index 0000000..a93e0af
--- /dev/null
+++ b/arch/m68k/include/uapi/asm/bootinfo-apollo.h
@@ -0,0 +1,28 @@
+/*
+** asm/bootinfo-apollo.h -- Apollo-specific boot information definitions
+*/
+
+#ifndef _UAPI_ASM_M68K_BOOTINFO_APOLLO_H
+#define _UAPI_ASM_M68K_BOOTINFO_APOLLO_H
+
+
+ /*
+ * Apollo-specific tags
+ */
+
+#define BI_APOLLO_MODEL 0x8000 /* model (__be32) */
+
+
+ /*
+ * Apollo models (BI_APOLLO_MODEL)
+ */
+
+#define APOLLO_UNKNOWN 0
+#define APOLLO_DN3000 1
+#define APOLLO_DN3010 2
+#define APOLLO_DN3500 3
+#define APOLLO_DN4000 4
+#define APOLLO_DN4500 5
+
+
+#endif /* _UAPI_ASM_M68K_BOOTINFO_APOLLO_H */
diff --git a/arch/m68k/include/uapi/asm/bootinfo-atari.h b/arch/m68k/include/uapi/asm/bootinfo-atari.h
new file mode 100644
index 0000000..a817854
--- /dev/null
+++ b/arch/m68k/include/uapi/asm/bootinfo-atari.h
@@ -0,0 +1,44 @@
+/*
+** asm/bootinfo-atari.h -- Atari-specific boot information definitions
+*/
+
+#ifndef _UAPI_ASM_M68K_BOOTINFO_ATARI_H
+#define _UAPI_ASM_M68K_BOOTINFO_ATARI_H
+
+
+ /*
+ * Atari-specific tags
+ */
+
+#define BI_ATARI_MCH_COOKIE 0x8000 /* _MCH cookie from TOS (__be32) */
+#define BI_ATARI_MCH_TYPE 0x8001 /* special machine type (__be32) */
+
+
+ /*
+ * mch_cookie values (upper word of BI_ATARI_MCH_COOKIE)
+ */
+
+#define ATARI_MCH_ST 0
+#define ATARI_MCH_STE 1
+#define ATARI_MCH_TT 2
+#define ATARI_MCH_FALCON 3
+
+
+ /*
+ * Atari machine types (BI_ATARI_MCH_TYPE)
+ */
+
+#define ATARI_MACH_NORMAL 0 /* no special machine type */
+#define ATARI_MACH_MEDUSA 1 /* Medusa 040 */
+#define ATARI_MACH_HADES 2 /* Hades 040 or 060 */
+#define ATARI_MACH_AB40 3 /* Afterburner040 on Falcon */
+
+
+ /*
+ * Latest Atari bootinfo version
+ */
+
+#define ATARI_BOOTI_VERSION MK_BI_VERSION(2, 1)
+
+
+#endif /* _UAPI_ASM_M68K_BOOTINFO_ATARI_H */
diff --git a/arch/m68k/include/uapi/asm/bootinfo-hp300.h b/arch/m68k/include/uapi/asm/bootinfo-hp300.h
new file mode 100644
index 0000000..c90cb71
--- /dev/null
+++ b/arch/m68k/include/uapi/asm/bootinfo-hp300.h
@@ -0,0 +1,50 @@
+/*
+** asm/bootinfo-hp300.h -- HP9000/300-specific boot information definitions
+*/
+
+#ifndef _UAPI_ASM_M68K_BOOTINFO_HP300_H
+#define _UAPI_ASM_M68K_BOOTINFO_HP300_H
+
+
+ /*
+ * HP9000/300-specific tags
+ */
+
+#define BI_HP300_MODEL 0x8000 /* model (__be32) */
+#define BI_HP300_UART_SCODE 0x8001 /* UART select code (__be32) */
+#define BI_HP300_UART_ADDR 0x8002 /* phys. addr of UART (__be32) */
+
+
+ /*
+ * HP9000/300 and /400 models (BI_HP300_MODEL)
+ *
+ * This information was taken from NetBSD
+ */
+
+#define HP_320 0 /* 16MHz 68020+HP MMU+16K external cache */
+#define HP_330 1 /* 16MHz 68020+68851 MMU */
+#define HP_340 2 /* 16MHz 68030 */
+#define HP_345 3 /* 50MHz 68030+32K external cache */
+#define HP_350 4 /* 25MHz 68020+HP MMU+32K external cache */
+#define HP_360 5 /* 25MHz 68030 */
+#define HP_370 6 /* 33MHz 68030+64K external cache */
+#define HP_375 7 /* 50MHz 68030+32K external cache */
+#define HP_380 8 /* 25MHz 68040 */
+#define HP_385 9 /* 33MHz 68040 */
+
+#define HP_400 10 /* 50MHz 68030+32K external cache */
+#define HP_425T 11 /* 25MHz 68040 - model 425t */
+#define HP_425S 12 /* 25MHz 68040 - model 425s */
+#define HP_425E 13 /* 25MHz 68040 - model 425e */
+#define HP_433T 14 /* 33MHz 68040 - model 433t */
+#define HP_433S 15 /* 33MHz 68040 - model 433s */
+
+
+ /*
+ * Latest HP9000/300 bootinfo version
+ */
+
+#define HP300_BOOTI_VERSION MK_BI_VERSION(2, 0)
+
+
+#endif /* _UAPI_ASM_M68K_BOOTINFO_HP300_H */
diff --git a/arch/m68k/include/uapi/asm/bootinfo-mac.h b/arch/m68k/include/uapi/asm/bootinfo-mac.h
new file mode 100644
index 0000000..b44ff73
--- /dev/null
+++ b/arch/m68k/include/uapi/asm/bootinfo-mac.h
@@ -0,0 +1,119 @@
+/*
+** asm/bootinfo-mac.h -- Macintosh-specific boot information definitions
+*/
+
+#ifndef _UAPI_ASM_M68K_BOOTINFO_MAC_H
+#define _UAPI_ASM_M68K_BOOTINFO_MAC_H
+
+
+ /*
+ * Macintosh-specific tags (all __be32)
+ */
+
+#define BI_MAC_MODEL 0x8000 /* Mac Gestalt ID (model type) */
+#define BI_MAC_VADDR 0x8001 /* Mac video base address */
+#define BI_MAC_VDEPTH 0x8002 /* Mac video depth */
+#define BI_MAC_VROW 0x8003 /* Mac video rowbytes */
+#define BI_MAC_VDIM 0x8004 /* Mac video dimensions */
+#define BI_MAC_VLOGICAL 0x8005 /* Mac video logical base */
+#define BI_MAC_SCCBASE 0x8006 /* Mac SCC base address */
+#define BI_MAC_BTIME 0x8007 /* Mac boot time */
+#define BI_MAC_GMTBIAS 0x8008 /* Mac GMT timezone offset */
+#define BI_MAC_MEMSIZE 0x8009 /* Mac RAM size (sanity check) */
+#define BI_MAC_CPUID 0x800a /* Mac CPU type (sanity check) */
+#define BI_MAC_ROMBASE 0x800b /* Mac system ROM base address */
+
+
+ /*
+ * Macintosh hardware profile data - unused, see macintosh.h for
+ * reasonable type values
+ */
+
+#define BI_MAC_VIA1BASE 0x8010 /* Mac VIA1 base address (always present) */
+#define BI_MAC_VIA2BASE 0x8011 /* Mac VIA2 base address (type varies) */
+#define BI_MAC_VIA2TYPE 0x8012 /* Mac VIA2 type (VIA, RBV, OSS) */
+#define BI_MAC_ADBTYPE 0x8013 /* Mac ADB interface type */
+#define BI_MAC_ASCBASE 0x8014 /* Mac Apple Sound Chip base address */
+#define BI_MAC_SCSI5380 0x8015 /* Mac NCR 5380 SCSI (base address, multi) */
+#define BI_MAC_SCSIDMA 0x8016 /* Mac SCSI DMA (base address) */
+#define BI_MAC_SCSI5396 0x8017 /* Mac NCR 53C96 SCSI (base address, multi) */
+#define BI_MAC_IDETYPE 0x8018 /* Mac IDE interface type */
+#define BI_MAC_IDEBASE 0x8019 /* Mac IDE interface base address */
+#define BI_MAC_NUBUS 0x801a /* Mac Nubus type (none, regular, pseudo) */
+#define BI_MAC_SLOTMASK 0x801b /* Mac Nubus slots present */
+#define BI_MAC_SCCTYPE 0x801c /* Mac SCC serial type (normal, IOP) */
+#define BI_MAC_ETHTYPE 0x801d /* Mac builtin ethernet type (Sonic, MACE */
+#define BI_MAC_ETHBASE 0x801e /* Mac builtin ethernet base address */
+#define BI_MAC_PMU 0x801f /* Mac power management / poweroff hardware */
+#define BI_MAC_IOP_SWIM 0x8020 /* Mac SWIM floppy IOP */
+#define BI_MAC_IOP_ADB 0x8021 /* Mac ADB IOP */
+
+
+ /*
+ * Macintosh Gestalt numbers (BI_MAC_MODEL)
+ */
+
+#define MAC_MODEL_II 6
+#define MAC_MODEL_IIX 7
+#define MAC_MODEL_IICX 8
+#define MAC_MODEL_SE30 9
+#define MAC_MODEL_IICI 11
+#define MAC_MODEL_IIFX 13 /* And well numbered it is too */
+#define MAC_MODEL_IISI 18
+#define MAC_MODEL_LC 19
+#define MAC_MODEL_Q900 20
+#define MAC_MODEL_PB170 21
+#define MAC_MODEL_Q700 22
+#define MAC_MODEL_CLII 23 /* aka: P200 */
+#define MAC_MODEL_PB140 25
+#define MAC_MODEL_Q950 26 /* aka: WGS95 */
+#define MAC_MODEL_LCIII 27 /* aka: P450 */
+#define MAC_MODEL_PB210 29
+#define MAC_MODEL_C650 30
+#define MAC_MODEL_PB230 32
+#define MAC_MODEL_PB180 33
+#define MAC_MODEL_PB160 34
+#define MAC_MODEL_Q800 35 /* aka: WGS80 */
+#define MAC_MODEL_Q650 36
+#define MAC_MODEL_LCII 37 /* aka: P400/405/410/430 */
+#define MAC_MODEL_PB250 38
+#define MAC_MODEL_IIVI 44
+#define MAC_MODEL_P600 45 /* aka: P600CD */
+#define MAC_MODEL_IIVX 48
+#define MAC_MODEL_CCL 49 /* aka: P250 */
+#define MAC_MODEL_PB165C 50
+#define MAC_MODEL_C610 52 /* aka: WGS60 */
+#define MAC_MODEL_Q610 53
+#define MAC_MODEL_PB145 54 /* aka: PB145B */
+#define MAC_MODEL_P520 56 /* aka: LC520 */
+#define MAC_MODEL_C660 60
+#define MAC_MODEL_P460 62 /* aka: LCIII+, P466/P467 */
+#define MAC_MODEL_PB180C 71
+#define MAC_MODEL_PB520 72 /* aka: PB520C, PB540, PB540C, PB550C */
+#define MAC_MODEL_PB270C 77
+#define MAC_MODEL_Q840 78
+#define MAC_MODEL_P550 80 /* aka: LC550, P560 */
+#define MAC_MODEL_CCLII 83 /* aka: P275 */
+#define MAC_MODEL_PB165 84
+#define MAC_MODEL_PB190 85 /* aka: PB190CS */
+#define MAC_MODEL_TV 88
+#define MAC_MODEL_P475 89 /* aka: LC475, P476 */
+#define MAC_MODEL_P475F 90 /* aka: P475 w/ FPU (no LC040) */
+#define MAC_MODEL_P575 92 /* aka: LC575, P577/P578 */
+#define MAC_MODEL_Q605 94
+#define MAC_MODEL_Q605_ACC 95 /* Q605 accelerated to 33 MHz */
+#define MAC_MODEL_Q630 98 /* aka: LC630, P630/631/635/636/637/638/640 */
+#define MAC_MODEL_P588 99 /* aka: LC580, P580 */
+#define MAC_MODEL_PB280 102
+#define MAC_MODEL_PB280C 103
+#define MAC_MODEL_PB150 115
+
+
+ /*
+ * Latest Macintosh bootinfo version
+ */
+
+#define MAC_BOOTI_VERSION MK_BI_VERSION(2, 0)
+
+
+#endif /* _UAPI_ASM_M68K_BOOTINFO_MAC_H */
diff --git a/arch/m68k/include/uapi/asm/bootinfo-q40.h b/arch/m68k/include/uapi/asm/bootinfo-q40.h
new file mode 100644
index 0000000..c79fea7
--- /dev/null
+++ b/arch/m68k/include/uapi/asm/bootinfo-q40.h
@@ -0,0 +1,16 @@
+/*
+** asm/bootinfo-q40.h -- Q40-specific boot information definitions
+*/
+
+#ifndef _UAPI_ASM_M68K_BOOTINFO_Q40_H
+#define _UAPI_ASM_M68K_BOOTINFO_Q40_H
+
+
+ /*
+ * Latest Q40 bootinfo version
+ */
+
+#define Q40_BOOTI_VERSION MK_BI_VERSION(2, 0)
+
+
+#endif /* _UAPI_ASM_M68K_BOOTINFO_Q40_H */
diff --git a/arch/m68k/include/uapi/asm/bootinfo-vme.h b/arch/m68k/include/uapi/asm/bootinfo-vme.h
new file mode 100644
index 0000000..a135eb4
--- /dev/null
+++ b/arch/m68k/include/uapi/asm/bootinfo-vme.h
@@ -0,0 +1,70 @@
+/*
+** asm/bootinfo-vme.h -- VME-specific boot information definitions
+*/
+
+#ifndef _UAPI_ASM_M68K_BOOTINFO_VME_H
+#define _UAPI_ASM_M68K_BOOTINFO_VME_H
+
+
+#include <linux/types.h>
+
+
+ /*
+ * VME-specific tags
+ */
+
+#define BI_VME_TYPE 0x8000 /* VME sub-architecture (__be32) */
+#define BI_VME_BRDINFO 0x8001 /* VME board information (struct) */
+
+
+ /*
+ * VME models (BI_VME_TYPE)
+ */
+
+#define VME_TYPE_TP34V 0x0034 /* Tadpole TP34V */
+#define VME_TYPE_MVME147 0x0147 /* Motorola MVME147 */
+#define VME_TYPE_MVME162 0x0162 /* Motorola MVME162 */
+#define VME_TYPE_MVME166 0x0166 /* Motorola MVME166 */
+#define VME_TYPE_MVME167 0x0167 /* Motorola MVME167 */
+#define VME_TYPE_MVME172 0x0172 /* Motorola MVME172 */
+#define VME_TYPE_MVME177 0x0177 /* Motorola MVME177 */
+#define VME_TYPE_BVME4000 0x4000 /* BVM Ltd. BVME4000 */
+#define VME_TYPE_BVME6000 0x6000 /* BVM Ltd. BVME6000 */
+
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Board ID data structure - pointer to this retrieved from Bug by head.S
+ *
+ * BI_VME_BRDINFO is a 32 byte struct as returned by the Bug code on
+ * Motorola VME boards. Contains board number, Bug version, board
+ * configuration options, etc.
+ *
+ * Note, bytes 12 and 13 are board no in BCD (0162,0166,0167,0177,etc)
+ */
+
+typedef struct {
+ char bdid[4];
+ __u8 rev, mth, day, yr;
+ __be16 size, reserved;
+ __be16 brdno;
+ char brdsuffix[2];
+ __be32 options;
+ __be16 clun, dlun, ctype, dnum;
+ __be32 option2;
+} t_bdid, *p_bdid;
+
+#endif /* __ASSEMBLY__ */
+
+
+ /*
+ * Latest VME bootinfo versions
+ */
+
+#define MVME147_BOOTI_VERSION MK_BI_VERSION(2, 0)
+#define MVME16x_BOOTI_VERSION MK_BI_VERSION(2, 0)
+#define BVME6000_BOOTI_VERSION MK_BI_VERSION(2, 0)
+
+
+#endif /* _UAPI_ASM_M68K_BOOTINFO_VME_H */
diff --git a/arch/m68k/include/uapi/asm/bootinfo.h b/arch/m68k/include/uapi/asm/bootinfo.h
new file mode 100644
index 0000000..cdeb26a0
--- /dev/null
+++ b/arch/m68k/include/uapi/asm/bootinfo.h
@@ -0,0 +1,174 @@
+/*
+ * asm/bootinfo.h -- Definition of the Linux/m68k boot information structure
+ *
+ * Copyright 1992 by Greg Harp
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _UAPI_ASM_M68K_BOOTINFO_H
+#define _UAPI_ASM_M68K_BOOTINFO_H
+
+
+#include <linux/types.h>
+
+
+#ifndef __ASSEMBLY__
+
+ /*
+ * Bootinfo definitions
+ *
+ * This is an easily parsable and extendable structure containing all
+ * information to be passed from the bootstrap to the kernel.
+ *
+ * This way I hope to keep all future changes back/forewards compatible.
+ * Thus, keep your fingers crossed...
+ *
+ * This structure is copied right after the kernel by the bootstrap
+ * routine.
+ */
+
+struct bi_record {
+ __be16 tag; /* tag ID */
+ __be16 size; /* size of record (in bytes) */
+ __be32 data[0]; /* data */
+};
+
+
+struct mem_info {
+ __be32 addr; /* physical address of memory chunk */
+ __be32 size; /* length of memory chunk (in bytes) */
+};
+
+#endif /* __ASSEMBLY__ */
+
+
+ /*
+ * Tag Definitions
+ *
+ * Machine independent tags start counting from 0x0000
+ * Machine dependent tags start counting from 0x8000
+ */
+
+#define BI_LAST 0x0000 /* last record (sentinel) */
+#define BI_MACHTYPE 0x0001 /* machine type (__be32) */
+#define BI_CPUTYPE 0x0002 /* cpu type (__be32) */
+#define BI_FPUTYPE 0x0003 /* fpu type (__be32) */
+#define BI_MMUTYPE 0x0004 /* mmu type (__be32) */
+#define BI_MEMCHUNK 0x0005 /* memory chunk address and size */
+ /* (struct mem_info) */
+#define BI_RAMDISK 0x0006 /* ramdisk address and size */
+ /* (struct mem_info) */
+#define BI_COMMAND_LINE 0x0007 /* kernel command line parameters */
+ /* (string) */
+
+
+ /*
+ * Linux/m68k Architectures (BI_MACHTYPE)
+ */
+
+#define MACH_AMIGA 1
+#define MACH_ATARI 2
+#define MACH_MAC 3
+#define MACH_APOLLO 4
+#define MACH_SUN3 5
+#define MACH_MVME147 6
+#define MACH_MVME16x 7
+#define MACH_BVME6000 8
+#define MACH_HP300 9
+#define MACH_Q40 10
+#define MACH_SUN3X 11
+#define MACH_M54XX 12
+
+
+ /*
+ * CPU, FPU and MMU types (BI_CPUTYPE, BI_FPUTYPE, BI_MMUTYPE)
+ *
+ * Note: we may rely on the following equalities:
+ *
+ * CPU_68020 == MMU_68851
+ * CPU_68030 == MMU_68030
+ * CPU_68040 == FPU_68040 == MMU_68040
+ * CPU_68060 == FPU_68060 == MMU_68060
+ */
+
+#define CPUB_68020 0
+#define CPUB_68030 1
+#define CPUB_68040 2
+#define CPUB_68060 3
+#define CPUB_COLDFIRE 4
+
+#define CPU_68020 (1 << CPUB_68020)
+#define CPU_68030 (1 << CPUB_68030)
+#define CPU_68040 (1 << CPUB_68040)
+#define CPU_68060 (1 << CPUB_68060)
+#define CPU_COLDFIRE (1 << CPUB_COLDFIRE)
+
+#define FPUB_68881 0
+#define FPUB_68882 1
+#define FPUB_68040 2 /* Internal FPU */
+#define FPUB_68060 3 /* Internal FPU */
+#define FPUB_SUNFPA 4 /* Sun-3 FPA */
+#define FPUB_COLDFIRE 5 /* ColdFire FPU */
+
+#define FPU_68881 (1 << FPUB_68881)
+#define FPU_68882 (1 << FPUB_68882)
+#define FPU_68040 (1 << FPUB_68040)
+#define FPU_68060 (1 << FPUB_68060)
+#define FPU_SUNFPA (1 << FPUB_SUNFPA)
+#define FPU_COLDFIRE (1 << FPUB_COLDFIRE)
+
+#define MMUB_68851 0
+#define MMUB_68030 1 /* Internal MMU */
+#define MMUB_68040 2 /* Internal MMU */
+#define MMUB_68060 3 /* Internal MMU */
+#define MMUB_APOLLO 4 /* Custom Apollo */
+#define MMUB_SUN3 5 /* Custom Sun-3 */
+#define MMUB_COLDFIRE 6 /* Internal MMU */
+
+#define MMU_68851 (1 << MMUB_68851)
+#define MMU_68030 (1 << MMUB_68030)
+#define MMU_68040 (1 << MMUB_68040)
+#define MMU_68060 (1 << MMUB_68060)
+#define MMU_SUN3 (1 << MMUB_SUN3)
+#define MMU_APOLLO (1 << MMUB_APOLLO)
+#define MMU_COLDFIRE (1 << MMUB_COLDFIRE)
+
+
+ /*
+ * Stuff for bootinfo interface versioning
+ *
+ * At the start of kernel code, a 'struct bootversion' is located.
+ * bootstrap checks for a matching version of the interface before booting
+ * a kernel, to avoid user confusion if kernel and bootstrap don't work
+ * together :-)
+ *
+ * If incompatible changes are made to the bootinfo interface, the major
+ * number below should be stepped (and the minor reset to 0) for the
+ * appropriate machine. If a change is backward-compatible, the minor
+ * should be stepped. "Backwards-compatible" means that booting will work,
+ * but certain features may not.
+ */
+
+#define BOOTINFOV_MAGIC 0x4249561A /* 'BIV^Z' */
+#define MK_BI_VERSION(major, minor) (((major) << 16) + (minor))
+#define BI_VERSION_MAJOR(v) (((v) >> 16) & 0xffff)
+#define BI_VERSION_MINOR(v) ((v) & 0xffff)
+
+#ifndef __ASSEMBLY__
+
+struct bootversion {
+ __be16 branch;
+ __be32 magic;
+ struct {
+ __be32 machtype;
+ __be32 version;
+ } machversions[0];
+} __packed;
+
+#endif /* __ASSEMBLY__ */
+
+
+#endif /* _UAPI_ASM_M68K_BOOTINFO_H */
diff --git a/arch/m68k/include/uapi/asm/setup.h b/arch/m68k/include/uapi/asm/setup.h
index 85579bf..6a6dc63 100644
--- a/arch/m68k/include/uapi/asm/setup.h
+++ b/arch/m68k/include/uapi/asm/setup.h
@@ -6,98 +6,11 @@
** This file is subject to the terms and conditions of the GNU General Public
** License. See the file COPYING in the main directory of this archive
** for more details.
-**
-** Created 09/29/92 by Greg Harp
-**
-** 5/2/94 Roman Hodek:
-** Added bi_atari part of the machine dependent union bi_un; for now it
-** contains just a model field to distinguish between TT and Falcon.
-** 26/7/96 Roman Zippel:
-** Renamed to setup.h; added some useful macros to allow gcc some
-** optimizations if possible.
-** 5/10/96 Geert Uytterhoeven:
-** Redesign of the boot information structure; moved boot information
-** structure to bootinfo.h
*/
#ifndef _UAPI_M68K_SETUP_H
#define _UAPI_M68K_SETUP_H
-
-
- /*
- * Linux/m68k Architectures
- */
-
-#define MACH_AMIGA 1
-#define MACH_ATARI 2
-#define MACH_MAC 3
-#define MACH_APOLLO 4
-#define MACH_SUN3 5
-#define MACH_MVME147 6
-#define MACH_MVME16x 7
-#define MACH_BVME6000 8
-#define MACH_HP300 9
-#define MACH_Q40 10
-#define MACH_SUN3X 11
-#define MACH_M54XX 12
-
#define COMMAND_LINE_SIZE 256
-
-
- /*
- * CPU, FPU and MMU types
- *
- * Note: we may rely on the following equalities:
- *
- * CPU_68020 == MMU_68851
- * CPU_68030 == MMU_68030
- * CPU_68040 == FPU_68040 == MMU_68040
- * CPU_68060 == FPU_68060 == MMU_68060
- */
-
-#define CPUB_68020 0
-#define CPUB_68030 1
-#define CPUB_68040 2
-#define CPUB_68060 3
-#define CPUB_COLDFIRE 4
-
-#define CPU_68020 (1<<CPUB_68020)
-#define CPU_68030 (1<<CPUB_68030)
-#define CPU_68040 (1<<CPUB_68040)
-#define CPU_68060 (1<<CPUB_68060)
-#define CPU_COLDFIRE (1<<CPUB_COLDFIRE)
-
-#define FPUB_68881 0
-#define FPUB_68882 1
-#define FPUB_68040 2 /* Internal FPU */
-#define FPUB_68060 3 /* Internal FPU */
-#define FPUB_SUNFPA 4 /* Sun-3 FPA */
-#define FPUB_COLDFIRE 5 /* ColdFire FPU */
-
-#define FPU_68881 (1<<FPUB_68881)
-#define FPU_68882 (1<<FPUB_68882)
-#define FPU_68040 (1<<FPUB_68040)
-#define FPU_68060 (1<<FPUB_68060)
-#define FPU_SUNFPA (1<<FPUB_SUNFPA)
-#define FPU_COLDFIRE (1<<FPUB_COLDFIRE)
-
-#define MMUB_68851 0
-#define MMUB_68030 1 /* Internal MMU */
-#define MMUB_68040 2 /* Internal MMU */
-#define MMUB_68060 3 /* Internal MMU */
-#define MMUB_APOLLO 4 /* Custom Apollo */
-#define MMUB_SUN3 5 /* Custom Sun-3 */
-#define MMUB_COLDFIRE 6 /* Internal MMU */
-
-#define MMU_68851 (1<<MMUB_68851)
-#define MMU_68030 (1<<MMUB_68030)
-#define MMU_68040 (1<<MMUB_68040)
-#define MMU_68060 (1<<MMUB_68060)
-#define MMU_SUN3 (1<<MMUB_SUN3)
-#define MMU_APOLLO (1<<MMUB_APOLLO)
-#define MMU_COLDFIRE (1<<MMUB_COLDFIRE)
-
-
#endif /* _UAPI_M68K_SETUP_H */
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index 655347d..2d5d9be 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -22,3 +22,6 @@ obj-$(CONFIG_PCI) += pcibios.o
obj-$(CONFIG_HAS_DMA) += dma.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_BOOTINFO_PROC) += bootinfo_proc.o
+
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index 8b7b228..3a38634 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -98,6 +98,9 @@ int main(void)
DEFINE(CIABBASE, &ciab);
DEFINE(C_PRA, offsetof(struct CIA, pra));
DEFINE(ZTWOBASE, zTwoBase);
+
+ /* enum m68k_fixup_type */
+ DEFINE(M68K_FIXUP_MEMOFFSET, m68k_fixup_memoffset);
#endif
return 0;
diff --git a/arch/m68k/kernel/bootinfo_proc.c b/arch/m68k/kernel/bootinfo_proc.c
new file mode 100644
index 0000000..7ee853e
--- /dev/null
+++ b/arch/m68k/kernel/bootinfo_proc.c
@@ -0,0 +1,80 @@
+/*
+ * Based on arch/arm/kernel/atags_proc.c
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/printk.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <asm/bootinfo.h>
+#include <asm/byteorder.h>
+
+
+static char bootinfo_tmp[1536] __initdata;
+
+static void *bootinfo_copy;
+static size_t bootinfo_size;
+
+static ssize_t bootinfo_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, count, ppos, bootinfo_copy,
+ bootinfo_size);
+}
+
+static const struct file_operations bootinfo_fops = {
+ .read = bootinfo_read,
+ .llseek = default_llseek,
+};
+
+void __init save_bootinfo(const struct bi_record *bi)
+{
+ const void *start = bi;
+ size_t size = sizeof(bi->tag);
+
+ while (be16_to_cpu(bi->tag) != BI_LAST) {
+ uint16_t n = be16_to_cpu(bi->size);
+ size += n;
+ bi = (struct bi_record *)((unsigned long)bi + n);
+ }
+
+ if (size > sizeof(bootinfo_tmp)) {
+ pr_err("Cannot save %zu bytes of bootinfo\n", size);
+ return;
+ }
+
+ pr_info("Saving %zu bytes of bootinfo\n", size);
+ memcpy(bootinfo_tmp, start, size);
+ bootinfo_size = size;
+}
+
+static int __init init_bootinfo_procfs(void)
+{
+ /*
+ * This cannot go into save_bootinfo() because kmalloc and proc don't
+ * work yet when it is called.
+ */
+ struct proc_dir_entry *pde;
+
+ if (!bootinfo_size)
+ return -EINVAL;
+
+ bootinfo_copy = kmalloc(bootinfo_size, GFP_KERNEL);
+ if (!bootinfo_copy)
+ return -ENOMEM;
+
+ memcpy(bootinfo_copy, bootinfo_tmp, bootinfo_size);
+
+ pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_fops, NULL);
+ if (!pde) {
+ kfree(bootinfo_copy);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+arch_initcall(init_bootinfo_procfs);
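For reference, a minimal userspace sketch (not part of this patch) that dumps the
records exposed by the new /proc/bootinfo entry created above; the 1536-byte
buffer simply mirrors the bootinfo_tmp bound used in the driver:

/* Hex-dump the raw bootinfo records from /proc/bootinfo (illustration only). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[1536];	/* same upper bound as bootinfo_tmp */
	int fd = open("/proc/bootinfo", O_RDONLY);
	ssize_t n, i;

	if (fd < 0) {
		perror("open /proc/bootinfo");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	close(fd);
	for (i = 0; i < n; i++)
		printf("%02x%c", buf[i], i % 16 == 15 ? '\n' : ' ');
	putchar('\n');
	return 0;
}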
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index ac85f16..4c99bab 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -23,7 +23,7 @@
** 98/04/25 Phil Blundell: added HP300 support
** 1998/08/30 David Kilzer: Added support for font_desc structures
** for linux-2.1.115
-** 9/02/11 Richard Zidlicky: added Q40 support (initial vesion 99/01/01)
+** 1999/02/11 Richard Zidlicky: added Q40 support (initial version 99/01/01)
** 2004/05/13 Kars de Jong: Finalised HP300 support
**
** This file is subject to the terms and conditions of the GNU General Public
@@ -257,6 +257,12 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/bootinfo.h>
+#include <asm/bootinfo-amiga.h>
+#include <asm/bootinfo-atari.h>
+#include <asm/bootinfo-hp300.h>
+#include <asm/bootinfo-mac.h>
+#include <asm/bootinfo-q40.h>
+#include <asm/bootinfo-vme.h>
#include <asm/setup.h>
#include <asm/entry.h>
#include <asm/pgtable.h>
@@ -1532,7 +1538,7 @@ L(cache_done):
/*
* Find a tag record in the bootinfo structure
- * The bootinfo structure is located right after the kernel bss
+ * The bootinfo structure is located right after the kernel
* Returns: d0: size (-1 if not found)
* a0: data pointer (end-of-records if not found)
*/
@@ -2909,7 +2915,9 @@ func_start serial_init,%d0/%d1/%a0/%a1
#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
movel %pc@(L(mac_sccbase)),%a0
- /* Reset SCC device */
+ /* Reset SCC register pointer */
+ moveb %a0@(mac_scc_cha_a_ctrl_offset),%d0
+ /* Reset SCC device: write register pointer then register value */
moveb #9,%a0@(mac_scc_cha_a_ctrl_offset)
moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset)
/* Wait for 5 PCLK cycles, which is about 68 CPU cycles */
@@ -3896,8 +3904,6 @@ BVME_SCC_DATA_A = 0xffb0000f
#endif
#if defined(CONFIG_MAC)
-L(mac_booter_data):
- .long 0
L(mac_videobase):
.long 0
L(mac_videodepth):
diff --git a/arch/m68k/kernel/machine_kexec.c b/arch/m68k/kernel/machine_kexec.c
new file mode 100644
index 0000000..d4affc9
--- /dev/null
+++ b/arch/m68k/kernel/machine_kexec.c
@@ -0,0 +1,58 @@
+/*
+ * machine_kexec.c - handle transition of Linux booting another kernel
+ */
+#include <linux/compiler.h>
+#include <linux/kexec.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+
+extern const unsigned char relocate_new_kernel[];
+extern const size_t relocate_new_kernel_size;
+
+int machine_kexec_prepare(struct kimage *kimage)
+{
+ return 0;
+}
+
+void machine_kexec_cleanup(struct kimage *kimage)
+{
+}
+
+void machine_shutdown(void)
+{
+}
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+}
+
+typedef void (*relocate_kernel_t)(unsigned long ptr,
+ unsigned long start,
+ unsigned long cpu_mmu_flags) __noreturn;
+
+void machine_kexec(struct kimage *image)
+{
+ void *reboot_code_buffer;
+ unsigned long cpu_mmu_flags;
+
+ reboot_code_buffer = page_address(image->control_code_page);
+
+ memcpy(reboot_code_buffer, relocate_new_kernel,
+ relocate_new_kernel_size);
+
+ /*
+ * we do not want to be bothered.
+ */
+ local_irq_disable();
+
+ pr_info("Will call new kernel at 0x%08lx. Bye...\n", image->start);
+ __flush_cache_all();
+ cpu_mmu_flags = m68k_cputype | m68k_mmutype << 8;
+ ((relocate_kernel_t) reboot_code_buffer)(image->head & PAGE_MASK,
+ image->start,
+ cpu_mmu_flags);
+}
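The cpu_mmu_flags word built above packs m68k_cputype into the low byte and
m68k_mmutype into the next one; relocate_new_kernel (added below) tests the
individual bits with MMU_BASE = 8. A small decoding sketch, illustration only
with hypothetical helper names, using the CPU_*/MMU_* masks from <asm/setup.h>:

/*
 * Illustration only: how the cpu_mmu_flags argument is interpreted on
 * the other side.  Bits 0-7 hold the CPU_* mask, bits 8-15 hold the
 * MMU_* mask (shifted by MMU_BASE == 8), matching the btst operands
 * in relocate_kernel.S.
 */
#include <linux/types.h>
#include <asm/setup.h>

static inline bool kexec_flags_mmu_is_68040(unsigned long cpu_mmu_flags)
{
	return cpu_mmu_flags & (MMU_68040 << 8);	/* btst #MMU_BASE+MMUB_68040 */
}

static inline bool kexec_flags_cpu_is_68020(unsigned long cpu_mmu_flags)
{
	return cpu_mmu_flags & CPU_68020;		/* btst #CPUB_68020 */
}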
diff --git a/arch/m68k/kernel/relocate_kernel.S b/arch/m68k/kernel/relocate_kernel.S
new file mode 100644
index 0000000..3e09a89
--- /dev/null
+++ b/arch/m68k/kernel/relocate_kernel.S
@@ -0,0 +1,159 @@
+#include <linux/linkage.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+
+
+#define MMU_BASE 8 /* MMU flags base in cpu_mmu_flags */
+
+.text
+
+ENTRY(relocate_new_kernel)
+ movel %sp@(4),%a0 /* a0 = ptr */
+ movel %sp@(8),%a1 /* a1 = start */
+ movel %sp@(12),%d1 /* d1 = cpu_mmu_flags */
+ movew #PAGE_MASK,%d2 /* d2 = PAGE_MASK */
+
+ /* Disable MMU */
+
+ btst #MMU_BASE + MMUB_68851,%d1
+ jeq 3f
+
+1: /* 68851 or 68030 */
+
+ lea %pc@(.Lcopy),%a4
+2: addl #0x00000000,%a4 /* virt_to_phys() */
+
+ .section ".m68k_fixup","aw"
+ .long M68K_FIXUP_MEMOFFSET, 2b+2
+ .previous
+
+ .chip 68030
+ pmove %tc,%d0 /* Disable MMU */
+ bclr #7,%d0
+ pmove %d0,%tc
+ jmp %a4@ /* Jump to physical .Lcopy */
+ .chip 68k
+
+3:
+ btst #MMU_BASE + MMUB_68030,%d1
+ jne 1b
+
+ btst #MMU_BASE + MMUB_68040,%d1
+ jeq 6f
+
+4: /* 68040 or 68060 */
+
+ lea %pc@(.Lcont040),%a4
+5: addl #0x00000000,%a4 /* virt_to_phys() */
+
+ .section ".m68k_fixup","aw"
+ .long M68K_FIXUP_MEMOFFSET, 5b+2
+ .previous
+
+ movel %a4,%d0
+ andl #0xff000000,%d0
+ orw #0xe020,%d0 /* Map 16 MiB, enable, cacheable */
+ .chip 68040
+ movec %d0,%itt0
+ movec %d0,%dtt0
+ .chip 68k
+ jmp %a4@ /* Jump to physical .Lcont040 */
+
+.Lcont040:
+ moveq #0,%d0
+ .chip 68040
+ movec %d0,%tc /* Disable MMU */
+ movec %d0,%itt0
+ movec %d0,%itt1
+ movec %d0,%dtt0
+ movec %d0,%dtt1
+ .chip 68k
+ jra .Lcopy
+
+6:
+ btst #MMU_BASE + MMUB_68060,%d1
+ jne 4b
+
+.Lcopy:
+ movel %a0@+,%d0 /* d0 = entry = *ptr */
+ jeq .Lflush
+
+ btst #2,%d0 /* entry & IND_DONE? */
+ jne .Lflush
+
+ btst #1,%d0 /* entry & IND_INDIRECTION? */
+ jeq 1f
+ andw %d2,%d0
+ movel %d0,%a0 /* ptr = entry & PAGE_MASK */
+ jra .Lcopy
+
+1:
+ btst #0,%d0 /* entry & IND_DESTINATION? */
+ jeq 2f
+ andw %d2,%d0
+ movel %d0,%a2 /* a2 = dst = entry & PAGE_MASK */
+ jra .Lcopy
+
+2:
+ btst #3,%d0 /* entry & IND_SOURCE? */
+ jeq .Lcopy
+
+ andw %d2,%d0
+ movel %d0,%a3 /* a3 = src = entry & PAGE_MASK */
+ movew #PAGE_SIZE/32 - 1,%d0 /* d0 = PAGE_SIZE/32 - 1 */
+3:
+ movel %a3@+,%a2@+ /* *dst++ = *src++ */
+ movel %a3@+,%a2@+ /* *dst++ = *src++ */
+ movel %a3@+,%a2@+ /* *dst++ = *src++ */
+ movel %a3@+,%a2@+ /* *dst++ = *src++ */
+ movel %a3@+,%a2@+ /* *dst++ = *src++ */
+ movel %a3@+,%a2@+ /* *dst++ = *src++ */
+ movel %a3@+,%a2@+ /* *dst++ = *src++ */
+ movel %a3@+,%a2@+ /* *dst++ = *src++ */
+ dbf %d0, 3b
+ jra .Lcopy
+
+.Lflush:
+ /* Flush all caches */
+
+ btst #CPUB_68020,%d1
+ jeq 2f
+
+1: /* 68020 or 68030 */
+ .chip 68030
+ movec %cacr,%d0
+ orw #0x808,%d0
+ movec %d0,%cacr
+ .chip 68k
+ jra .Lreincarnate
+
+2:
+ btst #CPUB_68030,%d1
+ jne 1b
+
+ btst #CPUB_68040,%d1
+ jeq 4f
+
+3: /* 68040 or 68060 */
+ .chip 68040
+ nop
+ cpusha %bc
+ nop
+ cinva %bc
+ nop
+ .chip 68k
+ jra .Lreincarnate
+
+4:
+ btst #CPUB_68060,%d1
+ jne 3b
+
+.Lreincarnate:
+ jmp %a1@
+
+relocate_new_kernel_end:
+
+ENTRY(relocate_new_kernel_size)
+ .long relocate_new_kernel_end - relocate_new_kernel
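For readers less fluent in 68k assembly, the .Lcopy loop above walks the
standard kexec entry list. A C rendering of the same walk, illustration only
and assuming a well-formed list, using the IND_* flags from <linux/kexec.h>
whose bit positions match the btst operands:

#include <linux/kexec.h>
#include <linux/string.h>
#include <asm/page.h>

/* Illustration only: the page-copy walk performed by .Lcopy above. */
static void kexec_copy_pages_sketch(unsigned long *ptr)
{
	unsigned long entry, *dst = NULL, *src;

	while ((entry = *ptr++) != 0 && !(entry & IND_DONE)) {	/* btst #2 */
		if (entry & IND_INDIRECTION)			/* btst #1 */
			ptr = (unsigned long *)(entry & PAGE_MASK);
		else if (entry & IND_DESTINATION)		/* btst #0 */
			dst = (unsigned long *)(entry & PAGE_MASK);
		else if (entry & IND_SOURCE) {			/* btst #3 */
			src = (unsigned long *)(entry & PAGE_MASK);
			memcpy(dst, src, PAGE_SIZE);
			dst += PAGE_SIZE / sizeof(*dst);	/* a2 advances during the copy */
		}
	}
}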
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index e67e531..5b8ec4d 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -26,6 +26,7 @@
#include <linux/initrd.h>
#include <asm/bootinfo.h>
+#include <asm/byteorder.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/fpu.h>
@@ -71,12 +72,12 @@ EXPORT_SYMBOL(m68k_num_memory);
int m68k_realnum_memory;
EXPORT_SYMBOL(m68k_realnum_memory);
unsigned long m68k_memoffset;
-struct mem_info m68k_memory[NUM_MEMINFO];
+struct m68k_mem_info m68k_memory[NUM_MEMINFO];
EXPORT_SYMBOL(m68k_memory);
-struct mem_info m68k_ramdisk;
+static struct m68k_mem_info m68k_ramdisk __initdata;
-static char m68k_command_line[CL_SIZE];
+static char m68k_command_line[CL_SIZE] __initdata;
void (*mach_sched_init) (irq_handler_t handler) __initdata = NULL;
/* machine dependent irq functions */
@@ -143,11 +144,16 @@ extern void paging_init(void);
static void __init m68k_parse_bootinfo(const struct bi_record *record)
{
- while (record->tag != BI_LAST) {
+ uint16_t tag;
+
+ save_bootinfo(record);
+
+ while ((tag = be16_to_cpu(record->tag)) != BI_LAST) {
int unknown = 0;
- const unsigned long *data = record->data;
+ const void *data = record->data;
+ uint16_t size = be16_to_cpu(record->size);
- switch (record->tag) {
+ switch (tag) {
case BI_MACHTYPE:
case BI_CPUTYPE:
case BI_FPUTYPE:
@@ -157,20 +163,27 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
case BI_MEMCHUNK:
if (m68k_num_memory < NUM_MEMINFO) {
- m68k_memory[m68k_num_memory].addr = data[0];
- m68k_memory[m68k_num_memory].size = data[1];
+ const struct mem_info *m = data;
+ m68k_memory[m68k_num_memory].addr =
+ be32_to_cpu(m->addr);
+ m68k_memory[m68k_num_memory].size =
+ be32_to_cpu(m->size);
m68k_num_memory++;
} else
- printk("m68k_parse_bootinfo: too many memory chunks\n");
+ pr_warn("%s: too many memory chunks\n",
+ __func__);
break;
case BI_RAMDISK:
- m68k_ramdisk.addr = data[0];
- m68k_ramdisk.size = data[1];
+ {
+ const struct mem_info *m = data;
+ m68k_ramdisk.addr = be32_to_cpu(m->addr);
+ m68k_ramdisk.size = be32_to_cpu(m->size);
+ }
break;
case BI_COMMAND_LINE:
- strlcpy(m68k_command_line, (const char *)data,
+ strlcpy(m68k_command_line, data,
sizeof(m68k_command_line));
break;
@@ -197,17 +210,16 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
unknown = 1;
}
if (unknown)
- printk("m68k_parse_bootinfo: unknown tag 0x%04x ignored\n",
- record->tag);
- record = (struct bi_record *)((unsigned long)record +
- record->size);
+ pr_warn("%s: unknown tag 0x%04x ignored\n", __func__,
+ tag);
+ record = (struct bi_record *)((unsigned long)record + size);
}
m68k_realnum_memory = m68k_num_memory;
#ifdef CONFIG_SINGLE_MEMORY_CHUNK
if (m68k_num_memory > 1) {
- printk("Ignoring last %i chunks of physical memory\n",
- (m68k_num_memory - 1));
+ pr_warn("%s: ignoring last %i chunks of physical memory\n",
+ __func__, (m68k_num_memory - 1));
m68k_num_memory = 1;
}
#endif
@@ -219,7 +231,7 @@ void __init setup_arch(char **cmdline_p)
int i;
#endif
- /* The bootinfo is located right after the kernel bss */
+ /* The bootinfo is located right after the kernel */
if (!CPU_IS_COLDFIRE)
m68k_parse_bootinfo((const struct bi_record *)_end);
@@ -247,7 +259,7 @@ void __init setup_arch(char **cmdline_p)
asm (".chip 68060; movec %%pcr,%0; .chip 68k"
: "=d" (pcr));
if (((pcr >> 8) & 0xff) <= 5) {
- printk("Enabling workaround for errata I14\n");
+ pr_warn("Enabling workaround for errata I14\n");
asm (".chip 68060; movec %0,%%pcr; .chip 68k"
: : "d" (pcr | 0x20));
}
@@ -336,12 +348,12 @@ void __init setup_arch(char **cmdline_p)
panic("No configuration setup");
}
+ paging_init();
+
#ifdef CONFIG_NATFEAT
nf_init();
#endif
- paging_init();
-
#ifndef CONFIG_SUN3
for (i = 1; i < m68k_num_memory; i++)
free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
@@ -353,7 +365,7 @@ void __init setup_arch(char **cmdline_p)
BOOTMEM_DEFAULT);
initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
initrd_end = initrd_start + m68k_ramdisk.size;
- printk("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
+ pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
}
#endif
@@ -538,9 +550,9 @@ void check_bugs(void)
{
#ifndef CONFIG_M68KFPU_EMU
if (m68k_fputype == 0) {
- printk(KERN_EMERG "*** YOU DO NOT HAVE A FLOATING POINT UNIT, "
+ pr_emerg("*** YOU DO NOT HAVE A FLOATING POINT UNIT, "
"WHICH IS REQUIRED BY LINUX/M68K ***\n");
- printk(KERN_EMERG "Upgrade your hardware or join the FPU "
+ pr_emerg("Upgrade your hardware or join the FPU "
"emulation project\n");
panic("no FPU");
}
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 7eb9792..958f1ad 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -28,6 +28,10 @@
#include <linux/timex.h>
#include <linux/profile.h>
+
+unsigned long (*mach_random_get_entropy)(void);
+
+
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "xtime_update()" routine every clocktick
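mach_random_get_entropy, added above, is a per-machine hook that a platform
with a free-running counter can point at a suitable read routine. A sketch of
such wiring follows; the helper names are hypothetical and not taken from this
series:

/*
 * Hypothetical example of installing the mach_random_get_entropy hook
 * from a platform's config code; read_fast_counter() stands in for
 * whatever free-running timer the machine actually provides.
 */
static unsigned long example_random_get_entropy(void)
{
	return read_fast_counter();	/* hypothetical counter read */
}

static void __init example_entropy_init(void)
{
	mach_random_get_entropy = example_random_get_entropy;
}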
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 88fcd8c..6c9ca24 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -133,9 +133,7 @@ static inline void access_error060 (struct frame *fp)
{
unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */
-#ifdef DEBUG
- printk("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr);
-#endif
+ pr_debug("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr);
if (fslw & MMU060_BPE) {
/* branch prediction error -> clear branch cache */
@@ -162,9 +160,7 @@ static inline void access_error060 (struct frame *fp)
}
if (fslw & MMU060_W)
errorcode |= 2;
-#ifdef DEBUG
- printk("errorcode = %d\n", errorcode );
-#endif
+ pr_debug("errorcode = %ld\n", errorcode);
do_page_fault(&fp->ptregs, addr, errorcode);
} else if (fslw & (MMU060_SEE)){
/* Software Emulation Error.
@@ -173,8 +169,9 @@ static inline void access_error060 (struct frame *fp)
send_fault_sig(&fp->ptregs);
} else if (!(fslw & (MMU060_RE|MMU060_WE)) ||
send_fault_sig(&fp->ptregs) > 0) {
- printk("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, fp->un.fmt4.effaddr);
- printk( "68060 access error, fslw=%lx\n", fslw );
+ pr_err("pc=%#lx, fa=%#lx\n", fp->ptregs.pc,
+ fp->un.fmt4.effaddr);
+ pr_err("68060 access error, fslw=%lx\n", fslw);
trap_c( fp );
}
}
@@ -225,9 +222,7 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
set_fs(old_fs);
-#ifdef DEBUG
- printk("do_040writeback1, res=%d\n",res);
-#endif
+ pr_debug("do_040writeback1, res=%d\n", res);
return res;
}
@@ -249,7 +244,7 @@ static inline void do_040writebacks(struct frame *fp)
int res = 0;
#if 0
if (fp->un.fmt7.wb1s & WBV_040)
- printk("access_error040: cannot handle 1st writeback. oops.\n");
+ pr_err("access_error040: cannot handle 1st writeback. oops.\n");
#endif
if ((fp->un.fmt7.wb2s & WBV_040) &&
@@ -302,14 +297,12 @@ static inline void access_error040(struct frame *fp)
unsigned short ssw = fp->un.fmt7.ssw;
unsigned long mmusr;
-#ifdef DEBUG
- printk("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr);
- printk("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s,
+ pr_debug("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr);
+ pr_debug("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s,
fp->un.fmt7.wb2s, fp->un.fmt7.wb3s);
- printk ("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n",
+ pr_debug("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n",
fp->un.fmt7.wb2a, fp->un.fmt7.wb3a,
fp->un.fmt7.wb2d, fp->un.fmt7.wb3d);
-#endif
if (ssw & ATC_040) {
unsigned long addr = fp->un.fmt7.faddr;
@@ -324,9 +317,7 @@ static inline void access_error040(struct frame *fp)
/* MMU error, get the MMUSR info for this access */
mmusr = probe040(!(ssw & RW_040), addr, ssw);
-#ifdef DEBUG
- printk("mmusr = %lx\n", mmusr);
-#endif
+ pr_debug("mmusr = %lx\n", mmusr);
errorcode = 1;
if (!(mmusr & MMU_R_040)) {
/* clear the invalid atc entry */
@@ -340,14 +331,10 @@ static inline void access_error040(struct frame *fp)
errorcode |= 2;
if (do_page_fault(&fp->ptregs, addr, errorcode)) {
-#ifdef DEBUG
- printk("do_page_fault() !=0\n");
-#endif
+ pr_debug("do_page_fault() !=0\n");
if (user_mode(&fp->ptregs)){
/* delay writebacks after signal delivery */
-#ifdef DEBUG
- printk(".. was usermode - return\n");
-#endif
+ pr_debug(".. was usermode - return\n");
return;
}
/* disable writeback into user space from kernel
@@ -355,9 +342,7 @@ static inline void access_error040(struct frame *fp)
* the writeback won't do good)
*/
disable_wb:
-#ifdef DEBUG
- printk(".. disabling wb2\n");
-#endif
+ pr_debug(".. disabling wb2\n");
if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
fp->un.fmt7.wb2s &= ~WBV_040;
if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr)
@@ -371,7 +356,7 @@ disable_wb:
current->thread.signo = SIGBUS;
current->thread.faddr = fp->un.fmt7.faddr;
if (send_fault_sig(&fp->ptregs) >= 0)
- printk("68040 bus error (ssw=%x, faddr=%lx)\n", ssw,
+ pr_err("68040 bus error (ssw=%x, faddr=%lx)\n", ssw,
fp->un.fmt7.faddr);
goto disable_wb;
}
@@ -394,19 +379,17 @@ static inline void bus_error030 (struct frame *fp)
unsigned short ssw = fp->un.fmtb.ssw;
extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
-#ifdef DEBUG
if (ssw & (FC | FB))
- printk ("Instruction fault at %#010lx\n",
+ pr_debug("Instruction fault at %#010lx\n",
ssw & FC ?
fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
:
fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
if (ssw & DF)
- printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
-#endif
/*
* Check if this page should be demand-mapped. This needs to go before
@@ -429,7 +412,7 @@ static inline void bus_error030 (struct frame *fp)
return;
/* instruction fault or kernel data fault! */
if (ssw & (FC | FB))
- printk ("Instruction fault at %#010lx\n",
+ pr_err("Instruction fault at %#010lx\n",
fp->ptregs.pc);
if (ssw & DF) {
/* was this fault incurred testing bus mappings? */
@@ -439,12 +422,12 @@ static inline void bus_error030 (struct frame *fp)
return;
}
- printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
}
- printk ("BAD KERNEL BUSERR\n");
+ pr_err("BAD KERNEL BUSERR\n");
die_if_kernel("Oops", &fp->ptregs,0);
force_sig(SIGKILL, current);
@@ -473,12 +456,11 @@ static inline void bus_error030 (struct frame *fp)
else if (buserr_type & SUN3_BUSERR_INVALID)
errorcode = 0x00;
else {
-#ifdef DEBUG
- printk ("*** unexpected busfault type=%#04x\n", buserr_type);
- printk ("invalid %s access at %#lx from pc %#lx\n",
- !(ssw & RW) ? "write" : "read", addr,
- fp->ptregs.pc);
-#endif
+ pr_debug("*** unexpected busfault type=%#04x\n",
+ buserr_type);
+ pr_debug("invalid %s access at %#lx from pc %#lx\n",
+ !(ssw & RW) ? "write" : "read", addr,
+ fp->ptregs.pc);
die_if_kernel ("Oops", &fp->ptregs, buserr_type);
force_sig (SIGBUS, current);
return;
@@ -509,9 +491,7 @@ static inline void bus_error030 (struct frame *fp)
if (!mmu_emu_handle_fault(addr, 1, 0))
do_page_fault (&fp->ptregs, addr, 0);
} else {
-#ifdef DEBUG
- printk ("protection fault on insn access (segv).\n");
-#endif
+ pr_debug("protection fault on insn access (segv).\n");
force_sig (SIGSEGV, current);
}
}
@@ -525,22 +505,22 @@ static inline void bus_error030 (struct frame *fp)
unsigned short ssw = fp->un.fmtb.ssw;
#ifdef DEBUG
unsigned long desc;
+#endif
- printk ("pid = %x ", current->pid);
- printk ("SSW=%#06x ", ssw);
+ pr_debug("pid = %x ", current->pid);
+ pr_debug("SSW=%#06x ", ssw);
if (ssw & (FC | FB))
- printk ("Instruction fault at %#010lx\n",
+ pr_debug("Instruction fault at %#010lx\n",
ssw & FC ?
fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
:
fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
if (ssw & DF)
- printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
-#endif
/* ++andreas: If a data fault and an instruction fault happen
at the same time map in both pages. */
@@ -554,27 +534,23 @@ static inline void bus_error030 (struct frame *fp)
"pmove %%psr,%1"
: "=a&" (desc), "=m" (temp)
: "a" (addr), "d" (ssw));
+ pr_debug("mmusr is %#x for addr %#lx in task %p\n",
+ temp, addr, current);
+ pr_debug("descriptor address is 0x%p, contents %#lx\n",
+ __va(desc), *(unsigned long *)__va(desc));
#else
asm volatile ("ptestr %2,%1@,#7\n\t"
"pmove %%psr,%0"
: "=m" (temp) : "a" (addr), "d" (ssw));
#endif
mmusr = temp;
-
-#ifdef DEBUG
- printk("mmusr is %#x for addr %#lx in task %p\n",
- mmusr, addr, current);
- printk("descriptor address is %#lx, contents %#lx\n",
- __va(desc), *(unsigned long *)__va(desc));
-#endif
-
errorcode = (mmusr & MMU_I) ? 0 : 1;
if (!(ssw & RW) || (ssw & RM))
errorcode |= 2;
if (mmusr & (MMU_I | MMU_WP)) {
if (ssw & 4) {
- printk("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
@@ -587,9 +563,10 @@ static inline void bus_error030 (struct frame *fp)
} else if (!(mmusr & MMU_I)) {
/* probably a 020 cas fault */
if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0)
- printk("unexpected bus error (%#x,%#x)\n", ssw, mmusr);
+ pr_err("unexpected bus error (%#x,%#x)\n", ssw,
+ mmusr);
} else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
- printk("invalid %s access at %#lx from pc %#lx\n",
+ pr_err("invalid %s access at %#lx from pc %#lx\n",
!(ssw & RW) ? "write" : "read", addr,
fp->ptregs.pc);
die_if_kernel("Oops",&fp->ptregs,mmusr);
@@ -600,7 +577,7 @@ static inline void bus_error030 (struct frame *fp)
static volatile long tlong;
#endif
- printk("weird %s access at %#lx from pc %#lx (ssw is %#x)\n",
+ pr_err("weird %s access at %#lx from pc %#lx (ssw is %#x)\n",
!(ssw & RW) ? "write" : "read", addr,
fp->ptregs.pc, ssw);
asm volatile ("ptestr #1,%1@,#0\n\t"
@@ -609,18 +586,16 @@ static inline void bus_error030 (struct frame *fp)
: "a" (addr));
mmusr = temp;
- printk ("level 0 mmusr is %#x\n", mmusr);
+ pr_err("level 0 mmusr is %#x\n", mmusr);
#if 0
asm volatile ("pmove %%tt0,%0"
: "=m" (tlong));
- printk("tt0 is %#lx, ", tlong);
+ pr_debug("tt0 is %#lx, ", tlong);
asm volatile ("pmove %%tt1,%0"
: "=m" (tlong));
- printk("tt1 is %#lx\n", tlong);
-#endif
-#ifdef DEBUG
- printk("Unknown SIGSEGV - 1\n");
+ pr_debug("tt1 is %#lx\n", tlong);
#endif
+ pr_debug("Unknown SIGSEGV - 1\n");
die_if_kernel("Oops",&fp->ptregs,mmusr);
force_sig(SIGSEGV, current);
return;
@@ -641,10 +616,9 @@ static inline void bus_error030 (struct frame *fp)
return;
if (fp->ptregs.sr & PS_S) {
- printk("Instruction fault at %#010lx\n",
- fp->ptregs.pc);
+ pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc);
buserr:
- printk ("BAD KERNEL BUSERR\n");
+ pr_err("BAD KERNEL BUSERR\n");
die_if_kernel("Oops",&fp->ptregs,0);
force_sig(SIGKILL, current);
return;
@@ -668,28 +642,22 @@ static inline void bus_error030 (struct frame *fp)
"pmove %%psr,%1"
: "=a&" (desc), "=m" (temp)
: "a" (addr));
+ pr_debug("mmusr is %#x for addr %#lx in task %p\n",
+ temp, addr, current);
+ pr_debug("descriptor address is 0x%p, contents %#lx\n",
+ __va(desc), *(unsigned long *)__va(desc));
#else
asm volatile ("ptestr #1,%1@,#7\n\t"
"pmove %%psr,%0"
: "=m" (temp) : "a" (addr));
#endif
mmusr = temp;
-
-#ifdef DEBUG
- printk ("mmusr is %#x for addr %#lx in task %p\n",
- mmusr, addr, current);
- printk ("descriptor address is %#lx, contents %#lx\n",
- __va(desc), *(unsigned long *)__va(desc));
-#endif
-
if (mmusr & MMU_I)
do_page_fault (&fp->ptregs, addr, 0);
else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
- printk ("invalid insn access at %#lx from pc %#lx\n",
+ pr_err("invalid insn access at %#lx from pc %#lx\n",
addr, fp->ptregs.pc);
-#ifdef DEBUG
- printk("Unknown SIGSEGV - 2\n");
-#endif
+ pr_debug("Unknown SIGSEGV - 2\n");
die_if_kernel("Oops",&fp->ptregs,mmusr);
force_sig(SIGSEGV, current);
return;
@@ -791,9 +759,7 @@ asmlinkage void buserr_c(struct frame *fp)
if (user_mode(&fp->ptregs))
current->thread.esp0 = (unsigned long) fp;
-#ifdef DEBUG
- printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
-#endif
+ pr_debug("*** Bus Error *** Format is %x\n", fp->ptregs.format);
#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
if (CPU_IS_COLDFIRE) {
@@ -836,9 +802,7 @@ asmlinkage void buserr_c(struct frame *fp)
#endif
default:
die_if_kernel("bad frame format",&fp->ptregs,0);
-#ifdef DEBUG
- printk("Unknown SIGSEGV - 4\n");
-#endif
+ pr_debug("Unknown SIGSEGV - 4\n");
force_sig(SIGSEGV, current);
}
}
@@ -852,7 +816,7 @@ void show_trace(unsigned long *stack)
unsigned long addr;
int i;
- printk("Call Trace:");
+ pr_info("Call Trace:");
addr = (unsigned long)stack + THREAD_SIZE - 1;
endstack = (unsigned long *)(addr & -THREAD_SIZE);
i = 0;
@@ -869,13 +833,13 @@ void show_trace(unsigned long *stack)
if (__kernel_text_address(addr)) {
#ifndef CONFIG_KALLSYMS
if (i % 5 == 0)
- printk("\n ");
+ pr_cont("\n ");
#endif
- printk(" [<%08lx>] %pS\n", addr, (void *)addr);
+ pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr);
i++;
}
}
- printk("\n");
+ pr_cont("\n");
}
void show_registers(struct pt_regs *regs)
@@ -887,81 +851,87 @@ void show_registers(struct pt_regs *regs)
int i;
print_modules();
- printk("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc);
- printk("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2);
- printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
+ pr_info("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc);
+ pr_info("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2);
+ pr_info("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
regs->d0, regs->d1, regs->d2, regs->d3);
- printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
+ pr_info("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
regs->d4, regs->d5, regs->a0, regs->a1);
- printk("Process %s (pid: %d, task=%p)\n",
+ pr_info("Process %s (pid: %d, task=%p)\n",
current->comm, task_pid_nr(current), current);
addr = (unsigned long)&fp->un;
- printk("Frame format=%X ", regs->format);
+ pr_info("Frame format=%X ", regs->format);
switch (regs->format) {
case 0x2:
- printk("instr addr=%08lx\n", fp->un.fmt2.iaddr);
+ pr_cont("instr addr=%08lx\n", fp->un.fmt2.iaddr);
addr += sizeof(fp->un.fmt2);
break;
case 0x3:
- printk("eff addr=%08lx\n", fp->un.fmt3.effaddr);
+ pr_cont("eff addr=%08lx\n", fp->un.fmt3.effaddr);
addr += sizeof(fp->un.fmt3);
break;
case 0x4:
- printk((CPU_IS_060 ? "fault addr=%08lx fslw=%08lx\n"
- : "eff addr=%08lx pc=%08lx\n"),
- fp->un.fmt4.effaddr, fp->un.fmt4.pc);
+ if (CPU_IS_060)
+ pr_cont("fault addr=%08lx fslw=%08lx\n",
+ fp->un.fmt4.effaddr, fp->un.fmt4.pc);
+ else
+ pr_cont("eff addr=%08lx pc=%08lx\n",
+ fp->un.fmt4.effaddr, fp->un.fmt4.pc);
addr += sizeof(fp->un.fmt4);
break;
case 0x7:
- printk("eff addr=%08lx ssw=%04x faddr=%08lx\n",
+ pr_cont("eff addr=%08lx ssw=%04x faddr=%08lx\n",
fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr);
- printk("wb 1 stat/addr/data: %04x %08lx %08lx\n",
+ pr_info("wb 1 stat/addr/data: %04x %08lx %08lx\n",
fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0);
- printk("wb 2 stat/addr/data: %04x %08lx %08lx\n",
+ pr_info("wb 2 stat/addr/data: %04x %08lx %08lx\n",
fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d);
- printk("wb 3 stat/addr/data: %04x %08lx %08lx\n",
+ pr_info("wb 3 stat/addr/data: %04x %08lx %08lx\n",
fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d);
- printk("push data: %08lx %08lx %08lx %08lx\n",
+ pr_info("push data: %08lx %08lx %08lx %08lx\n",
fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2,
fp->un.fmt7.pd3);
addr += sizeof(fp->un.fmt7);
break;
case 0x9:
- printk("instr addr=%08lx\n", fp->un.fmt9.iaddr);
+ pr_cont("instr addr=%08lx\n", fp->un.fmt9.iaddr);
addr += sizeof(fp->un.fmt9);
break;
case 0xa:
- printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
+ pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb,
fp->un.fmta.daddr, fp->un.fmta.dobuf);
addr += sizeof(fp->un.fmta);
break;
case 0xb:
- printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
+ pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb,
fp->un.fmtb.daddr, fp->un.fmtb.dobuf);
- printk("baddr=%08lx dibuf=%08lx ver=%x\n",
+ pr_info("baddr=%08lx dibuf=%08lx ver=%x\n",
fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver);
addr += sizeof(fp->un.fmtb);
break;
default:
- printk("\n");
+ pr_cont("\n");
}
show_stack(NULL, (unsigned long *)addr);
- printk("Code:");
+ pr_info("Code:");
set_fs(KERNEL_DS);
cp = (u16 *)regs->pc;
for (i = -8; i < 16; i++) {
if (get_user(c, cp + i) && i >= 0) {
- printk(" Bad PC value.");
+ pr_cont(" Bad PC value.");
break;
}
- printk(i ? " %04x" : " <%04x>", c);
+ if (i)
+ pr_cont(" %04x", c);
+ else
+ pr_cont(" <%04x>", c);
}
set_fs(old_fs);
- printk ("\n");
+ pr_cont("\n");
}
void show_stack(struct task_struct *task, unsigned long *stack)
@@ -978,16 +948,16 @@ void show_stack(struct task_struct *task, unsigned long *stack)
}
endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
- printk("Stack from %08lx:", (unsigned long)stack);
+ pr_info("Stack from %08lx:", (unsigned long)stack);
p = stack;
for (i = 0; i < kstack_depth_to_print; i++) {
if (p + 1 > endstack)
break;
if (i % 8 == 0)
- printk("\n ");
- printk(" %08lx", *p++);
+ pr_cont("\n ");
+ pr_cont(" %08lx", *p++);
}
- printk("\n");
+ pr_cont("\n");
show_trace(stack);
}
@@ -1005,32 +975,32 @@ void bad_super_trap (struct frame *fp)
console_verbose();
if (vector < ARRAY_SIZE(vec_names))
- printk ("*** %s *** FORMAT=%X\n",
+ pr_err("*** %s *** FORMAT=%X\n",
vec_names[vector],
fp->ptregs.format);
else
- printk ("*** Exception %d *** FORMAT=%X\n",
+ pr_err("*** Exception %d *** FORMAT=%X\n",
vector, fp->ptregs.format);
if (vector == VEC_ADDRERR && CPU_IS_020_OR_030) {
unsigned short ssw = fp->un.fmtb.ssw;
- printk ("SSW=%#06x ", ssw);
+ pr_err("SSW=%#06x ", ssw);
if (ssw & RC)
- printk ("Pipe stage C instruction fault at %#010lx\n",
+ pr_err("Pipe stage C instruction fault at %#010lx\n",
(fp->ptregs.format) == 0xA ?
fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2);
if (ssw & RB)
- printk ("Pipe stage B instruction fault at %#010lx\n",
+ pr_err("Pipe stage B instruction fault at %#010lx\n",
(fp->ptregs.format) == 0xA ?
fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
if (ssw & DF)
- printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr, space_names[ssw & DFC],
fp->ptregs.pc);
}
- printk ("Current process id is %d\n", task_pid_nr(current));
+ pr_err("Current process id is %d\n", task_pid_nr(current));
die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
}
@@ -1162,7 +1132,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr)
return;
console_verbose();
- printk("%s: %08x\n",str,nr);
+ pr_crit("%s: %08x\n", str, nr);
show_registers(fp);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
do_exit(SIGSEGV);
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index afb95d5..982c3fe 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -26,9 +26,10 @@
#include <linux/adb.h>
#include <linux/cuda.h>
-#define BOOTINFO_COMPAT_1_0
#include <asm/setup.h>
#include <asm/bootinfo.h>
+#include <asm/bootinfo-mac.h>
+#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -107,45 +108,46 @@ static void __init mac_sched_init(irq_handler_t vector)
int __init mac_parse_bootinfo(const struct bi_record *record)
{
int unknown = 0;
- const u_long *data = record->data;
+ const void *data = record->data;
- switch (record->tag) {
+ switch (be16_to_cpu(record->tag)) {
case BI_MAC_MODEL:
- mac_bi_data.id = *data;
+ mac_bi_data.id = be32_to_cpup(data);
break;
case BI_MAC_VADDR:
- mac_bi_data.videoaddr = *data;
+ mac_bi_data.videoaddr = be32_to_cpup(data);
break;
case BI_MAC_VDEPTH:
- mac_bi_data.videodepth = *data;
+ mac_bi_data.videodepth = be32_to_cpup(data);
break;
case BI_MAC_VROW:
- mac_bi_data.videorow = *data;
+ mac_bi_data.videorow = be32_to_cpup(data);
break;
case BI_MAC_VDIM:
- mac_bi_data.dimensions = *data;
+ mac_bi_data.dimensions = be32_to_cpup(data);
break;
case BI_MAC_VLOGICAL:
- mac_bi_data.videological = VIDEOMEMBASE + (*data & ~VIDEOMEMMASK);
- mac_orig_videoaddr = *data;
+ mac_orig_videoaddr = be32_to_cpup(data);
+ mac_bi_data.videological =
+ VIDEOMEMBASE + (mac_orig_videoaddr & ~VIDEOMEMMASK);
break;
case BI_MAC_SCCBASE:
- mac_bi_data.sccbase = *data;
+ mac_bi_data.sccbase = be32_to_cpup(data);
break;
case BI_MAC_BTIME:
- mac_bi_data.boottime = *data;
+ mac_bi_data.boottime = be32_to_cpup(data);
break;
case BI_MAC_GMTBIAS:
- mac_bi_data.gmtbias = *data;
+ mac_bi_data.gmtbias = be32_to_cpup(data);
break;
case BI_MAC_MEMSIZE:
- mac_bi_data.memsize = *data;
+ mac_bi_data.memsize = be32_to_cpup(data);
break;
case BI_MAC_CPUID:
- mac_bi_data.cpuid = *data;
+ mac_bi_data.cpuid = be32_to_cpup(data);
break;
case BI_MAC_ROMBASE:
- mac_bi_data.rombase = *data;
+ mac_bi_data.rombase = be32_to_cpup(data);
break;
default:
unknown = 1;
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index 7d8d461..4d2adfb 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -111,16 +111,15 @@
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <asm/bootinfo.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_iop.h>
/*#define DEBUG_IOP*/
-/* Set to non-zero if the IOPs are present. Set by iop_init() */
+/* Non-zero if the IOPs are present */
-int iop_scc_present,iop_ism_present;
+int iop_scc_present, iop_ism_present;
/* structure for tracking channel listeners */
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 5e08555..707b61a 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -25,8 +25,6 @@
#include <asm/mac_via.h>
#include <asm/mac_oss.h>
-#define BOOTINFO_COMPAT_1_0
-#include <asm/bootinfo.h>
#include <asm/machdep.h>
/* Offset between Unix time (1970-based) and Mac time (1904-based) */
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index 6c4c882..5403712 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -21,7 +21,6 @@
#include <linux/init.h>
#include <linux/irq.h>
-#include <asm/bootinfo.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_via.h>
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index 6f026fc..835fa04 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -21,7 +21,6 @@
#include <linux/irq.h>
#include <asm/traps.h>
-#include <asm/bootinfo.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
@@ -54,7 +53,7 @@ static void psc_debug_dump(void)
* expanded to cover what I think are the other 7 channels.
*/
-static void psc_dma_die_die_die(void)
+static __init void psc_dma_die_die_die(void)
{
int i;
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index 5d1458b..e198dec 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -30,7 +30,6 @@
#include <linux/module.h>
#include <linux/irq.h>
-#include <asm/bootinfo.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_via.h>
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index eb1d61f..2bd7487 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -25,9 +25,8 @@ int send_fault_sig(struct pt_regs *regs)
siginfo.si_signo = current->thread.signo;
siginfo.si_code = current->thread.code;
siginfo.si_addr = (void *)current->thread.faddr;
-#ifdef DEBUG
- printk("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, siginfo.si_signo, siginfo.si_code);
-#endif
+ pr_debug("send_fault_sig: %p,%d,%d\n", siginfo.si_addr,
+ siginfo.si_signo, siginfo.si_code);
if (user_mode(regs)) {
force_sig_info(siginfo.si_signo,
@@ -45,10 +44,10 @@ int send_fault_sig(struct pt_regs *regs)
* terminate things with extreme prejudice.
*/
if ((unsigned long)siginfo.si_addr < PAGE_SIZE)
- printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+ pr_alert("Unable to handle kernel NULL pointer dereference");
else
- printk(KERN_ALERT "Unable to handle kernel access");
- printk(" at virtual address %p\n", siginfo.si_addr);
+ pr_alert("Unable to handle kernel access");
+ pr_cont(" at virtual address %p\n", siginfo.si_addr);
die_if_kernel("Oops", regs, 0 /*error_code*/);
do_exit(SIGKILL);
}
@@ -75,11 +74,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
int fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
-#ifdef DEBUG
- printk ("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
- regs->sr, regs->pc, address, error_code,
- current->mm->pgd);
-#endif
+ pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
+ regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL);
/*
* If we're in an interrupt or have no user
@@ -118,9 +114,7 @@ retry:
* we can handle it..
*/
good_area:
-#ifdef DEBUG
- printk("do_page_fault: good_area\n");
-#endif
+ pr_debug("do_page_fault: good_area\n");
switch (error_code & 3) {
default: /* 3: write, present */
/* fall through */
@@ -143,9 +137,7 @@ good_area:
*/
fault = handle_mm_fault(mm, vma, address, flags);
-#ifdef DEBUG
- printk("handle_mm_fault returns %d\n",fault);
-#endif
+ pr_debug("handle_mm_fault returns %d\n", fault);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return 0;
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 6b4baa6..acaff6a 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(pg_data_table);
void __init m68k_setup_node(int node)
{
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
- struct mem_info *info = m68k_memory + node;
+ struct m68k_mem_info *info = m68k_memory + node;
int i, end;
i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 568cfad..6e4955b 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -27,9 +27,9 @@
/*
* For 040/060 we can use the virtual memory area like other architectures,
- * but for 020/030 we want to use early termination page descriptor and we
+ * but for 020/030 we want to use early termination page descriptors and we
* can't mix this with normal page descriptors, so we have to copy that code
- * (mm/vmalloc.c) and return appriorate aligned addresses.
+ * (mm/vmalloc.c) and return appropriately aligned addresses.
*/
#ifdef CPU_M68040_OR_M68060_ONLY
@@ -224,7 +224,7 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
EXPORT_SYMBOL(__ioremap);
/*
- * Unmap a ioremap()ed region again
+ * Unmap an ioremap()ed region again
*/
void iounmap(void __iomem *addr)
{
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(iounmap);
/*
* __iounmap unmaps nearly everything, so be careful
- * it doesn't free currently pointer/page tables anymore but it
- * wans't used anyway and might be added later.
+ * Currently it doesn't free pointer/page tables anymore but this
+ * wasn't used anyway and might be added later.
*/
void __iounmap(void *addr, unsigned long size)
{
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 251c543..7d40244 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -233,7 +233,7 @@ void __init paging_init(void)
printk("Fix your bootloader or use a memfile to make use of this area!\n");
m68k_num_memory--;
memmove(m68k_memory + i, m68k_memory + i + 1,
- (m68k_num_memory - i) * sizeof(struct mem_info));
+ (m68k_num_memory - i) * sizeof(struct m68k_mem_info));
continue;
}
addr = m68k_memory[i].addr + m68k_memory[i].size;
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index 1c62628..1bb3ce6 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -26,6 +26,8 @@
#include <linux/interrupt.h>
#include <asm/bootinfo.h>
+#include <asm/bootinfo-vme.h>
+#include <asm/byteorder.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/irq.h>
@@ -51,9 +53,10 @@ static int bcd2int (unsigned char b);
irq_handler_t tick_handler;
-int mvme147_parse_bootinfo(const struct bi_record *bi)
+int __init mvme147_parse_bootinfo(const struct bi_record *bi)
{
- if (bi->tag == BI_VME_TYPE || bi->tag == BI_VME_BRDINFO)
+ uint16_t tag = be16_to_cpu(bi->tag);
+ if (tag == BI_VME_TYPE || tag == BI_VME_BRDINFO)
return 0;
else
return 1;
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 080a342..eab7d34 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -29,6 +29,8 @@
#include <linux/module.h>
#include <asm/bootinfo.h>
+#include <asm/bootinfo-vme.h>
+#include <asm/byteorder.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/irq.h>
@@ -60,9 +62,10 @@ unsigned short mvme16x_config;
EXPORT_SYMBOL(mvme16x_config);
-int mvme16x_parse_bootinfo(const struct bi_record *bi)
+int __init mvme16x_parse_bootinfo(const struct bi_record *bi)
{
- if (bi->tag == BI_VME_TYPE || bi->tag == BI_VME_BRDINFO)
+ uint16_t tag = be16_to_cpu(bi->tag);
+ if (tag == BI_VME_TYPE || tag == BI_VME_BRDINFO)
return 0;
else
return 1;
@@ -87,15 +90,15 @@ static void mvme16x_get_model(char *model)
suf[3] = '\0';
suf[0] = suf[1] ? '-' : '\0';
- sprintf(model, "Motorola MVME%x%s", p->brdno, suf);
+ sprintf(model, "Motorola MVME%x%s", be16_to_cpu(p->brdno), suf);
}
static void mvme16x_get_hardware_list(struct seq_file *m)
{
- p_bdid p = &mvme_bdid;
+ uint16_t brdno = be16_to_cpu(mvme_bdid.brdno);
- if (p->brdno == 0x0162 || p->brdno == 0x0172)
+ if (brdno == 0x0162 || brdno == 0x0172)
{
unsigned char rev = *(unsigned char *)MVME162_VERSION_REG;
@@ -285,6 +288,7 @@ void __init config_mvme16x(void)
{
p_bdid p = &mvme_bdid;
char id[40];
+ uint16_t brdno = be16_to_cpu(p->brdno);
mach_max_dma_address = 0xffffffff;
mach_sched_init = mvme16x_sched_init;
@@ -306,18 +310,18 @@ void __init config_mvme16x(void)
}
/* Board type is only set by newer versions of vmelilo/tftplilo */
if (vme_brdtype == 0)
- vme_brdtype = p->brdno;
+ vme_brdtype = brdno;
mvme16x_get_model(id);
printk ("\nBRD_ID: %s BUG %x.%x %02x/%02x/%02x\n", id, p->rev>>4,
p->rev&0xf, p->yr, p->mth, p->day);
- if (p->brdno == 0x0162 || p->brdno == 0x172)
+ if (brdno == 0x0162 || brdno == 0x172)
{
unsigned char rev = *(unsigned char *)MVME162_VERSION_REG;
mvme16x_config = rev | MVME16x_CONFIG_GOT_SCCA;
- printk ("MVME%x Hardware status:\n", p->brdno);
+ printk ("MVME%x Hardware status:\n", brdno);
printk (" CPU Type 68%s040\n",
rev & MVME16x_CONFIG_GOT_FPU ? "" : "LC");
printk (" CPU clock %dMHz\n",
@@ -347,12 +351,12 @@ void __init config_mvme16x(void)
static irqreturn_t mvme16x_abort_int (int irq, void *dev_id)
{
- p_bdid p = &mvme_bdid;
unsigned long *new = (unsigned long *)vectors;
unsigned long *old = (unsigned long *)0xffe00000;
volatile unsigned char uc, *ucp;
+ uint16_t brdno = be16_to_cpu(mvme_bdid.brdno);
- if (p->brdno == 0x0162 || p->brdno == 0x172)
+ if (brdno == 0x0162 || brdno == 0x172)
{
ucp = (volatile unsigned char *)0xfff42043;
uc = *ucp | 8;
@@ -366,7 +370,7 @@ static irqreturn_t mvme16x_abort_int (int irq, void *dev_id)
*(new+9) = *(old+9); /* Trace */
*(new+47) = *(old+47); /* Trap #15 */
- if (p->brdno == 0x0162 || p->brdno == 0x172)
+ if (brdno == 0x0162 || brdno == 0x172)
*(new+0x5e) = *(old+0x5e); /* ABORT switch */
else
*(new+0x6e) = *(old+0x6e); /* ABORT switch */
@@ -381,7 +385,7 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
void mvme16x_sched_init (irq_handler_t timer_routine)
{
- p_bdid p = &mvme_bdid;
+ uint16_t brdno = be16_to_cpu(mvme_bdid.brdno);
int irq;
tick_handler = timer_routine;
@@ -394,7 +398,7 @@ void mvme16x_sched_init (irq_handler_t timer_routine)
"timer", mvme16x_timer_int))
panic ("Couldn't register timer int");
- if (p->brdno == 0x0162 || p->brdno == 0x172)
+ if (brdno == 0x0162 || brdno == 0x172)
irq = MVME162_IRQ_ABORT;
else
irq = MVME167_IRQ_ABORT;
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 078bb74..e90fe90 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -154,7 +154,7 @@ static unsigned int serports[] =
0x3f8,0x2f8,0x3e8,0x2e8,0
};
-static void q40_disable_irqs(void)
+static void __init q40_disable_irqs(void)
{
unsigned i, j;
@@ -198,7 +198,7 @@ void __init config_q40(void)
}
-int q40_parse_bootinfo(const struct bi_record *rec)
+int __init q40_parse_bootinfo(const struct bi_record *rec)
{
return 1;
}
diff --git a/arch/m68k/sun3/dvma.c b/arch/m68k/sun3/dvma.c
index d522eaa..d95506e 100644
--- a/arch/m68k/sun3/dvma.c
+++ b/arch/m68k/sun3/dvma.c
@@ -7,6 +7,7 @@
*
*/
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
@@ -62,10 +63,7 @@ int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
}
-void sun3_dvma_init(void)
+void __init sun3_dvma_init(void)
{
-
memset(ptelist, 0, sizeof(ptelist));
-
-
}
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index 8edc510..3f258e2 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -6,6 +6,7 @@
** Started 1/16/98 @ 2:22 am
*/
+#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
@@ -122,7 +123,7 @@ void print_pte_vaddr (unsigned long vaddr)
/*
* Initialise the MMU emulator.
*/
-void mmu_emu_init(unsigned long bootmem_end)
+void __init mmu_emu_init(unsigned long bootmem_end)
{
unsigned long seg, num;
int i,j;
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
index cab5448..b37521a 100644
--- a/arch/m68k/sun3/sun3dvma.c
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -6,6 +6,8 @@
* Contains common routines for sun3/sun3x DVMA management.
*/
+#include <linux/bootmem.h>
+#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
@@ -30,7 +32,7 @@ static inline void dvma_unmap_iommu(unsigned long a, int b)
extern void sun3_dvma_init(void);
#endif
-static unsigned long iommu_use[IOMMU_TOTAL_ENTRIES];
+static unsigned long *iommu_use;
#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)
@@ -245,7 +247,7 @@ static inline int free_baddr(unsigned long baddr)
}
-void dvma_init(void)
+void __init dvma_init(void)
{
struct hole *hole;
@@ -265,7 +267,7 @@ void dvma_init(void)
list_add(&(hole->list), &hole_list);
- memset(iommu_use, 0, sizeof(iommu_use));
+ iommu_use = alloc_bootmem(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));
dvma_unmap_iommu(DVMA_START, DVMA_SIZE);
diff --git a/arch/m68k/sun3x/prom.c b/arch/m68k/sun3x/prom.c
index a7b7e81..0898c3f 100644
--- a/arch/m68k/sun3x/prom.c
+++ b/arch/m68k/sun3x/prom.c
@@ -10,7 +10,6 @@
#include <asm/page.h>
#include <asm/pgtable.h>
-#include <asm/bootinfo.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/sun3xprom.h>
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index c90bfc6..5d6b4b4 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -82,4 +82,19 @@ static inline void fence(void)
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
#endif /* _ASM_METAG_BARRIER_H */
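The smp_store_release()/smp_load_acquire() pair added here (and to the other
architectures in this series) provides one-way publish/consume ordering. A
minimal usage sketch with illustrative names, not taken from the patch: the
consumer is guaranteed to see the payload store once it observes the flag.

struct message {
	int payload;
	int ready;
};

static void producer(struct message *m)
{
	m->payload = 42;			/* ordinary store */
	smp_store_release(&m->ready, 1);	/* publish: payload visible first */
}

static int consumer(struct message *m)
{
	if (smp_load_acquire(&m->ready))	/* acquire: payload seen after flag */
		return m->payload;
	return -1;				/* not published yet */
}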
diff --git a/arch/metag/include/asm/smp.h b/arch/metag/include/asm/smp.h
index e0373f8..1d7e770 100644
--- a/arch/metag/include/asm/smp.h
+++ b/arch/metag/include/asm/smp.h
@@ -7,13 +7,11 @@
enum ipi_msg_type {
IPI_CALL_FUNC,
- IPI_CALL_FUNC_SINGLE,
IPI_RESCHEDULE,
};
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
asmlinkage void secondary_start_kernel(void);
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
index db589ad..c700d62 100644
--- a/arch/metag/kernel/dma.c
+++ b/arch/metag/kernel/dma.c
@@ -399,11 +399,6 @@ static int __init dma_alloc_init(void)
pgd = pgd_offset(&init_mm, CONSISTENT_START);
pud = pud_alloc(&init_mm, pgd, CONSISTENT_START);
pmd = pmd_alloc(&init_mm, pud, CONSISTENT_START);
- if (!pmd) {
- pr_err("%s: no pmd tables\n", __func__);
- ret = -ENOMEM;
- break;
- }
WARN_ON(!pmd_none(*pmd));
pte = pte_alloc_kernel(pmd, CONSISTENT_START);
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index 7c01131..f006d22 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -68,7 +68,7 @@ static DECLARE_COMPLETION(cpu_running);
/*
* "thread" is assumed to be a valid Meta hardware thread ID.
*/
-int boot_secondary(unsigned int thread, struct task_struct *idle)
+static int boot_secondary(unsigned int thread, struct task_struct *idle)
{
u32 val;
@@ -491,7 +491,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
void arch_send_call_function_single_ipi(int cpu)
{
- send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+ send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}
void show_ipi_list(struct seq_file *p)
@@ -517,11 +517,10 @@ static DEFINE_SPINLOCK(stop_lock);
*
* Bit 0 - Inter-processor function call
*/
-static int do_IPI(struct pt_regs *regs)
+static int do_IPI(void)
{
unsigned int cpu = smp_processor_id();
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
- struct pt_regs *old_regs = set_irq_regs(regs);
unsigned long msgs, nextmsg;
int handled = 0;
@@ -546,10 +545,6 @@ static int do_IPI(struct pt_regs *regs)
generic_smp_call_function_interrupt();
break;
- case IPI_CALL_FUNC_SINGLE:
- generic_smp_call_function_single_interrupt();
- break;
-
default:
pr_crit("CPU%u: Unknown IPI message 0x%lx\n",
cpu, nextmsg);
@@ -557,8 +552,6 @@ static int do_IPI(struct pt_regs *regs)
}
}
- set_irq_regs(old_regs);
-
return handled;
}
@@ -624,7 +617,7 @@ static void kick_raise_softirq(cpumask_t callmap, unsigned int irq)
static TBIRES ipi_handler(TBIRES State, int SigNum, int Triggers,
int Inst, PTBI pTBI, int *handled)
{
- *handled = do_IPI((struct pt_regs *)State.Sig.pCtx);
+ *handled = do_IPI();
return State;
}
diff --git a/arch/metag/kernel/topology.c b/arch/metag/kernel/topology.c
index bec3dec..4ba59570 100644
--- a/arch/metag/kernel/topology.c
+++ b/arch/metag/kernel/topology.c
@@ -19,6 +19,7 @@
DEFINE_PER_CPU(struct cpuinfo_metag, cpu_data);
cpumask_t cpu_core_map[NR_CPUS];
+EXPORT_SYMBOL(cpu_core_map);
static cpumask_t cpu_coregroup_map(unsigned int cpu)
{
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index ce0bbf8..a824265 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,4 +1,5 @@
+generic-y += barrier.h
generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
diff --git a/arch/microblaze/include/asm/barrier.h b/arch/microblaze/include/asm/barrier.h
deleted file mode 100644
index df5be3e8..0000000
--- a/arch/microblaze/include/asm/barrier.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_BARRIER_H
-#define _ASM_MICROBLAZE_BARRIER_H
-
-#define nop() asm volatile ("nop")
-
-#define smp_read_barrier_depends() do {} while (0)
-#define read_barrier_depends() do {} while (0)
-
-#define mb() barrier()
-#define rmb() mb()
-#define wmb() mb()
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-
-#endif /* _ASM_MICROBLAZE_BARRIER_H */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 650de39..c93d92b 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -47,6 +47,7 @@ config MIPS
select MODULES_USE_ELF_RELA if MODULES && 64BIT
select CLONE_BACKWARDS
select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_CC_STACKPROTECTOR
menu "Machine selection"
@@ -2322,19 +2323,6 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
-config CC_STACKPROTECTOR
- bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
- help
- This option turns on the -fstack-protector GCC feature. This
- feature puts, at the beginning of functions, a canary value on
- the stack just before the return address, and validates
- the value just before actually returning. Stack based buffer
- overflows (that need to overwrite this return address) now also
- overwrite the canary, which gets detected and the attack is then
- neutralized via a kernel panic.
-
- This feature requires gcc version 4.2 or above.
-
config USE_OF
bool
select OF
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index de300b9..efe50787 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -232,10 +232,6 @@ bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
LDFLAGS += -m $(ld-emul)
-ifdef CONFIG_CC_STACKPROTECTOR
- KBUILD_CFLAGS += -fstack-protector
-endif
-
ifdef CONFIG_MIPS
CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c
index 9a357ff..820b7a3 100644
--- a/arch/mips/ar7/setup.c
+++ b/arch/mips/ar7/setup.c
@@ -92,7 +92,6 @@ void __init plat_mem_setup(void)
_machine_restart = ar7_machine_restart;
_machine_halt = ar7_machine_halt;
pm_power_off = ar7_machine_power_off;
- panic_timeout = 3;
io_base = (unsigned long)ioremap(AR7_REGS_BASE, 0x10000);
if (!io_base)
diff --git a/arch/mips/emma/markeins/setup.c b/arch/mips/emma/markeins/setup.c
index d710058..9100122 100644
--- a/arch/mips/emma/markeins/setup.c
+++ b/arch/mips/emma/markeins/setup.c
@@ -111,9 +111,6 @@ void __init plat_mem_setup(void)
iomem_resource.start = EMMA2RH_IO_BASE;
iomem_resource.end = EMMA2RH_ROM_BASE - 1;
- /* Reboot on panic */
- panic_timeout = 180;
-
markeins_sio_setup();
}
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index f26d8e1..e1aa4e4 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -180,4 +180,19 @@
#define nudge_writes() mb()
#endif
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
#endif /* __ASM_BARRIER_H */
diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h
index c75025f..06b9bc7 100644
--- a/arch/mips/include/asm/cacheops.h
+++ b/arch/mips/include/asm/cacheops.h
@@ -83,6 +83,6 @@
/*
* Loongson2-specific cacheops
*/
-#define Hit_Invalidate_I_Loongson23 0x00
+#define Hit_Invalidate_I_Loongson2 0x00
#endif /* __ASM_CACHEOPS_H */
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 34d1a19..c84cadd 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -165,7 +165,7 @@ static inline void flush_icache_line(unsigned long addr)
__iflush_prologue
switch (boot_cpu_type()) {
case CPU_LOONGSON2:
- cache_op(Hit_Invalidate_I_Loongson23, addr);
+ cache_op(Hit_Invalidate_I_Loongson2, addr);
break;
default:
@@ -219,7 +219,7 @@ static inline void protected_flush_icache_line(unsigned long addr)
{
switch (boot_cpu_type()) {
case CPU_LOONGSON2:
- protected_cache_op(Hit_Invalidate_I_Loongson23, addr);
+ protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
break;
default:
@@ -357,8 +357,8 @@ static inline void invalidate_tcache_page(unsigned long addr)
"i" (op));
/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
-#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
-static inline void blast_##pfx##cache##lsize(void) \
+#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
+static inline void extra##blast_##pfx##cache##lsize(void) \
{ \
unsigned long start = INDEX_BASE; \
unsigned long end = start + current_cpu_data.desc.waysize; \
@@ -376,7 +376,7 @@ static inline void blast_##pfx##cache##lsize(void) \
__##pfx##flush_epilogue \
} \
\
-static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
+static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{ \
unsigned long start = page; \
unsigned long end = page + PAGE_SIZE; \
@@ -391,7 +391,7 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
__##pfx##flush_epilogue \
} \
\
-static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
+static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{ \
unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
unsigned long start = INDEX_BASE + (page & indexmask); \
@@ -410,23 +410,24 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page)
__##pfx##flush_epilogue \
}
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
-
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
+
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
@@ -452,8 +453,8 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \
- protected_, loongson23_)
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
+ protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
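
For reference, the new trailing `extra' parameter of __BUILD_BLAST_CACHE() is simply pasted in front of the generated function names, mirroring what __BUILD_BLAST_CACHE_RANGE() already does. The Loongson-2 instantiation above therefore produces, roughly (illustrative prototypes only, not part of the patch):

static inline void loongson2_blast_icache32(void);
static inline void loongson2_blast_icache32_page(unsigned long page);
static inline void loongson2_blast_icache32_page_indexed(unsigned long page);

An empty `extra' argument keeps the existing blast_icache32() family unchanged, so current callers are unaffected; the c-r4k.c hunks below wire the loongson2_ variants in for CPU_LOONGSON2 only.
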
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 62ffd20..49e572d 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -237,6 +237,8 @@ static void r4k_blast_icache_page_setup(void)
r4k_blast_icache_page = (void *)cache_noop;
else if (ic_lsize == 16)
r4k_blast_icache_page = blast_icache16_page;
+ else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
+ r4k_blast_icache_page = loongson2_blast_icache32_page;
else if (ic_lsize == 32)
r4k_blast_icache_page = blast_icache32_page;
else if (ic_lsize == 64)
@@ -261,6 +263,9 @@ static void r4k_blast_icache_page_indexed_setup(void)
else if (TX49XX_ICACHE_INDEX_INV_WAR)
r4k_blast_icache_page_indexed =
tx49_blast_icache32_page_indexed;
+ else if (current_cpu_type() == CPU_LOONGSON2)
+ r4k_blast_icache_page_indexed =
+ loongson2_blast_icache32_page_indexed;
else
r4k_blast_icache_page_indexed =
blast_icache32_page_indexed;
@@ -284,6 +289,8 @@ static void r4k_blast_icache_setup(void)
r4k_blast_icache = blast_r4600_v1_icache32;
else if (TX49XX_ICACHE_INDEX_INV_WAR)
r4k_blast_icache = tx49_blast_icache32;
+ else if (current_cpu_type() == CPU_LOONGSON2)
+ r4k_blast_icache = loongson2_blast_icache32;
else
r4k_blast_icache = blast_icache32;
} else if (ic_lsize == 64)
@@ -580,11 +587,11 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
else {
switch (boot_cpu_type()) {
case CPU_LOONGSON2:
- protected_blast_icache_range(start, end);
+ protected_loongson2_blast_icache_range(start, end);
break;
default:
- protected_loongson23_blast_icache_range(start, end);
+ protected_blast_icache_range(start, end);
break;
}
}
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index 6d981bb..54e75c7 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -92,7 +92,6 @@ static void __init xlp_init_mem_from_bars(void)
void __init plat_mem_setup(void)
{
- panic_timeout = 5;
_machine_restart = (void (*)(char *))nlm_linux_exit;
_machine_halt = nlm_linux_exit;
pm_power_off = nlm_linux_exit;
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index 214d123..921be5f 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -92,7 +92,6 @@ static void nlm_linux_exit(void)
void __init plat_mem_setup(void)
{
- panic_timeout = 5;
_machine_restart = (void (*)(char *))nlm_linux_exit;
_machine_halt = nlm_linux_exit;
pm_power_off = nlm_linux_exit;
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
index 41707a2..3462c83 100644
--- a/arch/mips/sibyte/swarm/setup.c
+++ b/arch/mips/sibyte/swarm/setup.c
@@ -134,8 +134,6 @@ void __init plat_mem_setup(void)
#error invalid SiByte board configuration
#endif
- panic_timeout = 5; /* For debug. */
-
board_be_handler = swarm_be_handler;
if (xicor_probe())
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index 74742dc..032143e 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -1,4 +1,5 @@
+generic-y += barrier.h
generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
diff --git a/arch/mn10300/include/asm/barrier.h b/arch/mn10300/include/asm/barrier.h
deleted file mode 100644
index 2bd97a5..0000000
--- a/arch/mn10300/include/asm/barrier.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* MN10300 memory barrier definitions
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef _ASM_BARRIER_H
-#define _ASM_BARRIER_H
-
-#define nop() asm volatile ("nop")
-
-#define mb() asm volatile ("": : :"memory")
-#define rmb() mb()
-#define wmb() asm volatile ("": : :"memory")
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
-#else /* CONFIG_SMP */
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#endif /* CONFIG_SMP */
-
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-#define read_barrier_depends() do {} while (0)
-#define smp_read_barrier_depends() do {} while (0)
-
-#endif /* _ASM_BARRIER_H */
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index a603b9e..34b0be4 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,4 +1,5 @@
+generic-y += barrier.h
generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \
segment.h topology.h vga.h device.h percpu.h hw_irq.h mutex.h \
div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
deleted file mode 100644
index e77d834..0000000
--- a/arch/parisc/include/asm/barrier.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __PARISC_BARRIER_H
-#define __PARISC_BARRIER_H
-
-/*
-** This is simply the barrier() macro from linux/kernel.h but when serial.c
-** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h
-** hasn't yet been included yet so it fails, thus repeating the macro here.
-**
-** PA-RISC architecture allows for weakly ordered memory accesses although
-** none of the processors use it. There is a strong ordered bit that is
-** set in the O-bit of the page directory entry. Operating systems that
-** can not tolerate out of order accesses should set this bit when mapping
-** pages. The O-bit of the PSW should also be set to 1 (I don't believe any
-** of the processor implemented the PSW O-bit). The PCX-W ERS states that
-** the TLB O-bit is not implemented so the page directory does not need to
-** have the O-bit set when mapping pages (section 3.1). This section also
-** states that the PSW Y, Z, G, and O bits are not implemented.
-** So it looks like nothing needs to be done for parisc-linux (yet).
-** (thanks to chada for the above comment -ggg)
-**
-** The __asm__ op below simple prevents gcc/ld from reordering
-** instructions across the mb() "call".
-*/
-#define mb() __asm__ __volatile__("":::"memory") /* barrier() */
-#define rmb() mb()
-#define wmb() mb()
-#define smp_mb() mb()
-#define smp_rmb() mb()
-#define smp_wmb() mb()
-#define smp_read_barrier_depends() do { } while(0)
-#define read_barrier_depends() do { } while(0)
-
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-
-#endif /* __PARISC_BARRIER_H */
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index f0e2784..2f9b751 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
void mark_rodata_ro(void);
#endif
-#ifdef CONFIG_PA8X00
-/* Only pa8800, pa8900 needs this */
-
#include <asm/kmap_types.h>
#define ARCH_HAS_KMAP
-void kunmap_parisc(void *addr);
-
static inline void *kmap(struct page *page)
{
might_sleep();
+ flush_dcache_page(page);
return page_address(page);
}
static inline void kunmap(struct page *page)
{
- kunmap_parisc(page_address(page));
+ flush_kernel_dcache_page_addr(page_address(page));
}
static inline void *kmap_atomic(struct page *page)
{
pagefault_disable();
+ flush_dcache_page(page);
return page_address(page);
}
static inline void __kunmap_atomic(void *addr)
{
- kunmap_parisc(addr);
+ flush_kernel_dcache_page_addr(addr);
pagefault_enable();
}
#define kmap_atomic_prot(page, prot) kmap_atomic(page)
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
-#endif
#endif /* _PARISC_CACHEFLUSH_H */
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index b7adb2a..c53fc63 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -28,9 +28,8 @@ struct page;
void clear_page_asm(void *page);
void copy_page_asm(void *to, void *from);
-void clear_user_page(void *vto, unsigned long vaddr, struct page *pg);
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
- struct page *pg);
+#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
+#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
/* #define CONFIG_PARISC_TMPALIAS */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index f33113a..70b3674 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -75,6 +75,6 @@
#define SO_BUSY_POLL 0x4027
-#define SO_MAX_PACING_RATE 0x4048
+#define SO_MAX_PACING_RATE 0x4028
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index c035673..a725455 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr)
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
-void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
-{
- clear_page_asm(vto);
- if (!parisc_requires_coherency())
- flush_kernel_dcache_page_asm(vto);
-}
-EXPORT_SYMBOL(clear_user_page);
-
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
- struct page *pg)
-{
- /* Copy using kernel mapping. No coherency is needed
- (all in kmap/kunmap) on machines that don't support
- non-equivalent aliasing. However, the `from' page
- needs to be flushed before it can be accessed through
- the kernel mapping. */
- preempt_disable();
- flush_dcache_page_asm(__pa(vfrom), vaddr);
- preempt_enable();
- copy_page_asm(vto, vfrom);
- if (!parisc_requires_coherency())
- flush_kernel_dcache_page_asm(vto);
-}
-EXPORT_SYMBOL(copy_user_page);
-
-#ifdef CONFIG_PA8X00
-
-void kunmap_parisc(void *addr)
-{
- if (parisc_requires_coherency())
- flush_kernel_dcache_page_addr(addr);
-}
-EXPORT_SYMBOL(kunmap_parisc);
-#endif
-
void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
unsigned long flags;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b44b52c..b2be8e8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -147,6 +147,10 @@ config EARLY_PRINTK
bool
default y
+config PANIC_TIMEOUT
+ int
+ default 180
+
config COMPAT
bool
default y if PPC64
diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts
index 4177b62..a618dfc 100644
--- a/arch/powerpc/boot/dts/mpc5125twr.dts
+++ b/arch/powerpc/boot/dts/mpc5125twr.dts
@@ -58,7 +58,6 @@
compatible = "fsl,mpc5121-immr";
#address-cells = <1>;
#size-cells = <1>;
- #interrupt-cells = <2>;
ranges = <0x0 0x80000000 0x400000>;
reg = <0x80000000 0x400000>;
bus-frequency = <66000000>; // 66 MHz ips bus
@@ -189,6 +188,10 @@
reg = <0xA000 0x1000>;
};
+ // disable USB1 port
+ // TODO:
+ // correct pinmux config and fix USB3320 ulpi dependency
+ // before re-enabling it
usb@3000 {
compatible = "fsl,mpc5121-usb2-dr";
reg = <0x3000 0x400>;
@@ -197,6 +200,7 @@
interrupts = <43 0x8>;
dr_mode = "host";
phy_type = "ulpi";
+ status = "disabled";
};
// 5125 PSCs are not 52xx or 5121 PSC compatible
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index ae78225..f89da80 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -45,11 +45,15 @@
# define SMPWMB eieio
#endif
+#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+
#define smp_mb() mb()
-#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define smp_rmb() __lwsync()
#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#define smp_read_barrier_depends() read_barrier_depends()
#else
+#define __lwsync() barrier()
+
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
@@ -65,4 +69,19 @@
#define data_barrier(x) \
asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ __lwsync(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ __lwsync(); \
+ ___p1; \
+})
+
#endif /* _ASM_POWERPC_BARRIER_H */
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 894662a..243ce69 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -284,7 +284,7 @@ do_kvm_##n: \
subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
beq- 1f; \
ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
-1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
+1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
blt+ cr1,3f; /* abort if it is */ \
li r1,(n); /* will be reloaded later */ \
sth r1,PACA_TRAP_SAVE(r13); \
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 703a841..11ba86e 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -26,6 +26,7 @@ extern void reloc_got2(unsigned long);
void check_for_initrd(void);
void do_init_bootmem(void);
void setup_panic(void);
+#define ARCH_PANIC_TIMEOUT 180
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 5f54a74..f6e78d6 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -28,6 +28,8 @@
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
+#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
+
#define arch_spin_is_locked(x) ((x)->slock != 0)
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h
index 5f1b1e3..8296381 100644
--- a/arch/powerpc/include/asm/unaligned.h
+++ b/arch/powerpc/include/asm/unaligned.h
@@ -4,13 +4,18 @@
#ifdef __KERNEL__
/*
- * The PowerPC can do unaligned accesses itself in big endian mode.
+ * The PowerPC can do unaligned accesses itself based on its endian mode.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
+#ifdef __LITTLE_ENDIAN__
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
+#else
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
+#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UNALIGNED_H */
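
With this change get_unaligned()/put_unaligned() follow the kernel's build endianness instead of being hard-wired to big-endian, so they stay correct for native-endian in-memory data on little-endian builds; fixed-format fields (e.g. network byte order) should still use the explicitly sized accessors. A kernel-style sketch (illustrative names, assuming <asm/unaligned.h> and <linux/types.h>; `p' points into a possibly misaligned buffer):

static u32 read_fields(const void *p, u32 *wire)
{
	*wire = get_unaligned_be32(p);        /* always big-endian on the wire */
	return get_unaligned((const u32 *)p); /* host (build) byte order */
}
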
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index 75c6ecd..7422a99 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -36,9 +36,8 @@ typedef ppc_opcode_t uprobe_opcode_t;
struct arch_uprobe {
union {
- u8 insn[MAX_UINSN_BYTES];
- u8 ixol[MAX_UINSN_BYTES];
- u32 ainsn;
+ u32 insn;
+ u32 ixol;
};
};
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 2ae41ab..4f0946d 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1)
* of the function that the cpu should jump to to continue
* initialization.
*/
+ .balign 8
.globl __secondary_hold_spinloop
__secondary_hold_spinloop:
.llong 0x0
@@ -470,6 +471,7 @@ _STATIC(__after_prom_start)
mtctr r8
bctr
+.balign 8
p_end: .llong _end - _stext
4: /* Now copy the rest of the kernel up to _end */
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index cb64a6e..078145a 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1986,19 +1986,23 @@ static void __init prom_init_stdout(void)
/* Get the full OF pathname of the stdout device */
memset(path, 0, 256);
call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
- stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
- val = cpu_to_be32(stdout_node);
- prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
- &val, sizeof(val));
prom_printf("OF stdout device is: %s\n", of_stdout_device);
prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
path, strlen(path) + 1);
- /* If it's a display, note it */
- memset(type, 0, sizeof(type));
- prom_getprop(stdout_node, "device_type", type, sizeof(type));
- if (strcmp(type, "display") == 0)
- prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
+ /* instance-to-package fails on PA-Semi */
+ stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
+ if (stdout_node != PROM_ERROR) {
+ val = cpu_to_be32(stdout_node);
+ prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
+ &val, sizeof(val));
+
+ /* If it's a display, note it */
+ memset(type, 0, sizeof(type));
+ prom_getprop(stdout_node, "device_type", type, sizeof(type));
+ if (strcmp(type, "display") == 0)
+ prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
+ }
}
static int __init prom_find_machine_type(void)
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index b903dc5..2b0da27 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -296,9 +296,6 @@ void __init setup_arch(char **cmdline_p)
if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE))
ucache_bsize = icache_bsize = dcache_bsize;
- /* reboot on panic */
- panic_timeout = 180;
-
if (ppc_md.panic)
setup_panic();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4085aaa..856dd4e99 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -588,9 +588,6 @@ void __init setup_arch(char **cmdline_p)
dcache_bsize = ppc64_caches.dline_size;
icache_bsize = ppc64_caches.iline_size;
- /* reboot on panic */
- panic_timeout = 180;
-
if (ppc_md.panic)
setup_panic();
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index 59f419b..003b209 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -186,7 +186,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
* emulate_step() returns 1 if the insn was successfully emulated.
* For all other cases, we need to single-step in hardware.
*/
- ret = emulate_step(regs, auprobe->ainsn);
+ ret = emulate_step(regs, auprobe->insn);
if (ret > 0)
return true;
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index d73a590..596a285 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -9,6 +9,14 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
+#ifdef __BIG_ENDIAN__
+#define sLd sld /* Shift towards low-numbered address. */
+#define sHd srd /* Shift towards high-numbered address. */
+#else
+#define sLd srd /* Shift towards low-numbered address. */
+#define sHd sld /* Shift towards high-numbered address. */
+#endif
+
.align 7
_GLOBAL(__copy_tofrom_user)
BEGIN_FTR_SECTION
@@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */
25: ld r0,8(r4)
- sld r6,r9,r10
+ sLd r6,r9,r10
26: ldu r9,16(r4)
- srd r7,r0,r11
- sld r8,r0,r10
+ sHd r7,r0,r11
+ sLd r8,r0,r10
or r7,r7,r6
blt cr6,79f
27: ld r0,8(r4)
@@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
29: ldu r9,8(r4)
- sld r8,r0,r10
+ sLd r8,r0,r10
addi r3,r3,-8
blt cr6,5f
30: ld r0,8(r4)
- srd r12,r9,r11
- sld r6,r9,r10
+ sHd r12,r9,r11
+ sLd r6,r9,r10
31: ldu r9,16(r4)
or r12,r8,r12
- srd r7,r0,r11
- sld r8,r0,r10
+ sHd r7,r0,r11
+ sLd r8,r0,r10
addi r3,r3,16
beq cr6,78f
1: or r7,r7,r6
32: ld r0,8(r4)
76: std r12,8(r3)
-2: srd r12,r9,r11
- sld r6,r9,r10
+2: sHd r12,r9,r11
+ sLd r6,r9,r10
33: ldu r9,16(r4)
or r12,r8,r12
77: stdu r7,16(r3)
- srd r7,r0,r11
- sld r8,r0,r10
+ sHd r7,r0,r11
+ sLd r8,r0,r10
bdnz 1b
78: std r12,8(r3)
or r7,r7,r6
79: std r7,16(r3)
-5: srd r12,r9,r11
+5: sHd r12,r9,r11
or r12,r8,r12
80: std r12,24(r3)
bne 6f
@@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr
6: cmpwi cr1,r5,8
addi r3,r3,32
- sld r9,r9,r10
+ sLd r9,r9,r10
ble cr1,7f
34: ld r0,8(r4)
- srd r7,r0,r11
+ sHd r7,r0,r11
or r9,r7,r9
7:
bf cr7*4+1,1f
+#ifdef __BIG_ENDIAN__
rotldi r9,r9,32
+#endif
94: stw r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+ rotrdi r9,r9,32
+#endif
addi r3,r3,4
1: bf cr7*4+2,2f
+#ifdef __BIG_ENDIAN__
rotldi r9,r9,16
+#endif
95: sth r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+ rotrdi r9,r9,16
+#endif
addi r3,r3,2
2: bf cr7*4+3,3f
+#ifdef __BIG_ENDIAN__
rotldi r9,r9,8
+#endif
96: stb r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+ rotrdi r9,r9,8
+#endif
3: li r3,0
blr
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index ac3c2a1..555034f 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -223,10 +223,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
}
PPC_DIVWU(r_A, r_A, r_X);
break;
- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
+ case BPF_S_ALU_DIV_K: /* A /= K */
+ if (K == 1)
+ break;
PPC_LI32(r_scratch1, K);
- /* Top 32 bits of 64bit result -> A */
- PPC_MULHWU(r_A, r_A, r_scratch1);
+ PPC_DIVWU(r_A, r_A, r_scratch1);
break;
case BPF_S_ALU_AND_X:
ctx->seen |= SEEN_XREG;
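
For context: the removed sequence approximated A / K by multiplying A with a precomputed reciprocal of K and keeping the upper 32 bits of the 64-bit product; that approximation is not exact for all operand pairs, so the JIT now emits a true unsigned divide and skips it entirely for K == 1 (A / 1 == A). A stand-alone illustration of the old scheme (the helper formulas below are paraphrased from the pre-3.14 reciprocal_div code, not quoted from it):

#include <stdint.h>
#include <stdio.h>

static uint32_t old_reciprocal_value(uint32_t k)
{
	/* Roughly ceil(2^32 / k), as the old reciprocal_value() computed. */
	return (uint32_t)((((uint64_t)1 << 32) + k - 1) / k);
}

static uint32_t old_reciprocal_divide(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32); /* high word of a * r */
}

int main(void)
{
	uint32_t a = 123456789, k = 7;
	uint32_t approx = old_reciprocal_divide(a, old_reciprocal_value(k));

	/* Usually equal, but the left-hand side is only an approximation;
	 * the JITed DIVWU result is always the exact quotient. */
	printf("%u vs %u\n", approx, a / k);
	return 0;
}
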
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index 02245ce..d7ddcee 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -36,7 +36,6 @@
#include "powernv.h"
#include "pci.h"
-static char *hub_diag = NULL;
static int ioda_eeh_nb_init = 0;
static int ioda_eeh_event(struct notifier_block *nb,
@@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
ioda_eeh_nb_init = 1;
}
- /* We needn't HUB diag-data on PHB3 */
- if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
- hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (!hub_diag) {
- pr_err("%s: Out of memory !\n", __func__);
- return -ENOMEM;
- }
- }
-
#ifdef CONFIG_DEBUG_FS
if (phb->dbgfs) {
debugfs_create_file("err_injct_outbound", 0600,
@@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
static void ioda_eeh_hub_diag(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
- struct OpalIoP7IOCErrorData *data;
+ struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
long rc;
- data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag;
- rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE);
+ rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
if (rc != OPAL_SUCCESS) {
pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
__func__, phb->hub_id, rc);
@@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
struct OpalIoPhbErrorCommon *common;
long rc;
- common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
- rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE);
+ rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+ PNV_PCI_DIAG_BUF_SIZE);
if (rc != OPAL_SUCCESS) {
pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
__func__, hose->global_number, rc);
return;
}
+ common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
switch (common->ioType) {
case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
ioda_eeh_p7ioc_phb_diag(hose, common);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 911c24e..1ed8d5f 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -172,11 +172,13 @@ struct pnv_phb {
} ioda;
};
- /* PHB status structure */
+ /* PHB and hub status structure */
union {
unsigned char blob[PNV_PCI_DIAG_BUF_SIZE];
struct OpalIoP7IOCPhbErrorData p7ioc;
+ struct OpalIoP7IOCErrorData hub_diag;
} diag;
+
};
extern struct pci_ops pnv_pci_ops;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index c1f1908..6f76ae4 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -470,7 +470,7 @@ static long pseries_little_endian_exceptions(void)
static void __init pSeries_setup_arch(void)
{
- panic_timeout = 10;
+ set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
/* Discover PIC type and setup ppc_md accordingly */
pseries_discover_pic();
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1e1a03d..e9f3125 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -135,7 +135,6 @@ config S390
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16 if 32BIT
select HAVE_VIRT_CPU_ACCOUNTING
- select INIT_ALL_POSSIBLE
select KTIME_SCALAR if 32BIT
select MODULES_USE_ELF_RELA
select OLD_SIGACTION
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 16760ee..578680f 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -32,4 +32,19 @@
#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
#endif /* __ASM_BARRIER_H */
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 4bf9da0..5d7e8cf 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -38,7 +38,8 @@
#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
- PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME)
+ PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \
+ PSW32_ASC_PRIMARY)
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "s390\0\0\0\0"
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index c879fad..cb700d5 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -56,6 +56,96 @@ struct cpumf_ctr_info {
u32 reserved2[12];
} __packed;
+/* QUERY SAMPLING INFORMATION block */
+struct hws_qsi_info_block { /* Bit(s) */
+ unsigned int b0_13:14; /* 0-13: zeros */
+ unsigned int as:1; /* 14: basic-sampling authorization */
+ unsigned int ad:1; /* 15: diag-sampling authorization */
+ unsigned int b16_21:6; /* 16-21: zeros */
+ unsigned int es:1; /* 22: basic-sampling enable control */
+ unsigned int ed:1; /* 23: diag-sampling enable control */
+ unsigned int b24_29:6; /* 24-29: zeros */
+ unsigned int cs:1; /* 30: basic-sampling activation control */
+ unsigned int cd:1; /* 31: diag-sampling activation control */
+ unsigned int bsdes:16; /* 4-5: size of basic sampling entry */
+ unsigned int dsdes:16; /* 6-7: size of diagnostic sampling entry */
+ unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */
+ unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/
+ unsigned long tear; /* 24-31: TEAR contents */
+ unsigned long dear; /* 32-39: DEAR contents */
+ unsigned int rsvrd0; /* 40-43: reserved */
+ unsigned int cpu_speed; /* 44-47: CPU speed */
+ unsigned long long rsvrd1; /* 48-55: reserved */
+ unsigned long long rsvrd2; /* 56-63: reserved */
+} __packed;
+
+/* SET SAMPLING CONTROLS request block */
+struct hws_lsctl_request_block {
+ unsigned int s:1; /* 0: maximum buffer indicator */
+ unsigned int h:1; /* 1: part. level reserved for VM use*/
+ unsigned long long b2_53:52;/* 2-53: zeros */
+ unsigned int es:1; /* 54: basic-sampling enable control */
+ unsigned int ed:1; /* 55: diag-sampling enable control */
+ unsigned int b56_61:6; /* 56-61: - zeros */
+ unsigned int cs:1; /* 62: basic-sampling activation control */
+ unsigned int cd:1; /* 63: diag-sampling activation control */
+ unsigned long interval; /* 8-15: sampling interval */
+ unsigned long tear; /* 16-23: TEAR contents */
+ unsigned long dear; /* 24-31: DEAR contents */
+ /* 32-63: */
+ unsigned long rsvrd1; /* reserved */
+ unsigned long rsvrd2; /* reserved */
+ unsigned long rsvrd3; /* reserved */
+ unsigned long rsvrd4; /* reserved */
+} __packed;
+
+struct hws_basic_entry {
+ unsigned int def:16; /* 0-15 Data Entry Format */
+ unsigned int R:4; /* 16-19 reserved */
+ unsigned int U:4; /* 20-23 Number of unique instruct. */
+ unsigned int z:2; /* zeros */
+ unsigned int T:1; /* 26 PSW DAT mode */
+ unsigned int W:1; /* 27 PSW wait state */
+ unsigned int P:1; /* 28 PSW Problem state */
+ unsigned int AS:2; /* 29-30 PSW address-space control */
+ unsigned int I:1; /* 31 entry valid or invalid */
+ unsigned int:16;
+ unsigned int prim_asn:16; /* primary ASN */
+ unsigned long long ia; /* Instruction Address */
+ unsigned long long gpp; /* Guest Program Parameter */
+ unsigned long long hpp; /* Host Program Parameter */
+} __packed;
+
+struct hws_diag_entry {
+ unsigned int def:16; /* 0-15 Data Entry Format */
+ unsigned int R:14; /* 16-19 and 20-30 reserved */
+ unsigned int I:1; /* 31 entry valid or invalid */
+ u8 data[]; /* Machine-dependent sample data */
+} __packed;
+
+struct hws_combined_entry {
+ struct hws_basic_entry basic; /* Basic-sampling data entry */
+ struct hws_diag_entry diag; /* Diagnostic-sampling data entry */
+} __packed;
+
+struct hws_trailer_entry {
+ union {
+ struct {
+ unsigned int f:1; /* 0 - Block Full Indicator */
+ unsigned int a:1; /* 1 - Alert request control */
+ unsigned int t:1; /* 2 - Timestamp format */
+ unsigned long long:61; /* 3 - 63: Reserved */
+ };
+ unsigned long long flags; /* 0 - 63: All indicators */
+ };
+ unsigned long long overflow; /* 64 - sample Overflow count */
+ unsigned char timestamp[16]; /* 16 - 31 timestamp */
+ unsigned long long reserved1; /* 32 -Reserved */
+ unsigned long long reserved2; /* */
+ unsigned long long progusage1; /* 48 - reserved for programming use */
+ unsigned long long progusage2; /* */
+} __packed;
+
/* Query counter information */
static inline int qctri(struct cpumf_ctr_info *info)
{
@@ -99,4 +189,95 @@ static inline int ecctr(u64 ctr, u64 *val)
return cc;
}
+/* Query sampling information */
+static inline int qsi(struct hws_qsi_info_block *info)
+{
+ int cc;
+ cc = 1;
+
+ asm volatile(
+ "0: .insn s,0xb2860000,0(%1)\n"
+ "1: lhi %0,0\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "=d" (cc), "+a" (info)
+ : "m" (*info)
+ : "cc", "memory");
+
+ return cc ? -EINVAL : 0;
+}
+
+/* Load sampling controls */
+static inline int lsctl(struct hws_lsctl_request_block *req)
+{
+ int cc;
+
+ cc = 1;
+ asm volatile(
+ "0: .insn s,0xb2870000,0(%1)\n"
+ "1: ipm %0\n"
+ " srl %0,28\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "+d" (cc), "+a" (req)
+ : "m" (*req)
+ : "cc", "memory");
+
+ return cc ? -EINVAL : 0;
+}
+
+/* Sampling control helper functions */
+
+#include <linux/time.h>
+
+static inline unsigned long freq_to_sample_rate(struct hws_qsi_info_block *qsi,
+ unsigned long freq)
+{
+ return (USEC_PER_SEC / freq) * qsi->cpu_speed;
+}
+
+static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
+ unsigned long rate)
+{
+ return USEC_PER_SEC * qsi->cpu_speed / rate;
+}
+
+#define SDB_TE_ALERT_REQ_MASK 0x4000000000000000UL
+#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL
+
+/* Return TOD timestamp contained in an trailer entry */
+static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
+{
+ /* TOD in STCKE format */
+ if (te->t)
+ return *((unsigned long long *) &te->timestamp[1]);
+
+ /* TOD in STCK format */
+ return *((unsigned long long *) &te->timestamp[0]);
+}
+
+/* Return pointer to trailer entry of an sample data block */
+static inline unsigned long *trailer_entry_ptr(unsigned long v)
+{
+ void *ret;
+
+ ret = (void *) v;
+ ret += PAGE_SIZE;
+ ret -= sizeof(struct hws_trailer_entry);
+
+ return (unsigned long *) ret;
+}
+
+/* Return if the entry in the sample data block table (sdbt)
+ * is a link to the next sdbt */
+static inline int is_link_entry(unsigned long *s)
+{
+ return *s & 0x1ul ? 1 : 0;
+}
+
+/* Return pointer to the linked sdbt */
+static inline unsigned long *get_next_sdbt(unsigned long *s)
+{
+ return (unsigned long *) (*s & ~0x1ul);
+}
#endif /* _ASM_S390_CPU_MF_H */
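
A hedged sketch (not from the patch) of how the new query and conversion helpers might be combined to turn a requested sampling frequency into a hardware sampling interval; the function name and error handling are illustrative, and buffer setup via lsctl() is omitted:

static int pick_sampling_interval(unsigned long freq, unsigned long *interval)
{
	struct hws_qsi_info_block si;

	if (qsi(&si))                   /* QUERY SAMPLING INFORMATION */
		return -EINVAL;
	if (!si.as)                     /* basic sampling not authorized */
		return -EOPNOTSUPP;

	/* Scale Hz by the reported CPU speed and keep it within range. */
	*interval = clamp(freq_to_sample_rate(&si, freq),
			  si.min_sampl_rate, si.max_sampl_rate);
	return 0;
}
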
diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h
index 7e1c917..09d1dd4 100644
--- a/arch/s390/include/asm/css_chars.h
+++ b/arch/s390/include/asm/css_chars.h
@@ -29,6 +29,8 @@ struct css_general_char {
u32 fcx : 1; /* bit 88 */
u32 : 19;
u32 alt_ssi : 1; /* bit 108 */
+ u32:1;
+ u32 narf:1; /* bit 110 */
} __packed;
extern struct css_general_char css_general_characteristics;
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index c129ab2..2583466 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -144,6 +144,7 @@ int clp_disable_fh(struct zpci_dev *);
void zpci_event_error(void *);
void zpci_event_availability(void *);
void zpci_rescan(void);
+bool zpci_is_enabled(void);
#else /* CONFIG_PCI */
static inline void zpci_event_error(void *e) {}
static inline void zpci_event_availability(void *e) {}
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 1141fb3..159a8ec 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -1,21 +1,40 @@
/*
* Performance event support - s390 specific definitions.
*
- * Copyright IBM Corp. 2009, 2012
+ * Copyright IBM Corp. 2009, 2013
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
-#include <asm/cpu_mf.h>
+#ifndef _ASM_S390_PERF_EVENT_H
+#define _ASM_S390_PERF_EVENT_H
-/* CPU-measurement counter facility */
-#define PERF_CPUM_CF_MAX_CTR 256
+#ifdef CONFIG_64BIT
+
+#include <linux/perf_event.h>
+#include <linux/device.h>
+#include <asm/cpu_mf.h>
/* Per-CPU flags for PMU states */
#define PMU_F_RESERVED 0x1000
#define PMU_F_ENABLED 0x2000
+#define PMU_F_IN_USE 0x4000
+#define PMU_F_ERR_IBE 0x0100
+#define PMU_F_ERR_LSDA 0x0200
+#define PMU_F_ERR_MASK (PMU_F_ERR_IBE|PMU_F_ERR_LSDA)
+
+/* Perf defintions for PMU event attributes in sysfs */
+extern __init const struct attribute_group **cpumf_cf_event_group(void);
+extern ssize_t cpumf_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page);
+#define EVENT_VAR(_cat, _name) event_attr_##_cat##_##_name
+#define EVENT_PTR(_cat, _name) (&EVENT_VAR(_cat, _name).attr.attr)
+
+#define CPUMF_EVENT_ATTR(cat, name, id) \
+ PMU_EVENT_ATTR(name, EVENT_VAR(cat, name), id, cpumf_events_sysfs_show)
+#define CPUMF_EVENT_PTR(cat, name) EVENT_PTR(cat, name)
-#ifdef CONFIG_64BIT
/* Perf callbacks */
struct pt_regs;
@@ -23,4 +42,55 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)
+/* Perf pt_regs extension for sample-data-entry indicators */
+struct perf_sf_sde_regs {
+ unsigned char in_guest:1; /* guest sample */
+ unsigned long reserved:63; /* reserved */
+};
+
+/* Perf PMU definitions for the counter facility */
+#define PERF_CPUM_CF_MAX_CTR 256
+
+/* Perf PMU definitions for the sampling facility */
+#define PERF_CPUM_SF_MAX_CTR 2
+#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
+#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
+#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
+#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
+#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
+ PERF_CPUM_SF_DIAG_MODE)
+#define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */
+
+#define REG_NONE 0
+#define REG_OVERFLOW 1
+#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
+#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
+#define RAWSAMPLE_REG(hwc) ((hwc)->config)
+#define TEAR_REG(hwc) ((hwc)->last_tag)
+#define SAMPL_RATE(hwc) ((hwc)->event_base)
+#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
+#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
+#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
+
+/* Structure for sampling data entries to be passed as perf raw sample data
+ * to user space. Note that raw sample data must be aligned and, thus, might
+ * be padded with zeros.
+ */
+struct sf_raw_sample {
+#define SF_RAW_SAMPLE_BASIC PERF_CPUM_SF_BASIC_MODE
+#define SF_RAW_SAMPLE_DIAG PERF_CPUM_SF_DIAG_MODE
+ u64 format;
+ u32 size; /* Size of sf_raw_sample */
+ u16 bsdes; /* Basic-sampling data entry size */
+ u16 dsdes; /* Diagnostic-sampling data entry size */
+ struct hws_basic_entry basic; /* Basic-sampling data entry */
+ struct hws_diag_entry diag; /* Diagnostic-sampling data entry */
+ u8 padding[]; /* Padding to next multiple of 8 */
+} __packed;
+
+/* Perf hardware reserve and release functions */
+int perf_reserve_sampling(void);
+void perf_release_sampling(void);
+
#endif /* CONFIG_64BIT */
+#endif /* _ASM_S390_PERF_EVENT_H */
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 57d0d7e..d786c63 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -336,7 +336,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
#define QDIO_FLAG_CLEANUP_USING_HALT 0x02
/**
- * struct qdio_initialize - qdio initalization data
+ * struct qdio_initialize - qdio initialization data
* @cdev: associated ccw device
* @q_format: queue format
* @adapter_name: name for the adapter
@@ -378,6 +378,34 @@ struct qdio_initialize {
struct qdio_outbuf_state *output_sbal_state_array;
};
+/**
+ * enum qdio_brinfo_entry_type - type of address entry for qdio_brinfo_desc()
+ * @l3_ipv6_addr: entry contains IPv6 address
+ * @l3_ipv4_addr: entry contains IPv4 address
+ * @l2_addr_lnid: entry contains MAC address and VLAN ID
+ */
+enum qdio_brinfo_entry_type {l3_ipv6_addr, l3_ipv4_addr, l2_addr_lnid};
+
+/**
+ * struct qdio_brinfo_entry_XXX - Address entry for qdio_brinfo_desc()
+ * @nit: Network interface token
+ * @addr: Address of one of the three types
+ *
+ * The struct is passed to the callback function by qdio_brinfo_desc()
+ */
+struct qdio_brinfo_entry_l3_ipv6 {
+ u64 nit;
+ struct { unsigned char _s6_addr[16]; } addr;
+} __packed;
+struct qdio_brinfo_entry_l3_ipv4 {
+ u64 nit;
+ struct { uint32_t _s_addr; } addr;
+} __packed;
+struct qdio_brinfo_entry_l2 {
+ u64 nit;
+ struct { u8 mac[6]; u16 lnid; } addr_lnid;
+} __packed;
+
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
#define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */
#define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */
@@ -399,5 +427,10 @@ extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
extern int qdio_shutdown(struct ccw_device *, int);
extern int qdio_free(struct ccw_device *);
extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
+extern int qdio_pnso_brinfo(struct subchannel_id schid,
+ int cnc, u16 *response,
+ void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
+ void *entry),
+ void *priv);
#endif /* __QDIO_H__ */
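
The new qdio_pnso_brinfo() interface reports bridge-port address entries through a caller-supplied callback; the entry pointer is interpreted according to the type enum using the structs declared above. A sketch of such a callback (not part of the patch, names illustrative):

static void brinfo_cb(void *priv, enum qdio_brinfo_entry_type type,
		      void *entry)
{
	switch (type) {
	case l2_addr_lnid: {
		struct qdio_brinfo_entry_l2 *e = entry;

		pr_info("MAC %pM VLAN %u\n", e->addr_lnid.mac,
			e->addr_lnid.lnid);
		break;
	}
	case l3_ipv4_addr: {
		struct qdio_brinfo_entry_l3_ipv4 *e = entry;

		pr_info("IPv4 %pI4\n", &e->addr._s_addr);
		break;
	}
	case l3_ipv6_addr: {
		struct qdio_brinfo_entry_l3_ipv6 *e = entry;

		pr_info("IPv6 %pI6\n", e->addr._s6_addr);
		break;
	}
	}
}
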
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 2f39095..220e171 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -52,8 +52,8 @@ int sclp_chp_configure(struct chp_id chpid);
int sclp_chp_deconfigure(struct chp_id chpid);
int sclp_chp_read_info(struct sclp_chp_info *info);
void sclp_get_ipl_info(struct sclp_ipl_info *info);
-bool sclp_has_linemode(void);
-bool sclp_has_vt220(void);
+bool __init sclp_has_linemode(void);
+bool __init sclp_has_vt220(void);
int sclp_pci_configure(u32 fid);
int sclp_pci_deconfigure(u32 fid);
int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index ac9bed8..1607793 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -31,6 +31,7 @@ extern void smp_yield(void);
extern void smp_stop_cpu(void);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
+extern void smp_fill_possible_mask(void);
#else /* CONFIG_SMP */
@@ -50,6 +51,7 @@ static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_yield(void) { }
static inline void smp_stop_cpu(void) { }
+static inline void smp_fill_possible_mask(void) { }
#endif /* CONFIG_SMP */
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index e83fc11..f2b18ea 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -154,6 +154,67 @@ struct ica_xcRB {
unsigned short priority_window;
unsigned int status;
} __attribute__((packed));
+
+/**
+ * struct ep11_cprb - EP11 connectivity programming request block
+ * @cprb_len: CPRB header length [0x0020]
+ * @cprb_ver_id: CPRB version id. [0x04]
+ * @pad_000: Alignment pad bytes
+ * @flags: Admin cmd [0x80] or functional cmd [0x00]
+ * @func_id: Function id / subtype [0x5434]
+ * @source_id: Source id [originator id]
+ * @target_id: Target id [usage/ctrl domain id]
+ * @ret_code: Return code
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ * @payload_len: Payload length
+ */
+struct ep11_cprb {
+ uint16_t cprb_len;
+ unsigned char cprb_ver_id;
+ unsigned char pad_000[2];
+ unsigned char flags;
+ unsigned char func_id[2];
+ uint32_t source_id;
+ uint32_t target_id;
+ uint32_t ret_code;
+ uint32_t reserved1;
+ uint32_t reserved2;
+ uint32_t payload_len;
+} __attribute__((packed));
+
+/**
+ * struct ep11_target_dev - EP11 target device list
+ * @ap_id: AP device id
+ * @dom_id: Usage domain id
+ */
+struct ep11_target_dev {
+ uint16_t ap_id;
+ uint16_t dom_id;
+};
+
+/**
+ * struct ep11_urb - EP11 user request block
+ * @targets_num: Number of target adapters
+ * @targets: Addr to target adapter list
+ * @weight: Level of request priority
+ * @req_no: Request id/number
+ * @req_len: Request length
+ * @req: Addr to request block
+ * @resp_len: Response length
+ * @resp: Addr to response block
+ */
+struct ep11_urb {
+ uint16_t targets_num;
+ uint64_t targets;
+ uint64_t weight;
+ uint64_t req_no;
+ uint64_t req_len;
+ uint64_t req;
+ uint64_t resp_len;
+ uint64_t resp;
+} __attribute__((packed));
+
#define AUTOSELECT ((unsigned int)0xFFFFFFFF)
#define ZCRYPT_IOCTL_MAGIC 'z'
@@ -183,6 +244,9 @@ struct ica_xcRB {
* ZSECSENDCPRB
* Send an arbitrary CPRB to a crypto card.
*
+ * ZSENDEP11CPRB
+ * Send an arbitrary EP11 CPRB to an EP11 coprocessor crypto card.
+ *
* Z90STAT_STATUS_MASK
* Return an 64 element array of unsigned chars for the status of
* all devices.
@@ -256,6 +320,7 @@ struct ica_xcRB {
#define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0)
#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0)
#define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0)
+#define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0)
/* New status calls */
#define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int)
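
The new ZSENDEP11CPRB ioctl takes a struct ep11_urb describing the request and response buffers plus a target-adapter list. A hedged user-space sketch of filling one in; the include path and the device node (commonly /dev/z90crypt) are assumptions, not taken from this patch:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/zcrypt.h>   /* struct ep11_urb, ep11_target_dev, ZSENDEP11CPRB */

static int send_ep11_cprb(int fd, void *req, uint64_t req_len,
			  void *resp, uint64_t resp_len,
			  struct ep11_target_dev *tgts, uint16_t ntgts)
{
	struct ep11_urb urb = {
		.targets_num = ntgts,
		.targets     = (uint64_t)(uintptr_t)tgts,
		.req_len     = req_len,
		.req         = (uint64_t)(uintptr_t)req,
		.resp_len    = resp_len,
		.resp        = (uint64_t)(uintptr_t)resp,
	};

	/* fd is assumed to be an open handle on the zcrypt device node. */
	return ioctl(fd, ZSENDEP11CPRB, &urb);
}
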
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 2403303..1b3ac09 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -60,7 +60,8 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
ifdef CONFIG_64BIT
-obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \
+ perf_cpum_cf_events.o
obj-y += runtime_instr.o cache.o
endif
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 95e7ba0..8b84bc3 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -412,8 +412,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
} else {
regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE;
- err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
- (u16 __force __user *)(frame->retcode));
+ if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
+ (u16 __force __user *)(frame->retcode)))
+ goto give_sigsegv;
}
/* Set up backchain. */
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index e5b43c9..384e609 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -74,7 +74,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
.endm
.macro LPP newpp
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+#if IS_ENABLED(CONFIG_KVM)
tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
jz .+8
.insn s,0xb2800000,\newpp
@@ -82,7 +82,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
.endm
.macro HANDLE_SIE_INTERCEPT scratch,reason
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+#if IS_ENABLED(CONFIG_KVM)
tmhh %r8,0x0001 # interrupting from user ?
jnz .+62
lgr \scratch,%r9
@@ -946,7 +946,7 @@ cleanup_idle_insn:
.quad __critical_end - __critical_start
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+#if IS_ENABLED(CONFIG_KVM)
/*
* sie64a calling convention:
* %r2 pointer to sie control block
@@ -975,7 +975,7 @@ sie_done:
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions beween sie64a and sie_done should not cause program
+# instructions between sie64a and sie_done should not cause program
# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
# See also HANDLE_SIE_INTERCEPT
rewind_pad:
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 1105502..f51214c 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -680,6 +680,7 @@ static int __init cpumf_pmu_init(void)
goto out;
}
+ cpumf_pmu.attr_groups = cpumf_cf_event_group();
rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
if (rc) {
pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
new file mode 100644
index 0000000..4554a4b
--- /dev/null
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -0,0 +1,322 @@
+/*
+ * Perf PMU sysfs events attributes for available CPU-measurement counters
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/perf_event.h>
+
+
+/* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */
+
+CPUMF_EVENT_ATTR(cf, CPU_CYCLES, 0x0000);
+CPUMF_EVENT_ATTR(cf, INSTRUCTIONS, 0x0001);
+CPUMF_EVENT_ATTR(cf, L1I_DIR_WRITES, 0x0002);
+CPUMF_EVENT_ATTR(cf, L1I_PENALTY_CYCLES, 0x0003);
+CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_CPU_CYCLES, 0x0020);
+CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
+CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_DIR_WRITES, 0x0022);
+CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES, 0x0023);
+CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_DIR_WRITES, 0x0024);
+CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES, 0x0025);
+CPUMF_EVENT_ATTR(cf, L1D_DIR_WRITES, 0x0004);
+CPUMF_EVENT_ATTR(cf, L1D_PENALTY_CYCLES, 0x0005);
+CPUMF_EVENT_ATTR(cf, PRNG_FUNCTIONS, 0x0040);
+CPUMF_EVENT_ATTR(cf, PRNG_CYCLES, 0x0041);
+CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_FUNCTIONS, 0x0042);
+CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_CYCLES, 0x0043);
+CPUMF_EVENT_ATTR(cf, SHA_FUNCTIONS, 0x0044);
+CPUMF_EVENT_ATTR(cf, SHA_CYCLES, 0x0045);
+CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_FUNCTIONS, 0x0046);
+CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_CYCLES, 0x0047);
+CPUMF_EVENT_ATTR(cf, DEA_FUNCTIONS, 0x0048);
+CPUMF_EVENT_ATTR(cf, DEA_CYCLES, 0x0049);
+CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_FUNCTIONS, 0x004a);
+CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_CYCLES, 0x004b);
+CPUMF_EVENT_ATTR(cf, AES_FUNCTIONS, 0x004c);
+CPUMF_EVENT_ATTR(cf, AES_CYCLES, 0x004d);
+CPUMF_EVENT_ATTR(cf, AES_BLOCKED_FUNCTIONS, 0x004e);
+CPUMF_EVENT_ATTR(cf, AES_BLOCKED_CYCLES, 0x004f);
+CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080);
+CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081);
+CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082);
+CPUMF_EVENT_ATTR(cf_z10, L1D_L3_LOCAL_WRITES, 0x0083);
+CPUMF_EVENT_ATTR(cf_z10, L1I_L3_REMOTE_WRITES, 0x0084);
+CPUMF_EVENT_ATTR(cf_z10, L1D_L3_REMOTE_WRITES, 0x0085);
+CPUMF_EVENT_ATTR(cf_z10, L1D_LMEM_SOURCED_WRITES, 0x0086);
+CPUMF_EVENT_ATTR(cf_z10, L1I_LMEM_SOURCED_WRITES, 0x0087);
+CPUMF_EVENT_ATTR(cf_z10, L1D_RO_EXCL_WRITES, 0x0088);
+CPUMF_EVENT_ATTR(cf_z10, L1I_CACHELINE_INVALIDATES, 0x0089);
+CPUMF_EVENT_ATTR(cf_z10, ITLB1_WRITES, 0x008a);
+CPUMF_EVENT_ATTR(cf_z10, DTLB1_WRITES, 0x008b);
+CPUMF_EVENT_ATTR(cf_z10, TLB2_PTE_WRITES, 0x008c);
+CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_WRITES, 0x008d);
+CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES, 0x008e);
+CPUMF_EVENT_ATTR(cf_z10, ITLB1_MISSES, 0x0091);
+CPUMF_EVENT_ATTR(cf_z10, DTLB1_MISSES, 0x0092);
+CPUMF_EVENT_ATTR(cf_z10, L2C_STORES_SENT, 0x0093);
+CPUMF_EVENT_ATTR(cf_z196, L1D_L2_SOURCED_WRITES, 0x0080);
+CPUMF_EVENT_ATTR(cf_z196, L1I_L2_SOURCED_WRITES, 0x0081);
+CPUMF_EVENT_ATTR(cf_z196, DTLB1_MISSES, 0x0082);
+CPUMF_EVENT_ATTR(cf_z196, ITLB1_MISSES, 0x0083);
+CPUMF_EVENT_ATTR(cf_z196, L2C_STORES_SENT, 0x0085);
+CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0086);
+CPUMF_EVENT_ATTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0087);
+CPUMF_EVENT_ATTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES, 0x0088);
+CPUMF_EVENT_ATTR(cf_z196, L1D_RO_EXCL_WRITES, 0x0089);
+CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x008a);
+CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x008b);
+CPUMF_EVENT_ATTR(cf_z196, DTLB1_HPAGE_WRITES, 0x008c);
+CPUMF_EVENT_ATTR(cf_z196, L1D_LMEM_SOURCED_WRITES, 0x008d);
+CPUMF_EVENT_ATTR(cf_z196, L1I_LMEM_SOURCED_WRITES, 0x008e);
+CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x008f);
+CPUMF_EVENT_ATTR(cf_z196, DTLB1_WRITES, 0x0090);
+CPUMF_EVENT_ATTR(cf_z196, ITLB1_WRITES, 0x0091);
+CPUMF_EVENT_ATTR(cf_z196, TLB2_PTE_WRITES, 0x0092);
+CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES, 0x0093);
+CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_WRITES, 0x0094);
+CPUMF_EVENT_ATTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0096);
+CPUMF_EVENT_ATTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0098);
+CPUMF_EVENT_ATTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099);
+CPUMF_EVENT_ATTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009b);
+CPUMF_EVENT_ATTR(cf_zec12, DTLB1_MISSES, 0x0080);
+CPUMF_EVENT_ATTR(cf_zec12, ITLB1_MISSES, 0x0081);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_L2I_SOURCED_WRITES, 0x0082);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_L2I_SOURCED_WRITES, 0x0083);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_L2D_SOURCED_WRITES, 0x0084);
+CPUMF_EVENT_ATTR(cf_zec12, DTLB1_WRITES, 0x0085);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_LMEM_SOURCED_WRITES, 0x0087);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_LMEM_SOURCED_WRITES, 0x0089);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_RO_EXCL_WRITES, 0x008a);
+CPUMF_EVENT_ATTR(cf_zec12, DTLB1_HPAGE_WRITES, 0x008b);
+CPUMF_EVENT_ATTR(cf_zec12, ITLB1_WRITES, 0x008c);
+CPUMF_EVENT_ATTR(cf_zec12, TLB2_PTE_WRITES, 0x008d);
+CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES, 0x008e);
+CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_WRITES, 0x008f);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0091);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0092);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0093);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x0094);
+CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TEND, 0x0095);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0096);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV, 0x0097);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV, 0x0098);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009a);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x009b);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES, 0x009c);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x009d);
+CPUMF_EVENT_ATTR(cf_zec12, TX_C_TEND, 0x009e);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x009f);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV, 0x00a0);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1);
+CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1);
+CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2);
+CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3);
+
+static struct attribute *cpumcf_pmu_event_attr[] = {
+ CPUMF_EVENT_PTR(cf, CPU_CYCLES),
+ CPUMF_EVENT_PTR(cf, INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf, L1I_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf, L1I_PENALTY_CYCLES),
+ CPUMF_EVENT_PTR(cf, PROBLEM_STATE_CPU_CYCLES),
+ CPUMF_EVENT_PTR(cf, PROBLEM_STATE_INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES),
+ CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES),
+ CPUMF_EVENT_PTR(cf, L1D_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf, L1D_PENALTY_CYCLES),
+ CPUMF_EVENT_PTR(cf, PRNG_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf, PRNG_CYCLES),
+ CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf, SHA_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf, SHA_CYCLES),
+ CPUMF_EVENT_PTR(cf, SHA_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf, SHA_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf, DEA_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf, DEA_CYCLES),
+ CPUMF_EVENT_PTR(cf, DEA_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf, DEA_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf, AES_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf, AES_CYCLES),
+ CPUMF_EVENT_PTR(cf, AES_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf, AES_BLOCKED_CYCLES),
+ NULL,
+};
+
+static struct attribute *cpumcf_z10_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_z10, L1I_L2_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z10, L1D_L2_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z10, L1I_L3_LOCAL_WRITES),
+ CPUMF_EVENT_PTR(cf_z10, L1D_L3_LOCAL_WRITES),
+ CPUMF_EVENT_PTR(cf_z10, L1I_L3_REMOTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z10, L1D_L3_REMOTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z10, L1D_LMEM_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z10, L1I_LMEM_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z10, L1D_RO_EXCL_WRITES),
+ CPUMF_EVENT_PTR(cf_z10, L1I_CACHELINE_INVALIDATES),
+ CPUMF_EVENT_PTR(cf_z10, ITLB1_WRITES),
+ CPUMF_EVENT_PTR(cf_z10, DTLB1_WRITES),
+ CPUMF_EVENT_PTR(cf_z10, TLB2_PTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_z10, ITLB1_MISSES),
+ CPUMF_EVENT_PTR(cf_z10, DTLB1_MISSES),
+ CPUMF_EVENT_PTR(cf_z10, L2C_STORES_SENT),
+ NULL,
+};
+
+static struct attribute *cpumcf_z196_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_z196, L1D_L2_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, L1I_L2_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, DTLB1_MISSES),
+ CPUMF_EVENT_PTR(cf_z196, ITLB1_MISSES),
+ CPUMF_EVENT_PTR(cf_z196, L2C_STORES_SENT),
+ CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, L1D_RO_EXCL_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, DTLB1_HPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, L1D_LMEM_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, L1I_LMEM_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, DTLB1_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, ITLB1_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, TLB2_PTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES),
+ NULL,
+};
+
+static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_zec12, DTLB1_MISSES),
+ CPUMF_EVENT_PTR(cf_zec12, ITLB1_MISSES),
+ CPUMF_EVENT_PTR(cf_zec12, L1D_L2I_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, L1I_L2I_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, L1D_L2D_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, DTLB1_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, L1D_LMEM_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, L1I_LMEM_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, L1D_RO_EXCL_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, DTLB1_HPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, ITLB1_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, TLB2_PTE_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, TX_NC_TEND),
+ CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_zec12, TX_C_TEND),
+ CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_zec12, TX_NC_TABORT),
+ CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_NO_SPECIAL),
+ CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_SPECIAL),
+ NULL,
+};
+
+/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */
+
+static struct attribute_group cpumsf_pmu_events_group = {
+ .name = "events",
+ .attrs = cpumcf_pmu_event_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-63");
+
+static struct attribute *cpumsf_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group cpumsf_pmu_format_group = {
+ .name = "format",
+ .attrs = cpumsf_pmu_format_attr,
+};
+
+static const struct attribute_group *cpumsf_pmu_attr_groups[] = {
+ &cpumsf_pmu_events_group,
+ &cpumsf_pmu_format_group,
+ NULL,
+};
+
+
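+/* merge_attr() - Concatenate two NULL-terminated attribute lists
+ *
+ * Returns a newly allocated, NULL-terminated list containing all entries of
+ * @a followed by all entries of @b, or NULL if the allocation fails.  The
+ * input lists are not modified or freed.
+ */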
+static __init struct attribute **merge_attr(struct attribute **a,
+ struct attribute **b)
+{
+ struct attribute **new;
+ int j, i;
+
+ for (j = 0; a[j]; j++)
+ ;
+ for (i = 0; b[i]; i++)
+ j++;
+ j++;
+
+ new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
+ if (!new)
+ return NULL;
+ j = 0;
+ for (i = 0; a[i]; i++)
+ new[j++] = a[i];
+ for (i = 0; b[i]; i++)
+ new[j++] = b[i];
+ new[j] = NULL;
+
+ return new;
+}
+
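+/* cpumf_cf_event_group() - Select the counter event attribute groups
+ *
+ * Determine the machine type and merge the generic counter events with the
+ * matching model-specific event list (the cf_z10, cf_z196, or cf_zec12
+ * tables above).  If the machine type is unknown, only the generic events
+ * are exported.
+ */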
+__init const struct attribute_group **cpumf_cf_event_group(void)
+{
+ struct attribute **combined, **model;
+ struct cpuid cpu_id;
+
+ get_cpu_id(&cpu_id);
+ switch (cpu_id.machine) {
+ case 0x2097:
+ case 0x2098:
+ model = cpumcf_z10_pmu_event_attr;
+ break;
+ case 0x2817:
+ case 0x2818:
+ model = cpumcf_z196_pmu_event_attr;
+ break;
+ case 0x2827:
+ case 0x2828:
+ model = cpumcf_zec12_pmu_event_attr;
+ break;
+ default:
+ model = NULL;
+ break;
+ };
+
+ if (!model)
+ goto out;
+
+ combined = merge_attr(cpumcf_pmu_event_attr, model);
+ if (combined)
+ cpumsf_pmu_events_group.attrs = combined;
+out:
+ return cpumsf_pmu_attr_groups;
+}
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
new file mode 100644
index 0000000..6c0d298
--- /dev/null
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -0,0 +1,1641 @@
+/*
+ * Performance event support for the System z CPU-measurement Sampling Facility
+ *
+ * Copyright IBM Corp. 2013
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+#define KMSG_COMPONENT "cpum_sf"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/perf_event.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <asm/cpu_mf.h>
+#include <asm/irq.h>
+#include <asm/debug.h>
+#include <asm/timex.h>
+
+/* Minimum number of sample-data-block-tables:
+ * At least one table is required for the sampling buffer structure.
+ * A single table contains up to 511 pointers to sample-data-blocks.
+ */
+#define CPUM_SF_MIN_SDBT 1
+
+/* Number of sample-data-blocks per sample-data-block-table (SDBT):
+ * A table contains SDB pointers (8 bytes) and one table-link entry
+ * that points to the origin of the next SDBT.
+ */
+#define CPUM_SF_SDB_PER_TABLE ((PAGE_SIZE - 8) / 8)
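+/* With 4KB pages this evaluates to (4096 - 8) / 8 = 511, matching the
+ * "up to 511 pointers" limit mentioned above.
+ */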
+
+/* Maximum page offset for an SDBT table-link entry:
+ * If this page offset is reached, a table-link entry to the next SDBT
+ * must be added.
+ */
+#define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8)
+static inline int require_table_link(const void *sdbt)
+{
+ return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET;
+}
+
+/* Minimum and maximum sampling buffer sizes:
+ *
+ * This number represents the maximum size of the sampling buffer taking
+ * the number of sample-data-block-tables into account. Note that these
+ * numbers apply to the basic-sampling function only.
+ * The maximum number of SDBs is increased by CPUM_SF_SDB_DIAG_FACTOR if
+ * the diagnostic-sampling function is active.
+ *
+ * Sampling buffer size Buffer characteristics
+ * ---------------------------------------------------
+ * 64KB == 16 pages (4KB per page)
+ * 1 page for SDB-tables
+ * 15 pages for SDBs
+ *
+ * 32MB == 8192 pages (4KB per page)
+ * 16 pages for SDB-tables
+ * 8176 pages for SDBs
+ */
+static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15;
+static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176;
+static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1;
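+/* Note: CPUM_SF_SDB_DIAG_FACTOR is recomputed in sfb_set_limits() as
+ * DIV_ROUND_UP(dsdes, bsdes), that is, the rounded-up ratio of the
+ * diagnostic to the basic sample-data-entry size reported by qsi().
+ */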
+
+struct sf_buffer {
+ unsigned long *sdbt; /* Sample-data-block-table origin */
+ /* buffer characteristics (required for buffer increments) */
+ unsigned long num_sdb; /* Number of sample-data-blocks */
+ unsigned long num_sdbt; /* Number of sample-data-block-tables */
+ unsigned long *tail; /* last sample-data-block-table */
+};
+
+struct cpu_hw_sf {
+ /* CPU-measurement sampling information block */
+ struct hws_qsi_info_block qsi;
+ /* CPU-measurement sampling control block */
+ struct hws_lsctl_request_block lsctl;
+ struct sf_buffer sfb; /* Sampling buffer */
+ unsigned int flags; /* Status flags */
+ struct perf_event *event; /* Scheduled perf event */
+};
+static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf);
+
+/* Debug feature */
+static debug_info_t *sfdbg;
+
+/*
+ * sf_disable() - Switch off sampling facility
+ */
+static int sf_disable(void)
+{
+ struct hws_lsctl_request_block sreq;
+
+ memset(&sreq, 0, sizeof(sreq));
+ return lsctl(&sreq);
+}
+
+/*
+ * sf_buffer_available() - Check for an allocated sampling buffer
+ */
+static int sf_buffer_available(struct cpu_hw_sf *cpuhw)
+{
+ return !!cpuhw->sfb.sdbt;
+}
+
+/*
+ * deallocate sampling facility buffer
+ */
+static void free_sampling_buffer(struct sf_buffer *sfb)
+{
+ unsigned long *sdbt, *curr;
+
+ if (!sfb->sdbt)
+ return;
+
+ sdbt = sfb->sdbt;
+ curr = sdbt;
+
+ /* Free the SDBT after all SDBs are processed... */
+ while (1) {
+ if (!*curr || !sdbt)
+ break;
+
+ /* Process table-link entries */
+ if (is_link_entry(curr)) {
+ curr = get_next_sdbt(curr);
+ if (sdbt)
+ free_page((unsigned long) sdbt);
+
+ /* If the origin is reached, the sampling buffer is freed */
+ if (curr == sfb->sdbt)
+ break;
+ else
+ sdbt = curr;
+ } else {
+ /* Process SDB pointer */
+ if (*curr) {
+ free_page(*curr);
+ curr++;
+ }
+ }
+ }
+
+ debug_sprintf_event(sfdbg, 5,
+ "free_sampling_buffer: freed sdbt=%p\n", sfb->sdbt);
+ memset(sfb, 0, sizeof(*sfb));
+}
+
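+/* alloc_sample_data_block() - Allocate and link one sample-data-block
+ *
+ * Allocate a zeroed 4K page as sample-data-block (SDB), set the alert-request
+ * bit in its trailer entry, and store the SDB address in the SDBT slot that
+ * @sdbt points to.  Returns zero on success, -ENOMEM otherwise.
+ */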
+static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags)
+{
+ unsigned long sdb, *trailer;
+
+ /* Allocate and initialize sample-data-block */
+ sdb = get_zeroed_page(gfp_flags);
+ if (!sdb)
+ return -ENOMEM;
+ trailer = trailer_entry_ptr(sdb);
+ *trailer = SDB_TE_ALERT_REQ_MASK;
+
+ /* Link SDB into the sample-data-block-table */
+ *sdbt = sdb;
+
+ return 0;
+}
+
+/*
+ * realloc_sampling_buffer() - extend sampler memory
+ *
+ * Allocates new sample-data-blocks and adds them to the specified sampling
+ * buffer memory.
+ *
+ * Important: This modifies the sampling buffer and must be called when the
+ * sampling facility is disabled.
+ *
+ * Returns zero on success, non-zero otherwise.
+ */
+static int realloc_sampling_buffer(struct sf_buffer *sfb,
+ unsigned long num_sdb, gfp_t gfp_flags)
+{
+ int i, rc;
+ unsigned long *new, *tail;
+
+ if (!sfb->sdbt || !sfb->tail)
+ return -EINVAL;
+
+ if (!is_link_entry(sfb->tail))
+ return -EINVAL;
+
+ /* Append to the existing sampling buffer, overwriting the table-link
+ * entry.
+ * The tail variable always points to the "tail" (last and table-link)
+ * entry in an SDB-table.
+ */
+ tail = sfb->tail;
+
+ /* Do a sanity check whether the table-link entry points to
+ * the sampling buffer origin.
+ */
+ if (sfb->sdbt != get_next_sdbt(tail)) {
+ debug_sprintf_event(sfdbg, 3, "realloc_sampling_buffer: "
+ "sampling buffer is not linked: origin=%p"
+ "tail=%p\n",
+ (void *) sfb->sdbt, (void *) tail);
+ return -EINVAL;
+ }
+
+ /* Allocate remaining SDBs */
+ rc = 0;
+ for (i = 0; i < num_sdb; i++) {
+ /* Allocate a new SDB-table if it is full. */
+ if (require_table_link(tail)) {
+ new = (unsigned long *) get_zeroed_page(gfp_flags);
+ if (!new) {
+ rc = -ENOMEM;
+ break;
+ }
+ sfb->num_sdbt++;
+ /* Link current page to tail of chain */
+ *tail = (unsigned long)(void *) new + 1;
+ tail = new;
+ }
+
+ /* Allocate a new sample-data-block.
+ * If there is not enough memory, stop the realloc process
+ * and simply use what was allocated. If this is a temporary
+ * issue, a new realloc call (if required) might succeed.
+ */
+ rc = alloc_sample_data_block(tail, gfp_flags);
+ if (rc)
+ break;
+ sfb->num_sdb++;
+ tail++;
+ }
+
+ /* Link sampling buffer to its origin */
+ *tail = (unsigned long) sfb->sdbt + 1;
+ sfb->tail = tail;
+
+ debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer"
+ " settings: sdbt=%lu sdb=%lu\n",
+ sfb->num_sdbt, sfb->num_sdb);
+ return rc;
+}
+
+/*
+ * allocate_sampling_buffer() - allocate sampler memory
+ *
+ * Allocates and initializes a sampling buffer structure using the
+ * specified number of sample-data-blocks (SDB). For each allocation,
+ * a 4K page is used. The number of sample-data-block-tables (SDBT)
+ * is calculated from the number of SDBs.
+ * Also set the ALERT_REQ mask in each SDB's trailer.
+ *
+ * Returns zero on success, non-zero otherwise.
+ */
+static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
+{
+ int rc;
+
+ if (sfb->sdbt)
+ return -EINVAL;
+
+ /* Allocate the sample-data-block-table origin */
+ sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
+ if (!sfb->sdbt)
+ return -ENOMEM;
+ sfb->num_sdb = 0;
+ sfb->num_sdbt = 1;
+
+ /* Link the table origin to point to itself to prepare for
+ * realloc_sampling_buffer() invocation.
+ */
+ sfb->tail = sfb->sdbt;
+ *sfb->tail = (unsigned long)(void *) sfb->sdbt + 1;
+
+ /* Allocate requested number of sample-data-blocks */
+ rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL);
+ if (rc) {
+ free_sampling_buffer(sfb);
+ debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: "
+ "realloc_sampling_buffer failed with rc=%i\n", rc);
+ } else
+ debug_sprintf_event(sfdbg, 4,
+ "alloc_sampling_buffer: tear=%p dear=%p\n",
+ sfb->sdbt, (void *) *sfb->sdbt);
+ return rc;
+}
+
+static void sfb_set_limits(unsigned long min, unsigned long max)
+{
+ struct hws_qsi_info_block si;
+
+ CPUM_SF_MIN_SDB = min;
+ CPUM_SF_MAX_SDB = max;
+
+ memset(&si, 0, sizeof(si));
+ if (!qsi(&si))
+ CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes);
+}
+
+static unsigned long sfb_max_limit(struct hw_perf_event *hwc)
+{
+ return SAMPL_DIAG_MODE(hwc) ? CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR
+ : CPUM_SF_MAX_SDB;
+}
+
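+/* sfb_pending_allocs() - Number of SDBs that still need to be allocated
+ *
+ * Compare the requested buffer size (SFB_ALLOC_REG) with the number of SDBs
+ * already present in @sfb and return the outstanding amount; zero means the
+ * buffer is already large enough.
+ */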
+static unsigned long sfb_pending_allocs(struct sf_buffer *sfb,
+ struct hw_perf_event *hwc)
+{
+ if (!sfb->sdbt)
+ return SFB_ALLOC_REG(hwc);
+ if (SFB_ALLOC_REG(hwc) > sfb->num_sdb)
+ return SFB_ALLOC_REG(hwc) - sfb->num_sdb;
+ return 0;
+}
+
+static int sfb_has_pending_allocs(struct sf_buffer *sfb,
+ struct hw_perf_event *hwc)
+{
+ return sfb_pending_allocs(sfb, hwc) > 0;
+}
+
+static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc)
+{
+ /* Limit the number of SDBs to not exceed the maximum */
+ num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc));
+ if (num)
+ SFB_ALLOC_REG(hwc) += num;
+}
+
+static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc)
+{
+ SFB_ALLOC_REG(hwc) = 0;
+ sfb_account_allocs(num, hwc);
+}
+
+static size_t event_sample_size(struct hw_perf_event *hwc)
+{
+ struct sf_raw_sample *sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc);
+ size_t sample_size;
+
+ /* The sample size depends on the sampling function: The basic-sampling
+ * function must always be enabled, the diagnostic-sampling function is
+ * optional.
+ */
+ sample_size = sfr->bsdes;
+ if (SAMPL_DIAG_MODE(hwc))
+ sample_size += sfr->dsdes;
+
+ return sample_size;
+}
+
+static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
+{
+ if (cpuhw->sfb.sdbt)
+ free_sampling_buffer(&cpuhw->sfb);
+}
+
+static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
+{
+ unsigned long n_sdb, freq, factor;
+ size_t sfr_size, sample_size;
+ struct sf_raw_sample *sfr;
+
+ /* Allocate raw sample buffer
+ *
+ * The raw sample buffer is used to temporarily store sampling data
+ * entries for perf raw sample processing. The buffer size mainly
+ * depends on the size of diagnostic-sampling data entries which is
+ * machine-specific. The exact size calculation includes:
+ * 1. The first 4 bytes of diagnostic-sampling data entries are
+ * already reflected in the sf_raw_sample structure. Subtract
+ * these bytes.
+ * 2. The perf raw sample data must be 8-byte aligned (u64) and
+ * perf's internal data size must be considered too. So add
+ * an additional u32 for correct alignment and subtract before
+ * allocating the buffer.
+ * 3. Store the raw sample buffer pointer in the perf event
+ * hardware structure.
+ */
+ sfr_size = ALIGN((sizeof(*sfr) - sizeof(sfr->diag) + cpuhw->qsi.dsdes) +
+ sizeof(u32), sizeof(u64));
+ sfr_size -= sizeof(u32);
+ sfr = kzalloc(sfr_size, GFP_KERNEL);
+ if (!sfr)
+ return -ENOMEM;
+ sfr->size = sfr_size;
+ sfr->bsdes = cpuhw->qsi.bsdes;
+ sfr->dsdes = cpuhw->qsi.dsdes;
+ RAWSAMPLE_REG(hwc) = (unsigned long) sfr;
+
+ /* Calculate sampling buffers using 4K pages
+ *
+ * 1. Determine the sample data size which depends on the used
+ * sampling functions, for example, basic-sampling or
+ * basic-sampling with diagnostic-sampling.
+ *
+ * 2. Use the sampling frequency as input. The sampling buffer is
+ * designed to hold roughly one second of samples. This can be adjusted through
+ * the "factor" variable.
+ * In any case, alloc_sampling_buffer() sets the Alert Request
+ * Control indicator to trigger a measurement-alert to harvest
+ * sample-data-blocks (sdb).
+ *
+ * 3. Compute the number of sample-data-blocks and ensure a minimum
+ * of CPUM_SF_MIN_SDB. Also ensure the upper limit does not
+ * exceed a "calculated" maximum. The symbolic maximum is
+ * designed for basic-sampling only and needs to be increased if
+ * diagnostic-sampling is active.
+ * See also the remarks for these symbolic constants.
+ *
+ * 4. Compute the number of sample-data-block-tables (SDBT) and
+ * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up
+ * to 511 SDBs).
+ */
+ sample_size = event_sample_size(hwc);
+ freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc));
+ factor = 1;
+ n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size));
+ if (n_sdb < CPUM_SF_MIN_SDB)
+ n_sdb = CPUM_SF_MIN_SDB;
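+ /* Illustrative example (the 32-byte basic entry size is an assumption;
+ * the real value comes from the QSI information): one 4K SDB then holds
+ * (4096 - 64) / 32 = 126 samples, so a sampling frequency of 10000 Hz
+ * results in n_sdb = DIV_ROUND_UP(10000, 126) = 80.
+ */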
+
+ /* If there is already a sampling buffer allocated, it is very likely
+ * that the sampling facility is enabled too. If the event to be
+ * initialized requires a greater sampling buffer, the allocation must
+ * be postponed. Changing the sampling buffer requires the sampling
+ * facility to be in the disabled state. So, account the number of
+ * required SDBs and let cpumsf_pmu_enable() resize the buffer just
+ * before the event is started.
+ */
+ sfb_init_allocs(n_sdb, hwc);
+ if (sf_buffer_available(cpuhw))
+ return 0;
+
+ debug_sprintf_event(sfdbg, 3,
+ "allocate_buffers: rate=%lu f=%lu sdb=%lu/%lu"
+ " sample_size=%lu cpuhw=%p\n",
+ SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc),
+ sample_size, cpuhw);
+
+ return alloc_sampling_buffer(&cpuhw->sfb,
+ sfb_pending_allocs(&cpuhw->sfb, hwc));
+}
+
+static unsigned long min_percent(unsigned int percent, unsigned long base,
+ unsigned long min)
+{
+ return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100));
+}
+
+static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base)
+{
+ /* Use a percentage-based approach to extend the sampling facility
+ * buffer. Accept up to 5% sample data loss.
+ * Vary the extents between 1% to 5% of the current number of
+ * sample-data-blocks.
+ */
+ if (ratio <= 5)
+ return 0;
+ if (ratio <= 25)
+ return min_percent(1, base, 1);
+ if (ratio <= 50)
+ return min_percent(1, base, 1);
+ if (ratio <= 75)
+ return min_percent(2, base, 2);
+ if (ratio <= 100)
+ return min_percent(3, base, 3);
+ if (ratio <= 250)
+ return min_percent(4, base, 4);
+
+ return min_percent(5, base, 8);
+}
+
+static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
+ struct hw_perf_event *hwc)
+{
+ unsigned long ratio, num;
+
+ if (!OVERFLOW_REG(hwc))
+ return;
+
+ /* The sample_overflow contains the average number of sample data
+ * that has been lost because sample-data-blocks were full.
+ *
+ * Calculate the total number of sample data entries that have been
+ * discarded. Then calculate the ratio of lost samples to total samples
+ * per second in percent.
+ */
+ ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb,
+ sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)));
+
+ /* Compute number of sample-data-blocks */
+ num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb);
+ if (num)
+ sfb_account_allocs(num, hwc);
+
+ debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow=%llu ratio=%lu"
+ " num=%lu\n", OVERFLOW_REG(hwc), ratio, num);
+ OVERFLOW_REG(hwc) = 0;
+}
+
+/* extend_sampling_buffer() - Extend sampling buffer
+ * @sfb: Sampling buffer structure (for local CPU)
+ * @hwc: Perf event hardware structure
+ *
+ * Use this function to extend the sampling buffer based on the overflow counter
+ * and postponed allocation extents stored in the specified Perf event hardware.
+ *
+ * Important: This function disables the sampling facility in order to safely
+ * change the sampling buffer structure. Do not call this function
+ * when the PMU is active.
+ */
+static void extend_sampling_buffer(struct sf_buffer *sfb,
+ struct hw_perf_event *hwc)
+{
+ unsigned long num, num_old;
+ int rc;
+
+ num = sfb_pending_allocs(sfb, hwc);
+ if (!num)
+ return;
+ num_old = sfb->num_sdb;
+
+ /* Disable the sampling facility to reset any states and also
+ * clear pending measurement alerts.
+ */
+ sf_disable();
+
+ /* Extend the sampling buffer.
+ * This memory allocation typically happens in an atomic context when
+ * called by perf. Because this is a reallocation, it is fine if the
+ * new SDB-request cannot be satisfied immediately.
+ */
+ rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
+ if (rc)
+ debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc "
+ "failed with rc=%i\n", rc);
+
+ if (sfb_has_pending_allocs(sfb, hwc))
+ debug_sprintf_event(sfdbg, 5, "sfb: extend: "
+ "req=%lu alloc=%lu remaining=%lu\n",
+ num, sfb->num_sdb - num_old,
+ sfb_pending_allocs(sfb, hwc));
+}
+
+
+/* Number of perf events counting hardware events */
+static atomic_t num_events;
+/* Used to avoid races in calling reserve/release_cpumf_hardware */
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
+#define PMC_INIT 0
+#define PMC_RELEASE 1
+#define PMC_FAILURE 2
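+/* setup_pmc_cpu() - Initialize or release the sampling facility on one CPU
+ *
+ * Called on each CPU via on_each_cpu()/smp_call_function_single().  The
+ * integer behind @flags selects PMC_INIT or PMC_RELEASE; PMC_FAILURE is
+ * OR'ed into it if querying or disabling the sampling facility fails.
+ */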
+static void setup_pmc_cpu(void *flags)
+{
+ int err;
+ struct cpu_hw_sf *cpusf = &__get_cpu_var(cpu_hw_sf);
+
+ err = 0;
+ switch (*((int *) flags)) {
+ case PMC_INIT:
+ memset(cpusf, 0, sizeof(*cpusf));
+ err = qsi(&cpusf->qsi);
+ if (err)
+ break;
+ cpusf->flags |= PMU_F_RESERVED;
+ err = sf_disable();
+ if (err)
+ pr_err("Switching off the sampling facility failed "
+ "with rc=%i\n", err);
+ debug_sprintf_event(sfdbg, 5,
+ "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf);
+ break;
+ case PMC_RELEASE:
+ cpusf->flags &= ~PMU_F_RESERVED;
+ err = sf_disable();
+ if (err) {
+ pr_err("Switching off the sampling facility failed "
+ "with rc=%i\n", err);
+ } else
+ deallocate_buffers(cpusf);
+ debug_sprintf_event(sfdbg, 5,
+ "setup_pmc_cpu: released: cpuhw=%p\n", cpusf);
+ break;
+ }
+ if (err)
+ *((int *) flags) |= PMC_FAILURE;
+}
+
+static void release_pmc_hardware(void)
+{
+ int flags = PMC_RELEASE;
+
+ irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+ on_each_cpu(setup_pmc_cpu, &flags, 1);
+ perf_release_sampling();
+}
+
+static int reserve_pmc_hardware(void)
+{
+ int flags = PMC_INIT;
+ int err;
+
+ err = perf_reserve_sampling();
+ if (err)
+ return err;
+ on_each_cpu(setup_pmc_cpu, &flags, 1);
+ if (flags & PMC_FAILURE) {
+ release_pmc_hardware();
+ return -ENODEV;
+ }
+ irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+
+ return 0;
+}
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ /* Free raw sample buffer */
+ if (RAWSAMPLE_REG(&event->hw))
+ kfree((void *) RAWSAMPLE_REG(&event->hw));
+
+ /* Release PMC if this is the last perf event */
+ if (!atomic_add_unless(&num_events, -1, 1)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_dec_return(&num_events) == 0)
+ release_pmc_hardware();
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+}
+
+static void hw_init_period(struct hw_perf_event *hwc, u64 period)
+{
+ hwc->sample_period = period;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+}
+
+static void hw_reset_registers(struct hw_perf_event *hwc,
+ unsigned long *sdbt_origin)
+{
+ struct sf_raw_sample *sfr;
+
+ /* (Re)set to first sample-data-block-table */
+ TEAR_REG(hwc) = (unsigned long) sdbt_origin;
+
+ /* (Re)set raw sampling buffer register */
+ sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc);
+ memset(&sfr->basic, 0, sizeof(sfr->basic));
+ memset(&sfr->diag, 0, sfr->dsdes);
+}
+
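+/* hw_limit_rate() - Clamp a sampling interval to the minimum/maximum
+ * sampling rate reported by the sampling information block.
+ */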
+static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si,
+ unsigned long rate)
+{
+ return clamp_t(unsigned long, rate,
+ si->min_sampl_rate, si->max_sampl_rate);
+}
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ struct cpu_hw_sf *cpuhw;
+ struct hws_qsi_info_block si;
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long rate;
+ int cpu, err;
+
+ /* Reserve CPU-measurement sampling facility */
+ err = 0;
+ if (!atomic_inc_not_zero(&num_events)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
+ err = -EBUSY;
+ else
+ atomic_inc(&num_events);
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+ event->destroy = hw_perf_event_destroy;
+
+ if (err)
+ goto out;
+
+ /* Access per-CPU sampling information (query sampling info) */
+ /*
+ * The event->cpu value can be -1 to count on every CPU, for example,
+ * when attaching to a task. If this is specified, use the query
+ * sampling info from the current CPU, otherwise use event->cpu to
+ * retrieve the per-CPU information.
+ * Later, cpuhw indicates whether to allocate sampling buffers for a
+ * particular CPU (cpuhw!=NULL) or each online CPU (cpuhw==NULL).
+ */
+ memset(&si, 0, sizeof(si));
+ cpuhw = NULL;
+ if (event->cpu == -1)
+ qsi(&si);
+ else {
+ /* Event is pinned to a particular CPU, retrieve the per-CPU
+ * sampling structure for accessing the CPU-specific QSI.
+ */
+ cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
+ si = cpuhw->qsi;
+ }
+
+ /* Check sampling facility authorization and, if not authorized,
+ * fall back to other PMUs. It is safe to check any CPU because
+ * the authorization is identical for all configured CPUs.
+ */
+ if (!si.as) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ /* Always enable basic sampling */
+ SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE;
+
+ /* Check if diagnostic sampling is requested. Deny if the required
+ * sampling authorization is missing.
+ */
+ if (attr->config == PERF_EVENT_CPUM_SF_DIAG) {
+ if (!si.ad) {
+ err = -EPERM;
+ goto out;
+ }
+ SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE;
+ }
+
+ /* Check and set other sampling flags */
+ if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS)
+ SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS;
+
+ /* The sampling information (si) contains information about the
+ * min/max sampling intervals and the CPU speed. So calculate the
+ * correct sampling interval and avoid the whole period adjust
+ * feedback loop.
+ */
+ rate = 0;
+ if (attr->freq) {
+ rate = freq_to_sample_rate(&si, attr->sample_freq);
+ rate = hw_limit_rate(&si, rate);
+ attr->freq = 0;
+ attr->sample_period = rate;
+ } else {
+ /* The min/max sampling rates specify the valid range
+ * of sample periods. If the specified sample period is
+ * out of range, limit the period to the range boundary.
+ */
+ rate = hw_limit_rate(&si, hwc->sample_period);
+
+ /* The perf core maintains a maximum sample rate that is
+ * configurable through the sysctl interface. Ensure the
+ * sampling rate does not exceed this value. This also helps
+ * to avoid throttling when pushing samples with
+ * perf_event_overflow().
+ */
+ if (sample_rate_to_freq(&si, rate) >
+ sysctl_perf_event_sample_rate) {
+ err = -EINVAL;
+ debug_sprintf_event(sfdbg, 1, "Sampling rate exceeds maximum perf sample rate\n");
+ goto out;
+ }
+ }
+ SAMPL_RATE(hwc) = rate;
+ hw_init_period(hwc, SAMPL_RATE(hwc));
+
+ /* Initialize sample data overflow accounting */
+ hwc->extra_reg.reg = REG_OVERFLOW;
+ OVERFLOW_REG(hwc) = 0;
+
+ /* Allocate the per-CPU sampling buffer using the CPU information
+ * from the event. If the event is not pinned to a particular
+ * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling
+ * buffers for each online CPU.
+ */
+ if (cpuhw)
+ /* Event is pinned to a particular CPU */
+ err = allocate_buffers(cpuhw, hwc);
+ else {
+ /* Event is not pinned, allocate sampling buffer on
+ * each online CPU
+ */
+ for_each_online_cpu(cpu) {
+ cpuhw = &per_cpu(cpu_hw_sf, cpu);
+ err = allocate_buffers(cpuhw, hwc);
+ if (err)
+ break;
+ }
+ }
+out:
+ return err;
+}
+
+static int cpumsf_pmu_event_init(struct perf_event *event)
+{
+ int err;
+
+ /* No support for taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ if ((event->attr.config != PERF_EVENT_CPUM_SF) &&
+ (event->attr.config != PERF_EVENT_CPUM_SF_DIAG))
+ return -ENOENT;
+ break;
+ case PERF_TYPE_HARDWARE:
+ /* Support sampling of CPU cycles in addition to the
+ * counter facility. However, the counter facility
+ * is more precise and, hence, restrict this PMU to
+ * sampling events only.
+ */
+ if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES)
+ return -ENOENT;
+ if (!is_sampling_event(event))
+ return -ENOENT;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ /* Check online status of the CPU to which the event is pinned */
+ if (event->cpu >= nr_cpumask_bits ||
+ (event->cpu >= 0 && !cpu_online(event->cpu)))
+ return -ENODEV;
+
+ /* Force reset of idle/hv excludes regardless of what the
+ * user requested.
+ */
+ if (event->attr.exclude_hv)
+ event->attr.exclude_hv = 0;
+ if (event->attr.exclude_idle)
+ event->attr.exclude_idle = 0;
+
+ err = __hw_perf_event_init(event);
+ if (unlikely(err))
+ if (event->destroy)
+ event->destroy(event);
+ return err;
+}
+
+static void cpumsf_pmu_enable(struct pmu *pmu)
+{
+ struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+ struct hw_perf_event *hwc;
+ int err;
+
+ if (cpuhw->flags & PMU_F_ENABLED)
+ return;
+
+ if (cpuhw->flags & PMU_F_ERR_MASK)
+ return;
+
+ /* Check whether to extend the sampling buffer.
+ *
+ * Two conditions trigger an increase of the sampling buffer for a
+ * perf event:
+ * 1. Postponed buffer allocations from the event initialization.
+ * 2. Sampling overflows that contribute to pending allocations.
+ *
+ * Note that the extend_sampling_buffer() function disables the sampling
+ * facility, but it can be fully re-enabled using sampling controls that
+ * have been saved in cpumsf_pmu_disable().
+ */
+ if (cpuhw->event) {
+ hwc = &cpuhw->event->hw;
+ /* Account number of overflow-designated buffer extents */
+ sfb_account_overflows(cpuhw, hwc);
+ if (sfb_has_pending_allocs(&cpuhw->sfb, hwc))
+ extend_sampling_buffer(&cpuhw->sfb, hwc);
+ }
+
+ /* (Re)enable the PMU and sampling facility */
+ cpuhw->flags |= PMU_F_ENABLED;
+ barrier();
+
+ err = lsctl(&cpuhw->lsctl);
+ if (err) {
+ cpuhw->flags &= ~PMU_F_ENABLED;
+ pr_err("Loading sampling controls failed: op=%i err=%i\n",
+ 1, err);
+ return;
+ }
+
+ debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i "
+ "tear=%p dear=%p\n", cpuhw->lsctl.es, cpuhw->lsctl.cs,
+ cpuhw->lsctl.ed, cpuhw->lsctl.cd,
+ (void *) cpuhw->lsctl.tear, (void *) cpuhw->lsctl.dear);
+}
+
+static void cpumsf_pmu_disable(struct pmu *pmu)
+{
+ struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+ struct hws_lsctl_request_block inactive;
+ struct hws_qsi_info_block si;
+ int err;
+
+ if (!(cpuhw->flags & PMU_F_ENABLED))
+ return;
+
+ if (cpuhw->flags & PMU_F_ERR_MASK)
+ return;
+
+ /* Switch off sampling activation control */
+ inactive = cpuhw->lsctl;
+ inactive.cs = 0;
+ inactive.cd = 0;
+
+ err = lsctl(&inactive);
+ if (err) {
+ pr_err("Loading sampling controls failed: op=%i err=%i\n",
+ 2, err);
+ return;
+ }
+
+ /* Save state of TEAR and DEAR register contents */
+ if (!qsi(&si)) {
+ /* TEAR/DEAR values are valid only if the sampling facility is
+ * enabled. Note that cpumsf_pmu_disable() might be called even
+ * for a disabled sampling facility because cpumsf_pmu_enable()
+ * controls the enable/disable state.
+ */
+ if (si.es) {
+ cpuhw->lsctl.tear = si.tear;
+ cpuhw->lsctl.dear = si.dear;
+ }
+ } else
+ debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: "
+ "qsi() failed with err=%i\n", err);
+
+ cpuhw->flags &= ~PMU_F_ENABLED;
+}
+
+/* perf_exclude_event() - Filter event
+ * @event: The perf event
+ * @regs: pt_regs structure
+ * @sde_regs: Sample-data-entry (sde) regs structure
+ *
+ * Filter perf events according to their exclude specification.
+ *
+ * Return non-zero if the event shall be excluded.
+ */
+static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs,
+ struct perf_sf_sde_regs *sde_regs)
+{
+ if (event->attr.exclude_user && user_mode(regs))
+ return 1;
+ if (event->attr.exclude_kernel && !user_mode(regs))
+ return 1;
+ if (event->attr.exclude_guest && sde_regs->in_guest)
+ return 1;
+ if (event->attr.exclude_host && !sde_regs->in_guest)
+ return 1;
+ return 0;
+}
+
+/* perf_push_sample() - Push samples to perf
+ * @event: The perf event
+ * @sample: Hardware sample data
+ *
+ * Use the hardware sample data to create a perf event sample. The sample
+ * is then pushed to the perf event subsystem and the function checks for
+ * possible event overflows. If an event overflow occurs, the PMU is
+ * stopped.
+ *
+ * Return non-zero if an event overflow occurred.
+ */
+static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
+{
+ int overflow;
+ struct pt_regs regs;
+ struct perf_sf_sde_regs *sde_regs;
+ struct perf_sample_data data;
+ struct perf_raw_record raw;
+
+ /* Setup perf sample */
+ perf_sample_data_init(&data, 0, event->hw.last_period);
+ raw.size = sfr->size;
+ raw.data = sfr;
+ data.raw = &raw;
+
+ /* Set up pt_regs to look like a CPU-measurement external interrupt
+ * using the Program Request Alert code. The otherwise unused
+ * regs.int_parm_long field contains additional sample-data-entry related
+ * indicators.
+ */
+ memset(&regs, 0, sizeof(regs));
+ regs.int_code = 0x1407;
+ regs.int_parm = CPU_MF_INT_SF_PRA;
+ sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
+
+ regs.psw.addr = sfr->basic.ia;
+ if (sfr->basic.T)
+ regs.psw.mask |= PSW_MASK_DAT;
+ if (sfr->basic.W)
+ regs.psw.mask |= PSW_MASK_WAIT;
+ if (sfr->basic.P)
+ regs.psw.mask |= PSW_MASK_PSTATE;
+ switch (sfr->basic.AS) {
+ case 0x0:
+ regs.psw.mask |= PSW_ASC_PRIMARY;
+ break;
+ case 0x1:
+ regs.psw.mask |= PSW_ASC_ACCREG;
+ break;
+ case 0x2:
+ regs.psw.mask |= PSW_ASC_SECONDARY;
+ break;
+ case 0x3:
+ regs.psw.mask |= PSW_ASC_HOME;
+ break;
+ }
+
+ /* The host-program-parameter (hpp) contains the sie control
+ * block that is set by sie64a() in entry64.S. Check if hpp
+ * refers to a valid control block and set sde_regs flags
+ * accordingly. This would allow to use hpp values for other
+ * purposes too.
+ * For now, simply use a non-zero value as guest indicator.
+ */
+ if (sfr->basic.hpp)
+ sde_regs->in_guest = 1;
+
+ overflow = 0;
+ if (perf_exclude_event(event, &regs, sde_regs))
+ goto out;
+ if (perf_event_overflow(event, &data, &regs)) {
+ overflow = 1;
+ event->pmu->stop(event, 0);
+ }
+ perf_event_update_userpage(event);
+out:
+ return overflow;
+}
+
+static void perf_event_count_update(struct perf_event *event, u64 count)
+{
+ local64_add(count, &event->count);
+}
+
+static int sample_format_is_valid(struct hws_combined_entry *sample,
+ unsigned int flags)
+{
+ if (likely(flags & PERF_CPUM_SF_BASIC_MODE))
+ /* Only basic-sampling data entries with data-entry-format
+ * version of 0x0001 can be processed.
+ */
+ if (sample->basic.def != 0x0001)
+ return 0;
+ if (flags & PERF_CPUM_SF_DIAG_MODE)
+ /* The data-entry-format number of diagnostic-sampling data
+ * entries can vary. Because diagnostic data is just passed
+ * through, do only a sanity check on the DEF.
+ */
+ if (sample->diag.def < 0x8001)
+ return 0;
+ return 1;
+}
+
+static int sample_is_consistent(struct hws_combined_entry *sample,
+ unsigned long flags)
+{
+ /* This check applies only to the basic-sampling data entry of a potentially
+ * combined-sampling data entry. Invalid entries cannot be processed
+ * by the PMU and, thus, do not deliver an associated
+ * diagnostic-sampling data entry.
+ */
+ if (unlikely(!(flags & PERF_CPUM_SF_BASIC_MODE)))
+ return 0;
+ /*
+ * Samples are skipped, if they are invalid or for which the
+ * instruction address is not predictable, i.e., the wait-state bit is
+ * set.
+ */
+ if (sample->basic.I || sample->basic.W)
+ return 0;
+ return 1;
+}
+
+static void reset_sample_slot(struct hws_combined_entry *sample,
+ unsigned long flags)
+{
+ if (likely(flags & PERF_CPUM_SF_BASIC_MODE))
+ sample->basic.def = 0;
+ if (flags & PERF_CPUM_SF_DIAG_MODE)
+ sample->diag.def = 0;
+}
+
+static void sfr_store_sample(struct sf_raw_sample *sfr,
+ struct hws_combined_entry *sample)
+{
+ if (likely(sfr->format & PERF_CPUM_SF_BASIC_MODE))
+ sfr->basic = sample->basic;
+ if (sfr->format & PERF_CPUM_SF_DIAG_MODE)
+ memcpy(&sfr->diag, &sample->diag, sfr->dsdes);
+}
+
+static void debug_sample_entry(struct hws_combined_entry *sample,
+ struct hws_trailer_entry *te,
+ unsigned long flags)
+{
+ debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown "
+ "sampling data entry: te->f=%i basic.def=%04x (%p)"
+ " diag.def=%04x (%p)\n", te->f,
+ sample->basic.def, &sample->basic,
+ (flags & PERF_CPUM_SF_DIAG_MODE)
+ ? sample->diag.def : 0xFFFF,
+ (flags & PERF_CPUM_SF_DIAG_MODE)
+ ? &sample->diag : NULL);
+}
+
+/* hw_collect_samples() - Walk through a sample-data-block and collect samples
+ * @event: The perf event
+ * @sdbt: Sample-data-block table
+ * @overflow: Event overflow counter
+ *
+ * Walks through a sample-data-block and collects sampling data entries that are
+ * then pushed to the perf event subsystem. Depending on the sampling function,
+ * there can be either basic-sampling or combined-sampling data entries. A
+ * combined-sampling data entry consists of a basic- and a diagnostic-sampling
+ * data entry. The sampling function is determined by the flags in the perf
+ * event hardware structure. The function always works with a combined-sampling
+ * data entry but ignores the diagnostic portion if it is not available.
+ *
+ * Note that the implementation focuses on basic-sampling data entries and, if
+ * such an entry is not valid, the entire combined-sampling data entry is
+ * ignored.
+ *
+ * The overflow variable counts the number of samples that have been discarded
+ * due to a perf event overflow.
+ */
+static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
+ unsigned long long *overflow)
+{
+ unsigned long flags = SAMPL_FLAGS(&event->hw);
+ struct hws_combined_entry *sample;
+ struct hws_trailer_entry *te;
+ struct sf_raw_sample *sfr;
+ size_t sample_size;
+
+ /* Prepare and initialize raw sample data */
+ sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(&event->hw);
+ sfr->format = flags & PERF_CPUM_SF_MODE_MASK;
+
+ sample_size = event_sample_size(&event->hw);
+ te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
+ sample = (struct hws_combined_entry *) *sdbt;
+ while ((unsigned long *) sample < (unsigned long *) te) {
+ /* Check for an empty sample */
+ if (!sample->basic.def)
+ break;
+
+ /* Update perf event period */
+ perf_event_count_update(event, SAMPL_RATE(&event->hw));
+
+ /* Check sampling data entry */
+ if (sample_format_is_valid(sample, flags)) {
+ /* If an event overflow occurred, the PMU is stopped to
+ * throttle event delivery. Remaining sample data is
+ * discarded.
+ */
+ if (!*overflow) {
+ if (sample_is_consistent(sample, flags)) {
+ /* Deliver sample data to perf */
+ sfr_store_sample(sfr, sample);
+ *overflow = perf_push_sample(event, sfr);
+ }
+ } else
+ /* Count discarded samples */
+ *overflow += 1;
+ } else {
+ debug_sample_entry(sample, te, flags);
+ /* Sample slot is not yet written or other record.
+ *
+ * This condition can occur if the buffer was reused
+ * from a combined basic- and diagnostic-sampling.
+ * If only basic-sampling is then active, entries are
+ * written into the larger diagnostic entries.
+ * This is typically the case for sample-data-blocks
+ * that are not full. Stop processing if the first
+ * invalid format was detected.
+ */
+ if (!te->f)
+ break;
+ }
+
+ /* Reset sample slot and advance to next sample */
+ reset_sample_slot(sample, flags);
+ sample += sample_size;
+ }
+}
+
+/* hw_perf_event_update() - Process sampling buffer
+ * @event: The perf event
+ * @flush_all: Flag to also flush partially filled sample-data-blocks
+ *
+ * Processes the sampling buffer and creates perf event samples.
+ * The sampling buffer position is retrieved and saved in the TEAR_REG
+ * register of the specified perf event.
+ *
+ * Only full sample-data-blocks are processed. Specify the flush_all flag
+ * to also walk through partially filled sample-data-blocks. It is ignored
+ * if PERF_CPUM_SF_FULL_BLOCKS is set. The PERF_CPUM_SF_FULL_BLOCKS flag
+ * enforces the processing of full sample-data-blocks only (trailer entries
+ * with the block-full-indicator bit set).
+ */
+static void hw_perf_event_update(struct perf_event *event, int flush_all)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hws_trailer_entry *te;
+ unsigned long *sdbt;
+ unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags;
+ int done;
+
+ if (flush_all && SDB_FULL_BLOCKS(hwc))
+ flush_all = 0;
+
+ sdbt = (unsigned long *) TEAR_REG(hwc);
+ done = event_overflow = sampl_overflow = num_sdb = 0;
+ while (!done) {
+ /* Get the trailer entry of the sample-data-block */
+ te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
+
+ /* Leave loop if no more work to do (block full indicator) */
+ if (!te->f) {
+ done = 1;
+ if (!flush_all)
+ break;
+ }
+
+ /* Check the sample overflow count */
+ if (te->overflow)
+ /* Account sample overflows and, if a particular limit
+ * is reached, extend the sampling buffer.
+ * For details, see sfb_account_overflows().
+ */
+ sampl_overflow += te->overflow;
+
+ /* Timestamps are valid for full sample-data-blocks only */
+ debug_sprintf_event(sfdbg, 6, "hw_perf_event_update: sdbt=%p "
+ "overflow=%llu timestamp=0x%llx\n",
+ sdbt, te->overflow,
+ (te->f) ? trailer_timestamp(te) : 0ULL);
+
+ /* Collect all samples from a single sample-data-block and
+ * flag if an (perf) event overflow happened. If so, the PMU
+ * is stopped and remaining samples will be discarded.
+ */
+ hw_collect_samples(event, sdbt, &event_overflow);
+ num_sdb++;
+
+ /* Reset trailer (using compare-double-and-swap) */
+ do {
+ te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
+ te_flags |= SDB_TE_ALERT_REQ_MASK;
+ } while (!cmpxchg_double(&te->flags, &te->overflow,
+ te->flags, te->overflow,
+ te_flags, 0ULL));
+
+ /* Advance to next sample-data-block */
+ sdbt++;
+ if (is_link_entry(sdbt))
+ sdbt = get_next_sdbt(sdbt);
+
+ /* Update event hardware registers */
+ TEAR_REG(hwc) = (unsigned long) sdbt;
+
+ /* Stop processing sample-data if all samples of the current
+ * sample-data-block were flushed even if it was not full.
+ */
+ if (flush_all && done)
+ break;
+
+ /* If an event overflow happened, discard samples by
+ * processing any remaining sample-data-blocks.
+ */
+ if (event_overflow)
+ flush_all = 1;
+ }
+
+ /* Account sample overflows in the event hardware structure */
+ if (sampl_overflow)
+ OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
+ sampl_overflow, 1 + num_sdb);
+ if (sampl_overflow || event_overflow)
+ debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
+ "overflow stats: sample=%llu event=%llu\n",
+ sampl_overflow, event_overflow);
+}
+
+static void cpumsf_pmu_read(struct perf_event *event)
+{
+ /* Nothing to do ... updates are interrupt-driven */
+}
+
+/* Activate sampling control.
+ * Next call of pmu_enable() starts sampling.
+ */
+static void cpumsf_pmu_start(struct perf_event *event, int flags)
+{
+ struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+
+ if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+ return;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+ perf_pmu_disable(event->pmu);
+ event->hw.state = 0;
+ cpuhw->lsctl.cs = 1;
+ if (SAMPL_DIAG_MODE(&event->hw))
+ cpuhw->lsctl.cd = 1;
+ perf_pmu_enable(event->pmu);
+}
+
+/* Deactivate sampling control.
+ * Next call of pmu_enable() stops sampling.
+ */
+static void cpumsf_pmu_stop(struct perf_event *event, int flags)
+{
+ struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
+ perf_pmu_disable(event->pmu);
+ cpuhw->lsctl.cs = 0;
+ cpuhw->lsctl.cd = 0;
+ event->hw.state |= PERF_HES_STOPPED;
+
+ if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
+ hw_perf_event_update(event, 1);
+ event->hw.state |= PERF_HES_UPTODATE;
+ }
+ perf_pmu_enable(event->pmu);
+}
+
+static int cpumsf_pmu_add(struct perf_event *event, int flags)
+{
+ struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+ int err;
+
+ if (cpuhw->flags & PMU_F_IN_USE)
+ return -EAGAIN;
+
+ if (!cpuhw->sfb.sdbt)
+ return -EINVAL;
+
+ err = 0;
+ perf_pmu_disable(event->pmu);
+
+ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ /* Set up sampling controls. Always program the sampling register
+ * using the SDB-table start. Reset TEAR_REG event hardware register
+ * that is used by hw_perf_event_update() to store the sampling buffer
+ * position after samples have been flushed.
+ */
+ cpuhw->lsctl.s = 0;
+ cpuhw->lsctl.h = 1;
+ cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt;
+ cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt;
+ cpuhw->lsctl.interval = SAMPL_RATE(&event->hw);
+ hw_reset_registers(&event->hw, cpuhw->sfb.sdbt);
+
+ /* Ensure sampling functions are in the disabled state. If disabled,
+ * switch on sampling enable control. */
+ if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) {
+ err = -EAGAIN;
+ goto out;
+ }
+ cpuhw->lsctl.es = 1;
+ if (SAMPL_DIAG_MODE(&event->hw))
+ cpuhw->lsctl.ed = 1;
+
+ /* Set in_use flag and store event */
+ event->hw.idx = 0; /* only one sampling event per CPU supported */
+ cpuhw->event = event;
+ cpuhw->flags |= PMU_F_IN_USE;
+
+ if (flags & PERF_EF_START)
+ cpumsf_pmu_start(event, PERF_EF_RELOAD);
+out:
+ perf_event_update_userpage(event);
+ perf_pmu_enable(event->pmu);
+ return err;
+}
+
+static void cpumsf_pmu_del(struct perf_event *event, int flags)
+{
+ struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+
+ perf_pmu_disable(event->pmu);
+ cpumsf_pmu_stop(event, PERF_EF_UPDATE);
+
+ cpuhw->lsctl.es = 0;
+ cpuhw->lsctl.ed = 0;
+ cpuhw->flags &= ~PMU_F_IN_USE;
+ cpuhw->event = NULL;
+
+ perf_event_update_userpage(event);
+ perf_pmu_enable(event->pmu);
+}
+
+static int cpumsf_pmu_event_idx(struct perf_event *event)
+{
+ return event->hw.idx;
+}
+
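+/* Sampling events exported through sysfs: SF_CYCLES_BASIC selects the
+ * basic-sampling function only, SF_CYCLES_BASIC_DIAG additionally enables
+ * the diagnostic-sampling function (see __hw_perf_event_init()).
+ */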
+CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
+CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
+
+static struct attribute *cpumsf_pmu_events_attr[] = {
+ CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
+ CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG),
+ NULL,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-63");
+
+static struct attribute *cpumsf_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group cpumsf_pmu_events_group = {
+ .name = "events",
+ .attrs = cpumsf_pmu_events_attr,
+};
+static struct attribute_group cpumsf_pmu_format_group = {
+ .name = "format",
+ .attrs = cpumsf_pmu_format_attr,
+};
+static const struct attribute_group *cpumsf_pmu_attr_groups[] = {
+ &cpumsf_pmu_events_group,
+ &cpumsf_pmu_format_group,
+ NULL,
+};
+
+static struct pmu cpumf_sampling = {
+ .pmu_enable = cpumsf_pmu_enable,
+ .pmu_disable = cpumsf_pmu_disable,
+
+ .event_init = cpumsf_pmu_event_init,
+ .add = cpumsf_pmu_add,
+ .del = cpumsf_pmu_del,
+
+ .start = cpumsf_pmu_start,
+ .stop = cpumsf_pmu_stop,
+ .read = cpumsf_pmu_read,
+
+ .event_idx = cpumsf_pmu_event_idx,
+ .attr_groups = cpumsf_pmu_attr_groups,
+};
+
+static void cpumf_measurement_alert(struct ext_code ext_code,
+ unsigned int alert, unsigned long unused)
+{
+ struct cpu_hw_sf *cpuhw;
+
+ if (!(alert & CPU_MF_INT_SF_MASK))
+ return;
+ inc_irq_stat(IRQEXT_CMS);
+ cpuhw = &__get_cpu_var(cpu_hw_sf);
+
+ /* Measurement alerts are shared and might happen when the PMU
+ * is not reserved. Ignore these alerts in this case. */
+ if (!(cpuhw->flags & PMU_F_RESERVED))
+ return;
+
+ /* The processing below must take care of multiple alert events that
+ * might be indicated concurrently. */
+
+ /* Program alert request */
+ if (alert & CPU_MF_INT_SF_PRA) {
+ if (cpuhw->flags & PMU_F_IN_USE)
+ hw_perf_event_update(cpuhw->event, 0);
+ else
+ WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE));
+ }
+
+ /* Report measurement alerts only for non-PRA codes */
+ if (alert != CPU_MF_INT_SF_PRA)
+ debug_sprintf_event(sfdbg, 6, "measurement alert: 0x%x\n", alert);
+
+ /* Sampling authorization change request */
+ if (alert & CPU_MF_INT_SF_SACA)
+ qsi(&cpuhw->qsi);
+
+ /* Loss of sample data due to high-priority machine activities */
+ if (alert & CPU_MF_INT_SF_LSDA) {
+ pr_err("Sample data was lost\n");
+ cpuhw->flags |= PMU_F_ERR_LSDA;
+ sf_disable();
+ }
+
+ /* Invalid sampling buffer entry */
+ if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) {
+ pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n",
+ alert);
+ cpuhw->flags |= PMU_F_ERR_IBE;
+ sf_disable();
+ }
+}
+
+static int cpumf_pmu_notifier(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (long) hcpu;
+ int flags;
+
+ /* Ignore the notification if no events are scheduled on the PMU.
+ * This might be racy...
+ */
+ if (!atomic_read(&num_events))
+ return NOTIFY_OK;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ flags = PMC_INIT;
+ smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
+ break;
+ case CPU_DOWN_PREPARE:
+ flags = PMC_RELEASE;
+ smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int param_get_sfb_size(char *buffer, const struct kernel_param *kp)
+{
+ if (!cpum_sf_avail())
+ return -ENODEV;
+ return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
+}
+
+static int param_set_sfb_size(const char *val, const struct kernel_param *kp)
+{
+ int rc;
+ unsigned long min, max;
+
+ if (!cpum_sf_avail())
+ return -ENODEV;
+ if (!val || !strlen(val))
+ return -EINVAL;
+
+ /* Valid parameter values: "min,max" or "max" */
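+ /* For example (hypothetical values): "cpum_sfb_size=16,1024" sets both
+ * limits, while "cpum_sfb_size=1024" sets only the maximum.
+ */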
+ min = CPUM_SF_MIN_SDB;
+ max = CPUM_SF_MAX_SDB;
+ if (strchr(val, ','))
+ rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 0 : -EINVAL;
+ else
+ rc = kstrtoul(val, 10, &max);
+
+ if (min < 2 || min >= max || max > get_num_physpages())
+ rc = -EINVAL;
+ if (rc)
+ return rc;
+
+ sfb_set_limits(min, max);
+ pr_info("The sampling buffer limits have changed to: "
+ "min=%lu max=%lu (diag=x%lu)\n",
+ CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR);
+ return 0;
+}
+
+#define param_check_sfb_size(name, p) __param_check(name, p, void)
+static struct kernel_param_ops param_ops_sfb_size = {
+ .set = param_set_sfb_size,
+ .get = param_get_sfb_size,
+};
+
+#define RS_INIT_FAILURE_QSI 0x0001
+#define RS_INIT_FAILURE_BSDES 0x0002
+#define RS_INIT_FAILURE_ALRT 0x0003
+#define RS_INIT_FAILURE_PERF 0x0004
+static void __init pr_cpumsf_err(unsigned int reason)
+{
+ pr_err("Sampling facility support for perf is not available: "
+ "reason=%04x\n", reason);
+}
+
+static int __init init_cpum_sampling_pmu(void)
+{
+ struct hws_qsi_info_block si;
+ int err;
+
+ if (!cpum_sf_avail())
+ return -ENODEV;
+
+ memset(&si, 0, sizeof(si));
+ if (qsi(&si)) {
+ pr_cpumsf_err(RS_INIT_FAILURE_QSI);
+ return -ENODEV;
+ }
+
+ if (si.bsdes != sizeof(struct hws_basic_entry)) {
+ pr_cpumsf_err(RS_INIT_FAILURE_BSDES);
+ return -EINVAL;
+ }
+
+ if (si.ad)
+ sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
+
+ sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
+ if (!sfdbg)
+ pr_err("Registering for s390dbf failed\n");
+ debug_register_view(sfdbg, &debug_sprintf_view);
+
+ err = register_external_interrupt(0x1407, cpumf_measurement_alert);
+ if (err) {
+ pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
+ goto out;
+ }
+
+ err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW);
+ if (err) {
+ pr_cpumsf_err(RS_INIT_FAILURE_PERF);
+ unregister_external_interrupt(0x1407, cpumf_measurement_alert);
+ goto out;
+ }
+ perf_cpu_notifier(cpumf_pmu_notifier);
+out:
+ return err;
+}
+arch_initcall(init_cpum_sampling_pmu);
+core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640);
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 2343c21..5d2dfa3 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -1,7 +1,7 @@
/*
* Performance event support for s390x
*
- * Copyright IBM Corp. 2012
+ * Copyright IBM Corp. 2012, 2013
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -16,15 +16,19 @@
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/export.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
+#include <asm/sysinfo.h>
const char *perf_pmu_name(void)
{
if (cpum_cf_avail() || cpum_sf_avail())
- return "CPU-measurement facilities (CPUMF)";
+ return "CPU-Measurement Facilities (CPU-MF)";
return "pmu";
}
EXPORT_SYMBOL(perf_pmu_name);
@@ -35,6 +39,8 @@ int perf_num_counters(void)
if (cpum_cf_avail())
num += PERF_CPUM_CF_MAX_CTR;
+ if (cpum_sf_avail())
+ num += PERF_CPUM_SF_MAX_CTR;
return num;
}
@@ -54,7 +60,7 @@ static bool is_in_guest(struct pt_regs *regs)
{
if (user_mode(regs))
return false;
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+#if IS_ENABLED(CONFIG_KVM)
return instruction_pointer(regs) == (unsigned long) &sie_exit;
#else
return false;
@@ -83,8 +89,31 @@ static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
: PERF_RECORD_MISC_GUEST_KERNEL;
}
+static unsigned long perf_misc_flags_sf(struct pt_regs *regs)
+{
+ struct perf_sf_sde_regs *sde_regs;
+ unsigned long flags;
+
+ sde_regs = (struct perf_sf_sde_regs *) &regs->int_parm_long;
+ if (sde_regs->in_guest)
+ flags = user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
+ : PERF_RECORD_MISC_GUEST_KERNEL;
+ else
+ flags = user_mode(regs) ? PERF_RECORD_MISC_USER
+ : PERF_RECORD_MISC_KERNEL;
+ return flags;
+}
+
unsigned long perf_misc_flags(struct pt_regs *regs)
{
+ /* Check if the cpum_sf PMU has created the pt_regs structure.
+ * In this case, perf misc flags can be easily extracted. Otherwise,
+ * do regular checks on the pt_regs content.
+ */
+ if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA)
+ if (!regs->gprs[15])
+ return perf_misc_flags_sf(regs);
+
if (is_in_guest(regs))
return perf_misc_guest_flags(regs);
@@ -92,27 +121,107 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
: PERF_RECORD_MISC_KERNEL;
}
-void perf_event_print_debug(void)
+void print_debug_cf(void)
{
struct cpumf_ctr_info cf_info;
- unsigned long flags;
- int cpu;
-
- if (!cpum_cf_avail())
- return;
-
- local_irq_save(flags);
+ int cpu = smp_processor_id();
- cpu = smp_processor_id();
memset(&cf_info, 0, sizeof(cf_info));
if (!qctri(&cf_info))
pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
cpu, cf_info.cfvn, cf_info.csvn,
cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
+}
+
+static void print_debug_sf(void)
+{
+ struct hws_qsi_info_block si;
+ int cpu = smp_processor_id();
+ memset(&si, 0, sizeof(si));
+ if (qsi(&si))
+ return;
+
+ pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n",
+ cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate,
+ si.cpu_speed);
+
+ if (si.as)
+ pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i"
+ " bsdes=%i tear=%016lx dear=%016lx\n", cpu,
+ si.as, si.es, si.cs, si.bsdes, si.tear, si.dear);
+ if (si.ad)
+ pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i"
+ " dsdes=%i tear=%016lx dear=%016lx\n", cpu,
+ si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear);
+}
+
+void perf_event_print_debug(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (cpum_cf_avail())
+ print_debug_cf();
+ if (cpum_sf_avail())
+ print_debug_sf();
local_irq_restore(flags);
}
+/* Service level infrastructure */
+static void sl_print_counter(struct seq_file *m)
+{
+ struct cpumf_ctr_info ci;
+
+ memset(&ci, 0, sizeof(ci));
+ if (qctri(&ci))
+ return;
+
+ seq_printf(m, "CPU-MF: Counter facility: version=%u.%u "
+ "authorization=%04x\n", ci.cfvn, ci.csvn, ci.auth_ctl);
+}
+
+static void sl_print_sampling(struct seq_file *m)
+{
+ struct hws_qsi_info_block si;
+
+ memset(&si, 0, sizeof(si));
+ if (qsi(&si))
+ return;
+
+ if (!si.as && !si.ad)
+ return;
+
+ seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu"
+ " cpu_speed=%u\n", si.min_sampl_rate, si.max_sampl_rate,
+ si.cpu_speed);
+ if (si.as)
+ seq_printf(m, "CPU-MF: Sampling facility: mode=basic"
+ " sample_size=%u\n", si.bsdes);
+ if (si.ad)
+ seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic"
+ " sample_size=%u\n", si.dsdes);
+}
+
+static void service_level_perf_print(struct seq_file *m,
+ struct service_level *sl)
+{
+ if (cpum_cf_avail())
+ sl_print_counter(m);
+ if (cpum_sf_avail())
+ sl_print_sampling(m);
+}
+
+static struct service_level service_level_perf = {
+ .seq_print = service_level_perf_print,
+};
+
+static int __init service_level_perf_register(void)
+{
+ return register_service_level(&service_level_perf);
+}
+arch_initcall(service_level_perf_register);
+
/* See also arch/s390/kernel/traps.c */
static unsigned long __store_trace(struct perf_callchain_entry *entry,
unsigned long sp,
@@ -172,3 +281,44 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
__store_trace(entry, head, S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
}
+
+/* Perf definitions for PMU event attributes in sysfs */
+ssize_t cpumf_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sprintf(page, "event=0x%04llx,name=%s\n",
+ pmu_attr->id, attr->attr.name);
+}
+
+/* Reserve/release functions for sharing perf hardware */
+static DEFINE_SPINLOCK(perf_hw_owner_lock);
+static void *perf_sampling_owner;
+
+int perf_reserve_sampling(void)
+{
+ int err;
+
+ err = 0;
+ spin_lock(&perf_hw_owner_lock);
+ if (perf_sampling_owner) {
+ pr_warn("The sampling facility is already reserved by %p\n",
+ perf_sampling_owner);
+ err = -EBUSY;
+ } else
+ perf_sampling_owner = __builtin_return_address(0);
+ spin_unlock(&perf_hw_owner_lock);
+ return err;
+}
+EXPORT_SYMBOL(perf_reserve_sampling);
+
+void perf_release_sampling(void)
+{
+ spin_lock(&perf_hw_owner_lock);
+ WARN_ON(!perf_sampling_owner);
+ perf_sampling_owner = NULL;
+ spin_unlock(&perf_hw_owner_lock);
+}
+EXPORT_SYMBOL(perf_release_sampling);
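perf_reserve_sampling()/perf_release_sampling() above implement a single-owner reservation: the first caller stores an owner cookie under a lock, later callers get -EBUSY, and release clears the owner again. A user-space sketch of the same pattern (hypothetical names, a pthread mutex standing in for the kernel spinlock):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t owner_lock = PTHREAD_MUTEX_INITIALIZER;
static const void *sampling_owner;

static int reserve_sampling(const void *who)
{
	int err = 0;

	pthread_mutex_lock(&owner_lock);
	if (sampling_owner)
		err = -EBUSY;		/* already claimed by someone else */
	else
		sampling_owner = who;	/* remember who holds the facility */
	pthread_mutex_unlock(&owner_lock);
	return err;
}

static void release_sampling(void)
{
	pthread_mutex_lock(&owner_lock);
	sampling_owner = NULL;
	pthread_mutex_unlock(&owner_lock);
}

int main(void)
{
	printf("first reserve:  %d\n", reserve_sampling(&owner_lock));	/* 0      */
	printf("second reserve: %d\n", reserve_sampling(&owner_lock));	/* -EBUSY */
	release_sampling();
	printf("after release:  %d\n", reserve_sampling(&owner_lock));	/* 0      */
	return 0;
}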
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 7ed0d4e..dd14532 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -261,20 +261,18 @@ static inline unsigned long brk_rnd(void)
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+ unsigned long ret;
- if (ret < mm->brk)
- return mm->brk;
- return ret;
+ ret = PAGE_ALIGN(mm->brk + brk_rnd());
+ return (ret > mm->brk) ? ret : mm->brk;
}
unsigned long randomize_et_dyn(unsigned long base)
{
- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+ unsigned long ret;
if (!(current->flags & PF_RANDOMIZE))
return base;
- if (ret < base)
- return base;
- return ret;
+ ret = PAGE_ALIGN(base + brk_rnd());
+ return (ret > base) ? ret : base;
}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index e65c91c..f6be608 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -56,25 +56,26 @@ void update_cr_regs(struct task_struct *task)
#ifdef CONFIG_64BIT
/* Take care of the enable/disable of transactional execution. */
if (MACHINE_HAS_TE) {
- unsigned long cr[3], cr_new[3];
+ unsigned long cr, cr_new;
- __ctl_store(cr, 0, 2);
- cr_new[1] = cr[1];
+ __ctl_store(cr, 0, 0);
/* Set or clear transaction execution TXC bit 8. */
+ cr_new = cr | (1UL << 55);
if (task->thread.per_flags & PER_FLAG_NO_TE)
- cr_new[0] = cr[0] & ~(1UL << 55);
- else
- cr_new[0] = cr[0] | (1UL << 55);
+ cr_new &= ~(1UL << 55);
+ if (cr_new != cr)
+ __ctl_load(cr_new, 0, 0);
/* Set or clear transaction execution TDC bits 62 and 63. */
- cr_new[2] = cr[2] & ~3UL;
+ __ctl_store(cr, 2, 2);
+ cr_new = cr & ~3UL;
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
- cr_new[2] |= 1UL;
+ cr_new |= 1UL;
else
- cr_new[2] |= 2UL;
+ cr_new |= 2UL;
}
- if (memcmp(&cr_new, &cr, sizeof(cr)))
- __ctl_load(cr_new, 0, 2);
+ if (cr_new != cr)
+ __ctl_load(cr_new, 2, 2);
}
#endif
/* Copy user specified PER registers */
@@ -107,15 +108,11 @@ void update_cr_regs(struct task_struct *task)
void user_enable_single_step(struct task_struct *task)
{
set_tsk_thread_flag(task, TIF_SINGLE_STEP);
- if (task == current)
- update_cr_regs(task);
}
void user_disable_single_step(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
- if (task == current)
- update_cr_regs(task);
}
/*
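The rewritten update_cr_regs() manipulates TXC as (1UL << 55) because s390 numbers control-register bits from the most significant bit, so architectural bit 8 of a 64-bit register is shifted left by 63 - 8. A stand-alone illustration of that numbering (hypothetical CR_BIT macro; assumes a 64-bit unsigned long, as in the CONFIG_64BIT code above):

#include <stdio.h>

#define CR_BIT(nr)	(1UL << (63 - (nr)))	/* bit 0 is the MSB */

int main(void)
{
	unsigned long cr0 = 0;

	cr0 |= CR_BIT(8);			/* set TXC (bit 8)  */
	printf("TXC set:     %016lx\n", cr0);	/* 0080000000000000 */
	cr0 &= ~CR_BIT(8);			/* clear TXC again  */
	printf("TXC cleared: %016lx\n", cr0);	/* 0000000000000000 */
	return 0;
}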
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 3bac589..9f60467 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -5,7 +5,7 @@
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+#if IS_ENABLED(CONFIG_KVM)
EXPORT_SYMBOL(sie64a);
EXPORT_SYMBOL(sie_exit);
#endif
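The defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) tests replaced here and in the perf_event.c hunk above rely on Kconfig defining CONFIG_FOO for =y and CONFIG_FOO_MODULE for =m; IS_ENABLED() folds both checks into one expression. A stand-alone sketch of the preprocessor trick (abridged re-creation of the pattern in <linux/kconfig.h>; the mock macros are illustrative, not the exact header):

#include <stdio.h>

#define CONFIG_KVM_MODULE 1	/* pretend KVM was configured as =m */

#define __ARG_PLACEHOLDER_1		0,
#define __take_second_arg(__ignored, val, ...)	val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define IS_BUILTIN(option)		__is_defined(option)
#define IS_MODULE(option)		__is_defined(option##_MODULE)
#define IS_ENABLED(option)		(IS_BUILTIN(option) || IS_MODULE(option))

int main(void)
{
	printf("builtin: %d, module: %d, enabled: %d\n",
	       IS_BUILTIN(CONFIG_KVM), IS_MODULE(CONFIG_KVM),
	       IS_ENABLED(CONFIG_KVM));	/* builtin: 0, module: 1, enabled: 1 */
	return 0;
}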
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 4444875..09e2f46 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -373,7 +373,7 @@ static void __init setup_lowcore(void)
/*
* Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
- * restart data to the absolute zero lowcore. This is necesary if
+ * restart data to the absolute zero lowcore. This is necessary if
* PSW restart is done on an offline CPU that has lowcore zero.
*/
lc->restart_stack = (unsigned long) restart_stack;
@@ -1023,6 +1023,7 @@ void __init setup_arch(char **cmdline_p)
setup_vmcoreinfo();
setup_lowcore();
+ smp_fill_possible_mask();
cpu_init();
s390_init_cpu_topology();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index dc4a534..a7125b6 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -59,7 +59,7 @@ enum {
};
struct pcpu {
- struct cpu cpu;
+ struct cpu *cpu;
struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
unsigned long async_stack; /* async stack for the cpu */
unsigned long panic_stack; /* panic stack for the cpu */
@@ -159,9 +159,9 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
int order;
- set_bit(ec_bit, &pcpu->ec_mask);
- order = pcpu_running(pcpu) ?
- SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
+ if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
+ return;
+ order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
pcpu_sigp_retry(pcpu, order, 0);
}
@@ -721,18 +721,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
return 0;
}
-static int __init setup_possible_cpus(char *s)
-{
- int max, cpu;
+static unsigned int setup_possible_cpus __initdata;
- if (kstrtoint(s, 0, &max) < 0)
- return 0;
- init_cpu_possible(cpumask_of(0));
- for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
- set_cpu_possible(cpu, true);
+static int __init _setup_possible_cpus(char *s)
+{
+ get_option(&s, &setup_possible_cpus);
return 0;
}
-early_param("possible_cpus", setup_possible_cpus);
+early_param("possible_cpus", _setup_possible_cpus);
#ifdef CONFIG_HOTPLUG_CPU
@@ -775,6 +771,17 @@ void __noreturn cpu_die(void)
#endif /* CONFIG_HOTPLUG_CPU */
+void __init smp_fill_possible_mask(void)
+{
+ unsigned int possible, cpu;
+
+ possible = setup_possible_cpus;
+ if (!possible)
+ possible = MACHINE_IS_VM ? 64 : nr_cpu_ids;
+ for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
+ set_cpu_possible(cpu, true);
+}
+
void __init smp_prepare_cpus(unsigned int max_cpus)
{
/* request the 0x1201 emergency signal external interrupt */
@@ -958,7 +965,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned int)(long)hcpu;
- struct cpu *c = &pcpu_devices[cpu].cpu;
+ struct cpu *c = pcpu_devices[cpu].cpu;
struct device *s = &c->dev;
int err = 0;
@@ -975,10 +982,15 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
static int smp_add_present_cpu(int cpu)
{
- struct cpu *c = &pcpu_devices[cpu].cpu;
- struct device *s = &c->dev;
+ struct device *s;
+ struct cpu *c;
int rc;
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+ pcpu_devices[cpu].cpu = c;
+ s = &c->dev;
c->hotpluggable = 1;
rc = register_cpu(c, cpu);
if (rc)
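The pcpu_ec_call() hunk above switches to test_and_set_bit() so that only the caller who actually flips the bit sends the SIGP order; callers that find the bit already set return without signalling again. A user-space sketch of that 0-to-1 edge filtering (hypothetical names, C11 atomics standing in for the kernel bitops):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong ec_mask;

static void ec_call(int ec_bit)
{
	unsigned long old = atomic_fetch_or(&ec_mask, 1UL << ec_bit);

	if (old & (1UL << ec_bit))
		return;				/* already pending, stay quiet */
	printf("sending order for bit %d\n", ec_bit);
}

int main(void)
{
	ec_call(3);	/* prints: sends the order        */
	ec_call(3);	/* silent: request already queued */
	return 0;
}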
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 2440602..d101dae 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -275,7 +275,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
} else {
/*
- * Set condition code 3 to stop the guest from issueing channel
+ * Set condition code 3 to stop the guest from issuing channel
* I/O instructions.
*/
kvm_s390_set_psw_cc(vcpu, 3);
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index dbdab3e..0632dc5 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -74,8 +74,8 @@ static size_t copy_in_kernel(size_t count, void __user *to,
/*
* Returns kernel address for user virtual address. If the returned address is
- * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address
- * contains the (negative) exception code.
+ * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
+ * address contains the (negative) exception code.
*/
#ifdef CONFIG_64BIT
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index e794c88..3584ed9 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -293,7 +293,7 @@ static int gmap_alloc_table(struct gmap *gmap,
* @addr: address in the guest address space
* @len: length of the memory area to unmap
*
- * Returns 0 if the unmap succeded, -EINVAL if not.
+ * Returns 0 if the unmap succeeded, -EINVAL if not.
*/
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(gmap_unmap_segment);
* @from: source address in the parent address space
* @to: target address in the guest address space
*
- * Returns 0 if the mmap succeded, -EINVAL or -ENOMEM if not.
+ * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
*/
int gmap_map_segment(struct gmap *gmap, unsigned long from,
unsigned long to, unsigned long len)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 16871da..708d60e 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -368,14 +368,16 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
/* lhi %r4,0 */
EMIT4(0xa7480000);
- /* dr %r4,%r12 */
- EMIT2(0x1d4c);
+ /* dlr %r4,%r12 */
+ EMIT4(0xb997004c);
break;
- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
- /* m %r4,<d(K)>(%r13) */
- EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
- /* lr %r5,%r4 */
- EMIT2(0x1854);
+ case BPF_S_ALU_DIV_K: /* A /= K */
+ if (K == 1)
+ break;
+ /* lhi %r4,0 */
+ EMIT4(0xa7480000);
+ /* dl %r4,<d(K)>(%r13) */
+ EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
break;
case BPF_S_ALU_MOD_X: /* A %= X */
jit->seen |= SEEN_XREG | SEEN_RET0;
@@ -385,16 +387,21 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
/* lhi %r4,0 */
EMIT4(0xa7480000);
- /* dr %r4,%r12 */
- EMIT2(0x1d4c);
+ /* dlr %r4,%r12 */
+ EMIT4(0xb997004c);
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
case BPF_S_ALU_MOD_K: /* A %= K */
+ if (K == 1) {
+ /* lhi %r5,0 */
+ EMIT4(0xa7580000);
+ break;
+ }
/* lhi %r4,0 */
EMIT4(0xa7480000);
- /* d %r4,<d(K)>(%r13) */
- EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
+ /* dl %r4,<d(K)>(%r13) */
+ EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
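The JIT changes above replace the signed divide instructions with their unsigned "divide logical" forms and skip code generation entirely for K == 1, since BPF defines A and X as unsigned 32-bit values and dividing by one is the identity. A stand-alone illustration (hypothetical values) of how signed and unsigned 32-bit division diverge once the top bit is set:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t a = 0x80000004u;	/* accumulator with the top bit set */
	uint32_t x = 2;

	printf("unsigned A / X: %u\n", a / x);			 /* 1073741826  */
	printf("signed   A / X: %d\n", (int32_t)a / (int32_t)x); /* -1073741822 */
	printf("A / 1 unchanged: %u\n", a / 1);			 /* 2147483652  */
	return 0;
}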
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 231ceca..a32c967 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -26,9 +26,6 @@
#define MAX_NUM_SDB 511
#define MIN_NUM_SDB 1
-#define ALERT_REQ_MASK 0x4000000000000000ul
-#define BUFFER_FULL_MASK 0x8000000000000000ul
-
DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
struct hws_execute_parms {
@@ -44,6 +41,7 @@ static DEFINE_MUTEX(hws_sem_oom);
static unsigned char hws_flush_all;
static unsigned int hws_oom;
+static unsigned int hws_alert;
static struct workqueue_struct *hws_wq;
static unsigned int hws_state;
@@ -65,43 +63,6 @@ static unsigned long interval;
static unsigned long min_sampler_rate;
static unsigned long max_sampler_rate;
-static int ssctl(void *buffer)
-{
- int cc;
-
- /* set in order to detect a program check */
- cc = 1;
-
- asm volatile(
- "0: .insn s,0xB2870000,0(%1)\n"
- "1: ipm %0\n"
- " srl %0,28\n"
- "2:\n"
- EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
- : "+d" (cc), "+a" (buffer)
- : "m" (*((struct hws_ssctl_request_block *)buffer))
- : "cc", "memory");
-
- return cc ? -EINVAL : 0 ;
-}
-
-static int qsi(void *buffer)
-{
- int cc;
- cc = 1;
-
- asm volatile(
- "0: .insn s,0xB2860000,0(%1)\n"
- "1: lhi %0,0\n"
- "2:\n"
- EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
- : "=d" (cc), "+a" (buffer)
- : "m" (*((struct hws_qsi_info_block *)buffer))
- : "cc", "memory");
-
- return cc ? -EINVAL : 0;
-}
-
static void execute_qsi(void *parms)
{
struct hws_execute_parms *ep = parms;
@@ -113,7 +74,7 @@ static void execute_ssctl(void *parms)
{
struct hws_execute_parms *ep = parms;
- ep->rc = ssctl(ep->buffer);
+ ep->rc = lsctl(ep->buffer);
}
static int smp_ctl_ssctl_stop(int cpu)
@@ -214,17 +175,6 @@ static int smp_ctl_qsi(int cpu)
return ep.rc;
}
-static inline unsigned long *trailer_entry_ptr(unsigned long v)
-{
- void *ret;
-
- ret = (void *)v;
- ret += PAGE_SIZE;
- ret -= sizeof(struct hws_trailer_entry);
-
- return (unsigned long *) ret;
-}
-
static void hws_ext_handler(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
@@ -233,6 +183,9 @@ static void hws_ext_handler(struct ext_code ext_code,
if (!(param32 & CPU_MF_INT_SF_MASK))
return;
+ if (!hws_alert)
+ return;
+
inc_irq_stat(IRQEXT_CMS);
atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
@@ -256,16 +209,6 @@ static void init_all_cpu_buffers(void)
}
}
-static int is_link_entry(unsigned long *s)
-{
- return *s & 0x1ul ? 1 : 0;
-}
-
-static unsigned long *get_next_sdbt(unsigned long *s)
-{
- return (unsigned long *) (*s & ~0x1ul);
-}
-
static int prepare_cpu_buffers(void)
{
int cpu;
@@ -353,7 +296,7 @@ static int allocate_sdbt(int cpu)
}
*sdbt = sdb;
trailer = trailer_entry_ptr(*sdbt);
- *trailer = ALERT_REQ_MASK;
+ *trailer = SDB_TE_ALERT_REQ_MASK;
sdbt++;
mutex_unlock(&hws_sem_oom);
}
@@ -829,7 +772,7 @@ static void worker_on_interrupt(unsigned int cpu)
trailer = trailer_entry_ptr(*sdbt);
/* leave loop if no more work to do */
- if (!(*trailer & BUFFER_FULL_MASK)) {
+ if (!(*trailer & SDB_TE_BUFFER_FULL_MASK)) {
done = 1;
if (!hws_flush_all)
continue;
@@ -856,7 +799,7 @@ static void worker_on_interrupt(unsigned int cpu)
static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
unsigned long *dear)
{
- struct hws_data_entry *sample_data_ptr;
+ struct hws_basic_entry *sample_data_ptr;
unsigned long *trailer;
trailer = trailer_entry_ptr(*sdbt);
@@ -866,7 +809,7 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
trailer = dear;
}
- sample_data_ptr = (struct hws_data_entry *)(*sdbt);
+ sample_data_ptr = (struct hws_basic_entry *)(*sdbt);
while ((unsigned long *)sample_data_ptr < trailer) {
struct pt_regs *regs = NULL;
@@ -1002,6 +945,7 @@ int hwsampler_deallocate(void)
goto deallocate_exit;
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+ hws_alert = 0;
deallocate_sdbt();
hws_state = HWS_DEALLOCATED;
@@ -1116,6 +1060,7 @@ int hwsampler_shutdown(void)
if (hws_state == HWS_STOPPED) {
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+ hws_alert = 0;
deallocate_sdbt();
}
if (hws_wq) {
@@ -1190,6 +1135,7 @@ start_all_exit:
hws_oom = 1;
hws_flush_all = 0;
/* now let them in, 1407 CPUMF external interrupts */
+ hws_alert = 1;
irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
return 0;
diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h
index 0022e1e..a483d06 100644
--- a/arch/s390/oprofile/hwsampler.h
+++ b/arch/s390/oprofile/hwsampler.h
@@ -9,27 +9,7 @@
#define HWSAMPLER_H_
#include <linux/workqueue.h>
-
-struct hws_qsi_info_block /* QUERY SAMPLING information block */
-{ /* Bit(s) */
- unsigned int b0_13:14; /* 0-13: zeros */
- unsigned int as:1; /* 14: sampling authorisation control*/
- unsigned int b15_21:7; /* 15-21: zeros */
- unsigned int es:1; /* 22: sampling enable control */
- unsigned int b23_29:7; /* 23-29: zeros */
- unsigned int cs:1; /* 30: sampling activation control */
- unsigned int:1; /* 31: reserved */
- unsigned int bsdes:16; /* 4-5: size of sampling entry */
- unsigned int:16; /* 6-7: reserved */
- unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */
- unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/
- unsigned long tear; /* 24-31: TEAR contents */
- unsigned long dear; /* 32-39: DEAR contents */
- unsigned int rsvrd0; /* 40-43: reserved */
- unsigned int cpu_speed; /* 44-47: CPU speed */
- unsigned long long rsvrd1; /* 48-55: reserved */
- unsigned long long rsvrd2; /* 56-63: reserved */
-};
+#include <asm/cpu_mf.h>
struct hws_ssctl_request_block /* SET SAMPLING CONTROLS req block */
{ /* bytes 0 - 7 Bit(s) */
@@ -68,36 +48,6 @@ struct hws_cpu_buffer {
unsigned int stop_mode:1;
};
-struct hws_data_entry {
- unsigned int def:16; /* 0-15 Data Entry Format */
- unsigned int R:4; /* 16-19 reserved */
- unsigned int U:4; /* 20-23 Number of unique instruct. */
- unsigned int z:2; /* zeros */
- unsigned int T:1; /* 26 PSW DAT mode */
- unsigned int W:1; /* 27 PSW wait state */
- unsigned int P:1; /* 28 PSW Problem state */
- unsigned int AS:2; /* 29-30 PSW address-space control */
- unsigned int I:1; /* 31 entry valid or invalid */
- unsigned int:16;
- unsigned int prim_asn:16; /* primary ASN */
- unsigned long long ia; /* Instruction Address */
- unsigned long long gpp; /* Guest Program Parameter */
- unsigned long long hpp; /* Host Program Parameter */
-};
-
-struct hws_trailer_entry {
- unsigned int f:1; /* 0 - Block Full Indicator */
- unsigned int a:1; /* 1 - Alert request control */
- unsigned long:62; /* 2 - 63: Reserved */
- unsigned long overflow; /* 64 - sample Overflow count */
- unsigned long timestamp; /* 16 - time-stamp */
- unsigned long timestamp1; /* */
- unsigned long reserved1; /* 32 -Reserved */
- unsigned long reserved2; /* */
- unsigned long progusage1; /* 48 - reserved for programming use */
- unsigned long progusage2; /* */
-};
-
int hwsampler_setup(void);
int hwsampler_shutdown(void);
int hwsampler_allocate(unsigned long sdbt, unsigned long sdb);
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 04e1b6a..9ffe645 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -10,6 +10,7 @@
*/
#include <linux/oprofile.h>
+#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/fs.h>
@@ -67,6 +68,21 @@ module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling"
"(report cpu_type \"timer\"");
+static int __oprofile_hwsampler_start(void)
+{
+ int retval;
+
+ retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks);
+ if (retval)
+ return retval;
+
+ retval = hwsampler_start_all(oprofile_hw_interval);
+ if (retval)
+ hwsampler_deallocate();
+
+ return retval;
+}
+
static int oprofile_hwsampler_start(void)
{
int retval;
@@ -76,13 +92,13 @@ static int oprofile_hwsampler_start(void)
if (!hwsampler_running)
return timer_ops.start();
- retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks);
+ retval = perf_reserve_sampling();
if (retval)
return retval;
- retval = hwsampler_start_all(oprofile_hw_interval);
+ retval = __oprofile_hwsampler_start();
if (retval)
- hwsampler_deallocate();
+ perf_release_sampling();
return retval;
}
@@ -96,6 +112,7 @@ static void oprofile_hwsampler_stop(void)
hwsampler_stop_all();
hwsampler_deallocate();
+ perf_release_sampling();
return;
}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index bf7c73d..0820362 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -919,17 +919,23 @@ static void zpci_mem_exit(void)
kmem_cache_destroy(zdev_fmb_cache);
}
-static unsigned int s390_pci_probe;
+static unsigned int s390_pci_probe = 1;
+static unsigned int s390_pci_initialized;
char * __init pcibios_setup(char *str)
{
- if (!strcmp(str, "on")) {
- s390_pci_probe = 1;
+ if (!strcmp(str, "off")) {
+ s390_pci_probe = 0;
return NULL;
}
return str;
}
+bool zpci_is_enabled(void)
+{
+ return s390_pci_initialized;
+}
+
static int __init pci_base_init(void)
{
int rc;
@@ -961,6 +967,7 @@ static int __init pci_base_init(void)
if (rc)
goto out_find;
+ s390_pci_initialized = 1;
return 0;
out_find:
@@ -978,5 +985,6 @@ subsys_initcall_sync(pci_base_init);
void zpci_rescan(void)
{
- clp_rescan_pci_devices_simple();
+ if (zpci_is_enabled())
+ clp_rescan_pci_devices_simple();
}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 9b83d08..60c11a6 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -285,7 +285,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
flags |= ZPCI_TABLE_PROTECTED;
if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
- atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
+ atomic64_add(nr_pages, &zdev->fmb->mapped_pages);
return dma_addr + (offset & ~PAGE_MASK);
}
@@ -313,7 +313,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
zpci_err_hex(&dma_addr, sizeof(dma_addr));
}
- atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
+ atomic64_add(npages, &zdev->fmb->unmapped_pages);
iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
dma_free_iommu(zdev, iommu_page_index, npages);
}
@@ -332,7 +332,6 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
if (!page)
return NULL;
- atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages);
pa = page_to_phys(page);
memset((void *) pa, 0, size);
@@ -343,6 +342,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
return NULL;
}
+ atomic64_add(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
if (dma_handle)
*dma_handle = map;
return (void *) pa;
@@ -352,8 +352,11 @@ static void s390_dma_free(struct device *dev, size_t size,
void *pa, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
- s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
- DMA_BIDIRECTIONAL, NULL);
+ struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+
+ size = PAGE_ALIGN(size);
+ atomic64_sub(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
+ s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
free_pages((unsigned long) pa, get_order(size));
}
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 800f064..01e251b 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -43,9 +43,8 @@ struct zpci_ccdf_avail {
u16 pec; /* PCI event code */
} __packed;
-void zpci_event_error(void *data)
+static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
{
- struct zpci_ccdf_err *ccdf = data;
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
zpci_err("error CCDF:\n");
@@ -58,9 +57,14 @@ void zpci_event_error(void *data)
pci_name(zdev->pdev), ccdf->pec, ccdf->fid);
}
-void zpci_event_availability(void *data)
+void zpci_event_error(void *data)
+{
+ if (zpci_is_enabled())
+ __zpci_event_error(data);
+}
+
+static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
{
- struct zpci_ccdf_avail *ccdf = data;
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
struct pci_dev *pdev = zdev ? zdev->pdev : NULL;
int ret;
@@ -75,6 +79,7 @@ void zpci_event_availability(void *data)
if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED)
break;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ zdev->fh = ccdf->fh;
ret = zpci_enable_device(zdev);
if (ret)
break;
@@ -98,9 +103,14 @@ void zpci_event_availability(void *data)
break;
case 0x0304: /* Configured -> Standby */
- if (pdev)
+ if (pdev) {
+ /* Give the driver a hint that the function is
+ * already unusable. */
+ pdev->error_state = pci_channel_io_perm_failure;
pci_stop_and_remove_bus_device(pdev);
+ }
+ zdev->fh = ccdf->fh;
zpci_disable_device(zdev);
zdev->state = ZPCI_FN_STATE_STANDBY;
break;
@@ -108,6 +118,8 @@ void zpci_event_availability(void *data)
clp_rescan_pci_devices();
break;
case 0x0308: /* Standby -> Reserved */
+ if (!zdev)
+ break;
pci_stop_root_bus(zdev->bus);
pci_remove_root_bus(zdev->bus);
break;
@@ -115,3 +127,9 @@ void zpci_event_availability(void *data)
break;
}
}
+
+void zpci_event_availability(void *data)
+{
+ if (zpci_is_enabled())
+ __zpci_event_availability(data);
+}
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index f3414ad..fe7471e 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -1,6 +1,7 @@
header-y +=
+generic-y += barrier.h
generic-y += clkdev.h
generic-y += trace_clock.h
generic-y += xor.h
diff --git a/arch/score/include/asm/barrier.h b/arch/score/include/asm/barrier.h
deleted file mode 100644
index 0eacb64..0000000
--- a/arch/score/include/asm/barrier.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _ASM_SCORE_BARRIER_H
-#define _ASM_SCORE_BARRIER_H
-
-#define mb() barrier()
-#define rmb() barrier()
-#define wmb() barrier()
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-
-#define read_barrier_depends() do {} while (0)
-#define smp_read_barrier_depends() do {} while (0)
-
-#define set_mb(var, value) do {var = value; wmb(); } while (0)
-
-#endif /* _ASM_SCORE_BARRIER_H */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 9b0979f..ce29831 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -66,6 +66,7 @@ config SUPERH32
select PERF_EVENTS
select ARCH_HIBERNATION_POSSIBLE if MMU
select SPARSE_IRQ
+ select HAVE_CC_STACKPROTECTOR
config SUPERH64
def_bool ARCH = "sh64"
@@ -695,20 +696,6 @@ config SECCOMP
If unsure, say N.
-config CC_STACKPROTECTOR
- bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
- depends on SUPERH32
- help
- This option turns on the -fstack-protector GCC feature. This
- feature puts, at the beginning of functions, a canary value on
- the stack just before the return address, and validates
- the value just before actually returning. Stack based buffer
- overflows (that need to overwrite this return address) now also
- overwrite the canary, which gets detected and the attack is then
- neutralized via a kernel panic.
-
- This feature requires gcc version 4.2 or above.
-
config SMP
bool "Symmetric multi-processing support"
depends on SYS_SUPPORTS_SMP
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index aed701c..d4d16e4 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -199,10 +199,6 @@ ifeq ($(CONFIG_DWARF_UNWINDER),y)
KBUILD_CFLAGS += -fasynchronous-unwind-tables
endif
-ifeq ($(CONFIG_CC_STACKPROTECTOR),y)
- KBUILD_CFLAGS += -fstack-protector
-endif
-
libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y)
libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y)
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index 72c103d..4371530 100644
--- a/arch/sh/include/asm/barrier.h
+++ b/arch/sh/include/asm/barrier.h
@@ -26,29 +26,14 @@
#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
#define mb() __asm__ __volatile__ ("synco": : :"memory")
#define rmb() mb()
-#define wmb() __asm__ __volatile__ ("synco": : :"memory")
+#define wmb() mb()
#define ctrl_barrier() __icbi(PAGE_OFFSET)
-#define read_barrier_depends() do { } while(0)
#else
-#define mb() __asm__ __volatile__ ("": : :"memory")
-#define rmb() mb()
-#define wmb() __asm__ __volatile__ ("": : :"memory")
#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
-#define read_barrier_depends() do { } while(0)
-#endif
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
#endif
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#include <asm-generic/barrier.h>
+
#endif /* __ASM_SH_BARRIER_H */
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 2a0a596..d77f2f6 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(empty_zero_page);
+#ifdef CONFIG_FLATMEM
+/* need in pfn_valid macro */
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
+#endif
#define DECLARE_EXPORT(name) \
extern void name(void);EXPORT_SYMBOL(name)
diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h
index c1b7665..ae69eda 100644
--- a/arch/sparc/include/asm/barrier_32.h
+++ b/arch/sparc/include/asm/barrier_32.h
@@ -1,15 +1,7 @@
#ifndef __SPARC_BARRIER_H
#define __SPARC_BARRIER_H
-/* XXX Change this if we ever use a PSO mode kernel. */
-#define mb() __asm__ __volatile__ ("" : : : "memory")
-#define rmb() mb()
-#define wmb() mb()
-#define read_barrier_depends() do { } while(0)
-#define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
-#define smp_mb() __asm__ __volatile__("":::"memory")
-#define smp_rmb() __asm__ __volatile__("":::"memory")
-#define smp_wmb() __asm__ __volatile__("":::"memory")
-#define smp_read_barrier_depends() do { } while(0)
+#include <asm/processor.h> /* for nop() */
+#include <asm-generic/barrier.h>
#endif /* !(__SPARC_BARRIER_H) */
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 95d4598..b5aad96 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -53,4 +53,19 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#define smp_read_barrier_depends() do { } while(0)
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
#endif /* !(__SPARC64_BARRIER_H) */
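The smp_store_release()/smp_load_acquire() definitions added above target the classic message-passing case: the writer publishes its data before setting a flag, and a reader that observes the flag is guaranteed to see the data. A user-space analogue using C11 atomics (hypothetical payload/ready names; single-threaded here purely to keep the sketch runnable):

#include <stdatomic.h>
#include <stdio.h>

static int payload;
static atomic_int ready;

static void producer(void)
{
	payload = 42;					/* plain store */
	atomic_store_explicit(&ready, 1, memory_order_release);
}

static void consumer(void)
{
	if (atomic_load_explicit(&ready, memory_order_acquire))
		printf("payload = %d\n", payload);	/* guaranteed to be 42 */
}

int main(void)
{
	producer();
	consumer();
	return 0;
}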
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index e562d3c..ad7e178 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
-#define __copy_to_user_inatomic ___copy_to_user
-#define __copy_from_user_inatomic ___copy_from_user
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
struct pt_regs;
extern unsigned long compute_effective_address(struct pt_regs *,
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 070ed14..76663b0 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask)
return 1;
#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
+ if (dev_is_pci(dev))
return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 2096468..e7e215d 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops);
*/
int dma_supported(struct device *dev, u64 mask)
{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
+ if (dev_is_pci(dev))
return 1;
-#endif
+
return 0;
}
EXPORT_SYMBOL(dma_supported);
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index 60b19f5..b45fe3f 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -6,6 +6,7 @@
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/ftrace.h>
+#include <linux/context_tracking.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b66a533..b085311 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -123,11 +123,12 @@ void smp_callin(void)
rmb();
set_cpu_online(cpuid, true);
- local_irq_enable();
/* idle thread is expected to have preempt disabled */
preempt_disable();
+ local_irq_enable();
+
cpu_startup_entry(CPUHP_ONLINE);
}
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 218b6b2..01fe994 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -497,9 +497,20 @@ void bpf_jit_compile(struct sk_filter *fp)
case BPF_S_ALU_MUL_K: /* A *= K */
emit_alu_K(MUL, K);
break;
- case BPF_S_ALU_DIV_K: /* A /= K */
- emit_alu_K(MUL, K);
- emit_read_y(r_A);
+ case BPF_S_ALU_DIV_K: /* A /= K with K != 0 */
+ if (K == 1)
+ break;
+ emit_write_y(G0);
+#ifdef CONFIG_SPARC32
+ /* The Sparc v8 architecture requires
+ * three instructions between a %y
+ * register write and the first use.
+ */
+ emit_nop();
+ emit_nop();
+ emit_nop();
+#endif
+ emit_alu_K(DIV, K);
break;
case BPF_S_ALU_DIV_X: /* A /= X; */
emit_cmpi(r_X, 0);
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
index a9a73da..b5a05d0 100644
--- a/arch/tile/include/asm/barrier.h
+++ b/arch/tile/include/asm/barrier.h
@@ -22,59 +22,6 @@
#include <arch/spr_def.h>
#include <asm/timex.h>
-/*
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * b = 2;
- * memory_barrier();
- * p = &b; q = p;
- * read_barrier_depends();
- * d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * a = 2;
- * memory_barrier();
- * b = 3; y = b;
- * read_barrier_depends();
- * x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- */
-#define read_barrier_depends() do { } while (0)
-
#define __sync() __insn_mf()
#include <hv/syscall_public.h>
@@ -125,20 +72,7 @@ mb_incoherent(void)
#define mb() fast_mb()
#define iob() fast_iob()
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
-#endif
-
-#define set_mb(var, value) \
- do { var = value; mb(); } while (0)
+#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_BARRIER_H */
diff --git a/arch/unicore32/include/asm/barrier.h b/arch/unicore32/include/asm/barrier.h
index a6620e5..83d6a52 100644
--- a/arch/unicore32/include/asm/barrier.h
+++ b/arch/unicore32/include/asm/barrier.h
@@ -14,15 +14,6 @@
#define dsb() __asm__ __volatile__ ("" : : : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
-#define mb() barrier()
-#define rmb() barrier()
-#define wmb() barrier()
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define read_barrier_depends() do { } while (0)
-#define smp_read_barrier_depends() do { } while (0)
-
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#include <asm-generic/barrier.h>
#endif /* __UNICORE_BARRIER_H__ */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0952ecd..cd18b83 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -125,6 +125,7 @@ config X86
select RTC_LIB
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
+ select HAVE_CC_STACKPROTECTOR
config INSTRUCTION_DECODER
def_bool y
@@ -438,42 +439,26 @@ config X86_INTEL_CE
This option compiles in support for the CE4100 SOC for settop
boxes and media devices.
-config X86_WANT_INTEL_MID
+config X86_INTEL_MID
bool "Intel MID platform support"
depends on X86_32
depends on X86_EXTENDED_PLATFORM
- ---help---
- Select to build a kernel capable of supporting Intel MID platform
- systems which do not have the PCI legacy interfaces (Moorestown,
- Medfield). If you are building for a PC class system say N here.
-
-if X86_WANT_INTEL_MID
-
-config X86_INTEL_MID
- bool
-
-config X86_MDFLD
- bool "Medfield MID platform"
depends on PCI
depends on PCI_GOANY
depends on X86_IO_APIC
- select X86_INTEL_MID
select SFI
+ select I2C
select DW_APB_TIMER
select APB_TIMER
- select I2C
- select SPI
select INTEL_SCU_IPC
- select X86_PLATFORM_DEVICES
select MFD_INTEL_MSIC
---help---
- Medfield is Intel's Low Power Intel Architecture (LPIA) based Moblin
- Internet Device(MID) platform.
- Unlike standard x86 PCs, Medfield does not have many legacy devices
- nor standard legacy replacement devices/features. e.g. Medfield does
- not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
+ Select to build a kernel capable of supporting Intel MID (Mobile
+ Internet Device) platform systems which do not have the PCI legacy
+ interfaces. If you are building for a PC class system say N here.
-endif
+ Intel MID platforms are based on an Intel processor and chipset which
+ consume less power than most of the x86 derivatives.
config X86_INTEL_LPSS
bool "Intel Low Power Subsystem Support"
@@ -1080,10 +1065,6 @@ config MICROCODE_OLD_INTERFACE
def_bool y
depends on MICROCODE
-config MICROCODE_INTEL_LIB
- def_bool y
- depends on MICROCODE_INTEL
-
config MICROCODE_INTEL_EARLY
def_bool n
@@ -1617,22 +1598,6 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
-config CC_STACKPROTECTOR
- bool "Enable -fstack-protector buffer overflow detection"
- ---help---
- This option turns on the -fstack-protector GCC feature. This
- feature puts, at the beginning of functions, a canary value on
- the stack just before the return address, and validates
- the value just before actually returning. Stack based buffer
- overflows (that need to overwrite this return address) now also
- overwrite the canary, which gets detected and the attack is then
- neutralized via a kernel panic.
-
- This feature requires gcc version 4.2 or above, or a distribution
- gcc with the feature backported. Older versions are automatically
- detected and for those versions, this configuration option is
- ignored. (and a warning is printed during bootup)
-
source kernel/Kconfig.hz
config KEXEC
@@ -1728,16 +1693,67 @@ config RELOCATABLE
Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address
it has been loaded at and the compile time physical address
- (CONFIG_PHYSICAL_START) is ignored.
+ (CONFIG_PHYSICAL_START) is used as the minimum location.
-# Relocation on x86-32 needs some additional build support
+config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image"
+ depends on RELOCATABLE
+ depends on !HIBERNATION
+ default n
+ ---help---
+ Randomizes the physical and virtual address at which the
+ kernel image is decompressed, as a security feature that
+ deters exploit attempts relying on knowledge of the location
+ of kernel internals.
+
+ Entropy is generated using the RDRAND instruction if it is
+ supported. If RDTSC is supported, it is used as well. If
+ neither RDRAND nor RDTSC are supported, then randomness is
+ read from the i8254 timer.
+
+ The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET,
+ and aligned according to PHYSICAL_ALIGN. Since the kernel is
+ built using 2GiB addressing, and PHYSICAL_ALIGN must be at a
+ minimum of 2MiB, only 10 bits of entropy are theoretically
+ possible. At best, due to page table layouts, 64-bit can use
+ 9 bits of entropy and 32-bit uses 8 bits.
+
+ If unsure, say N.
+
+config RANDOMIZE_BASE_MAX_OFFSET
+ hex "Maximum kASLR offset allowed" if EXPERT
+ depends on RANDOMIZE_BASE
+ range 0x0 0x20000000 if X86_32
+ default "0x20000000" if X86_32
+ range 0x0 0x40000000 if X86_64
+ default "0x40000000" if X86_64
+ ---help---
+ The lesser of RANDOMIZE_BASE_MAX_OFFSET and available physical
+ memory is used to determine the maximal offset in bytes that will
+ be applied to the kernel when kernel Address Space Layout
+ Randomization (kASLR) is active. This must be a multiple of
+ PHYSICAL_ALIGN.
+
+ On 32-bit this is limited to 512MiB by page table layouts. The
+ default is 512MiB.
+
+ On 64-bit this is limited by how the kernel fixmap page table is
+ positioned, so this cannot be larger than 1GiB currently. Without
+ RANDOMIZE_BASE, there is a 512MiB to 1.5GiB split between kernel
+ and modules. When RANDOMIZE_BASE_MAX_OFFSET is above 512MiB, the
+ modules area will shrink to compensate, up to the current maximum
+ 1GiB to 1GiB split. The default is 1GiB.
+
+ If unsure, leave at the default value.
+
+# Relocation on x86 needs some additional build support
config X86_NEED_RELOCS
def_bool y
- depends on X86_32 && RELOCATABLE
+ depends on RANDOMIZE_BASE || (X86_32 && RELOCATABLE)
config PHYSICAL_ALIGN
hex "Alignment value to which kernel should be aligned"
- default "0x1000000"
+ default "0x200000"
range 0x2000 0x1000000 if X86_32
range 0x200000 0x1000000 if X86_64
---help---
@@ -2393,6 +2409,14 @@ config X86_DMA_REMAP
bool
depends on STA2X11
+config IOSF_MBI
+ bool
+ depends on PCI
+ ---help---
+ To be selected by modules requiring access to the Intel OnChip System
+ Fabric (IOSF) Sideband MailBox Interface (MBI). For MBI platforms
+ enumerable by PCI.
+
source "net/Kconfig"
source "drivers/Kconfig"
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 57d0215..13b22e0 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -89,13 +89,11 @@ else
KBUILD_CFLAGS += -maccumulate-outgoing-args
endif
+# Make sure compiler does not have buggy stack-protector support.
ifdef CONFIG_CC_STACKPROTECTOR
cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
- ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
- stackp-y := -fstack-protector
- KBUILD_CFLAGS += $(stackp-y)
- else
- $(warning stack protector enabled but no compiler support)
+ ifneq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
+ $(warning stack-protector enabled but compiler support broken)
endif
endif
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index d9c1195..de70669 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -20,7 +20,7 @@ targets := vmlinux.bin setup.bin setup.elf bzImage
targets += fdimage fdimage144 fdimage288 image.iso mtools.conf
subdir- := compressed
-setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpucheck.o
+setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpuflags.o cpucheck.o
setup-y += early_serial_console.o edd.o header.o main.o mca.o memory.o
setup-y += pm.o pmjump.o printf.o regs.o string.o tty.o video.o
setup-y += video-mode.o version.o
diff --git a/arch/x86/boot/bioscall.S b/arch/x86/boot/bioscall.S
index 1dfbf64..d401b4a 100644
--- a/arch/x86/boot/bioscall.S
+++ b/arch/x86/boot/bioscall.S
@@ -1,6 +1,6 @@
/* -----------------------------------------------------------------------
*
- * Copyright 2009 Intel Corporation; author H. Peter Anvin
+ * Copyright 2009-2014 Intel Corporation; author H. Peter Anvin
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2 or (at your
@@ -13,8 +13,8 @@
* touching registers they shouldn't be.
*/
- .code16gcc
- .text
+ .code16
+ .section ".inittext","ax"
.globl intcall
.type intcall, @function
intcall:
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index ef72bae..50f8c5e 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -26,9 +26,8 @@
#include <asm/boot.h>
#include <asm/setup.h>
#include "bitops.h"
-#include <asm/cpufeature.h>
-#include <asm/processor-flags.h>
#include "ctype.h"
+#include "cpuflags.h"
/* Useful macros */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
@@ -307,14 +306,7 @@ static inline int cmdline_find_option_bool(const char *option)
return __cmdline_find_option_bool(cmd_line_ptr, option);
}
-
/* cpu.c, cpucheck.c */
-struct cpu_features {
- int level; /* Family, or 64 for x86-64 */
- int model;
- u32 flags[NCAPINTS];
-};
-extern struct cpu_features cpu;
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr);
int validate_cpu(void);
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index c8a6792..0fcd913 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -28,7 +28,7 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include
VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
$(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \
- $(obj)/piggy.o
+ $(obj)/piggy.o $(obj)/cpuflags.o $(obj)/aslr.o
$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
new file mode 100644
index 0000000..90a21f4
--- /dev/null
+++ b/arch/x86/boot/compressed/aslr.c
@@ -0,0 +1,316 @@
+#include "misc.h"
+
+#ifdef CONFIG_RANDOMIZE_BASE
+#include <asm/msr.h>
+#include <asm/archrandom.h>
+#include <asm/e820.h>
+
+#include <generated/compile.h>
+#include <linux/module.h>
+#include <linux/uts.h>
+#include <linux/utsname.h>
+#include <generated/utsrelease.h>
+
+/* Simplified build-specific string for starting entropy. */
+static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
+ LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
+
+#define I8254_PORT_CONTROL 0x43
+#define I8254_PORT_COUNTER0 0x40
+#define I8254_CMD_READBACK 0xC0
+#define I8254_SELECT_COUNTER0 0x02
+#define I8254_STATUS_NOTREADY 0x40
+static inline u16 i8254(void)
+{
+ u16 status, timer;
+
+ do {
+ outb(I8254_PORT_CONTROL,
+ I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+ status = inb(I8254_PORT_COUNTER0);
+ timer = inb(I8254_PORT_COUNTER0);
+ timer |= inb(I8254_PORT_COUNTER0) << 8;
+ } while (status & I8254_STATUS_NOTREADY);
+
+ return timer;
+}
+
+static unsigned long rotate_xor(unsigned long hash, const void *area,
+ size_t size)
+{
+ size_t i;
+ unsigned long *ptr = (unsigned long *)area;
+
+ for (i = 0; i < size / sizeof(hash); i++) {
+ /* Rotate by odd number of bits and XOR. */
+ hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+ hash ^= ptr[i];
+ }
+
+ return hash;
+}
+
+/* Attempt to create a simple but unpredictable starting entropy. */
+static unsigned long get_random_boot(void)
+{
+ unsigned long hash = 0;
+
+ hash = rotate_xor(hash, build_str, sizeof(build_str));
+ hash = rotate_xor(hash, real_mode, sizeof(*real_mode));
+
+ return hash;
+}
+
+static unsigned long get_random_long(void)
+{
+#ifdef CONFIG_X86_64
+ const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
+#else
+ const unsigned long mix_const = 0x3f39e593UL;
+#endif
+ unsigned long raw, random = get_random_boot();
+ bool use_i8254 = true;
+
+ debug_putstr("KASLR using");
+
+ if (has_cpuflag(X86_FEATURE_RDRAND)) {
+ debug_putstr(" RDRAND");
+ if (rdrand_long(&raw)) {
+ random ^= raw;
+ use_i8254 = false;
+ }
+ }
+
+ if (has_cpuflag(X86_FEATURE_TSC)) {
+ debug_putstr(" RDTSC");
+ rdtscll(raw);
+
+ random ^= raw;
+ use_i8254 = false;
+ }
+
+ if (use_i8254) {
+ debug_putstr(" i8254");
+ random ^= i8254();
+ }
+
+ /* Circular multiply for better bit diffusion */
+ asm("mul %3"
+ : "=a" (random), "=d" (raw)
+ : "a" (random), "rm" (mix_const));
+ random += raw;
+
+ debug_putstr("...\n");
+
+ return random;
+}
+
+struct mem_vector {
+ unsigned long start;
+ unsigned long size;
+};
+
+#define MEM_AVOID_MAX 5
+struct mem_vector mem_avoid[MEM_AVOID_MAX];
+
+static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
+{
+ /* Item at least partially before region. */
+ if (item->start < region->start)
+ return false;
+ /* Item at least partially after region. */
+ if (item->start + item->size > region->start + region->size)
+ return false;
+ return true;
+}
+
+static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
+{
+ /* Item one is entirely before item two. */
+ if (one->start + one->size <= two->start)
+ return false;
+ /* Item one is entirely after item two. */
+ if (one->start >= two->start + two->size)
+ return false;
+ return true;
+}
+
+static void mem_avoid_init(unsigned long input, unsigned long input_size,
+ unsigned long output, unsigned long output_size)
+{
+ u64 initrd_start, initrd_size;
+ u64 cmd_line, cmd_line_size;
+ unsigned long unsafe, unsafe_len;
+ char *ptr;
+
+ /*
+ * Avoid the region that is unsafe to overlap during
+ * decompression (see calculations at top of misc.c).
+ */
+ unsafe_len = (output_size >> 12) + 32768 + 18;
+ unsafe = (unsigned long)input + input_size - unsafe_len;
+ mem_avoid[0].start = unsafe;
+ mem_avoid[0].size = unsafe_len;
+
+ /* Avoid initrd. */
+ initrd_start = (u64)real_mode->ext_ramdisk_image << 32;
+ initrd_start |= real_mode->hdr.ramdisk_image;
+ initrd_size = (u64)real_mode->ext_ramdisk_size << 32;
+ initrd_size |= real_mode->hdr.ramdisk_size;
+ mem_avoid[1].start = initrd_start;
+ mem_avoid[1].size = initrd_size;
+
+ /* Avoid kernel command line. */
+ cmd_line = (u64)real_mode->ext_cmd_line_ptr << 32;
+ cmd_line |= real_mode->hdr.cmd_line_ptr;
+ /* Calculate size of cmd_line. */
+ ptr = (char *)(unsigned long)cmd_line;
+ for (cmd_line_size = 0; ptr[cmd_line_size++]; )
+ ;
+ mem_avoid[2].start = cmd_line;
+ mem_avoid[2].size = cmd_line_size;
+
+ /* Avoid heap memory. */
+ mem_avoid[3].start = (unsigned long)free_mem_ptr;
+ mem_avoid[3].size = BOOT_HEAP_SIZE;
+
+ /* Avoid stack memory. */
+ mem_avoid[4].start = (unsigned long)free_mem_end_ptr;
+ mem_avoid[4].size = BOOT_STACK_SIZE;
+}
+
+/* Does this memory vector overlap a known avoided area? */
+bool mem_avoid_overlap(struct mem_vector *img)
+{
+ int i;
+
+ for (i = 0; i < MEM_AVOID_MAX; i++) {
+ if (mem_overlaps(img, &mem_avoid[i]))
+ return true;
+ }
+
+ return false;
+}
+
+unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN];
+unsigned long slot_max = 0;
+
+static void slots_append(unsigned long addr)
+{
+ /* Overflowing the slots list should be impossible. */
+ if (slot_max >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
+ CONFIG_PHYSICAL_ALIGN)
+ return;
+
+ slots[slot_max++] = addr;
+}
+
+static unsigned long slots_fetch_random(void)
+{
+ /* Handle case of no slots stored. */
+ if (slot_max == 0)
+ return 0;
+
+ return slots[get_random_long() % slot_max];
+}
+
+static void process_e820_entry(struct e820entry *entry,
+ unsigned long minimum,
+ unsigned long image_size)
+{
+ struct mem_vector region, img;
+
+ /* Skip non-RAM entries. */
+ if (entry->type != E820_RAM)
+ return;
+
+ /* Ignore entries entirely above our maximum. */
+ if (entry->addr >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
+ return;
+
+ /* Ignore entries entirely below our minimum. */
+ if (entry->addr + entry->size < minimum)
+ return;
+
+ region.start = entry->addr;
+ region.size = entry->size;
+
+ /* Potentially raise address to minimum location. */
+ if (region.start < minimum)
+ region.start = minimum;
+
+ /* Potentially raise address to meet alignment requirements. */
+ region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
+
+ /* Did we raise the address above the bounds of this e820 region? */
+ if (region.start > entry->addr + entry->size)
+ return;
+
+ /* Reduce size by any delta from the original address. */
+ region.size -= region.start - entry->addr;
+
+ /* Reduce maximum size to fit end of image within maximum limit. */
+ if (region.start + region.size > CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
+ region.size = CONFIG_RANDOMIZE_BASE_MAX_OFFSET - region.start;
+
+ /* Walk each aligned slot and check for avoided areas. */
+ for (img.start = region.start, img.size = image_size ;
+ mem_contains(&region, &img) ;
+ img.start += CONFIG_PHYSICAL_ALIGN) {
+ if (mem_avoid_overlap(&img))
+ continue;
+ slots_append(img.start);
+ }
+}
+
+static unsigned long find_random_addr(unsigned long minimum,
+ unsigned long size)
+{
+ int i;
+ unsigned long addr;
+
+ /* Make sure minimum is aligned. */
+ minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
+
+ /* Verify potential e820 positions, appending to slots list. */
+ for (i = 0; i < real_mode->e820_entries; i++) {
+ process_e820_entry(&real_mode->e820_map[i], minimum, size);
+ }
+
+ return slots_fetch_random();
+}
+
+unsigned char *choose_kernel_location(unsigned char *input,
+ unsigned long input_size,
+ unsigned char *output,
+ unsigned long output_size)
+{
+ unsigned long choice = (unsigned long)output;
+ unsigned long random;
+
+ if (cmdline_find_option_bool("nokaslr")) {
+ debug_putstr("KASLR disabled...\n");
+ goto out;
+ }
+
+ /* Record the various known unsafe memory ranges. */
+ mem_avoid_init((unsigned long)input, input_size,
+ (unsigned long)output, output_size);
+
+ /* Walk e820 and find a random address. */
+ random = find_random_addr(choice, output_size);
+ if (!random) {
+ debug_putstr("KASLR could not find suitable E820 region...\n");
+ goto out;
+ }
+
+ /* Always enforce the minimum. */
+ if (random < choice)
+ goto out;
+
+ choice = random;
+out:
+ return (unsigned char *)choice;
+}
+
+#endif /* CONFIG_RANDOMIZE_BASE */
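mem_overlaps() above uses the standard half-open interval test: two ranges are disjoint only if one ends at or before the start of the other. A stand-alone check of that logic with hypothetical ranges:

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, size; };

static bool overlaps(const struct range *a, const struct range *b)
{
	if (a->start + a->size <= b->start)	/* a ends before b starts */
		return false;
	if (a->start >= b->start + b->size)	/* a starts after b ends  */
		return false;
	return true;
}

int main(void)
{
	struct range initrd = { 0x1000000, 0x400000 };	/* [16MiB, 20MiB)           */
	struct range hit    = { 0x13ff000, 0x2000 };	/* straddles the initrd end */
	struct range miss   = { 0x1400000, 0x1000 };	/* starts where initrd ends */

	printf("hit:  %d\n", overlaps(&hit, &initrd));	/* 1 */
	printf("miss: %d\n", overlaps(&miss, &initrd));	/* 0 */
	return 0;
}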
diff --git a/arch/x86/boot/compressed/cmdline.c b/arch/x86/boot/compressed/cmdline.c
index bffd73b..b68e303 100644
--- a/arch/x86/boot/compressed/cmdline.c
+++ b/arch/x86/boot/compressed/cmdline.c
@@ -1,6 +1,6 @@
#include "misc.h"
-#ifdef CONFIG_EARLY_PRINTK
+#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE
static unsigned long fs;
static inline void set_fs(unsigned long seg)
diff --git a/arch/x86/boot/compressed/cpuflags.c b/arch/x86/boot/compressed/cpuflags.c
new file mode 100644
index 0000000..aa31346
--- /dev/null
+++ b/arch/x86/boot/compressed/cpuflags.c
@@ -0,0 +1,12 @@
+#ifdef CONFIG_RANDOMIZE_BASE
+
+#include "../cpuflags.c"
+
+bool has_cpuflag(int flag)
+{
+ get_cpuflags();
+
+ return test_bit(flag, cpu.flags);
+}
+
+#endif
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 5d6f6891..9116aac 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -117,9 +117,11 @@ preferred_addr:
addl %eax, %ebx
notl %eax
andl %eax, %ebx
-#else
- movl $LOAD_PHYSICAL_ADDR, %ebx
+ cmpl $LOAD_PHYSICAL_ADDR, %ebx
+ jge 1f
#endif
+ movl $LOAD_PHYSICAL_ADDR, %ebx
+1:
/* Target address to relocate to for decompression */
addl $z_extract_offset, %ebx
@@ -191,14 +193,14 @@ relocated:
leal boot_heap(%ebx), %eax
pushl %eax /* heap area */
pushl %esi /* real mode pointer */
- call decompress_kernel
+ call decompress_kernel /* returns kernel location in %eax */
addl $24, %esp
/*
* Jump to the decompressed kernel.
*/
xorl %ebx, %ebx
- jmp *%ebp
+ jmp *%eax
/*
* Stack and heap for uncompression
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index c337422..c5c1ae0 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -94,9 +94,11 @@ ENTRY(startup_32)
addl %eax, %ebx
notl %eax
andl %eax, %ebx
-#else
- movl $LOAD_PHYSICAL_ADDR, %ebx
+ cmpl $LOAD_PHYSICAL_ADDR, %ebx
+ jge 1f
#endif
+ movl $LOAD_PHYSICAL_ADDR, %ebx
+1:
/* Target address to relocate to for decompression */
addl $z_extract_offset, %ebx
@@ -269,9 +271,11 @@ preferred_addr:
addq %rax, %rbp
notq %rax
andq %rax, %rbp
-#else
- movq $LOAD_PHYSICAL_ADDR, %rbp
+ cmpq $LOAD_PHYSICAL_ADDR, %rbp
+ jge 1f
#endif
+ movq $LOAD_PHYSICAL_ADDR, %rbp
+1:
/* Target address to relocate to for decompression */
leaq z_extract_offset(%rbp), %rbx
@@ -339,13 +343,13 @@ relocated:
movl $z_input_len, %ecx /* input_len */
movq %rbp, %r8 /* output target address */
movq $z_output_len, %r9 /* decompressed length */
- call decompress_kernel
+ call decompress_kernel /* returns kernel location in %rax */
popq %rsi
/*
* Jump to the decompressed kernel.
*/
- jmp *%rbp
+ jmp *%rax
.code32
no_longmode:
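
In C terms, the reworked startup sequences in head_32.S and head_64.S keep the CONFIG_RELOCATABLE alignment math but never let the result drop below the configured floor. A rough model of what the added cmp/jge pair computes (the assembly uses a signed compare; it is shown here as a plain clamp, which is the intent):

/* Rough C model of the new head_32.S/head_64.S logic: the aligned runtime
 * address is clamped so decompression never targets memory below
 * LOAD_PHYSICAL_ADDR; z_extract_offset is added afterwards as before. */
static unsigned long clamp_load_address(unsigned long aligned_runtime_addr)
{
	if (aligned_runtime_addr < LOAD_PHYSICAL_ADDR)
		aligned_runtime_addr = LOAD_PHYSICAL_ADDR;
	return aligned_runtime_addr;
}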
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 434f077..196eaf3 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -112,14 +112,8 @@ struct boot_params *real_mode; /* Pointer to real-mode data */
void *memset(void *s, int c, size_t n);
void *memcpy(void *dest, const void *src, size_t n);
-#ifdef CONFIG_X86_64
-#define memptr long
-#else
-#define memptr unsigned
-#endif
-
-static memptr free_mem_ptr;
-static memptr free_mem_end_ptr;
+memptr free_mem_ptr;
+memptr free_mem_end_ptr;
static char *vidmem;
static int vidport;
@@ -395,7 +389,7 @@ static void parse_elf(void *output)
free(phdrs);
}
-asmlinkage void decompress_kernel(void *rmode, memptr heap,
+asmlinkage void *decompress_kernel(void *rmode, memptr heap,
unsigned char *input_data,
unsigned long input_len,
unsigned char *output,
@@ -422,6 +416,10 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
free_mem_ptr = heap; /* Heap */
free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
+ output = choose_kernel_location(input_data, input_len,
+ output, output_len);
+
+ /* Validate memory location choices. */
if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
error("Destination address inappropriately aligned");
#ifdef CONFIG_X86_64
@@ -441,5 +439,5 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
parse_elf(output);
handle_relocations(output, output_len);
debug_putstr("done.\nBooting the kernel.\n");
- return;
+ return output;
}
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 674019d..24e3e56 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -23,7 +23,15 @@
#define BOOT_BOOT_H
#include "../ctype.h"
+#ifdef CONFIG_X86_64
+#define memptr long
+#else
+#define memptr unsigned
+#endif
+
/* misc.c */
+extern memptr free_mem_ptr;
+extern memptr free_mem_end_ptr;
extern struct boot_params *real_mode; /* Pointer to real-mode data */
void __putstr(const char *s);
#define error_putstr(__x) __putstr(__x)
@@ -39,23 +47,40 @@ static inline void debug_putstr(const char *s)
#endif
-#ifdef CONFIG_EARLY_PRINTK
-
+#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE
/* cmdline.c */
int cmdline_find_option(const char *option, char *buffer, int bufsize);
int cmdline_find_option_bool(const char *option);
+#endif
-/* early_serial_console.c */
-extern int early_serial_base;
-void console_init(void);
+#if CONFIG_RANDOMIZE_BASE
+/* aslr.c */
+unsigned char *choose_kernel_location(unsigned char *input,
+ unsigned long input_size,
+ unsigned char *output,
+ unsigned long output_size);
+/* cpuflags.c */
+bool has_cpuflag(int flag);
#else
+static inline
+unsigned char *choose_kernel_location(unsigned char *input,
+ unsigned long input_size,
+ unsigned char *output,
+ unsigned long output_size)
+{
+ return output;
+}
+#endif
+#ifdef CONFIG_EARLY_PRINTK
/* early_serial_console.c */
+extern int early_serial_base;
+void console_init(void);
+#else
static const int early_serial_base;
static inline void console_init(void)
{ }
-
#endif
#endif
diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S
index 11f272c..1eb7d29 100644
--- a/arch/x86/boot/copy.S
+++ b/arch/x86/boot/copy.S
@@ -14,7 +14,7 @@
* Memory copy routines
*/
- .code16gcc
+ .code16
.text
GLOBAL(memcpy)
@@ -30,7 +30,7 @@ GLOBAL(memcpy)
rep; movsb
popw %di
popw %si
- ret
+ retl
ENDPROC(memcpy)
GLOBAL(memset)
@@ -45,25 +45,25 @@ GLOBAL(memset)
andw $3, %cx
rep; stosb
popw %di
- ret
+ retl
ENDPROC(memset)
GLOBAL(copy_from_fs)
pushw %ds
pushw %fs
popw %ds
- call memcpy
+ calll memcpy
popw %ds
- ret
+ retl
ENDPROC(copy_from_fs)
GLOBAL(copy_to_fs)
pushw %es
pushw %fs
popw %es
- call memcpy
+ calll memcpy
popw %es
- ret
+ retl
ENDPROC(copy_to_fs)
#if 0 /* Not currently used, but can be enabled as needed */
@@ -71,17 +71,17 @@ GLOBAL(copy_from_gs)
pushw %ds
pushw %gs
popw %ds
- call memcpy
+ calll memcpy
popw %ds
- ret
+ retl
ENDPROC(copy_from_gs)
GLOBAL(copy_to_gs)
pushw %es
pushw %gs
popw %es
- call memcpy
+ calll memcpy
popw %es
- ret
+ retl
ENDPROC(copy_to_gs)
#endif
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 4d3ff03..100a9a1 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -28,8 +28,6 @@
#include <asm/required-features.h>
#include <asm/msr-index.h>
-struct cpu_features cpu;
-static u32 cpu_vendor[3];
static u32 err_flags[NCAPINTS];
static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;
@@ -69,92 +67,8 @@ static int is_transmeta(void)
cpu_vendor[2] == A32('M', 'x', '8', '6');
}
-static int has_fpu(void)
-{
- u16 fcw = -1, fsw = -1;
- u32 cr0;
-
- asm("movl %%cr0,%0" : "=r" (cr0));
- if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
- cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
- asm volatile("movl %0,%%cr0" : : "r" (cr0));
- }
-
- asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
- : "+m" (fsw), "+m" (fcw));
-
- return fsw == 0 && (fcw & 0x103f) == 0x003f;
-}
-
-static int has_eflag(u32 mask)
-{
- u32 f0, f1;
-
- asm("pushfl ; "
- "pushfl ; "
- "popl %0 ; "
- "movl %0,%1 ; "
- "xorl %2,%1 ; "
- "pushl %1 ; "
- "popfl ; "
- "pushfl ; "
- "popl %1 ; "
- "popfl"
- : "=&r" (f0), "=&r" (f1)
- : "ri" (mask));
-
- return !!((f0^f1) & mask);
-}
-
-static void get_flags(void)
-{
- u32 max_intel_level, max_amd_level;
- u32 tfms;
-
- if (has_fpu())
- set_bit(X86_FEATURE_FPU, cpu.flags);
-
- if (has_eflag(X86_EFLAGS_ID)) {
- asm("cpuid"
- : "=a" (max_intel_level),
- "=b" (cpu_vendor[0]),
- "=d" (cpu_vendor[1]),
- "=c" (cpu_vendor[2])
- : "a" (0));
-
- if (max_intel_level >= 0x00000001 &&
- max_intel_level <= 0x0000ffff) {
- asm("cpuid"
- : "=a" (tfms),
- "=c" (cpu.flags[4]),
- "=d" (cpu.flags[0])
- : "a" (0x00000001)
- : "ebx");
- cpu.level = (tfms >> 8) & 15;
- cpu.model = (tfms >> 4) & 15;
- if (cpu.level >= 6)
- cpu.model += ((tfms >> 16) & 0xf) << 4;
- }
-
- asm("cpuid"
- : "=a" (max_amd_level)
- : "a" (0x80000000)
- : "ebx", "ecx", "edx");
-
- if (max_amd_level >= 0x80000001 &&
- max_amd_level <= 0x8000ffff) {
- u32 eax = 0x80000001;
- asm("cpuid"
- : "+a" (eax),
- "=c" (cpu.flags[6]),
- "=d" (cpu.flags[1])
- : : "ebx");
- }
- }
-}
-
/* Returns a bitmask of which words we have error bits in */
-static int check_flags(void)
+static int check_cpuflags(void)
{
u32 err;
int i;
@@ -187,8 +101,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
if (has_eflag(X86_EFLAGS_AC))
cpu.level = 4;
- get_flags();
- err = check_flags();
+ get_cpuflags();
+ err = check_cpuflags();
if (test_bit(X86_FEATURE_LM, cpu.flags))
cpu.level = 64;
@@ -207,8 +121,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
eax &= ~(1 << 15);
asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
- get_flags(); /* Make sure it really did something */
- err = check_flags();
+ get_cpuflags(); /* Make sure it really did something */
+ err = check_cpuflags();
} else if (err == 0x01 &&
!(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
is_centaur() && cpu.model >= 6) {
@@ -223,7 +137,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
set_bit(X86_FEATURE_CX8, cpu.flags);
- err = check_flags();
+ err = check_cpuflags();
} else if (err == 0x01 && is_transmeta()) {
/* Transmeta might have masked feature bits in word 0 */
@@ -238,7 +152,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
: : "ecx", "ebx");
asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
- err = check_flags();
+ err = check_cpuflags();
}
if (err_flags_ptr)
diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c
new file mode 100644
index 0000000..a9fcb7c
--- /dev/null
+++ b/arch/x86/boot/cpuflags.c
@@ -0,0 +1,104 @@
+#include <linux/types.h>
+#include "bitops.h"
+
+#include <asm/processor-flags.h>
+#include <asm/required-features.h>
+#include <asm/msr-index.h>
+#include "cpuflags.h"
+
+struct cpu_features cpu;
+u32 cpu_vendor[3];
+
+static bool loaded_flags;
+
+static int has_fpu(void)
+{
+ u16 fcw = -1, fsw = -1;
+ unsigned long cr0;
+
+ asm volatile("mov %%cr0,%0" : "=r" (cr0));
+ if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
+ cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
+ asm volatile("mov %0,%%cr0" : : "r" (cr0));
+ }
+
+ asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+ : "+m" (fsw), "+m" (fcw));
+
+ return fsw == 0 && (fcw & 0x103f) == 0x003f;
+}
+
+int has_eflag(unsigned long mask)
+{
+ unsigned long f0, f1;
+
+ asm volatile("pushf \n\t"
+ "pushf \n\t"
+ "pop %0 \n\t"
+ "mov %0,%1 \n\t"
+ "xor %2,%1 \n\t"
+ "push %1 \n\t"
+ "popf \n\t"
+ "pushf \n\t"
+ "pop %1 \n\t"
+ "popf"
+ : "=&r" (f0), "=&r" (f1)
+ : "ri" (mask));
+
+ return !!((f0^f1) & mask);
+}
+
+/* Handle x86_32 PIC using ebx. */
+#if defined(__i386__) && defined(__PIC__)
+# define EBX_REG "=r"
+#else
+# define EBX_REG "=b"
+#endif
+
+static inline void cpuid(u32 id, u32 *a, u32 *b, u32 *c, u32 *d)
+{
+ asm volatile(".ifnc %%ebx,%3 ; movl %%ebx,%3 ; .endif \n\t"
+ "cpuid \n\t"
+ ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif \n\t"
+ : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b)
+ : "a" (id)
+ );
+}
+
+void get_cpuflags(void)
+{
+ u32 max_intel_level, max_amd_level;
+ u32 tfms;
+ u32 ignored;
+
+ if (loaded_flags)
+ return;
+ loaded_flags = true;
+
+ if (has_fpu())
+ set_bit(X86_FEATURE_FPU, cpu.flags);
+
+ if (has_eflag(X86_EFLAGS_ID)) {
+ cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2],
+ &cpu_vendor[1]);
+
+ if (max_intel_level >= 0x00000001 &&
+ max_intel_level <= 0x0000ffff) {
+ cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
+ &cpu.flags[0]);
+ cpu.level = (tfms >> 8) & 15;
+ cpu.model = (tfms >> 4) & 15;
+ if (cpu.level >= 6)
+ cpu.model += ((tfms >> 16) & 0xf) << 4;
+ }
+
+ cpuid(0x80000000, &max_amd_level, &ignored, &ignored,
+ &ignored);
+
+ if (max_amd_level >= 0x80000001 &&
+ max_amd_level <= 0x8000ffff) {
+ cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
+ &cpu.flags[1]);
+ }
+ }
+}
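
As a usage sketch, the cpuid() wrapper above fills all four output registers for a given leaf while keeping %ebx usable under 32-bit PIC. The helper below is illustrative only (not part of the patch) and shows the register ordering that get_cpuflags() relies on when it fills cpu_vendor[]:

/* Illustrative caller: read the 12-byte vendor string from CPUID leaf 0.
 * Argument order is (leaf, eax, ebx, ecx, edx), so the vendor registers
 * come back as ebx, edx, ecx -> cpu_vendor[0], [1], [2]. */
static void example_read_vendor(char buf[13])
{
	u32 eax, vendor[3];

	cpuid(0x0, &eax, &vendor[0], &vendor[2], &vendor[1]);
	memcpy(buf + 0, &vendor[0], 4);		/* ebx: "Genu" / "Auth" */
	memcpy(buf + 4, &vendor[1], 4);		/* edx: "ineI" / "enti" */
	memcpy(buf + 8, &vendor[2], 4);		/* ecx: "ntel" / "cAMD" */
	buf[12] = '\0';
}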
diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h
new file mode 100644
index 0000000..ea97697
--- /dev/null
+++ b/arch/x86/boot/cpuflags.h
@@ -0,0 +1,19 @@
+#ifndef BOOT_CPUFLAGS_H
+#define BOOT_CPUFLAGS_H
+
+#include <asm/cpufeature.h>
+#include <asm/processor-flags.h>
+
+struct cpu_features {
+ int level; /* Family, or 64 for x86-64 */
+ int model;
+ u32 flags[NCAPINTS];
+};
+
+extern struct cpu_features cpu;
+extern u32 cpu_vendor[3];
+
+int has_eflag(unsigned long mask);
+void get_cpuflags(void);
+
+#endif
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 9ec06a1..ec3b8ba 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -391,7 +391,14 @@ xloadflags:
#else
# define XLF23 0
#endif
- .word XLF0 | XLF1 | XLF23
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_EFI) && defined(CONFIG_KEXEC)
+# define XLF4 XLF_EFI_KEXEC
+#else
+# define XLF4 0
+#endif
+
+ .word XLF0 | XLF1 | XLF23 | XLF4
cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line,
#added with boot protocol
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h
index 0d9ec77..e6a9245 100644
--- a/arch/x86/include/asm/archrandom.h
+++ b/arch/x86/include/asm/archrandom.h
@@ -39,6 +39,20 @@
#ifdef CONFIG_ARCH_RANDOM
+/* Instead of arch_get_random_long() when alternatives haven't run. */
+static inline int rdrand_long(unsigned long *v)
+{
+ int ok;
+ asm volatile("1: " RDRAND_LONG "\n\t"
+ "jc 2f\n\t"
+ "decl %0\n\t"
+ "jnz 1b\n\t"
+ "2:"
+ : "=r" (ok), "=a" (*v)
+ : "0" (RDRAND_RETRY_LOOPS));
+ return ok;
+}
+
#define GET_RANDOM(name, type, rdrand, nop) \
static inline int name(type *v) \
{ \
@@ -68,6 +82,13 @@ GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
#endif /* CONFIG_X86_64 */
+#else
+
+static inline int rdrand_long(unsigned long *v)
+{
+ return 0;
+}
+
#endif /* CONFIG_ARCH_RANDOM */
extern void x86_init_rdrand(struct cpuinfo_x86 *c);
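
A minimal sketch of how a caller might use the new rdrand_long() fallback before the alternatives machinery has patched arch_get_random_long() in; the mixing function is hypothetical and only illustrates the zero-means-no-entropy contract:

/* Hypothetical helper: fold RDRAND output into a seed when the instruction
 * succeeds within RDRAND_RETRY_LOOPS attempts, otherwise leave it alone. */
static unsigned long example_mix_rdrand(unsigned long seed)
{
	unsigned long v;

	if (rdrand_long(&v))	/* nonzero: CF was set and v holds a random value */
		seed ^= v;
	return seed;		/* unchanged when RDRAND is absent or exhausted */
}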
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index c6cd358..04a4890 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -92,12 +92,53 @@
#endif
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
+#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif /* SMP */
+
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+
+/*
+ * For either of these options x86 doesn't have a strong TSO memory
+ * model and we should fall back to full barriers.
+ */
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
+#else /* regular x86 TSO memory ordering */
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
#endif
/*
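
The classic message-passing pattern shows what the new primitives buy: on ordinary TSO x86 they compile to plain stores and loads plus a compiler barrier, while the OOSTORE/PPRO_FENCE variants pay for a full smp_mb(). A hedged sketch with assumed shared variables:

/* Producer publishes data before the flag; release ordering guarantees the
 * data store is visible to anyone who observes flag == 1. */
static int example_data;
static int example_flag;

static void example_producer(void)
{
	example_data = 42;
	smp_store_release(&example_flag, 1);
}

/* Consumer: the acquire load forbids hoisting the data read above the flag
 * check, so a nonzero flag implies example_data == 42. */
static int example_consumer(void)
{
	if (smp_load_acquire(&example_flag))
		return example_data;
	return -1;
}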
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 89270b43..e099f95 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -216,6 +216,7 @@
#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_MPX (9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 65c6e6e..3b978c4 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -1,6 +1,24 @@
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H
+/*
+ * We map the EFI regions needed for runtime services non-contiguously,
+ * with preserved alignment on virtual addresses starting from -4G down
+ * for a total max space of 64G. This way, we provide for stable runtime
+ * services addresses across kernels so that a kexec'd kernel can still
+ * use them.
+ *
+ * This is the main reason why we're doing stable VA mappings for RT
+ * services.
+ *
+ * This flag is used in conjunction with a chicken bit called
+ * "efi=old_map" which can be used as a fallback to the old runtime
+ * services mapping method in case there's some b0rkage with a
+ * particular EFI implementation (haha, it is hard to hold up the
+ * sarcasm here...).
+ */
+#define EFI_OLD_MEMMAP EFI_ARCH_1
+
#ifdef CONFIG_X86_32
#define EFI_LOADER_SIGNATURE "EL32"
@@ -69,24 +87,31 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3), \
(u64)(a4), (u64)(a5), (u64)(a6))
+#define _efi_call_virtX(x, f, ...) \
+({ \
+ efi_status_t __s; \
+ \
+ efi_sync_low_kernel_mappings(); \
+ preempt_disable(); \
+ __s = efi_call##x((void *)efi.systab->runtime->f, __VA_ARGS__); \
+ preempt_enable(); \
+ __s; \
+})
+
#define efi_call_virt0(f) \
- efi_call0((efi.systab->runtime->f))
-#define efi_call_virt1(f, a1) \
- efi_call1((efi.systab->runtime->f), (u64)(a1))
-#define efi_call_virt2(f, a1, a2) \
- efi_call2((efi.systab->runtime->f), (u64)(a1), (u64)(a2))
-#define efi_call_virt3(f, a1, a2, a3) \
- efi_call3((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
- (u64)(a3))
-#define efi_call_virt4(f, a1, a2, a3, a4) \
- efi_call4((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
- (u64)(a3), (u64)(a4))
-#define efi_call_virt5(f, a1, a2, a3, a4, a5) \
- efi_call5((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
- (u64)(a3), (u64)(a4), (u64)(a5))
-#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
- efi_call6((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
- (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
+ _efi_call_virtX(0, f)
+#define efi_call_virt1(f, a1) \
+ _efi_call_virtX(1, f, (u64)(a1))
+#define efi_call_virt2(f, a1, a2) \
+ _efi_call_virtX(2, f, (u64)(a1), (u64)(a2))
+#define efi_call_virt3(f, a1, a2, a3) \
+ _efi_call_virtX(3, f, (u64)(a1), (u64)(a2), (u64)(a3))
+#define efi_call_virt4(f, a1, a2, a3, a4) \
+ _efi_call_virtX(4, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4))
+#define efi_call_virt5(f, a1, a2, a3, a4, a5) \
+ _efi_call_virtX(5, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5))
+#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
+ _efi_call_virtX(6, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
u32 type, u64 attribute);
@@ -95,12 +120,28 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
extern int add_efi_memmap;
extern unsigned long x86_efi_facility;
+extern struct efi_scratch efi_scratch;
extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int efi_memblock_x86_reserve_range(void);
extern void efi_call_phys_prelog(void);
extern void efi_call_phys_epilog(void);
extern void efi_unmap_memmap(void);
extern void efi_memory_uc(u64 addr, unsigned long size);
+extern void __init efi_map_region(efi_memory_desc_t *md);
+extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
+extern void efi_sync_low_kernel_mappings(void);
+extern void efi_setup_page_tables(void);
+extern void __init old_map_region(efi_memory_desc_t *md);
+
+struct efi_setup_data {
+ u64 fw_vendor;
+ u64 runtime;
+ u64 tables;
+ u64 smbios;
+ u64 reserved[8];
+};
+
+extern u64 efi_setup;
#ifdef CONFIG_EFI
@@ -110,7 +151,7 @@ static inline bool efi_is_native(void)
}
extern struct console early_efi_console;
-
+extern void parse_efi_setup(u64 phys_addr, u32 data_len);
#else
/*
* IF EFI is not configured, have the EFI calls return -ENOSYS.
@@ -122,6 +163,7 @@ extern struct console early_efi_console;
#define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS)
#define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS)
#define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS)
+static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
#endif /* CONFIG_EFI */
#endif /* _ASM_X86_EFI_H */
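
For reference, a 64-bit call such as efi_call_virt2(get_time, tm, tc) now boils down to roughly the function below (an illustration of the macro expansion, not additional code): every runtime-service invocation first synchronizes the EFI portion of the kernel page tables and runs with preemption disabled.

/* Approximate expansion of efi_call_virt2(get_time, tm, tc) on x86-64. */
static efi_status_t example_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	efi_status_t status;

	efi_sync_low_kernel_mappings();		/* keep the EFI mappings in sync */
	preempt_disable();			/* no migration while in firmware */
	status = efi_call2((void *)efi.systab->runtime->get_time,
			   (u64)(tm), (u64)(tc));
	preempt_enable();
	return status;
}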
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index c49a613..cea1c76 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -293,12 +293,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
values. "m" is a random variable that should be in L1 */
- alternative_input(
- ASM_NOP8 ASM_NOP2,
- "emms\n\t" /* clear stack tags */
- "fildl %P[addr]", /* set F?P to defined value */
- X86_FEATURE_FXSAVE_LEAK,
- [addr] "m" (tsk->thread.fpu.has_fpu));
+ if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
+ asm volatile(
+ "fnclex\n\t"
+ "emms\n\t"
+ "fildl %P[addr]" /* set F?P to defined value */
+ : : [addr] "m" (tsk->thread.fpu.has_fpu));
+ }
return fpu_restore_checking(&tsk->thread.fpu);
}
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index be27ba1..b4c1f54 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -110,26 +110,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
- int ret = 0;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
- asm volatile("\t" ASM_STAC "\n"
- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
- "2:\t" ASM_CLAC "\n"
- "\t.section .fixup, \"ax\"\n"
- "3:\tmov %3, %0\n"
- "\tjmp 2b\n"
- "\t.previous\n"
- _ASM_EXTABLE(1b, 3b)
- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
- : "i" (-EFAULT), "r" (newval), "1" (oldval)
- : "memory"
- );
-
- *uval = oldval;
- return ret;
+ return user_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval);
}
#endif
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index cba45d9..67d69b8 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -191,6 +191,9 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
#define trace_interrupt interrupt
#endif
+#define VECTOR_UNDEFINED -1
+#define VECTOR_RETRIGGERED -2
+
typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
extern void setup_vector_irq(int cpu);
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index 459769d..e34e097 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -51,10 +51,41 @@ struct devs_id {
enum intel_mid_cpu_type {
/* 1 was Moorestown */
INTEL_MID_CPU_CHIP_PENWELL = 2,
+ INTEL_MID_CPU_CHIP_CLOVERVIEW,
+ INTEL_MID_CPU_CHIP_TANGIER,
};
extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
+/**
+ * struct intel_mid_ops - Interface between intel-mid & sub archs
+ * @arch_setup: arch_setup function to re-initialize platform
+ * structures (x86_init, x86_platform_init)
+ *
+ * This structure can be extended if any new interface is required
+ * between intel-mid & its sub arch files.
+ */
+struct intel_mid_ops {
+ void (*arch_setup)(void);
+};
+
+/* Helper APIs for INTEL_MID_OPS_INIT */
+#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \
+ [cpuid] = get_##cpuname##_ops
+
+/* Maximum number of CPU ops */
+#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
+
+/*
+ * For every new cpu addition, a weak get_<cpuname>_ops() function needs to be
+ * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
+ */
+#define INTEL_MID_OPS_INIT {\
+ DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \
+ DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \
+ DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
+};
+
#ifdef CONFIG_X86_INTEL_MID
static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
@@ -86,8 +117,21 @@ extern enum intel_mid_timer_options intel_mid_timer_options;
* Penwell uses spread spectrum clock, so the freq number is not exactly
* the same as reported by MSR based on SDM.
*/
-#define PENWELL_FSB_FREQ_83SKU 83200
-#define PENWELL_FSB_FREQ_100SKU 99840
+#define FSB_FREQ_83SKU 83200
+#define FSB_FREQ_100SKU 99840
+#define FSB_FREQ_133SKU 133000
+
+#define FSB_FREQ_167SKU 167000
+#define FSB_FREQ_200SKU 200000
+#define FSB_FREQ_267SKU 267000
+#define FSB_FREQ_333SKU 333000
+#define FSB_FREQ_400SKU 400000
+
+/* Bus Select SoC Fuse value */
+#define BSEL_SOC_FUSE_MASK 0x7
+#define BSEL_SOC_FUSE_001 0x1 /* FSB 133MHz */
+#define BSEL_SOC_FUSE_101 0x5 /* FSB 100MHz */
+#define BSEL_SOC_FUSE_111 0x7 /* FSB 83MHz */
#define SFI_MTMR_MAX_NUM 8
#define SFI_MRTC_MAX 8
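
As described above, INTEL_MID_OPS_INIT builds an array indexed by intel_mid_cpu_type whose entries are the weak get_<cpuname>_ops() hooks. A hedged sketch of how such a table is meant to be consumed (the dispatcher function itself is illustrative, not part of this header):

/* The macro supplies its own terminating "};", hence no semicolon here. */
static void *(*example_get_ops[])(void) = INTEL_MID_OPS_INIT

static void example_intel_mid_arch_setup(void)
{
	enum intel_mid_cpu_type id = intel_mid_identify_cpu();
	struct intel_mid_ops *ops;

	if (id >= MAX_CPU_OPS(example_get_ops) || !example_get_ops[id])
		return;				/* unknown or unhandled chip */

	ops = example_get_ops[id]();		/* e.g. get_penwell_ops() */
	if (ops && ops->arch_setup)
		ops->arch_setup();		/* re-point the platform init hooks */
}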
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
new file mode 100644
index 0000000..8e71c79
--- /dev/null
+++ b/arch/x86/include/asm/iosf_mbi.h
@@ -0,0 +1,90 @@
+/*
+ * iosf_mbi.h: Intel OnChip System Fabric MailBox access support
+ */
+
+#ifndef IOSF_MBI_SYMS_H
+#define IOSF_MBI_SYMS_H
+
+#define MBI_MCR_OFFSET 0xD0
+#define MBI_MDR_OFFSET 0xD4
+#define MBI_MCRX_OFFSET 0xD8
+
+#define MBI_RD_MASK 0xFEFFFFFF
+#define MBI_WR_MASK 0X01000000
+
+#define MBI_MASK_HI 0xFFFFFF00
+#define MBI_MASK_LO 0x000000FF
+#define MBI_ENABLE 0xF0
+
+/* Baytrail available units */
+#define BT_MBI_UNIT_AUNIT 0x00
+#define BT_MBI_UNIT_SMC 0x01
+#define BT_MBI_UNIT_CPU 0x02
+#define BT_MBI_UNIT_BUNIT 0x03
+#define BT_MBI_UNIT_PMC 0x04
+#define BT_MBI_UNIT_GFX 0x06
+#define BT_MBI_UNIT_SMI 0x0C
+#define BT_MBI_UNIT_USB 0x43
+#define BT_MBI_UNIT_SATA 0xA3
+#define BT_MBI_UNIT_PCIE 0xA6
+
+/* Baytrail read/write opcodes */
+#define BT_MBI_AUNIT_READ 0x10
+#define BT_MBI_AUNIT_WRITE 0x11
+#define BT_MBI_SMC_READ 0x10
+#define BT_MBI_SMC_WRITE 0x11
+#define BT_MBI_CPU_READ 0x10
+#define BT_MBI_CPU_WRITE 0x11
+#define BT_MBI_BUNIT_READ 0x10
+#define BT_MBI_BUNIT_WRITE 0x11
+#define BT_MBI_PMC_READ 0x06
+#define BT_MBI_PMC_WRITE 0x07
+#define BT_MBI_GFX_READ 0x00
+#define BT_MBI_GFX_WRITE 0x01
+#define BT_MBI_SMIO_READ 0x06
+#define BT_MBI_SMIO_WRITE 0x07
+#define BT_MBI_USB_READ 0x06
+#define BT_MBI_USB_WRITE 0x07
+#define BT_MBI_SATA_READ 0x00
+#define BT_MBI_SATA_WRITE 0x01
+#define BT_MBI_PCIE_READ 0x00
+#define BT_MBI_PCIE_WRITE 0x01
+
+/**
+ * iosf_mbi_read() - MailBox Interface read command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data to be read
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr);
+
+/**
+ * iosf_mbi_write() - MailBox unmasked write command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data to be written
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
+
+/**
+ * iosf_mbi_modify() - MailBox masked write command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data being modified
+ * @mask: mask indicating bits in mdr to be modified
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
+
+#endif /* IOSF_MBI_SYMS_H */
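
A short usage sketch for the mailbox API declared above. The register offset and bit are placeholders invented for illustration; only the unit/opcode macros and the three accessors come from this header, and per the kernel-doc the accessors do not sleep.

#define EXAMPLE_BUNIT_REG	0x14		/* hypothetical register offset */
#define EXAMPLE_ENABLE_BIT	(1 << 0)	/* hypothetical control bit */

static int example_bunit_enable(void)
{
	u32 val;
	int ret;

	/* Open-coded read-modify-write... */
	ret = iosf_mbi_read(BT_MBI_UNIT_BUNIT, BT_MBI_BUNIT_READ,
			    EXAMPLE_BUNIT_REG, &val);
	if (ret)
		return ret;

	ret = iosf_mbi_write(BT_MBI_UNIT_BUNIT, BT_MBI_BUNIT_WRITE,
			     EXAMPLE_BUNIT_REG, val | EXAMPLE_ENABLE_BIT);
	if (ret)
		return ret;

	/* ...or the same update as a single masked write. */
	return iosf_mbi_modify(BT_MBI_UNIT_BUNIT, BT_MBI_BUNIT_WRITE,
			       EXAMPLE_BUNIT_REG, EXAMPLE_ENABLE_BIT,
			       EXAMPLE_ENABLE_BIT);
}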
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 0ea10f27..cb6cfcd 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -25,6 +25,7 @@ extern void irq_ctx_init(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpumask.h>
+extern int check_irq_vectors_for_cpu_disable(void);
extern void fixup_irqs(void);
extern void irq_force_complete_move(int);
#endif
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index c696a86..6e4ce2d 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -118,7 +118,6 @@ extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb);
#include <linux/percpu.h>
-#include <linux/init.h>
#include <linux/atomic.h>
extern int mce_p5_enabled;
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index f98bd66..b59827e 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -1,6 +1,21 @@
#ifndef _ASM_X86_MICROCODE_H
#define _ASM_X86_MICROCODE_H
+#define native_rdmsr(msr, val1, val2) \
+do { \
+ u64 __val = native_read_msr((msr)); \
+ (void)((val1) = (u32)__val); \
+ (void)((val2) = (u32)(__val >> 32)); \
+} while (0)
+
+#define native_wrmsr(msr, low, high) \
+ native_write_msr(msr, low, high)
+
+#define native_wrmsrl(msr, val) \
+ native_write_msr((msr), \
+ (u32)((u64)(val)), \
+ (u32)((u64)(val) >> 32))
+
struct cpu_signature {
unsigned int sig;
unsigned int pf;
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index 4c01917..b7b10b8 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -61,11 +61,10 @@ extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
extern int apply_microcode_amd(int cpu);
extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
+#define PATCH_MAX_SIZE PAGE_SIZE
+extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
+
#ifdef CONFIG_MICROCODE_AMD_EARLY
-#ifdef CONFIG_X86_32
-#define MPB_MAX_SIZE PAGE_SIZE
-extern u8 amd_bsp_mpb[MPB_MAX_SIZE];
-#endif
extern void __init load_ucode_amd_bsp(void);
extern void load_ucode_amd_ap(void);
extern int __init save_microcode_in_initrd_amd(void);
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 3142a94..3e6b492 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -1,7 +1,6 @@
#ifndef _ASM_X86_MPSPEC_H
#define _ASM_X86_MPSPEC_H
-#include <linux/init.h>
#include <asm/mpspec_def.h>
#include <asm/x86_init.h>
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 2f366d0..1da25a5 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_MWAIT_H
#define _ASM_X86_MWAIT_H
+#include <linux/sched.h>
+
#define MWAIT_SUBSTATE_MASK 0xf
#define MWAIT_CSTATE_MASK 0xf
#define MWAIT_SUBSTATE_SIZE 4
@@ -13,4 +15,45 @@
#define MWAIT_ECX_INTERRUPT_BREAK 0x1
+static inline void __monitor(const void *eax, unsigned long ecx,
+ unsigned long edx)
+{
+ /* "monitor %eax, %ecx, %edx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xc8;"
+ :: "a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+ /* "mwait %eax, %ecx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xc9;"
+ :: "a" (eax), "c" (ecx));
+}
+
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+{
+ if (!current_set_polling_and_test()) {
+ if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {
+ mb();
+ clflush((void *)&current_thread_info()->flags);
+ mb();
+ }
+
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ if (!need_resched())
+ __mwait(eax, ecx);
+ }
+ current_clr_polling();
+}
+
#endif /* _ASM_X86_MWAIT_H */
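
A hedged usage sketch for the relocated helpers: an idle routine passes its C-state hint in eax and asks for interrupt-break behaviour in ecx. The hint value below is purely illustrative; real callers derive it from ACPI or CPU-specific tables.

/* Illustrative idle hook; 0x10 is an example MWAIT hint, not a real table
 * entry.  The need_resched()/polling handling happens inside the helper. */
static void example_mwait_idle(void)
{
	mwait_idle_with_hints(0x10, MWAIT_ECX_INTERRUPT_BREAK);
}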
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index c878924..775873d 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -71,6 +71,7 @@ extern bool __virt_addr_valid(unsigned long kaddr);
#include <asm-generic/getorder.h>
#define __HAVE_ARCH_GATE_AREA 1
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* __KERNEL__ */
#endif /* _ASM_X86_PAGE_H */
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
index 4d550d0..904f528 100644
--- a/arch/x86/include/asm/page_32.h
+++ b/arch/x86/include/asm/page_32.h
@@ -5,10 +5,6 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_HUGETLB_PAGE
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-
#define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 43dcd80..8de6d9c 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -39,9 +39,18 @@
#define __VIRTUAL_MASK_SHIFT 47
/*
- * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
- * arch/x86/kernel/head_64.S), and it is mapped here:
+ * Kernel image size is limited to 1GiB due to the fixmap living in the
+ * next 1GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). Use
+ * 512MiB by default, leaving 1.5GiB for modules once the page tables
+ * are fully set up. If kernel ASLR is configured, it can extend the
+ * kernel page table mapping, reducing the size of the modules area.
*/
-#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
+#define KERNEL_IMAGE_SIZE_DEFAULT (512 * 1024 * 1024)
+#if defined(CONFIG_RANDOMIZE_BASE) && \
+ CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE_DEFAULT
+#define KERNEL_IMAGE_SIZE CONFIG_RANDOMIZE_BASE_MAX_OFFSET
+#else
+#define KERNEL_IMAGE_SIZE KERNEL_IMAGE_SIZE_DEFAULT
+#endif
#endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 3bf2dd0..0d193e2 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -55,6 +55,13 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
+/* Bit manipulation helper on pte/pgoff entry */
+static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
+ unsigned long mask, unsigned int leftshift)
+{
+ return ((value >> rightshift) & mask) << leftshift;
+}
+
#ifdef CONFIG_MEM_SOFT_DIRTY
/*
@@ -71,31 +78,34 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
#define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
-#define pte_to_pgoff(pte) \
- ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \
- & ((1U << PTE_FILE_BITS1) - 1))) \
- + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \
- & ((1U << PTE_FILE_BITS2) - 1)) \
- << (PTE_FILE_BITS1)) \
- + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \
- & ((1U << PTE_FILE_BITS3) - 1)) \
- << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
- + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \
- << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))
-
-#define pgoff_to_pte(off) \
- ((pte_t) { .pte_low = \
- ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \
- + ((((off) >> PTE_FILE_BITS1) \
- & ((1U << PTE_FILE_BITS2) - 1)) \
- << PTE_FILE_SHIFT2) \
- + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
- & ((1U << PTE_FILE_BITS3) - 1)) \
- << PTE_FILE_SHIFT3) \
- + ((((off) >> \
- (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \
- << PTE_FILE_SHIFT4) \
- + _PAGE_FILE })
+#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1)
+#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1)
+#define PTE_FILE_MASK3 ((1U << PTE_FILE_BITS3) - 1)
+
+#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1)
+#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2)
+#define PTE_FILE_LSHIFT4 (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)
+
+static __always_inline pgoff_t pte_to_pgoff(pte_t pte)
+{
+ return (pgoff_t)
+ (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) +
+ pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) +
+ pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, PTE_FILE_MASK3, PTE_FILE_LSHIFT3) +
+ pte_bitop(pte.pte_low, PTE_FILE_SHIFT4, -1UL, PTE_FILE_LSHIFT4));
+}
+
+static __always_inline pte_t pgoff_to_pte(pgoff_t off)
+{
+ return (pte_t){
+ .pte_low =
+ pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) +
+ pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) +
+ pte_bitop(off, PTE_FILE_LSHIFT3, PTE_FILE_MASK3, PTE_FILE_SHIFT3) +
+ pte_bitop(off, PTE_FILE_LSHIFT4, -1UL, PTE_FILE_SHIFT4) +
+ _PAGE_FILE,
+ };
+}
#else /* CONFIG_MEM_SOFT_DIRTY */
@@ -115,22 +125,30 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
-#define pte_to_pgoff(pte) \
- ((((pte).pte_low >> PTE_FILE_SHIFT1) \
- & ((1U << PTE_FILE_BITS1) - 1)) \
- + ((((pte).pte_low >> PTE_FILE_SHIFT2) \
- & ((1U << PTE_FILE_BITS2) - 1)) << PTE_FILE_BITS1) \
- + (((pte).pte_low >> PTE_FILE_SHIFT3) \
- << (PTE_FILE_BITS1 + PTE_FILE_BITS2)))
-
-#define pgoff_to_pte(off) \
- ((pte_t) { .pte_low = \
- (((off) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \
- + ((((off) >> PTE_FILE_BITS1) & ((1U << PTE_FILE_BITS2) - 1)) \
- << PTE_FILE_SHIFT2) \
- + (((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
- << PTE_FILE_SHIFT3) \
- + _PAGE_FILE })
+#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1)
+#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1)
+
+#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1)
+#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2)
+
+static __always_inline pgoff_t pte_to_pgoff(pte_t pte)
+{
+ return (pgoff_t)
+ (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) +
+ pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) +
+ pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3));
+}
+
+static __always_inline pte_t pgoff_to_pte(pgoff_t off)
+{
+ return (pte_t){
+ .pte_low =
+ pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) +
+ pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) +
+ pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) +
+ _PAGE_FILE,
+ };
+}
#endif /* CONFIG_MEM_SOFT_DIRTY */
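
The open-coded shift/mask pyramids are now one helper applied per field. A stand-alone round-trip check of pte_bitop() itself, with an invented two-field layout (bits 3:1 and 8:5 of the pte carrying a 7-bit offset), shows the pack/unpack symmetry the pte_to_pgoff()/pgoff_to_pte() pairs rely on:

#include <assert.h>

/* Same helper as above, reproduced stand-alone for a quick user-space test. */
static unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
			       unsigned long mask, unsigned int leftshift)
{
	return ((value >> rightshift) & mask) << leftshift;
}

int main(void)
{
	unsigned long off = 0x5b;	/* 7-bit file offset to encode */
	unsigned long pte, back;

	pte  = pte_bitop(off, 0, 0x7, 1);	/* off[2:0] -> pte[3:1] */
	pte |= pte_bitop(off, 3, 0xf, 5);	/* off[6:3] -> pte[8:5] */

	back  = pte_bitop(pte, 1, 0x7, 0);	/* pte[3:1] -> off[2:0] */
	back |= pte_bitop(pte, 5, 0xf, 3);	/* pte[8:5] -> off[6:3] */

	assert(back == off);
	return 0;
}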
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 2d88344..c883bf7 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -58,7 +58,7 @@ typedef struct { pteval_t pte; } pte_t;
#define VMALLOC_START _AC(0xffffc90000000000, UL)
#define VMALLOC_END _AC(0xffffe8ffffffffff, UL)
#define VMEMMAP_START _AC(0xffffea0000000000, UL)
-#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
+#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
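
The practical effect of deriving MODULES_VADDR from KERNEL_IMAGE_SIZE is easiest to see numerically, using __START_KERNEL_map = 0xffffffff80000000 and the MODULES_END value above:

/*
 * Default 512 MiB image (KERNEL_IMAGE_SIZE_DEFAULT):
 *   MODULES_VADDR = 0xffffffff80000000 + 0x20000000 = 0xffffffffa0000000
 *   MODULES_LEN   = 0xffffffffff000000 - 0xffffffffa0000000 = 0x5f000000 (~1520 MiB)
 *
 * CONFIG_RANDOMIZE_BASE_MAX_OFFSET = 1 GiB:
 *   MODULES_VADDR = 0xffffffff80000000 + 0x40000000 = 0xffffffffc0000000
 *   MODULES_LEN   = 0xffffffffff000000 - 0xffffffffc0000000 = 0x3f000000 (~1008 MiB)
 *
 * i.e. the old hard-coded 0xffffffffa0000000 is exactly the 512 MiB case, and
 * enabling the full KASLR range trades roughly 512 MiB of module space for it.
 */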
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 0ecac25..a83aa44 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -382,7 +382,8 @@ static inline void update_page_count(int level, unsigned long pages) { }
*/
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern phys_addr_t slow_virt_to_phys(void *__address);
-
+extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
+ unsigned numpages, unsigned long page_flags);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_DEFS_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 7b034a4..fdedd38 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -27,7 +27,6 @@ struct mm_struct;
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
-#include <linux/init.h>
#include <linux/err.h>
#include <linux/irqflags.h>
@@ -72,6 +71,7 @@ extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
+extern u16 __read_mostly tlb_lld_1g[NR_INFO];
extern s8 __read_mostly tlb_flushall_shift;
/*
@@ -370,6 +370,20 @@ struct ymmh_struct {
u32 ymmh_space[64];
};
+/* We don't support LWP yet: */
+struct lwp_struct {
+ u8 reserved[128];
+};
+
+struct bndregs_struct {
+ u64 bndregs[8];
+} __packed;
+
+struct bndcsr_struct {
+ u64 cfg_reg_u;
+ u64 status_reg;
+} __packed;
+
struct xsave_hdr_struct {
u64 xstate_bv;
u64 reserved1[2];
@@ -380,6 +394,9 @@ struct xsave_struct {
struct i387_fxsave_struct i387;
struct xsave_hdr_struct xsave_hdr;
struct ymmh_struct ymmh;
+ struct lwp_struct lwp;
+ struct bndregs_struct bndregs;
+ struct bndcsr_struct bndcsr;
/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));
@@ -700,29 +717,6 @@ static inline void sync_core(void)
#endif
}
-static inline void __monitor(const void *eax, unsigned long ecx,
- unsigned long edx)
-{
- /* "monitor %eax, %ecx, %edx;" */
- asm volatile(".byte 0x0f, 0x01, 0xc8;"
- :: "a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
- /* "mwait %eax, %ecx;" */
- asm volatile(".byte 0x0f, 0x01, 0xc9;"
- :: "a" (eax), "c" (ecx));
-}
-
-static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
-{
- trace_hardirqs_on();
- /* "mwait %eax, %ecx;" */
- asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
- :: "a" (eax), "c" (ecx));
-}
-
extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 942a086..14fd6fd 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -60,7 +60,6 @@ struct pt_regs {
#endif /* !__i386__ */
-#include <linux/init.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt_types.h>
#endif
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 59bcf4e..d62c9f8 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -3,7 +3,6 @@
#include <uapi/asm/setup.h>
-
#define COMMAND_LINE_SIZE 2048
#include <linux/linkage.h>
@@ -29,6 +28,8 @@
#include <asm/bootparam.h>
#include <asm/x86_init.h>
+extern u64 relocated_ramdisk;
+
/* Interrupt control for vSMPowered x86_64 systems */
#ifdef CONFIG_X86_64
void vsmp_init(void);
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4137890..8cd27e0 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -2,7 +2,6 @@
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
-#include <linux/init.h>
#include <asm/percpu.h>
/*
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 34baa0e..a04eabd 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -1,9 +1,9 @@
#ifndef _ASM_X86_TIMER_H
#define _ASM_X86_TIMER_H
-#include <linux/init.h>
#include <linux/pm.h>
#include <linux/percpu.h>
#include <linux/interrupt.h>
+#include <linux/math64.h>
#define TICK_SIZE (tick_nsec / 1000)
@@ -12,68 +12,26 @@ extern int recalibrate_cpu_khz(void);
extern int no_timer_check;
-/* Accelerators for sched_clock()
- * convert from cycles(64bits) => nanoseconds (64bits)
- * basic equation:
- * ns = cycles / (freq / ns_per_sec)
- * ns = cycles * (ns_per_sec / freq)
- * ns = cycles * (10^9 / (cpu_khz * 10^3))
- * ns = cycles * (10^6 / cpu_khz)
+/*
+ * We use the full linear equation: f(x) = a + b*x, in order to allow
+ * a continuous function in the face of dynamic freq changes.
*
- * Then we use scaling math (suggested by george@mvista.com) to get:
- * ns = cycles * (10^6 * SC / cpu_khz) / SC
- * ns = cycles * cyc2ns_scale / SC
+ * Continuity means that when our frequency changes our slope (b), we want to
+ * ensure that: f(t) == f'(t), which gives: a + b*t == a' + b'*t.
*
- * And since SC is a constant power of two, we can convert the div
- * into a shift.
+ * Without an offset (a) the above would not be possible.
*
- * We can use khz divisor instead of mhz to keep a better precision, since
- * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
- * (mathieu.desnoyers@polymtl.ca)
- *
- * -johnstul@us.ibm.com "math is hard, lets go shopping!"
- *
- * In:
- *
- * ns = cycles * cyc2ns_scale / SC
- *
- * Although we may still have enough bits to store the value of ns,
- * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
- * leading to an incorrect result.
- *
- * To avoid this, we can decompose 'cycles' into quotient and remainder
- * of division by SC. Then,
- *
- * ns = (quot * SC + rem) * cyc2ns_scale / SC
- * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
- *
- * - sqazi@google.com
+ * See the comment near cycles_2_ns() for details on how we compute (b).
*/
-
-DECLARE_PER_CPU(unsigned long, cyc2ns);
-DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
-
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
-
-static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
-{
- int cpu = smp_processor_id();
- unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
- ns += mult_frac(cyc, per_cpu(cyc2ns, cpu),
- (1UL << CYC2NS_SCALE_FACTOR));
- return ns;
-}
-
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
- unsigned long long ns;
- unsigned long flags;
-
- local_irq_save(flags);
- ns = __cycles_2_ns(cyc);
- local_irq_restore(flags);
-
- return ns;
-}
+struct cyc2ns_data {
+ u32 cyc2ns_mul;
+ u32 cyc2ns_shift;
+ u64 cyc2ns_offset;
+ u32 __count;
+ /* u32 hole */
+}; /* 24 bytes -- do not grow */
+
+extern struct cyc2ns_data *cyc2ns_read_begin(void);
+extern void cyc2ns_read_end(struct cyc2ns_data *);
#endif /* _ASM_X86_TIMER_H */
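
A worked instance of the continuity requirement in the comment: if at TSC value t the old parameters are (a, b) and a frequency change yields a new slope b', choosing a' = a + (b - b') * t gives a + b*t == a' + b'*t, so sched_clock() does not jump. The evaluation itself is a fixed-point multiply-and-shift; the sketch below spells it out with assumed numbers (shift = 10, 2.5 GHz TSC) and ignores the overflow-safe multiply helper the kernel proper would use.

/* ns = offset + (cycles * mul) >> shift.
 * For a 2.5 GHz TSC with shift = 10: mul = (1000000 << 10) / 2500000 = 409,
 * i.e. 0.4 ns per cycle scaled by 1024 (assumed example values). */
static inline unsigned long long example_cycles_to_ns(const struct cyc2ns_data *d,
						      unsigned long long cycles)
{
	return d->cyc2ns_offset + ((cycles * d->cyc2ns_mul) >> d->cyc2ns_shift);
}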
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 235be70..57ae63c 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -65,4 +65,7 @@ extern int notsc_setup(char *);
extern void tsc_save_sched_clock_state(void);
extern void tsc_restore_sched_clock_state(void);
+/* MSR based TSC calibration for Intel Atom SoC platforms */
+int try_msr_calibrate_tsc(unsigned long *fast_calibrate);
+
#endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 8ec57c0..0d592e0 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -40,22 +40,30 @@
/*
* Test whether a block of memory is a valid user space address.
* Returns 0 if the range is valid, nonzero otherwise.
- *
- * This is equivalent to the following test:
- * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
- *
- * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
*/
+static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
+{
+ /*
+ * If we have used "sizeof()" for the size,
+ * we know it won't overflow the limit (but
+ * it might overflow the 'addr', so it's
+ * important to subtract the size from the
+ * limit, not add it to the address).
+ */
+ if (__builtin_constant_p(size))
+ return addr > limit - size;
+
+ /* Arbitrary sizes? Be careful about overflow */
+ addr += size;
+ if (addr < size)
+ return true;
+ return addr > limit;
+}
#define __range_not_ok(addr, size, limit) \
({ \
- unsigned long flag, roksum; \
__chk_user_ptr(addr); \
- asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \
- : "=&r" (flag), "=r" (roksum) \
- : "1" (addr), "g" ((long)(size)), \
- "rm" (limit)); \
- flag; \
+ __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
/**
@@ -78,7 +86,7 @@
* this function, memory access functions may still return -EFAULT.
*/
#define access_ok(type, addr, size) \
- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
+ likely(!__range_not_ok(addr, size, user_addr_max()))
/*
* The exception table consists of pairs of addresses relative to the
@@ -525,6 +533,98 @@ extern __must_check long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
+extern void __cmpxchg_wrong_size(void)
+ __compiletime_error("Bad argument size for cmpxchg");
+
+#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
+({ \
+ int __ret = 0; \
+ __typeof__(ptr) __uval = (uval); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ switch (size) { \
+ case 1: \
+ { \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "q" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ case 2: \
+ { \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "r" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ case 4: \
+ { \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "r" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ case 8: \
+ { \
+ if (!IS_ENABLED(CONFIG_X86_64)) \
+ __cmpxchg_wrong_size(); \
+ \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "r" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ default: \
+ __cmpxchg_wrong_size(); \
+ } \
+ *__uval = __old; \
+ __ret; \
+})
+
+#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
+({ \
+ access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
+ __user_atomic_cmpxchg_inatomic((uval), (ptr), \
+ (old), (new), sizeof(*(ptr))) : \
+ -EFAULT; \
+})
+
/*
* movsl can be slow when source and dest are not both 8-byte aligned
*/
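
Back at the top of this header, the rewritten range check deserves a concrete case: when addr sits near the top of the address space, addr + size wraps to a small value and a naive "addr + size > limit" test would accept a bogus range, which is why the variable-size path checks for the wrap and the constant-size path subtracts from the limit instead. A minimal user-space model:

#include <assert.h>

/* Stand-alone model of __chk_range_not_ok()'s variable-size path:
 * returns nonzero when the range is NOT ok. */
static int example_range_not_ok(unsigned long addr, unsigned long size,
				unsigned long limit)
{
	addr += size;
	if (addr < size)		/* the addition wrapped around */
		return 1;
	return addr > limit;
}

int main(void)
{
	unsigned long limit = ~0UL >> 1;	/* stand-in for user_addr_max() */

	/* addr + size wraps to 4; "addr + size > limit" alone would accept it. */
	assert(example_range_not_ok(~0UL - 3, 8, limit));
	/* An ordinary in-range block passes (returns 0). */
	assert(!example_range_not_ok(0x1000, 4096, limit));
	return 0;
}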
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 190413d..12a26b9 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -204,13 +204,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
- return __copy_from_user_nocheck(dst, (__force const void *)src, size);
+ return __copy_from_user_nocheck(dst, src, size);
}
static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
- return __copy_to_user_nocheck((__force void *)dst, src, size);
+ return __copy_to_user_nocheck(dst, src, size);
}
extern long __copy_user_nocache(void *dst, const void __user *src,
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 0415cda..5547389 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -9,6 +9,8 @@
#define XSTATE_FP 0x1
#define XSTATE_SSE 0x2
#define XSTATE_YMM 0x4
+#define XSTATE_BNDREGS 0x8
+#define XSTATE_BNDCSR 0x10
#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
@@ -20,10 +22,14 @@
#define XSAVE_YMM_SIZE 256
#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
-/*
- * These are the features that the OS can handle currently.
- */
-#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
+/* Supported features which support lazy state saving */
+#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
+
+/* Supported features which require eager state saving */
+#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR)
+
+/* All currently supported features */
+#define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER)
#ifdef CONFIG_X86_64
#define REX_PREFIX "0x48, "
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 9c3733c..225b098 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -6,6 +6,7 @@
#define SETUP_E820_EXT 1
#define SETUP_DTB 2
#define SETUP_PCI 3
+#define SETUP_EFI 4
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
@@ -23,6 +24,7 @@
#define XLF_CAN_BE_LOADED_ABOVE_4G (1<<1)
#define XLF_EFI_HANDOVER_32 (1<<2)
#define XLF_EFI_HANDOVER_64 (1<<3)
+#define XLF_EFI_KEXEC (1<<4)
#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 37813b5..59cea18 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -184,6 +184,7 @@
#define MSR_AMD64_PATCH_LOADER 0xc0010020
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
#define MSR_AMD64_OSVW_STATUS 0xc0010141
+#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
#define MSR_AMD64_BU_CFG2 0xc001102a
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
diff --git a/arch/x86/include/uapi/asm/stat.h b/arch/x86/include/uapi/asm/stat.h
index 7b3ddc3..bc03eb5 100644
--- a/arch/x86/include/uapi/asm/stat.h
+++ b/arch/x86/include/uapi/asm/stat.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_STAT_H
#define _ASM_X86_STAT_H
+#include <asm/posix_types.h>
+
#define STAT_HAVE_NSEC 1
#ifdef __i386__
@@ -78,26 +80,26 @@ struct stat64 {
#else /* __i386__ */
struct stat {
- unsigned long st_dev;
- unsigned long st_ino;
- unsigned long st_nlink;
-
- unsigned int st_mode;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int __pad0;
- unsigned long st_rdev;
- long st_size;
- long st_blksize;
- long st_blocks; /* Number 512-byte blocks allocated. */
-
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- long __unused[3];
+ __kernel_ulong_t st_dev;
+ __kernel_ulong_t st_ino;
+ __kernel_ulong_t st_nlink;
+
+ unsigned int st_mode;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int __pad0;
+ __kernel_ulong_t st_rdev;
+ __kernel_long_t st_size;
+ __kernel_long_t st_blksize;
+ __kernel_long_t st_blocks; /* Number 512-byte blocks allocated. */
+
+ __kernel_ulong_t st_atime;
+ __kernel_ulong_t st_atime_nsec;
+ __kernel_ulong_t st_mtime;
+ __kernel_ulong_t st_mtime_nsec;
+ __kernel_ulong_t st_ctime;
+ __kernel_ulong_t st_ctime_nsec;
+ __kernel_long_t __unused[3];
};
/* We don't need to memset the whole thing just to initialize the padding */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 9b0a34e..cb648c8 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -29,10 +29,11 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-y += syscall_$(BITS).o
obj-$(CONFIG_X86_64) += vsyscall_64.o
obj-$(CONFIG_X86_64) += vsyscall_emu_64.o
+obj-$(CONFIG_SYSFS) += ksysfs.o
obj-y += bootflag.o e820.o
obj-y += pci-dma.o quirks.o topology.o kdebugfs.o
obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
-obj-y += tsc.o io_delay.o rtc.o
+obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
@@ -91,15 +92,6 @@ obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
-obj-$(CONFIG_MICROCODE_EARLY) += microcode_core_early.o
-obj-$(CONFIG_MICROCODE_INTEL_EARLY) += microcode_intel_early.o
-obj-$(CONFIG_MICROCODE_INTEL_LIB) += microcode_intel_lib.o
-microcode-y := microcode_core.o
-microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o
-microcode-$(CONFIG_MICROCODE_AMD) += microcode_amd.o
-obj-$(CONFIG_MICROCODE_AMD_EARLY) += microcode_amd_early.o
-obj-$(CONFIG_MICROCODE) += microcode.o
-
obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
@@ -111,6 +103,7 @@ obj-$(CONFIG_EFI) += sysfb_efi.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
obj-$(CONFIG_TRACING) += tracepoint.o
+obj-$(CONFIG_IOSF_MBI) += iosf_mbi.o
###
# 64 bit specific files
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index d2b7f27..e69182f 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -150,29 +150,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
- if (!need_resched()) {
- if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
- clflush((void *)&current_thread_info()->flags);
-
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __mwait(ax, cx);
- }
-}
-
void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
unsigned int cpu = smp_processor_id();
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index d278736..7f26c9a 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -75,6 +75,13 @@ unsigned int max_physical_apicid;
physid_mask_t phys_cpu_present_map;
/*
+ * Processor to be disabled, as specified by the kernel parameter
+ * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to
+ * avoid undefined behaviour caused by sending INIT from AP to BSP.
+ */
+static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID;
+
+/*
* Map cpu index to physical APIC ID
*/
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
@@ -1968,7 +1975,7 @@ __visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
*/
static inline void __smp_error_interrupt(struct pt_regs *regs)
{
- u32 v0, v1;
+ u32 v;
u32 i = 0;
static const char * const error_interrupt_reason[] = {
"Send CS error", /* APIC Error Bit 0 */
@@ -1982,21 +1989,20 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
};
/* First tickle the hardware, only then report what went on. -- REW */
- v0 = apic_read(APIC_ESR);
apic_write(APIC_ESR, 0);
- v1 = apic_read(APIC_ESR);
+ v = apic_read(APIC_ESR);
ack_APIC_irq();
atomic_inc(&irq_err_count);
- apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
- smp_processor_id(), v0 , v1);
+ apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
+ smp_processor_id(), v);
- v1 = v1 & 0xff;
- while (v1) {
- if (v1 & 0x1)
+ v &= 0xff;
+ while (v) {
+ if (v & 0x1)
apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]);
i++;
- v1 >>= 1;
+ v >>= 1;
}
apic_printk(APIC_DEBUG, KERN_CONT "\n");
@@ -2115,6 +2121,39 @@ int generic_processor_info(int apicid, int version)
phys_cpu_present_map);
/*
+ * boot_cpu_physical_apicid is designed to have the apicid
+ * returned by read_apic_id(), i.e, the apicid of the
+ * currently booting-up processor. However, on some platforms,
+ * it is temporarily modified by the apicid reported as BSP
+ * through MP table. Concretely:
+ *
+ * - arch/x86/kernel/mpparse.c: MP_processor_info()
+ * - arch/x86/mm/amdtopology.c: amd_numa_init()
+ * - arch/x86/platform/visws/visws_quirks.c: MP_processor_info()
+ *
+ * This function is executed with the modified
+ * boot_cpu_physical_apicid. So, disabled_cpu_apicid kernel
+ * parameter doesn't work to disable APs on kdump 2nd kernel.
+ *
+ * Since fixing handling of boot_cpu_physical_apicid requires
+ * another discussion and tests on each platform, we leave it
+ * for now and here we use read_apic_id() directly in this
+ * function, generic_processor_info().
+ */
+ if (disabled_cpu_apicid != BAD_APICID &&
+ disabled_cpu_apicid != read_apic_id() &&
+ disabled_cpu_apicid == apicid) {
+ int thiscpu = num_processors + disabled_cpus;
+
+ pr_warning("APIC: Disabling requested cpu."
+ " Processor %d/0x%x ignored.\n",
+ thiscpu, apicid);
+
+ disabled_cpus++;
+ return -ENODEV;
+ }
+
+ /*
* If boot cpu has not been detected yet, then only allow upto
* nr_cpu_ids - 1 processors and keep one slot free for boot cpu
*/
@@ -2592,3 +2631,12 @@ static int __init lapic_insert_resource(void)
* that is using request_resource
*/
late_initcall(lapic_insert_resource);
+
+static int __init apic_set_disabled_cpu_apicid(char *arg)
+{
+ if (!arg || !get_option(&arg, &disabled_cpu_apicid))
+ return -EINVAL;
+
+ return 0;
+}
+early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid);
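The early_param above gives the kdump (second) kernel a way to ignore the processor that was the BSP of the crashed kernel, so that no INIT is ever directed at it from an AP. A hedged usage sketch for the second kernel's command line, where the APIC ID value is hypothetical and must be the first kernel's BSP APIC ID:

        disable_cpu_apicid=0

The value is parsed by get_option() into disabled_cpu_apicid and compared against each enumerated APIC ID (and against read_apic_id()) in generic_processor_info(), as the hunk above shows.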
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 00c77cf..5d5b9eb 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -14,7 +14,6 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
-#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <asm/smp.h>
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index e145f28..191ce75 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -15,7 +15,6 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index e63a5bd..a43f068 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1142,9 +1142,10 @@ next:
if (test_bit(vector, used_vectors))
goto next;
- for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
- if (per_cpu(vector_irq, new_cpu)[vector] != -1)
+ for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
+ if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED)
goto next;
+ }
/* Found one! */
current_vector = vector;
current_offset = offset;
@@ -1183,7 +1184,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
vector = cfg->vector;
for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
- per_cpu(vector_irq, cpu)[vector] = -1;
+ per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
cfg->vector = 0;
cpumask_clear(cfg->domain);
@@ -1191,11 +1192,10 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
if (likely(!cfg->move_in_progress))
return;
for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
- for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
- vector++) {
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
if (per_cpu(vector_irq, cpu)[vector] != irq)
continue;
- per_cpu(vector_irq, cpu)[vector] = -1;
+ per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
break;
}
}
@@ -1228,12 +1228,12 @@ void __setup_vector_irq(int cpu)
/* Mark the free vectors */
for (vector = 0; vector < NR_VECTORS; ++vector) {
irq = per_cpu(vector_irq, cpu)[vector];
- if (irq < 0)
+ if (irq <= VECTOR_UNDEFINED)
continue;
cfg = irq_cfg(irq);
if (!cpumask_test_cpu(cpu, cfg->domain))
- per_cpu(vector_irq, cpu)[vector] = -1;
+ per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
}
raw_spin_unlock(&vector_lock);
}
@@ -2202,13 +2202,13 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
me = smp_processor_id();
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
- unsigned int irq;
+ int irq;
unsigned int irr;
struct irq_desc *desc;
struct irq_cfg *cfg;
irq = __this_cpu_read(vector_irq[vector]);
- if (irq == -1)
+ if (irq <= VECTOR_UNDEFINED)
continue;
desc = irq_to_desc(irq);
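The hunks above replace the bare -1 with the VECTOR_UNDEFINED sentinel, and turn the equality tests into <= comparisons so that any other negative per-vector marker also counts as "no IRQ mapped". A minimal sketch of the scheme assumed here; the enumerator values are illustrative (chosen to mirror the old -1 convention), not quoted from the header:

/* Illustrative sentinels for per_cpu(vector_irq)[vector]. */
enum {
        VECTOR_UNDEFINED   = -1,        /* vector never assigned to an IRQ */
        VECTOR_RETRIGGERED = -2,        /* vector freed, retrigger pending */
};

static inline bool vector_has_irq(int irq)
{
        /* anything at or below VECTOR_UNDEFINED means "no IRQ mapped" */
        return irq > VECTOR_UNDEFINED;
}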
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 7434d85..6207156 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -1,6 +1,5 @@
#include <linux/cpumask.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/delay.h>
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index 77c95c0..00146f9 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -29,7 +29,6 @@
#define pr_fmt(fmt) "summit: %s: " fmt, __func__
#include <linux/mm.h>
-#include <linux/init.h>
#include <asm/io.h>
#include <asm/bios_ebda.h>
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 140e29d..cac85ee 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -3,7 +3,6 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
-#include <linux/init.h>
#include <linux/dmar.h>
#include <linux/cpu.h>
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 562a76d..de231e3 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -3,7 +3,6 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
-#include <linux/init.h>
#include <linux/dmar.h>
#include <asm/smp.h>
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 47b56a7..7fd54f0 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -36,12 +36,13 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
endif
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_rapl.o
endif
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
+obj-$(CONFIG_MICROCODE) += microcode/
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index bca023b..d3153e2 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1,5 +1,4 @@
#include <linux/export.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>
@@ -487,7 +486,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
if (!check_tsc_unstable())
- sched_clock_stable = 1;
+ set_sched_clock_stable();
}
#ifdef CONFIG_X86_64
@@ -508,6 +507,16 @@ static void early_init_amd(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
}
#endif
+
+ /* F16h erratum 793, CVE-2013-6885 */
+ if (c->x86 == 0x16 && c->x86_model <= 0xf) {
+ u64 val;
+
+ rdmsrl(MSR_AMD64_LS_CFG, val);
+ if (!(val & BIT(15)))
+ wrmsrl(MSR_AMD64_LS_CFG, val | BIT(15));
+ }
+
}
static const int amd_erratum_383[];
@@ -790,14 +799,10 @@ static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
}
/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
- if (!((eax >> 16) & mask)) {
- u32 a, b, c, d;
-
- cpuid(0x80000005, &a, &b, &c, &d);
- tlb_lld_2m[ENTRIES] = (a >> 16) & 0xff;
- } else {
+ if (!((eax >> 16) & mask))
+ tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
+ else
tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
- }
/* a 4M entry uses two 2M entries */
tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
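The F16h erratum 793 (CVE-2013-6885) workaround added above is the usual read-modify-write MSR pattern: read MSR_AMD64_LS_CFG, set the chicken bit only if it is not already set, and skip the WRMSR otherwise. A hedged, generalized sketch of that pattern (the helper name is hypothetical; it assumes the <asm/msr.h> accessors used in the hunk):

static void msr_set_bit_once(u32 msr, unsigned int bit)
{
        u64 val;

        rdmsrl(msr, val);
        if (!(val & BIT(bit)))          /* avoid a redundant, costly WRMSR */
                wrmsrl(msr, val | BIT(bit));
}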
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 8d5652d..8779eda 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -1,6 +1,5 @@
#include <linux/bitops.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <asm/processor.h>
#include <asm/e820.h>
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6abc172..24b6fd1 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -472,6 +472,7 @@ u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
+u16 __read_mostly tlb_lld_1g[NR_INFO];
/*
* tlb_flushall_shift shows the balance point in replacing cr3 write
@@ -486,13 +487,13 @@ void cpu_detect_tlb(struct cpuinfo_x86 *c)
if (this_cpu->c_detect_tlb)
this_cpu->c_detect_tlb(c);
- printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \
- "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \
+ printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n"
+ "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n"
"tlb_flushall_shift: %d\n",
tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES],
tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES],
- tlb_flushall_shift);
+ tlb_lld_1g[ENTRIES], tlb_flushall_shift);
}
void detect_ht(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index d0969c7..aaf152e 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -1,4 +1,3 @@
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index dc1ec0d..3db61c6 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -1,4 +1,3 @@
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -93,7 +92,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
if (!check_tsc_unstable())
- sched_clock_stable = 1;
+ set_sched_clock_stable();
}
/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
@@ -387,7 +386,8 @@ static void init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_PEBS);
}
- if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
+ if (c->x86 == 6 && cpu_has_clflush &&
+ (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
#ifdef CONFIG_X86_64
@@ -505,6 +505,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
#define TLB_DATA0_2M_4M 0x23
#define STLB_4K 0x41
+#define STLB_4K_2M 0x42
static const struct _tlb_table intel_tlb_table[] = {
{ 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
@@ -525,13 +526,20 @@ static const struct _tlb_table intel_tlb_table[] = {
{ 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" },
{ 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
{ 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
+ { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, fully associative" },
+ { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
+ { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
{ 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
{ 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
{ 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" },
{ 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" },
{ 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" },
+ { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" },
+ { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" },
{ 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" },
{ 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
+ { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" },
+ { 0xc2, TLB_DATA_2M_4M, 16, " DTLB 2 MByte/4MByte pages, 4-way associative" },
{ 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" },
{ 0x00, 0, 0 }
};
@@ -557,6 +565,20 @@ static void intel_tlb_lookup(const unsigned char desc)
if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
break;
+ case STLB_4K_2M:
+ if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+ break;
case TLB_INST_ALL:
if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
@@ -602,6 +624,10 @@ static void intel_tlb_lookup(const unsigned char desc)
if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
break;
+ case TLB_DATA_1G:
+ if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
+ break;
}
}
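The new table entries (0x61, 0x63, 0x76, 0xb5, 0xb6, 0xc1, 0xc2) are one-byte descriptors reported by CPUID leaf 0x2 and matched by intel_tlb_lookup() against intel_tlb_table[]. A hedged sketch of how such descriptors are typically pulled out of the four leaf-2 registers (the helper is illustrative and makes a single pass, only masking off the iteration count):

static void for_each_leaf2_desc(void (*fn)(unsigned char desc))
{
        unsigned int regs[4];
        int i, j;

        cpuid(0x2, &regs[0], &regs[1], &regs[2], &regs[3]);
        regs[0] &= 0xffffff00;                  /* low byte of EAX is an iteration count */

        for (i = 0; i < 4; i++) {
                if (regs[i] & (1U << 31))       /* bit 31 set: no valid descriptors here */
                        continue;
                for (j = 0; j < 4; j++) {
                        unsigned char desc = (regs[i] >> (8 * j)) & 0xff;

                        if (desc)
                                fn(desc);       /* e.g. intel_tlb_lookup(desc) */
                }
        }
}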
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
index de8b60a..a1aef95 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -33,22 +33,28 @@
#include <linux/acpi.h>
#include <linux/cper.h>
#include <acpi/apei.h>
+#include <acpi/ghes.h>
#include <asm/mce.h>
#include "mce-internal.h"
-void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
+void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
{
struct mce m;
- /* Only corrected MC is reported */
- if (!corrected || !(mem_err->validation_bits & CPER_MEM_VALID_PA))
+ if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
return;
mce_setup(&m);
m.bank = 1;
- /* Fake a memory read corrected error with unknown channel */
+ /* Fake a memory read error with unknown channel */
m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
+
+ if (severity >= GHES_SEV_RECOVERABLE)
+ m.status |= MCI_STATUS_UC;
+ if (severity >= GHES_SEV_PANIC)
+ m.status |= MCI_STATUS_PCC;
+
m.addr = mem_err->physical_addr;
mce_log(&m);
mce_notify_irq();
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index b3218cd..4d5419b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1638,15 +1638,15 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
static void mce_start_timer(unsigned int cpu, struct timer_list *t)
{
- unsigned long iv = mce_adjust_timer(check_interval * HZ);
-
- __this_cpu_write(mce_next_interval, iv);
+ unsigned long iv = check_interval * HZ;
if (mca_cfg.ignore_ce || !iv)
return;
+ per_cpu(mce_next_interval, cpu) = iv;
+
t->expires = round_jiffies(jiffies + iv);
- add_timer_on(t, smp_processor_id());
+ add_timer_on(t, cpu);
}
static void __mcheck_cpu_init_timer(void)
@@ -2272,8 +2272,10 @@ static int mce_device_create(unsigned int cpu)
dev->release = &mce_device_release;
err = device_register(dev);
- if (err)
+ if (err) {
+ put_device(dev);
return err;
+ }
for (i = 0; mce_device_attrs[i]; i++) {
err = device_create_file(dev, mce_device_attrs[i]);
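Both fixes in this file deal with acting on behalf of something other than the current context: mce_start_timer(cpu, t) must touch the target CPU's data and arm the timer on that CPU rather than on whichever CPU called it, and a failed device_register() must drop its reference with put_device() instead of leaking it. A distilled sketch of the first pattern, with names mirroring the hunk:

/* A helper that takes a target cpu must not silently act on the calling CPU. */
static void arm_mce_timer_on(unsigned int cpu, struct timer_list *t,
                             unsigned long iv)
{
        per_cpu(mce_next_interval, cpu) = iv;   /* not __this_cpu_write() */
        t->expires = round_jiffies(jiffies + iv);
        add_timer_on(t, cpu);                   /* not smp_processor_id() */
}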
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 4cfe045..fb6156f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -6,7 +6,6 @@
*/
#include <linux/gfp.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index 1c044b1..a304298 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -5,7 +5,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/processor.h>
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index e9a701a..7dc5564 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -5,7 +5,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <asm/processor.h>
#include <asm/mce.h>
diff --git a/arch/x86/kernel/cpu/microcode/Makefile b/arch/x86/kernel/cpu/microcode/Makefile
new file mode 100644
index 0000000..285c854
--- /dev/null
+++ b/arch/x86/kernel/cpu/microcode/Makefile
@@ -0,0 +1,7 @@
+microcode-y := core.o
+obj-$(CONFIG_MICROCODE) += microcode.o
+microcode-$(CONFIG_MICROCODE_INTEL) += intel.o intel_lib.o
+microcode-$(CONFIG_MICROCODE_AMD) += amd.o
+obj-$(CONFIG_MICROCODE_EARLY) += core_early.o
+obj-$(CONFIG_MICROCODE_INTEL_EARLY) += intel_early.o
+obj-$(CONFIG_MICROCODE_AMD_EARLY) += amd_early.o
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index c3d4cc9..8fffd84 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -182,10 +182,10 @@ int __apply_microcode_amd(struct microcode_amd *mc_amd)
{
u32 rev, dummy;
- wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);
+ native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);
/* verify patch application was successful */
- rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+ native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
if (rev != mc_amd->hdr.patch_id)
return -1;
@@ -332,6 +332,9 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
patch->patch_id = mc_hdr->patch_id;
patch->equiv_cpu = proc_id;
+ pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
+ __func__, patch->patch_id, proc_id);
+
/* ... and add to cache. */
update_cache(patch);
@@ -390,9 +393,9 @@ enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
struct ucode_patch *p = find_patch(smp_processor_id());
if (p) {
- memset(amd_bsp_mpb, 0, MPB_MAX_SIZE);
- memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data),
- MPB_MAX_SIZE));
+ memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+ memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
+ PATCH_MAX_SIZE));
}
}
#endif
@@ -430,7 +433,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
if (c->x86 >= 0x15)
snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
- if (request_firmware(&fw, (const char *)fw_name, device)) {
+ if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
pr_debug("failed to load file %s\n", fw_name);
goto out;
}
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
index 6073104..8384c0f 100644
--- a/arch/x86/kernel/microcode_amd_early.c
+++ b/arch/x86/kernel/cpu/microcode/amd_early.c
@@ -2,6 +2,7 @@
* Copyright (C) 2013 Advanced Micro Devices, Inc.
*
* Author: Jacob Shin <jacob.shin@amd.com>
+ * Fixes: Borislav Petkov <bp@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -15,10 +16,18 @@
#include <asm/setup.h>
#include <asm/microcode_amd.h>
-static bool ucode_loaded;
+/*
+ * This points to the current valid container of microcode patches which we will
+ * save from the initrd before jettisoning its contents.
+ */
+static u8 *container;
+static size_t container_size;
+
static u32 ucode_new_rev;
-static unsigned long ucode_offset;
-static size_t ucode_size;
+u8 amd_ucode_patch[PATCH_MAX_SIZE];
+static u16 this_equiv_id;
+
+struct cpio_data ucode_cpio;
/*
* Microcode patch container file is prepended to the initrd in cpio format.
@@ -32,9 +41,6 @@ static struct cpio_data __init find_ucode_in_initrd(void)
char *path;
void *start;
size_t size;
- unsigned long *uoffset;
- size_t *usize;
- struct cpio_data cd;
#ifdef CONFIG_X86_32
struct boot_params *p;
@@ -47,30 +53,50 @@ static struct cpio_data __init find_ucode_in_initrd(void)
path = (char *)__pa_nodebug(ucode_path);
start = (void *)p->hdr.ramdisk_image;
size = p->hdr.ramdisk_size;
- uoffset = (unsigned long *)__pa_nodebug(&ucode_offset);
- usize = (size_t *)__pa_nodebug(&ucode_size);
#else
path = ucode_path;
start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
size = boot_params.hdr.ramdisk_size;
- uoffset = &ucode_offset;
- usize = &ucode_size;
#endif
- cd = find_cpio_data(path, start, size, &offset);
- if (!cd.data)
- return cd;
+ return find_cpio_data(path, start, size, &offset);
+}
- if (*(u32 *)cd.data != UCODE_MAGIC) {
- cd.data = NULL;
- cd.size = 0;
- return cd;
- }
+static size_t compute_container_size(u8 *data, u32 total_size)
+{
+ size_t size = 0;
+ u32 *header = (u32 *)data;
- *uoffset = (u8 *)cd.data - (u8 *)start;
- *usize = cd.size;
+ if (header[0] != UCODE_MAGIC ||
+ header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
+ header[2] == 0) /* size */
+ return size;
- return cd;
+ size = header[2] + CONTAINER_HDR_SZ;
+ total_size -= size;
+ data += size;
+
+ while (total_size) {
+ u16 patch_size;
+
+ header = (u32 *)data;
+
+ if (header[0] != UCODE_UCODE_TYPE)
+ break;
+
+ /*
+ * Sanity-check patch size.
+ */
+ patch_size = header[1];
+ if (patch_size > PATCH_MAX_SIZE)
+ break;
+
+ size += patch_size + SECTION_HDR_SIZE;
+ data += patch_size + SECTION_HDR_SIZE;
+ total_size -= patch_size + SECTION_HDR_SIZE;
+ }
+
+ return size;
}
/*
@@ -85,23 +111,22 @@ static struct cpio_data __init find_ucode_in_initrd(void)
static void apply_ucode_in_initrd(void *ucode, size_t size)
{
struct equiv_cpu_entry *eq;
+ size_t *cont_sz;
u32 *header;
- u8 *data;
+ u8 *data, **cont;
u16 eq_id = 0;
int offset, left;
- u32 rev, eax;
+ u32 rev, eax, ebx, ecx, edx;
u32 *new_rev;
- unsigned long *uoffset;
- size_t *usize;
#ifdef CONFIG_X86_32
new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
- uoffset = (unsigned long *)__pa_nodebug(&ucode_offset);
- usize = (size_t *)__pa_nodebug(&ucode_size);
+ cont_sz = (size_t *)__pa_nodebug(&container_size);
+ cont = (u8 **)__pa_nodebug(&container);
#else
new_rev = &ucode_new_rev;
- uoffset = &ucode_offset;
- usize = &ucode_size;
+ cont_sz = &container_size;
+ cont = &container;
#endif
data = ucode;
@@ -109,23 +134,37 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
header = (u32 *)data;
/* find equiv cpu table */
-
- if (header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
+ if (header[0] != UCODE_MAGIC ||
+ header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
header[2] == 0) /* size */
return;
- eax = cpuid_eax(0x00000001);
+ eax = 0x00000001;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
while (left > 0) {
eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);
+ *cont = data;
+
+ /* Advance past the container header */
offset = header[2] + CONTAINER_HDR_SZ;
data += offset;
left -= offset;
eq_id = find_equiv_id(eq, eax);
- if (eq_id)
+ if (eq_id) {
+ this_equiv_id = eq_id;
+ *cont_sz = compute_container_size(*cont, left + offset);
+
+ /*
+ * truncate how much we need to iterate over in the
+ * ucode update loop below
+ */
+ left = *cont_sz - offset;
break;
+ }
/*
* support multiple container files appended together. if this
@@ -145,19 +184,18 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
/* mark where the next microcode container file starts */
offset = data - (u8 *)ucode;
- *uoffset += offset;
- *usize -= offset;
ucode = data;
}
if (!eq_id) {
- *usize = 0;
+ *cont = NULL;
+ *cont_sz = 0;
return;
}
/* find ucode and update if needed */
- rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
+ native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
while (left > 0) {
struct microcode_amd *mc;
@@ -168,73 +206,83 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
break;
mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);
- if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id)
- if (__apply_microcode_amd(mc) == 0) {
+
+ if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {
+
+ if (!__apply_microcode_amd(mc)) {
rev = mc->hdr.patch_id;
*new_rev = rev;
+
+ /* save ucode patch */
+ memcpy(amd_ucode_patch, mc,
+ min_t(u32, header[1], PATCH_MAX_SIZE));
}
+ }
offset = header[1] + SECTION_HDR_SIZE;
data += offset;
left -= offset;
}
-
- /* mark where this microcode container file ends */
- offset = *usize - (data - (u8 *)ucode);
- *usize -= offset;
-
- if (!(*new_rev))
- *usize = 0;
}
void __init load_ucode_amd_bsp(void)
{
- struct cpio_data cd = find_ucode_in_initrd();
- if (!cd.data)
+ struct cpio_data cp;
+ void **data;
+ size_t *size;
+
+#ifdef CONFIG_X86_32
+ data = (void **)__pa_nodebug(&ucode_cpio.data);
+ size = (size_t *)__pa_nodebug(&ucode_cpio.size);
+#else
+ data = &ucode_cpio.data;
+ size = &ucode_cpio.size;
+#endif
+
+ cp = find_ucode_in_initrd();
+ if (!cp.data)
return;
- apply_ucode_in_initrd(cd.data, cd.size);
+ *data = cp.data;
+ *size = cp.size;
+
+ apply_ucode_in_initrd(cp.data, cp.size);
}
#ifdef CONFIG_X86_32
-u8 amd_bsp_mpb[MPB_MAX_SIZE];
-
/*
* On 32-bit, since AP's early load occurs before paging is turned on, we
* cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
* cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
- * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which
- * is used upon resume from suspend.
+ * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
+ * which is used upon resume from suspend.
*/
void load_ucode_amd_ap(void)
{
struct microcode_amd *mc;
- unsigned long *initrd;
- unsigned long *uoffset;
size_t *usize;
- void *ucode;
+ void **ucode;
- mc = (struct microcode_amd *)__pa(amd_bsp_mpb);
+ mc = (struct microcode_amd *)__pa(amd_ucode_patch);
if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
__apply_microcode_amd(mc);
return;
}
- initrd = (unsigned long *)__pa(&initrd_start);
- uoffset = (unsigned long *)__pa(&ucode_offset);
- usize = (size_t *)__pa(&ucode_size);
+ ucode = (void *)__pa_nodebug(&container);
+ usize = (size_t *)__pa_nodebug(&container_size);
- if (!*usize || !*initrd)
+ if (!*ucode || !*usize)
return;
- ucode = (void *)((unsigned long)__pa(*initrd) + *uoffset);
- apply_ucode_in_initrd(ucode, *usize);
+ apply_ucode_in_initrd(*ucode, *usize);
}
static void __init collect_cpu_sig_on_bsp(void *arg)
{
unsigned int cpu = smp_processor_id();
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+
uci->cpu_sig.sig = cpuid_eax(0x00000001);
}
#else
@@ -242,36 +290,54 @@ void load_ucode_amd_ap(void)
{
unsigned int cpu = smp_processor_id();
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+ struct equiv_cpu_entry *eq;
+ struct microcode_amd *mc;
u32 rev, eax;
+ u16 eq_id;
+
+ /* Exit if called on the BSP. */
+ if (!cpu)
+ return;
+
+ if (!container)
+ return;
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
- eax = cpuid_eax(0x00000001);
uci->cpu_sig.rev = rev;
uci->cpu_sig.sig = eax;
- if (cpu && !ucode_loaded) {
- void *ucode;
+ eax = cpuid_eax(0x00000001);
+ eq = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
- if (!ucode_size || !initrd_start)
- return;
+ eq_id = find_equiv_id(eq, eax);
+ if (!eq_id)
+ return;
+
+ if (eq_id == this_equiv_id) {
+ mc = (struct microcode_amd *)amd_ucode_patch;
- ucode = (void *)(initrd_start + ucode_offset);
- eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
- if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK)
+ if (mc && rev < mc->hdr.patch_id) {
+ if (!__apply_microcode_amd(mc))
+ ucode_new_rev = mc->hdr.patch_id;
+ }
+
+ } else {
+ if (!ucode_cpio.data)
return;
- ucode_loaded = true;
+ /*
+ * AP has a different equivalence ID than BSP, looks like
+ * mixed-steppings silicon so go through the ucode blob anew.
+ */
+ apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size);
}
-
- apply_microcode_amd(cpu);
}
#endif
int __init save_microcode_in_initrd_amd(void)
{
enum ucode_state ret;
- void *ucode;
u32 eax;
#ifdef CONFIG_X86_32
@@ -280,22 +346,35 @@ int __init save_microcode_in_initrd_amd(void)
if (!uci->cpu_sig.sig)
smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+
+ /*
+ * Take into account the fact that the ramdisk might get relocated
+ * and therefore we need to recompute the container's position in
+ * virtual memory space.
+ */
+ container = (u8 *)(__va((u32)relocated_ramdisk) +
+ ((u32)container - boot_params.hdr.ramdisk_image));
#endif
if (ucode_new_rev)
pr_info("microcode: updated early to new patch_level=0x%08x\n",
ucode_new_rev);
- if (ucode_loaded || !ucode_size || !initrd_start)
- return 0;
+ if (!container)
+ return -EINVAL;
- ucode = (void *)(initrd_start + ucode_offset);
eax = cpuid_eax(0x00000001);
eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
- ret = load_microcode_amd(eax, ucode, ucode_size);
+ ret = load_microcode_amd(eax, container, container_size);
if (ret != UCODE_OK)
return -EINVAL;
- ucode_loaded = true;
+ /*
+ * The initrd is about to be freed, so stash the patches for the current
+ * family and switch to the patch cache for cpu hotplug etc. later.
+ */
+ container = NULL;
+ container_size = 0;
+
return 0;
}
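compute_container_size() and apply_ucode_in_initrd() above walk the same on-disk layout: one equivalence-table section followed by a sequence of per-patch sections. A hedged sketch of that layout with illustrative struct names (the kernel indexes the raw u32 header words directly, as the hunks show):

/* Illustrative view of an AMD microcode container; not the kernel's own types. */
struct container_hdr {          /* equivalence-table section                    */
        u32 magic;              /* UCODE_MAGIC                                  */
        u32 type;               /* UCODE_EQUIV_CPU_TABLE_TYPE                   */
        u32 size;               /* bytes of equiv_cpu_entry records that follow */
};

struct section_hdr {            /* one per microcode patch                      */
        u32 type;               /* UCODE_UCODE_TYPE                             */
        u32 size;               /* bytes of the struct microcode_amd payload    */
};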
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/cpu/microcode/core.c
index 15c9876..15c9876 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c
index be7f851..be7f851 100644
--- a/arch/x86/kernel/microcode_core_early.c
+++ b/arch/x86/kernel/cpu/microcode/core_early.c
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 5fb2ceb..a276fa7 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -278,7 +278,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
sprintf(name, "intel-ucode/%02x-%02x-%02x",
c->x86, c->x86_model, c->x86_mask);
- if (request_firmware(&firmware, name, device)) {
+ if (request_firmware_direct(&firmware, name, device)) {
pr_debug("data file %s load failed\n", name);
return UCODE_NFOUND;
}
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
index 1575deb..18f7391 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
@@ -365,16 +365,6 @@ out:
return state;
}
-#define native_rdmsr(msr, val1, val2) \
-do { \
- u64 __val = native_read_msr((msr)); \
- (void)((val1) = (u32)__val); \
- (void)((val2) = (u32)(__val >> 32)); \
-} while (0)
-
-#define native_wrmsr(msr, low, high) \
- native_write_msr(msr, low, high);
-
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
unsigned int val[2];
diff --git a/arch/x86/kernel/microcode_intel_lib.c b/arch/x86/kernel/cpu/microcode/intel_lib.c
index ce69320..ce69320 100644
--- a/arch/x86/kernel/microcode_intel_lib.c
+++ b/arch/x86/kernel/cpu/microcode/intel_lib.c
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8e13293..b886451 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1883,21 +1883,27 @@ static struct pmu pmu = {
void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
+ struct cyc2ns_data *data;
+
userpg->cap_user_time = 0;
userpg->cap_user_time_zero = 0;
userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
userpg->pmc_width = x86_pmu.cntval_bits;
- if (!sched_clock_stable)
+ if (!sched_clock_stable())
return;
+ data = cyc2ns_read_begin();
+
userpg->cap_user_time = 1;
- userpg->time_mult = this_cpu_read(cyc2ns);
- userpg->time_shift = CYC2NS_SCALE_FACTOR;
- userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
+ userpg->time_mult = data->cyc2ns_mul;
+ userpg->time_shift = data->cyc2ns_shift;
+ userpg->time_offset = data->cyc2ns_offset - now;
userpg->cap_user_time_zero = 1;
- userpg->time_zero = this_cpu_read(cyc2ns_offset);
+ userpg->time_zero = data->cyc2ns_offset;
+
+ cyc2ns_read_end(data);
}
/*
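With the cyc2ns multiplier, shift and offset copied into the mmap page above, user space can convert raw TSC cycles to perf time without a syscall. A hedged userspace sketch following the formula documented for perf_event_mmap_page; checking cap_user_time and re-reading under the pc->lock seqcount is omitted for brevity:

#include <linux/perf_event.h>

/* 'pc' is the mmap'ed perf_event_mmap_page for the event. */
static __u64 perf_cyc_to_time(const struct perf_event_mmap_page *pc, __u64 cyc)
{
        __u64 quot = cyc >> pc->time_shift;
        __u64 rem  = cyc & (((__u64)1 << pc->time_shift) - 1);

        /* nanoseconds relative to time_offset's reference point */
        return pc->time_offset + quot * pc->time_mult +
               ((rem * pc->time_mult) >> pc->time_shift);
}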
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index e09f0bf..4b8e4d3 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
+#include <linux/syscore_ops.h>
#include <asm/apic.h>
@@ -816,6 +817,18 @@ out:
return ret;
}
+static void ibs_eilvt_setup(void)
+{
+ /*
+ * Force LVT offset assignment for family 10h: The offsets are
+ * not assigned by the BIOS for this family, so the OS is
+ * responsible for doing it. If the OS assignment fails, fall
+ * back to BIOS settings and try to setup this.
+ */
+ if (boot_cpu_data.x86 == 0x10)
+ force_ibs_eilvt_setup();
+}
+
static inline int get_ibs_lvt_offset(void)
{
u64 val;
@@ -851,6 +864,36 @@ static void clear_APIC_ibs(void *dummy)
setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
+#ifdef CONFIG_PM
+
+static int perf_ibs_suspend(void)
+{
+ clear_APIC_ibs(NULL);
+ return 0;
+}
+
+static void perf_ibs_resume(void)
+{
+ ibs_eilvt_setup();
+ setup_APIC_ibs(NULL);
+}
+
+static struct syscore_ops perf_ibs_syscore_ops = {
+ .resume = perf_ibs_resume,
+ .suspend = perf_ibs_suspend,
+};
+
+static void perf_ibs_pm_init(void)
+{
+ register_syscore_ops(&perf_ibs_syscore_ops);
+}
+
+#else
+
+static inline void perf_ibs_pm_init(void) { }
+
+#endif
+
static int
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
@@ -877,18 +920,12 @@ static __init int amd_ibs_init(void)
if (!caps)
return -ENODEV; /* ibs not supported by the cpu */
- /*
- * Force LVT offset assignment for family 10h: The offsets are
- * not assigned by the BIOS for this family, so the OS is
- * responsible for doing it. If the OS assignment fails, fall
- * back to BIOS settings and try to setup this.
- */
- if (boot_cpu_data.x86 == 0x10)
- force_ibs_eilvt_setup();
+ ibs_eilvt_setup();
if (!ibs_eilvt_valid())
goto out;
+ perf_ibs_pm_init();
get_online_cpus();
ibs_caps = caps;
/* make ibs_caps visible to other cpus: */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
new file mode 100644
index 0000000..5ad35ad
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -0,0 +1,679 @@
+/*
+ * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters
+ * Copyright (C) 2013 Google, Inc., Stephane Eranian
+ *
+ * Intel RAPL interface is specified in the IA-32 Manual Vol3b
+ * section 14.7.1 (September 2013)
+ *
+ * RAPL provides more controls than just reporting energy consumption;
+ * however, here we only expose the 3 energy consumption free running
+ * counters (pp0, pkg, dram).
+ *
+ * Each of those counters increments in a power unit defined by the
+ * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules
+ * but it can vary.
+ *
+ * Counter to rapl events mappings:
+ *
+ * pp0 counter: consumption of all physical cores (power plane 0)
+ * event: rapl_energy_cores
+ * perf code: 0x1
+ *
+ * pkg counter: consumption of the whole processor package
+ * event: rapl_energy_pkg
+ * perf code: 0x2
+ *
+ * dram counter: consumption of the dram domain (servers only)
+ * event: rapl_energy_dram
+ * perf code: 0x3
+ *
+ * pp1 counter: consumption of the built-in GPU domain (client only)
+ * event: rapl_energy_gpu
+ * perf code: 0x4
+ *
+ * We manage those counters as free running (read-only). They may be
+ * used simultaneously by other tools, such as turbostat.
+ *
+ * The events only support system-wide mode counting. There is no
+ * sampling support because it does not make sense and is not
+ * supported by the RAPL hardware.
+ *
+ * Because we want to avoid floating-point operations in the kernel,
+ * the events are all reported in fixed point arithmetic (32.32).
+ * Tools must adjust the counts to convert them to Watts using
+ * the duration of the measurement. Tools may use a function such as
+ * ldexp(raw_count, -32);
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/perf_event.h>
+#include <asm/cpu_device_id.h>
+#include "perf_event.h"
+
+/*
+ * RAPL energy status counters
+ */
+#define RAPL_IDX_PP0_NRG_STAT 0 /* all cores */
+#define INTEL_RAPL_PP0 0x1 /* pseudo-encoding */
+#define RAPL_IDX_PKG_NRG_STAT 1 /* entire package */
+#define INTEL_RAPL_PKG 0x2 /* pseudo-encoding */
+#define RAPL_IDX_RAM_NRG_STAT 2 /* DRAM */
+#define INTEL_RAPL_RAM 0x3 /* pseudo-encoding */
+#define RAPL_IDX_PP1_NRG_STAT 3 /* built-in GPU */
+#define INTEL_RAPL_PP1 0x4 /* pseudo-encoding */
+
+/* Clients have PP0, PKG, PP1 (GPU) */
+#define RAPL_IDX_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\
+ 1<<RAPL_IDX_PKG_NRG_STAT|\
+ 1<<RAPL_IDX_PP1_NRG_STAT)
+
+/* Servers have PP0, PKG, RAM */
+#define RAPL_IDX_SRV (1<<RAPL_IDX_PP0_NRG_STAT|\
+ 1<<RAPL_IDX_PKG_NRG_STAT|\
+ 1<<RAPL_IDX_RAM_NRG_STAT)
+
+/*
+ * event code: LSB 8 bits, passed in attr->config
+ * any other bit is reserved
+ */
+#define RAPL_EVENT_MASK 0xFFULL
+
+#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \
+static ssize_t __rapl_##_var##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *page) \
+{ \
+ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
+ return sprintf(page, _format "\n"); \
+} \
+static struct kobj_attribute format_attr_##_var = \
+ __ATTR(_name, 0444, __rapl_##_var##_show, NULL)
+
+#define RAPL_EVENT_DESC(_name, _config) \
+{ \
+ .attr = __ATTR(_name, 0444, rapl_event_show, NULL), \
+ .config = _config, \
+}
+
+#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */
+
+struct rapl_pmu {
+ spinlock_t lock;
+ int hw_unit; /* 1/2^hw_unit Joule */
+ int n_active; /* number of active events */
+ struct list_head active_list;
+ struct pmu *pmu; /* pointer to rapl_pmu_class */
+ ktime_t timer_interval; /* in ktime_t unit */
+ struct hrtimer hrtimer;
+};
+
+static struct pmu rapl_pmu_class;
+static cpumask_t rapl_cpu_mask;
+static int rapl_cntr_mask;
+
+static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu);
+static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu_to_free);
+
+static inline u64 rapl_read_counter(struct perf_event *event)
+{
+ u64 raw;
+ rdmsrl(event->hw.event_base, raw);
+ return raw;
+}
+
+static inline u64 rapl_scale(u64 v)
+{
+ /*
+ * scale delta to smallest unit (1/2^32)
+ * users must then scale back: count * 2^-32 to get Joules,
+ * e.g. with ldexp(count, -32).
+ * Watts = Joules/Time delta
+ */
+ return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit);
+}
+
+static u64 rapl_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev_raw_count, new_raw_count;
+ s64 delta, sdelta;
+ int shift = RAPL_CNTR_WIDTH;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ rdmsrl(event->hw.event_base, new_raw_count);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count) {
+ cpu_relax();
+ goto again;
+ }
+
+ /*
+ * Now we have the new raw value and have updated the prev
+ * timestamp already. We can now calculate the elapsed delta
+ * (event-)time and add that to the generic event.
+ *
+ * Careful, not all hw sign-extends above the physical width
+ * of the count.
+ */
+ delta = (new_raw_count << shift) - (prev_raw_count << shift);
+ delta >>= shift;
+
+ sdelta = rapl_scale(delta);
+
+ local64_add(sdelta, &event->count);
+
+ return new_raw_count;
+}
+
+static void rapl_start_hrtimer(struct rapl_pmu *pmu)
+{
+ __hrtimer_start_range_ns(&pmu->hrtimer,
+ pmu->timer_interval, 0,
+ HRTIMER_MODE_REL_PINNED, 0);
+}
+
+static void rapl_stop_hrtimer(struct rapl_pmu *pmu)
+{
+ hrtimer_cancel(&pmu->hrtimer);
+}
+
+static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
+{
+ struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ struct perf_event *event;
+ unsigned long flags;
+
+ if (!pmu->n_active)
+ return HRTIMER_NORESTART;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ list_for_each_entry(event, &pmu->active_list, active_entry) {
+ rapl_event_update(event);
+ }
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ hrtimer_forward_now(hrtimer, pmu->timer_interval);
+
+ return HRTIMER_RESTART;
+}
+
+static void rapl_hrtimer_init(struct rapl_pmu *pmu)
+{
+ struct hrtimer *hr = &pmu->hrtimer;
+
+ hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hr->function = rapl_hrtimer_handle;
+}
+
+static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
+ struct perf_event *event)
+{
+ if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+ return;
+
+ event->hw.state = 0;
+
+ list_add_tail(&event->active_entry, &pmu->active_list);
+
+ local64_set(&event->hw.prev_count, rapl_read_counter(event));
+
+ pmu->n_active++;
+ if (pmu->n_active == 1)
+ rapl_start_hrtimer(pmu);
+}
+
+static void rapl_pmu_event_start(struct perf_event *event, int mode)
+{
+ struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ __rapl_pmu_event_start(pmu, event);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+}
+
+static void rapl_pmu_event_stop(struct perf_event *event, int mode)
+{
+ struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ /* mark event as deactivated and stopped */
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ WARN_ON_ONCE(pmu->n_active <= 0);
+ pmu->n_active--;
+ if (pmu->n_active == 0)
+ rapl_stop_hrtimer(pmu);
+
+ list_del(&event->active_entry);
+
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+ }
+
+ /* check if update of sw counter is necessary */
+ if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+ /*
+ * Drain the remaining delta count out of an event
+ * that we are disabling:
+ */
+ rapl_event_update(event);
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+}
+
+static int rapl_pmu_event_add(struct perf_event *event, int mode)
+{
+ struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (mode & PERF_EF_START)
+ __rapl_pmu_event_start(pmu, event);
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static void rapl_pmu_event_del(struct perf_event *event, int flags)
+{
+ rapl_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+static int rapl_pmu_event_init(struct perf_event *event)
+{
+ u64 cfg = event->attr.config & RAPL_EVENT_MASK;
+ int bit, msr, ret = 0;
+
+ /* only look at RAPL events */
+ if (event->attr.type != rapl_pmu_class.type)
+ return -ENOENT;
+
+ /* check only supported bits are set */
+ if (event->attr.config & ~RAPL_EVENT_MASK)
+ return -EINVAL;
+
+ /*
+ * check event is known (determines counter)
+ */
+ switch (cfg) {
+ case INTEL_RAPL_PP0:
+ bit = RAPL_IDX_PP0_NRG_STAT;
+ msr = MSR_PP0_ENERGY_STATUS;
+ break;
+ case INTEL_RAPL_PKG:
+ bit = RAPL_IDX_PKG_NRG_STAT;
+ msr = MSR_PKG_ENERGY_STATUS;
+ break;
+ case INTEL_RAPL_RAM:
+ bit = RAPL_IDX_RAM_NRG_STAT;
+ msr = MSR_DRAM_ENERGY_STATUS;
+ break;
+ case INTEL_RAPL_PP1:
+ bit = RAPL_IDX_PP1_NRG_STAT;
+ msr = MSR_PP1_ENERGY_STATUS;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* check event supported */
+ if (!(rapl_cntr_mask & (1 << bit)))
+ return -EINVAL;
+
+ /* unsupported modes and filters */
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest ||
+ event->attr.sample_period) /* no sampling */
+ return -EINVAL;
+
+ /* must be done before validate_group */
+ event->hw.event_base = msr;
+ event->hw.config = cfg;
+ event->hw.idx = bit;
+
+ return ret;
+}
+
+static void rapl_pmu_event_read(struct perf_event *event)
+{
+ rapl_event_update(event);
+}
+
+static ssize_t rapl_get_attr_cpumask(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &rapl_cpu_mask);
+
+ buf[n++] = '\n';
+ buf[n] = '\0';
+ return n;
+}
+
+static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);
+
+static struct attribute *rapl_pmu_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group rapl_pmu_attr_group = {
+ .attrs = rapl_pmu_attrs,
+};
+
+EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
+EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02");
+EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03");
+EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04");
+
+EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
+EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules");
+EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules");
+EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules");
+
+/*
+ * we compute in 0.23 nJ increments regardless of the RAPL_POWER_UNIT MSR value
+ */
+EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
+EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
+EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
+EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
+
+static struct attribute *rapl_events_srv_attr[] = {
+ EVENT_PTR(rapl_cores),
+ EVENT_PTR(rapl_pkg),
+ EVENT_PTR(rapl_ram),
+
+ EVENT_PTR(rapl_cores_unit),
+ EVENT_PTR(rapl_pkg_unit),
+ EVENT_PTR(rapl_ram_unit),
+
+ EVENT_PTR(rapl_cores_scale),
+ EVENT_PTR(rapl_pkg_scale),
+ EVENT_PTR(rapl_ram_scale),
+ NULL,
+};
+
+static struct attribute *rapl_events_cln_attr[] = {
+ EVENT_PTR(rapl_cores),
+ EVENT_PTR(rapl_pkg),
+ EVENT_PTR(rapl_gpu),
+
+ EVENT_PTR(rapl_cores_unit),
+ EVENT_PTR(rapl_pkg_unit),
+ EVENT_PTR(rapl_gpu_unit),
+
+ EVENT_PTR(rapl_cores_scale),
+ EVENT_PTR(rapl_pkg_scale),
+ EVENT_PTR(rapl_gpu_scale),
+ NULL,
+};
+
+static struct attribute_group rapl_pmu_events_group = {
+ .name = "events",
+ .attrs = NULL, /* patched at runtime */
+};
+
+DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
+static struct attribute *rapl_formats_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group rapl_pmu_format_group = {
+ .name = "format",
+ .attrs = rapl_formats_attr,
+};
+
+const struct attribute_group *rapl_attr_groups[] = {
+ &rapl_pmu_attr_group,
+ &rapl_pmu_format_group,
+ &rapl_pmu_events_group,
+ NULL,
+};
+
+static struct pmu rapl_pmu_class = {
+ .attr_groups = rapl_attr_groups,
+ .task_ctx_nr = perf_invalid_context, /* system-wide only */
+ .event_init = rapl_pmu_event_init,
+ .add = rapl_pmu_event_add, /* must have */
+ .del = rapl_pmu_event_del, /* must have */
+ .start = rapl_pmu_event_start,
+ .stop = rapl_pmu_event_stop,
+ .read = rapl_pmu_event_read,
+};
+
+static void rapl_cpu_exit(int cpu)
+{
+ struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
+ int i, phys_id = topology_physical_package_id(cpu);
+ int target = -1;
+
+ /* find a new cpu on same package */
+ for_each_online_cpu(i) {
+ if (i == cpu)
+ continue;
+ if (phys_id == topology_physical_package_id(i)) {
+ target = i;
+ break;
+ }
+ }
+ /*
+ * clear cpu from the cpumask: if it was set there and another
+ * cpu remains on the same package, move the designated reader
+ * to that cpu
+ */
+ if (cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask) && target >= 0)
+ cpumask_set_cpu(target, &rapl_cpu_mask);
+
+ WARN_ON(cpumask_empty(&rapl_cpu_mask));
+ /*
+ * migrate events and context to new cpu
+ */
+ if (target >= 0)
+ perf_pmu_migrate_context(pmu->pmu, cpu, target);
+
+ /* cancel overflow polling timer for CPU */
+ rapl_stop_hrtimer(pmu);
+}
+
+static void rapl_cpu_init(int cpu)
+{
+ int i, phys_id = topology_physical_package_id(cpu);
+
+ /* check if phys_id is already covered */
+ for_each_cpu(i, &rapl_cpu_mask) {
+ if (phys_id == topology_physical_package_id(i))
+ return;
+ }
+ /* was not found, so add it */
+ cpumask_set_cpu(cpu, &rapl_cpu_mask);
+}
+
+static int rapl_cpu_prepare(int cpu)
+{
+ struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
+ int phys_id = topology_physical_package_id(cpu);
+ u64 ms;
+
+ if (pmu)
+ return 0;
+
+ if (phys_id < 0)
+ return -1;
+
+ pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+ if (!pmu)
+ return -1;
+
+ spin_lock_init(&pmu->lock);
+
+ INIT_LIST_HEAD(&pmu->active_list);
+
+ /*
+ * grab power unit as: 1/2^unit Joules
+ *
+ * we cache in local PMU instance
+ */
+ rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit);
+ pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL;
+ pmu->pmu = &rapl_pmu_class;
+
+ /*
+ * use reference of 200W for scaling the timeout
+ * to avoid missing counter overflows.
+ * 200W = 200 Joules/sec
+ * divide interval by 2 to avoid lockstep (2 * 100)
+ * if hw unit is 32, then we use 2 ms 1/200/2
+ */
+ if (pmu->hw_unit < 32)
+ ms = (1000 / (2 * 100)) * (1ULL << (32 - pmu->hw_unit - 1));
+ else
+ ms = 2;
+
+ pmu->timer_interval = ms_to_ktime(ms);
+
+ rapl_hrtimer_init(pmu);
+
+ /* set RAPL pmu for this cpu for now */
+ per_cpu(rapl_pmu, cpu) = pmu;
+ per_cpu(rapl_pmu_to_free, cpu) = NULL;
+
+ return 0;
+}
+
+static void rapl_cpu_kfree(int cpu)
+{
+ struct rapl_pmu *pmu = per_cpu(rapl_pmu_to_free, cpu);
+
+ kfree(pmu);
+
+ per_cpu(rapl_pmu_to_free, cpu) = NULL;
+}
+
+static int rapl_cpu_dying(int cpu)
+{
+ struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
+
+ if (!pmu)
+ return 0;
+
+ per_cpu(rapl_pmu, cpu) = NULL;
+
+ per_cpu(rapl_pmu_to_free, cpu) = pmu;
+
+ return 0;
+}
+
+static int rapl_cpu_notifier(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (long)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ rapl_cpu_prepare(cpu);
+ break;
+ case CPU_STARTING:
+ rapl_cpu_init(cpu);
+ break;
+ case CPU_UP_CANCELED:
+ case CPU_DYING:
+ rapl_cpu_dying(cpu);
+ break;
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ rapl_cpu_kfree(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ rapl_cpu_exit(cpu);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static const struct x86_cpu_id rapl_cpu_match[] = {
+ [0] = { .vendor = X86_VENDOR_INTEL, .family = 6 },
+ [1] = {},
+};
+
+static int __init rapl_pmu_init(void)
+{
+ struct rapl_pmu *pmu;
+ int cpu, ret;
+
+ /*
+ * check for Intel processor family 6
+ */
+ if (!x86_match_cpu(rapl_cpu_match))
+ return 0;
+
+ /* check supported CPU */
+ switch (boot_cpu_data.x86_model) {
+ case 42: /* Sandy Bridge */
+ case 58: /* Ivy Bridge */
+ case 60: /* Haswell */
+ case 69: /* Haswell-Celeron */
+ rapl_cntr_mask = RAPL_IDX_CLN;
+ rapl_pmu_events_group.attrs = rapl_events_cln_attr;
+ break;
+ case 45: /* Sandy Bridge-EP */
+ case 62: /* IvyTown */
+ rapl_cntr_mask = RAPL_IDX_SRV;
+ rapl_pmu_events_group.attrs = rapl_events_srv_attr;
+ break;
+
+ default:
+ /* unsupported */
+ return 0;
+ }
+ get_online_cpus();
+
+ for_each_online_cpu(cpu) {
+ rapl_cpu_prepare(cpu);
+ rapl_cpu_init(cpu);
+ }
+
+ perf_cpu_notifier(rapl_cpu_notifier);
+
+ ret = perf_pmu_register(&rapl_pmu_class, "power", -1);
+ if (WARN_ON(ret)) {
+ pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret);
+ put_online_cpus();
+ return -1;
+ }
+
+ pmu = __get_cpu_var(rapl_pmu);
+
+ pr_info("RAPL PMU detected, hw unit 2^-%d Joules,"
+ " API unit is 2^-32 Joules,"
+ " %d fixed counters"
+ " %llu ms ovfl timer\n",
+ pmu->hw_unit,
+ hweight32(rapl_cntr_mask),
+ ktime_to_ms(pmu->timer_interval));
+
+ put_online_cpus();
+
+ return 0;
+}
+device_initcall(rapl_pmu_init);
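As the header comment of the new file states, counts are exported in fixed-point 32.32 Joules (one LSB = 2^-32 J, matching the advertised .scale of 2.3283064365386962890625e-10), and tools derive Watts from the measurement duration. A hedged userspace sketch of that conversion; the count and duration are hypothetical values obtained through the perf API:

#include <math.h>

static double rapl_joules(unsigned long long count)
{
        return ldexp((double)count, -32);       /* 1 LSB = 2^-32 Joules */
}

static double rapl_avg_watts(unsigned long long count, double seconds)
{
        return rapl_joules(count) / seconds;    /* Watts = Joules / time */
}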
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index 88db010..384df51 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -31,20 +31,6 @@ static int __init x86_rdrand_setup(char *s)
}
__setup("nordrand", x86_rdrand_setup);
-/* We can't use arch_get_random_long() here since alternatives haven't run */
-static inline int rdrand_long(unsigned long *v)
-{
- int ok;
- asm volatile("1: " RDRAND_LONG "\n\t"
- "jc 2f\n\t"
- "decl %0\n\t"
- "jnz 1b\n\t"
- "2:"
- : "=r" (ok), "=a" (*v)
- : "0" (RDRAND_RETRY_LOOPS));
- return ok;
-}
-
/*
* Force a reseed cycle; we are architecturally guaranteed a reseed
* after no more than 512 128-bit chunks of random data. This also
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index aa0430d..3fa0e5a 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -1,6 +1,5 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/init.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include "cpu.h"
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index 75c5ad5..ef9c2a0 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -1,5 +1,4 @@
#include <linux/kernel.h>
-#include <linux/init.h>
#include <asm/processor.h>
#include "cpu.h"
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 18677a9..a57902e 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -7,7 +7,6 @@
*
*/
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
index 5d3fe8d..f6dfd93 100644
--- a/arch/x86/kernel/doublefault.c
+++ b/arch/x86/kernel/doublefault.c
@@ -1,6 +1,5 @@
#include <linux/mm.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 51e2988..a2a4f46 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1082,7 +1082,7 @@ ENTRY(ftrace_caller)
pushl $0 /* Pass NULL as regs pointer */
movl 4*4(%esp), %eax
movl 0x4(%ebp), %edx
- leal function_trace_op, %ecx
+ movl function_trace_op, %ecx
subl $MCOUNT_INSN_SIZE, %eax
.globl ftrace_call
@@ -1140,7 +1140,7 @@ ENTRY(ftrace_regs_caller)
movl 12*4(%esp), %eax /* Load ip (1st parameter) */
subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
- leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
+ movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
pushl %esp /* Save pt_regs as 4th parameter */
GLOBAL(ftrace_regs_call)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e21b078..1e96c36 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -88,7 +88,7 @@ END(function_hook)
MCOUNT_SAVE_FRAME \skip
/* Load the ftrace_ops into the 3rd parameter */
- leaq function_trace_op, %rdx
+ movq function_trace_op(%rip), %rdx
/* Load ip into the first parameter */
movq RIP(%rsp), %rdi
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index f66ff16..a67b47c 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -38,7 +38,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/hw_breakpoint.h>
diff --git a/arch/x86/kernel/iosf_mbi.c b/arch/x86/kernel/iosf_mbi.c
new file mode 100644
index 0000000..c3aae66
--- /dev/null
+++ b/arch/x86/kernel/iosf_mbi.c
@@ -0,0 +1,226 @@
+/*
+ * IOSF-SB MailBox Interface Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ * The IOSF-SB is a fabric bus available on Atom-based SoCs that uses a
+ * mailbox interface (MBI) to communicate with multiple devices. This
+ * driver implements access to this interface for those platforms that can
+ * enumerate the device using PCI.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+
+#include <asm/iosf_mbi.h>
+
+static DEFINE_SPINLOCK(iosf_mbi_lock);
+
+static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
+{
+ return (op << 24) | (port << 16) | (offset << 8) | MBI_ENABLE;
+}
+
+static struct pci_dev *mbi_pdev; /* one mbi device */
+
+static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
+{
+ int result;
+
+ if (!mbi_pdev)
+ return -ENODEV;
+
+ if (mcrx) {
+ result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
+ mcrx);
+ if (result < 0)
+ goto fail_read;
+ }
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
+ if (result < 0)
+ goto fail_read;
+
+ result = pci_read_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
+ if (result < 0)
+ goto fail_read;
+
+ return 0;
+
+fail_read:
+ dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+ return result;
+}
+
+static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
+{
+ int result;
+
+ if (!mbi_pdev)
+ return -ENODEV;
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
+ if (result < 0)
+ goto fail_write;
+
+ if (mcrx) {
+ result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
+ mcrx);
+ if (result < 0)
+ goto fail_write;
+ }
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
+ if (result < 0)
+ goto fail_write;
+
+ return 0;
+
+fail_write:
+ dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+ return result;
+}
+
+int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
+{
+ u32 mcr, mcrx;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+ ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_read);
+
+int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
+{
+ u32 mcr, mcrx;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+ ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_write);
+
+int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
+{
+ u32 mcr, mcrx;
+ u32 value;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+
+ /* Read current mdr value */
+ ret = iosf_mbi_pci_read_mdr(mcrx, mcr & MBI_RD_MASK, &value);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+ return ret;
+ }
+
+ /* Apply mask */
+ value &= ~mask;
+ mdr &= mask;
+ value |= mdr;
+
+ /* Write back */
+ ret = iosf_mbi_pci_write_mdr(mcrx, mcr | MBI_WR_MASK, value);
+
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_modify);
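
A hypothetical consumer of the three exported helpers above might look like the sketch below. The port and opcode values are placeholders; real callers would use the definitions from <asm/iosf_mbi.h> that this file already includes, and BIT() comes from <linux/bitops.h>.

#define MY_UNIT_PORT   0x04	/* assumed sideband port of the target unit */
#define MY_WRITE_OP    0x07	/* assumed "register write" opcode */

/* set a single bit in a register behind the mailbox (illustrative only) */
static int my_unit_set_bit(u32 reg, unsigned int bit)
{
	/* read-modify-write done under the driver's internal spinlock */
	return iosf_mbi_modify(MY_UNIT_PORT, MY_WRITE_OP, reg,
			       BIT(bit), BIT(bit));
}
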
+
+static int iosf_mbi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *unused)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ mbi_pdev = pci_dev_get(pdev);
+ return 0;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(iosf_mbi_pci_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F00) },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
+
+static struct pci_driver iosf_mbi_pci_driver = {
+ .name = "iosf_mbi_pci",
+ .probe = iosf_mbi_probe,
+ .id_table = iosf_mbi_pci_ids,
+};
+
+static int __init iosf_mbi_init(void)
+{
+ return pci_register_driver(&iosf_mbi_pci_driver);
+}
+
+static void __exit iosf_mbi_exit(void)
+{
+ pci_unregister_driver(&iosf_mbi_pci_driver);
+ if (mbi_pdev) {
+ pci_dev_put(mbi_pdev);
+ mbi_pdev = NULL;
+ }
+}
+
+module_init(iosf_mbi_init);
+module_exit(iosf_mbi_exit);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("IOSF Mailbox Interface accessor");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 22d0687..dbb6087 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -193,9 +193,13 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
if (!handle_irq(irq, regs)) {
ack_APIC_irq();
- if (printk_ratelimit())
- pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
- __func__, smp_processor_id(), vector, irq);
+ if (irq != VECTOR_RETRIGGERED) {
+ pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
+ __func__, smp_processor_id(),
+ vector, irq);
+ } else {
+ __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+ }
}
irq_exit();
@@ -262,6 +266,76 @@ __visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * This cpu is going to be removed and its vectors migrated to the remaining
+ * online cpus. Check to see if there are enough vectors in the remaining cpus.
+ * This function is protected by stop_machine().
+ */
+int check_irq_vectors_for_cpu_disable(void)
+{
+ int irq, cpu;
+ unsigned int this_cpu, vector, this_count, count;
+ struct irq_desc *desc;
+ struct irq_data *data;
+ struct cpumask affinity_new, online_new;
+
+ this_cpu = smp_processor_id();
+ cpumask_copy(&online_new, cpu_online_mask);
+ cpu_clear(this_cpu, online_new);
+
+ this_count = 0;
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+ irq = __this_cpu_read(vector_irq[vector]);
+ if (irq >= 0) {
+ desc = irq_to_desc(irq);
+ data = irq_desc_get_irq_data(desc);
+ cpumask_copy(&affinity_new, data->affinity);
+ cpu_clear(this_cpu, affinity_new);
+
+ /* Do not count inactive or per-cpu irqs. */
+ if (!irq_has_action(irq) || irqd_is_per_cpu(data))
+ continue;
+
+ /*
+ * A single irq may be mapped to multiple
+ * cpus' vector_irq[] (for example IOAPIC cluster
+ * mode). In this case we have two
+ * possibilities:
+ *
+ * 1) the resulting affinity mask is empty; that is,
+ * the down'd cpu is the last cpu in the irq's
+ * affinity mask, or
+ *
+ * 2) the resulting affinity mask is no longer
+ * a subset of the online cpus but the affinity
+ * mask is not zero; that is, the down'd cpu is the
+ * last online cpu in a user-set affinity mask.
+ */
+ if (cpumask_empty(&affinity_new) ||
+ !cpumask_subset(&affinity_new, &online_new))
+ this_count++;
+ }
+ }
+
+ count = 0;
+ for_each_online_cpu(cpu) {
+ if (cpu == this_cpu)
+ continue;
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
+ vector++) {
+ if (per_cpu(vector_irq, cpu)[vector] < 0)
+ count++;
+ }
+ }
+
+ if (count < this_count) {
+ pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
+ this_cpu, this_count, count);
+ return -ERANGE;
+ }
+ return 0;
+}
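
The two cases enumerated in the comment above can be illustrated with plain bitmasks. This stand-alone sketch is not kernel code and the masks are made up, but it mirrors the cpumask checks done in the loop:

#include <stdbool.h>
#include <stdio.h>

static bool needs_migration(unsigned online, unsigned affinity, int dying_cpu)
{
	unsigned online_new   = online   & ~(1u << dying_cpu);
	unsigned affinity_new = affinity & ~(1u << dying_cpu);

	/* case 1: the dying cpu was the last cpu in the affinity mask */
	if (affinity_new == 0)
		return true;

	/* case 2: what is left of the affinity mask is not covered by online cpus */
	return (affinity_new & online_new) != affinity_new;
}

int main(void)
{
	/* cpus {0,1,2} online, cpu 2 going down */
	printf("%d\n", needs_migration(0x7, 0x4, 2));  /* affinity {2}   -> 1 */
	printf("%d\n", needs_migration(0x7, 0x6, 2));  /* affinity {1,2} -> 0 */
	printf("%d\n", needs_migration(0x7, 0x24, 2)); /* affinity {2,5} -> 1 */
	return 0;
}
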
+
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
@@ -344,7 +418,7 @@ void fixup_irqs(void)
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
unsigned int irr;
- if (__this_cpu_read(vector_irq[vector]) < 0)
+ if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED)
continue;
irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
@@ -355,11 +429,14 @@ void fixup_irqs(void)
data = irq_desc_get_irq_data(desc);
chip = irq_data_get_irq_chip(data);
raw_spin_lock(&desc->lock);
- if (chip->irq_retrigger)
+ if (chip->irq_retrigger) {
chip->irq_retrigger(data);
+ __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
+ }
raw_spin_unlock(&desc->lock);
}
- __this_cpu_write(vector_irq[vector], -1);
+ if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
+ __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
}
}
#endif
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index a2a1fbc..7f50156 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -52,7 +52,7 @@ static struct irqaction irq2 = {
};
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
- [0 ... NR_VECTORS - 1] = -1,
+ [0 ... NR_VECTORS - 1] = VECTOR_UNDEFINED,
};
int vector_used_by_percpu_irq(unsigned int vector)
@@ -60,7 +60,7 @@ int vector_used_by_percpu_irq(unsigned int vector)
int cpu;
for_each_online_cpu(cpu) {
- if (per_cpu(vector_irq, cpu)[vector] != -1)
+ if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNDEFINED)
return 1;
}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 836f832..7ec1d5f 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -39,7 +39,6 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/kgdb.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <linux/hw_breakpoint.h>
diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
new file mode 100644
index 0000000..c2bedae
--- /dev/null
+++ b/arch/x86/kernel/ksysfs.c
@@ -0,0 +1,340 @@
+/*
+ * Architecture specific sysfs attributes in /sys/kernel
+ *
+ * Copyright (C) 2007, Intel Corp.
+ * Huang Ying <ying.huang@intel.com>
+ * Copyright (C) 2013 Red Hat, Inc.
+ * Dave Young <dyoung@redhat.com>
+ *
+ * This file is released under the GPLv2
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/setup.h>
+
+static ssize_t version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%04x\n", boot_params.hdr.version);
+}
+
+static struct kobj_attribute boot_params_version_attr = __ATTR_RO(version);
+
+static ssize_t boot_params_data_read(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ memcpy(buf, (void *)&boot_params + off, count);
+ return count;
+}
+
+static struct bin_attribute boot_params_data_attr = {
+ .attr = {
+ .name = "data",
+ .mode = S_IRUGO,
+ },
+ .read = boot_params_data_read,
+ .size = sizeof(boot_params),
+};
+
+static struct attribute *boot_params_version_attrs[] = {
+ &boot_params_version_attr.attr,
+ NULL,
+};
+
+static struct bin_attribute *boot_params_data_attrs[] = {
+ &boot_params_data_attr,
+ NULL,
+};
+
+static struct attribute_group boot_params_attr_group = {
+ .attrs = boot_params_version_attrs,
+ .bin_attrs = boot_params_data_attrs,
+};
+
+static int kobj_to_setup_data_nr(struct kobject *kobj, int *nr)
+{
+ const char *name;
+
+ name = kobject_name(kobj);
+ return kstrtoint(name, 10, nr);
+}
+
+static int get_setup_data_paddr(int nr, u64 *paddr)
+{
+ int i = 0;
+ struct setup_data *data;
+ u64 pa_data = boot_params.hdr.setup_data;
+
+ while (pa_data) {
+ if (nr == i) {
+ *paddr = pa_data;
+ return 0;
+ }
+ data = ioremap_cache(pa_data, sizeof(*data));
+ if (!data)
+ return -ENOMEM;
+
+ pa_data = data->next;
+ iounmap(data);
+ i++;
+ }
+ return -EINVAL;
+}
+
+static int __init get_setup_data_size(int nr, size_t *size)
+{
+ int i = 0;
+ struct setup_data *data;
+ u64 pa_data = boot_params.hdr.setup_data;
+
+ while (pa_data) {
+ data = ioremap_cache(pa_data, sizeof(*data));
+ if (!data)
+ return -ENOMEM;
+ if (nr == i) {
+ *size = data->len;
+ iounmap(data);
+ return 0;
+ }
+
+ pa_data = data->next;
+ iounmap(data);
+ i++;
+ }
+ return -EINVAL;
+}
+
+static ssize_t type_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int nr, ret;
+ u64 paddr;
+ struct setup_data *data;
+
+ ret = kobj_to_setup_data_nr(kobj, &nr);
+ if (ret)
+ return ret;
+
+ ret = get_setup_data_paddr(nr, &paddr);
+ if (ret)
+ return ret;
+ data = ioremap_cache(paddr, sizeof(*data));
+ if (!data)
+ return -ENOMEM;
+
+ ret = sprintf(buf, "0x%x\n", data->type);
+ iounmap(data);
+ return ret;
+}
+
+static ssize_t setup_data_data_read(struct file *fp,
+ struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf,
+ loff_t off, size_t count)
+{
+ int nr, ret = 0;
+ u64 paddr;
+ struct setup_data *data;
+ void *p;
+
+ ret = kobj_to_setup_data_nr(kobj, &nr);
+ if (ret)
+ return ret;
+
+ ret = get_setup_data_paddr(nr, &paddr);
+ if (ret)
+ return ret;
+ data = ioremap_cache(paddr, sizeof(*data));
+ if (!data)
+ return -ENOMEM;
+
+ if (off > data->len) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (count > data->len - off)
+ count = data->len - off;
+
+ if (!count)
+ goto out;
+
+ ret = count;
+ p = ioremap_cache(paddr + sizeof(*data), data->len);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(buf, p + off, count);
+ iounmap(p);
+out:
+ iounmap(data);
+ return ret;
+}
+
+static struct kobj_attribute type_attr = __ATTR_RO(type);
+
+static struct bin_attribute data_attr = {
+ .attr = {
+ .name = "data",
+ .mode = S_IRUGO,
+ },
+ .read = setup_data_data_read,
+};
+
+static struct attribute *setup_data_type_attrs[] = {
+ &type_attr.attr,
+ NULL,
+};
+
+static struct bin_attribute *setup_data_data_attrs[] = {
+ &data_attr,
+ NULL,
+};
+
+static struct attribute_group setup_data_attr_group = {
+ .attrs = setup_data_type_attrs,
+ .bin_attrs = setup_data_data_attrs,
+};
+
+static int __init create_setup_data_node(struct kobject *parent,
+ struct kobject **kobjp, int nr)
+{
+ int ret = 0;
+ size_t size;
+ struct kobject *kobj;
+ char name[16]; /* should be enough for setup_data node numbers */
+ snprintf(name, 16, "%d", nr);
+
+ kobj = kobject_create_and_add(name, parent);
+ if (!kobj)
+ return -ENOMEM;
+
+ ret = get_setup_data_size(nr, &size);
+ if (ret)
+ goto out_kobj;
+
+ data_attr.size = size;
+ ret = sysfs_create_group(kobj, &setup_data_attr_group);
+ if (ret)
+ goto out_kobj;
+ *kobjp = kobj;
+
+ return 0;
+out_kobj:
+ kobject_put(kobj);
+ return ret;
+}
+
+static void __init cleanup_setup_data_node(struct kobject *kobj)
+{
+ sysfs_remove_group(kobj, &setup_data_attr_group);
+ kobject_put(kobj);
+}
+
+static int __init get_setup_data_total_num(u64 pa_data, int *nr)
+{
+ int ret = 0;
+ struct setup_data *data;
+
+ *nr = 0;
+ while (pa_data) {
+ *nr += 1;
+ data = ioremap_cache(pa_data, sizeof(*data));
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pa_data = data->next;
+ iounmap(data);
+ }
+
+out:
+ return ret;
+}
+
+static int __init create_setup_data_nodes(struct kobject *parent)
+{
+ struct kobject *setup_data_kobj, **kobjp;
+ u64 pa_data;
+ int i, j, nr, ret = 0;
+
+ pa_data = boot_params.hdr.setup_data;
+ if (!pa_data)
+ return 0;
+
+ setup_data_kobj = kobject_create_and_add("setup_data", parent);
+ if (!setup_data_kobj) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = get_setup_data_total_num(pa_data, &nr);
+ if (ret)
+ goto out_setup_data_kobj;
+
+ kobjp = kmalloc(sizeof(*kobjp) * nr, GFP_KERNEL);
+ if (!kobjp) {
+ ret = -ENOMEM;
+ goto out_setup_data_kobj;
+ }
+
+ for (i = 0; i < nr; i++) {
+ ret = create_setup_data_node(setup_data_kobj, kobjp + i, i);
+ if (ret)
+ goto out_clean_nodes;
+ }
+
+ kfree(kobjp);
+ return 0;
+
+out_clean_nodes:
+ for (j = i - 1; j > 0; j--)
+ cleanup_setup_data_node(*(kobjp + j));
+ kfree(kobjp);
+out_setup_data_kobj:
+ kobject_put(setup_data_kobj);
+out:
+ return ret;
+}
+
+static int __init boot_params_ksysfs_init(void)
+{
+ int ret;
+ struct kobject *boot_params_kobj;
+
+ boot_params_kobj = kobject_create_and_add("boot_params",
+ kernel_kobj);
+ if (!boot_params_kobj) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sysfs_create_group(boot_params_kobj, &boot_params_attr_group);
+ if (ret)
+ goto out_boot_params_kobj;
+
+ ret = create_setup_data_nodes(boot_params_kobj);
+ if (ret)
+ goto out_create_group;
+
+ return 0;
+out_create_group:
+ sysfs_remove_group(boot_params_kobj, &boot_params_attr_group);
+out_boot_params_kobj:
+ kobject_put(boot_params_kobj);
+out:
+ return ret;
+}
+
+arch_initcall(boot_params_ksysfs_init);
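
A small userspace sketch of reading the interface created above; the paths follow the kobjects registered in this file (/sys/kernel/boot_params/...), and error handling is trimmed for brevity.

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f;

	f = fopen("/sys/kernel/boot_params/version", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("boot protocol version: %s", buf);  /* e.g. 0x020c */
		fclose(f);
	}

	f = fopen("/sys/kernel/boot_params/setup_data/0/type", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("setup_data[0] type: %s", buf);
		fclose(f);
	}
	return 0;
}
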
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 5b19e4d..1667b1d 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -9,7 +9,6 @@
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/suspend.h>
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 871be4a..da15918 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -3,7 +3,6 @@
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
-#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/mm.h>
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 6f1236c..0de43e9 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -24,7 +24,6 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
-#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index cb233bc..06853e6 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -295,6 +295,8 @@ static void __init reserve_brk(void)
_brk_start = 0;
}
+u64 relocated_ramdisk;
+
#ifdef CONFIG_BLK_DEV_INITRD
static u64 __init get_ramdisk_image(void)
@@ -321,25 +323,24 @@ static void __init relocate_initrd(void)
u64 ramdisk_image = get_ramdisk_image();
u64 ramdisk_size = get_ramdisk_size();
u64 area_size = PAGE_ALIGN(ramdisk_size);
- u64 ramdisk_here;
unsigned long slop, clen, mapaddr;
char *p, *q;
/* We need to move the initrd down into directly mapped mem */
- ramdisk_here = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
- area_size, PAGE_SIZE);
+ relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
+ area_size, PAGE_SIZE);
- if (!ramdisk_here)
+ if (!relocated_ramdisk)
panic("Cannot find place for new RAMDISK of size %lld\n",
- ramdisk_size);
+ ramdisk_size);
/* Note: this includes all the mem currently occupied by
the initrd, we rely on that fact to keep the data intact. */
- memblock_reserve(ramdisk_here, area_size);
- initrd_start = ramdisk_here + PAGE_OFFSET;
+ memblock_reserve(relocated_ramdisk, area_size);
+ initrd_start = relocated_ramdisk + PAGE_OFFSET;
initrd_end = initrd_start + ramdisk_size;
printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
- ramdisk_here, ramdisk_here + ramdisk_size - 1);
+ relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
q = (char *)initrd_start;
@@ -363,7 +364,7 @@ static void __init relocate_initrd(void)
printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
" [mem %#010llx-%#010llx]\n",
ramdisk_image, ramdisk_image + ramdisk_size - 1,
- ramdisk_here, ramdisk_here + ramdisk_size - 1);
+ relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}
static void __init early_reserve_initrd(void)
@@ -447,6 +448,9 @@ static void __init parse_setup_data(void)
case SETUP_DTB:
add_dtb(pa_data);
break;
+ case SETUP_EFI:
+ parse_efi_setup(pa_data, data_len);
+ break;
default:
break;
}
@@ -824,6 +828,20 @@ static void __init trim_low_memory_range(void)
}
/*
+ * Dump out kernel offset information on panic.
+ */
+static int
+dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
+{
+ pr_emerg("Kernel Offset: 0x%lx from 0x%lx "
+ "(relocation range: 0x%lx-0x%lx)\n",
+ (unsigned long)&_text - __START_KERNEL, __START_KERNEL,
+ __START_KERNEL_map, MODULES_VADDR-1);
+
+ return 0;
+}
+
+/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
* for initialization. Note, the efi init code path is determined by the
@@ -924,8 +942,6 @@ void __init setup_arch(char **cmdline_p)
iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
setup_memory_map();
parse_setup_data();
- /* update the e820_saved too */
- e820_reserve_setup_data();
copy_edd();
@@ -987,6 +1003,8 @@ void __init setup_arch(char **cmdline_p)
early_dump_pci_devices();
#endif
+ /* update the e820_saved too */
+ e820_reserve_setup_data();
finish_e820_parsing();
if (efi_enabled(EFI_BOOT))
@@ -1248,3 +1266,15 @@ void __init i386_reserve_resources(void)
}
#endif /* CONFIG_X86_32 */
+
+static struct notifier_block kernel_offset_notifier = {
+ .notifier_call = dump_kernel_offset
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kernel_offset_notifier);
+ return 0;
+}
+__initcall(register_kernel_offset_dumper);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 85dc05a..a32da80 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1312,6 +1312,12 @@ void cpu_disable_common(void)
int native_cpu_disable(void)
{
+ int ret;
+
+ ret = check_irq_vectors_for_cpu_disable();
+ if (ret)
+ return ret;
+
clear_local_APIC();
cpu_disable_common();
@@ -1417,7 +1423,9 @@ static inline void mwait_play_dead(void)
* The WBINVD is insufficient due to the spurious-wakeup
* case where we return around the loop.
*/
+ mb();
clflush(mwait_ptr);
+ mb();
__monitor(mwait_ptr, 0, 0);
mb();
__mwait(eax, 0);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index b857ed8..57409f6 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -211,21 +211,17 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
exception_exit(prev_state); \
}
-DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
- regs->ip)
-DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
-DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
- regs->ip)
-DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
- coprocessor_segment_overrun)
-DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
-DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
+DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip )
+DO_ERROR (X86_TRAP_OF, SIGSEGV, "overflow", overflow )
+DO_ERROR (X86_TRAP_BR, SIGSEGV, "bounds", bounds )
+DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip )
+DO_ERROR (X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun )
+DO_ERROR (X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS )
+DO_ERROR (X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present )
#ifdef CONFIG_X86_32
-DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
+DO_ERROR (X86_TRAP_SS, SIGBUS, "stack segment", stack_segment )
#endif
-DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
- BUS_ADRALN, 0)
+DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0 )
#ifdef CONFIG_X86_64
/* Runs on IST stack */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 930e5d4..a3acbac 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -11,6 +11,7 @@
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
+#include <linux/static_key.h>
#include <asm/hpet.h>
#include <asm/timer.h>
@@ -37,13 +38,244 @@ static int __read_mostly tsc_unstable;
erroneous rdtsc usage on !cpu_has_tsc processors */
static int __read_mostly tsc_disabled = -1;
+static struct static_key __use_tsc = STATIC_KEY_INIT;
+
int tsc_clocksource_reliable;
+
+/*
+ * Use a ring-buffer like data structure, where a writer advances the head by
+ * writing a new data entry and a reader advances the tail when it observes a
+ * new entry.
+ *
+ * Writers are made to wait on readers until there's space to write a new
+ * entry.
+ *
+ * This means that we can always use an {offset, mul} pair to compute a ns
+ * value that is 'roughly' in the right direction, even if we're writing a new
+ * {offset, mul} pair during the clock read.
+ *
+ * The down-side is that we can no longer guarantee strict monotonicity
+ * (assuming the TSC was monotonic to begin with), because while we compute the
+ * intersection point of the two clock slopes and make sure the time is
+ * continuous at the point of switching, we can no longer guarantee a reader is
+ * strictly before or after the switch point.
+ *
+ * It does mean a reader no longer needs to disable IRQs in order to avoid
+ * CPU-Freq updates messing with its readings, and similarly an NMI reader will
+ * no longer run the risk of hitting half-written state.
+ */
+
+struct cyc2ns {
+ struct cyc2ns_data data[2]; /* 0 + 2*24 = 48 */
+ struct cyc2ns_data *head; /* 48 + 8 = 56 */
+ struct cyc2ns_data *tail; /* 56 + 8 = 64 */
+}; /* exactly fits one cacheline */
+
+static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
+
+struct cyc2ns_data *cyc2ns_read_begin(void)
+{
+ struct cyc2ns_data *head;
+
+ preempt_disable();
+
+ head = this_cpu_read(cyc2ns.head);
+ /*
+ * Ensure we observe the entry when we observe the pointer to it.
+ * Matches the wmb from cyc2ns_write_end().
+ */
+ smp_read_barrier_depends();
+ head->__count++;
+ barrier();
+
+ return head;
+}
+
+void cyc2ns_read_end(struct cyc2ns_data *head)
+{
+ barrier();
+ /*
+ * If we're the outermost nested read, update the tail pointer
+ * when we're done. This notifies possible pending writers
+ * that we've observed the head pointer and that the other
+ * entry is now free.
+ */
+ if (!--head->__count) {
+ /*
+ * x86-TSO does not reorder writes with older reads;
+ * therefore once this write becomes visible to another
+ * cpu, we must be finished reading the cyc2ns_data.
+ *
+ * matches with cyc2ns_write_begin().
+ */
+ this_cpu_write(cyc2ns.tail, head);
+ }
+ preempt_enable();
+}
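
For reference, the read-side protocol above is meant to be used like the following sketch (a hypothetical caller; the in-tree user, cycles_2_ns() below, open-codes the same sequence to save a few instructions):

static u64 example_tsc_to_ns(u64 cyc)
{
	struct cyc2ns_data *data = cyc2ns_read_begin();
	u64 ns;

	/* a consistent {offset, mul, shift} triplet is guaranteed here */
	ns = data->cyc2ns_offset +
	     mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);

	cyc2ns_read_end(data);
	return ns;
}
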
+
+/*
+ * Begin writing a new @data entry for @cpu.
+ *
+ * Assumes some sort of write side lock; currently 'provided' by the assumption
+ * that cpufreq will call its notifiers sequentially.
+ */
+static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
+{
+ struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
+ struct cyc2ns_data *data = c2n->data;
+
+ if (data == c2n->head)
+ data++;
+
+ /* XXX send an IPI to @cpu in order to guarantee a read? */
+
+ /*
+ * When we observe the tail write from cyc2ns_read_end(),
+ * the cpu must be done with that entry and it's safe
+ * to start writing to it.
+ */
+ while (c2n->tail == data)
+ cpu_relax();
+
+ return data;
+}
+
+static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
+{
+ struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
+
+ /*
+ * Ensure the @data writes are visible before we publish the
+ * entry. Matches the data dependency in cyc2ns_read_begin().
+ */
+ smp_wmb();
+
+ ACCESS_ONCE(c2n->head) = data;
+}
+
+/*
+ * Accelerators for sched_clock()
+ * convert from cycles(64bits) => nanoseconds (64bits)
+ * basic equation:
+ * ns = cycles / (freq / ns_per_sec)
+ * ns = cycles * (ns_per_sec / freq)
+ * ns = cycles * (10^9 / (cpu_khz * 10^3))
+ * ns = cycles * (10^6 / cpu_khz)
+ *
+ * Then we use scaling math (suggested by george@mvista.com) to get:
+ * ns = cycles * (10^6 * SC / cpu_khz) / SC
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * And since SC is a constant power of two, we can convert the div
+ * into a shift.
+ *
+ * We can use khz divisor instead of mhz to keep a better precision, since
+ * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ * (mathieu.desnoyers@polymtl.ca)
+ *
+ * -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
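
To make the scaling concrete, here is a small stand-alone check with assumed numbers (a 2 GHz part, i.e. cpu_khz = 2000000); the kernel itself uses mul_u64_u32_shr() so that very large cycle counts cannot overflow the naive multiply used below:

#include <stdio.h>

int main(void)
{
	unsigned long long cpu_khz = 2000000ULL;                /* assumed 2 GHz */
	unsigned long long mul = (1000000ULL << 10) / cpu_khz;  /* NSEC_PER_MSEC << SC */
	unsigned long long cycles = 3000000000ULL;              /* 1.5 s worth at 2 GHz */

	printf("cyc2ns_mul = %llu\n", mul);                     /* 512 */
	printf("ns         = %llu\n", (cycles * mul) >> 10);    /* 1500000000 */
	return 0;
}
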
+
+static void cyc2ns_data_init(struct cyc2ns_data *data)
+{
+ data->cyc2ns_mul = 1U << CYC2NS_SCALE_FACTOR;
+ data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
+ data->cyc2ns_offset = 0;
+ data->__count = 0;
+}
+
+static void cyc2ns_init(int cpu)
+{
+ struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
+
+ cyc2ns_data_init(&c2n->data[0]);
+ cyc2ns_data_init(&c2n->data[1]);
+
+ c2n->head = c2n->data;
+ c2n->tail = c2n->data;
+}
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+ struct cyc2ns_data *data, *tail;
+ unsigned long long ns;
+
+ /*
+ * See cyc2ns_read_*() for details; replicated in order to avoid
+ * an extra few instructions that came with the abstraction.
+ * Notably, it allows us to do the __count and tail update
+ * dance only when it's actually needed.
+ */
+
+ preempt_disable();
+ data = this_cpu_read(cyc2ns.head);
+ tail = this_cpu_read(cyc2ns.tail);
+
+ if (likely(data == tail)) {
+ ns = data->cyc2ns_offset;
+ ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
+ } else {
+ data->__count++;
+
+ barrier();
+
+ ns = data->cyc2ns_offset;
+ ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
+
+ barrier();
+
+ if (!--data->__count)
+ this_cpu_write(cyc2ns.tail, data);
+ }
+ preempt_enable();
+
+ return ns;
+}
+
+/* XXX surely we already have this someplace in the kernel?! */
+#define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d))
+
+static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
+{
+ unsigned long long tsc_now, ns_now;
+ struct cyc2ns_data *data;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sched_clock_idle_sleep_event();
+
+ if (!cpu_khz)
+ goto done;
+
+ data = cyc2ns_write_begin(cpu);
+
+ rdtscll(tsc_now);
+ ns_now = cycles_2_ns(tsc_now);
+
+ /*
+ * Compute a new multiplier as per the above comment and ensure our
+ * time function is continuous; see the comment near struct
+ * cyc2ns_data.
+ */
+ data->cyc2ns_mul = DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz);
+ data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
+ data->cyc2ns_offset = ns_now -
+ mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
+
+ cyc2ns_write_end(cpu, data);
+
+done:
+ sched_clock_idle_wakeup_event(0);
+ local_irq_restore(flags);
+}
/*
* Scheduler clock - returns current time in nanosec units.
*/
u64 native_sched_clock(void)
{
- u64 this_offset;
+ u64 tsc_now;
/*
* Fall back to jiffies if there's no TSC available:
@@ -53,16 +285,16 @@ u64 native_sched_clock(void)
* very important for it to be as fast as the platform
* can achieve it. )
*/
- if (unlikely(tsc_disabled)) {
+ if (!static_key_false(&__use_tsc)) {
/* No locking but a rare wrong value is not a big deal: */
return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}
/* read the Time Stamp Counter: */
- rdtscll(this_offset);
+ rdtscll(tsc_now);
/* return the value in ns */
- return __cycles_2_ns(this_offset);
+ return cycles_2_ns(tsc_now);
}
/* We need to define a real function for sched_clock, to override the
@@ -419,6 +651,16 @@ unsigned long native_calibrate_tsc(void)
unsigned long flags, latch, ms, fast_calibrate;
int hpet = is_hpet_enabled(), i, loopmin;
+ /* Calibrate TSC using MSR for Intel Atom SoCs */
+ local_irq_save(flags);
+ i = try_msr_calibrate_tsc(&fast_calibrate);
+ local_irq_restore(flags);
+ if (i >= 0) {
+ if (i == 0)
+ pr_warn("Fast TSC calibration using MSR failed\n");
+ return fast_calibrate;
+ }
+
local_irq_save(flags);
fast_calibrate = quick_pit_calibrate();
local_irq_restore(flags);
@@ -589,61 +831,11 @@ int recalibrate_cpu_khz(void)
EXPORT_SYMBOL(recalibrate_cpu_khz);
-/* Accelerators for sched_clock()
- * convert from cycles(64bits) => nanoseconds (64bits)
- * basic equation:
- * ns = cycles / (freq / ns_per_sec)
- * ns = cycles * (ns_per_sec / freq)
- * ns = cycles * (10^9 / (cpu_khz * 10^3))
- * ns = cycles * (10^6 / cpu_khz)
- *
- * Then we use scaling math (suggested by george@mvista.com) to get:
- * ns = cycles * (10^6 * SC / cpu_khz) / SC
- * ns = cycles * cyc2ns_scale / SC
- *
- * And since SC is a constant power of two, we can convert the div
- * into a shift.
- *
- * We can use khz divisor instead of mhz to keep a better precision, since
- * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
- * (mathieu.desnoyers@polymtl.ca)
- *
- * -johnstul@us.ibm.com "math is hard, lets go shopping!"
- */
-
-DEFINE_PER_CPU(unsigned long, cyc2ns);
-DEFINE_PER_CPU(unsigned long long, cyc2ns_offset);
-
-static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
-{
- unsigned long long tsc_now, ns_now, *offset;
- unsigned long flags, *scale;
-
- local_irq_save(flags);
- sched_clock_idle_sleep_event();
-
- scale = &per_cpu(cyc2ns, cpu);
- offset = &per_cpu(cyc2ns_offset, cpu);
-
- rdtscll(tsc_now);
- ns_now = __cycles_2_ns(tsc_now);
-
- if (cpu_khz) {
- *scale = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) +
- cpu_khz / 2) / cpu_khz;
- *offset = ns_now - mult_frac(tsc_now, *scale,
- (1UL << CYC2NS_SCALE_FACTOR));
- }
-
- sched_clock_idle_wakeup_event(0);
- local_irq_restore(flags);
-}
-
static unsigned long long cyc2ns_suspend;
void tsc_save_sched_clock_state(void)
{
- if (!sched_clock_stable)
+ if (!sched_clock_stable())
return;
cyc2ns_suspend = sched_clock();
@@ -663,16 +855,26 @@ void tsc_restore_sched_clock_state(void)
unsigned long flags;
int cpu;
- if (!sched_clock_stable)
+ if (!sched_clock_stable())
return;
local_irq_save(flags);
- __this_cpu_write(cyc2ns_offset, 0);
+ /*
+ * We're coming out of suspend and there's no concurrency yet; don't
+ * bother being nice about the RCU stuff, just write to both
+ * data fields.
+ */
+
+ this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
+ this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);
+
offset = cyc2ns_suspend - sched_clock();
- for_each_possible_cpu(cpu)
- per_cpu(cyc2ns_offset, cpu) = offset;
+ for_each_possible_cpu(cpu) {
+ per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
+ per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
+ }
local_irq_restore(flags);
}
@@ -795,7 +997,7 @@ void mark_tsc_unstable(char *reason)
{
if (!tsc_unstable) {
tsc_unstable = 1;
- sched_clock_stable = 0;
+ clear_sched_clock_stable();
disable_sched_clock_irqtime();
pr_info("Marking TSC unstable due to %s\n", reason);
/* Change only the rating, when not registered */
@@ -995,14 +1197,18 @@ void __init tsc_init(void)
* speed as the bootup CPU. (cpufreq notifiers will fix this
* up if their speed diverges)
*/
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+ cyc2ns_init(cpu);
set_cyc2ns_scale(cpu_khz, cpu);
+ }
if (tsc_disabled > 0)
return;
/* now allow native_sched_clock() to use rdtsc */
+
tsc_disabled = 0;
+ static_key_slow_inc(&__use_tsc);
if (!no_sched_irq_time)
enable_sched_clock_irqtime();
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
new file mode 100644
index 0000000..8b5434f
--- /dev/null
+++ b/arch/x86/kernel/tsc_msr.c
@@ -0,0 +1,127 @@
+/*
+ * tsc_msr.c - MSR based TSC calibration on Intel Atom SoC platforms.
+ *
+ * The TSC in Intel Atom SoCs runs at a constant rate which can be computed
+ * with this formula:
+ * <maximum core-clock to bus-clock ratio> * <maximum resolved frequency>
+ * See Intel 64 and IA-32 System Programming Guide sections 16.12 and 30.11.5
+ * for details.
+ * In particular, some Intel Atom SoCs don't have a PIT (i8254) or HPET, so
+ * MSR-based calibration is the only option.
+ *
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Author: Bin Gao <bin.gao@intel.com>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/kernel.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/apic.h>
+#include <asm/param.h>
+
+/* CPU reference clock frequency: in KHz */
+#define FREQ_83 83200
+#define FREQ_100 99840
+#define FREQ_133 133200
+#define FREQ_166 166400
+
+#define MAX_NUM_FREQS 8
+
+/*
+ * According to the Intel 64 and IA-32 System Programming Guide,
+ * if MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be
+ * read from MSR_PLATFORM_ID[12:8], otherwise from MSR_PERF_STAT[44:40].
+ * Unfortunately some Intel Atom SoCs aren't quite compliant with this,
+ * so we need to differentiate SoC families manually. This is what the
+ * msr_plat field does.
+ */
+struct freq_desc {
+ u8 x86_family; /* CPU family */
+ u8 x86_model; /* model */
+ u8 msr_plat; /* 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */
+ u32 freqs[MAX_NUM_FREQS];
+};
+
+static struct freq_desc freq_desc_tables[] = {
+ /* PNW */
+ { 6, 0x27, 0, { 0, 0, 0, 0, 0, FREQ_100, 0, FREQ_83 } },
+ /* CLV+ */
+ { 6, 0x35, 0, { 0, FREQ_133, 0, 0, 0, FREQ_100, 0, FREQ_83 } },
+ /* TNG */
+ { 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } },
+ /* VLV2 */
+ { 6, 0x37, 1, { 0, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
+ /* ANN */
+ { 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } },
+};
+
+static int match_cpu(u8 family, u8 model)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(freq_desc_tables); i++) {
+ if ((family == freq_desc_tables[i].x86_family) &&
+ (model == freq_desc_tables[i].x86_model))
+ return i;
+ }
+
+ return -1;
+}
+
+/* Map CPU reference clock freq ID(0-7) to CPU reference clock freq(KHz) */
+#define id_to_freq(cpu_index, freq_id) \
+ (freq_desc_tables[cpu_index].freqs[freq_id])
+
+/*
+ * Do MSR calibration only for known/supported CPUs.
+ * Return values:
+ * -1: CPU is unknown/unsupported for MSR based calibration
+ * 0: CPU is known/supported, but calibration failed
+ * 1: CPU is known/supported, and calibration succeeded
+ */
+int try_msr_calibrate_tsc(unsigned long *fast_calibrate)
+{
+ int cpu_index;
+ u32 lo, hi, ratio, freq_id, freq;
+
+ cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model);
+ if (cpu_index < 0)
+ return -1;
+
+ *fast_calibrate = 0;
+
+ if (freq_desc_tables[cpu_index].msr_plat) {
+ rdmsr(MSR_PLATFORM_INFO, lo, hi);
+ ratio = (lo >> 8) & 0x1f;
+ } else {
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ ratio = (hi >> 8) & 0x1f;
+ }
+ pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio);
+
+ if (!ratio)
+ return 0;
+
+ /* Get FSB FREQ ID */
+ rdmsr(MSR_FSB_FREQ, lo, hi);
+ freq_id = lo & 0x7;
+ freq = id_to_freq(cpu_index, freq_id);
+ pr_info("Resolved frequency ID: %u, frequency: %u KHz\n",
+ freq_id, freq);
+ if (!freq)
+ return 0;
+
+ /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
+ *fast_calibrate = freq * ratio;
+ pr_info("TSC runs at %lu KHz\n", *fast_calibrate);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ lapic_timer_frequency = (freq * 1000) / HZ;
+ pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency);
+#endif
+
+ return 1;
+}
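
Putting the formula into numbers: a hedged example with assumed register contents for a Valleyview-class part (msr_plat = 1), where the platform reports a bus ratio of 16 and an FSB_FREQ id of 3 (FREQ_166 in the table above):

#include <stdio.h>

int main(void)
{
	unsigned int ratio = 16;          /* assumed MSR_PLATFORM_INFO[12:8] */
	unsigned int bus_khz = 166400;    /* FREQ_166: FSB_FREQ id 3 on VLV2 */

	/* TSC frequency = maximum resolved bus ratio * maximum resolved freq */
	printf("TSC runs at %u KHz\n", ratio * bus_khz);   /* 2662400 KHz */
	return 0;
}
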
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index adfdf56..2648848 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -16,7 +16,6 @@
*/
#include <linux/spinlock.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 422fd82..a4b451c 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -562,6 +562,16 @@ static void __init xstate_enable_boot_cpu(void)
if (cpu_has_xsaveopt && eagerfpu != DISABLE)
eagerfpu = ENABLE;
+ if (pcntxt_mask & XSTATE_EAGER) {
+ if (eagerfpu == DISABLE) {
+ pr_err("eagerfpu not present, disabling some xstate features: 0x%llx\n",
+ pcntxt_mask & XSTATE_EAGER);
+ pcntxt_mask &= ~XSTATE_EAGER;
+ } else {
+ eagerfpu = ENABLE;
+ }
+ }
+
pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n",
pcntxt_mask, xstate_size);
}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index dec48bf..775702f 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1350,8 +1350,12 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
return;
}
+ if (!kvm_vcpu_is_bsp(apic->vcpu))
+ value &= ~MSR_IA32_APICBASE_BSP;
+ vcpu->arch.apic_base = value;
+
/* update jump label if enable bit changes */
- if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
+ if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
if (value & MSR_IA32_APICBASE_ENABLE)
static_key_slow_dec_deferred(&apic_hw_disabled);
else
@@ -1359,10 +1363,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
recalculate_apic_map(vcpu->kvm);
}
- if (!kvm_vcpu_is_bsp(apic->vcpu))
- value &= ~MSR_IA32_APICBASE_BSP;
-
- vcpu->arch.apic_base = value;
if ((old_value ^ value) & X2APIC_ENABLE) {
if (value & X2APIC_ENABLE) {
u32 id = kvm_apic_id(apic);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b2fe1c2..da7837e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8283,8 +8283,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
kvm_set_cr4(vcpu, vmcs12->host_cr4);
- if (nested_cpu_has_ept(vmcs12))
- nested_ept_uninit_mmu_context(vcpu);
+ nested_ept_uninit_mmu_context(vcpu);
kvm_set_cr3(vcpu, vmcs12->host_cr3);
kvm_mmu_reset_context(vcpu);
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index a30ca15..dee945d 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -186,7 +186,7 @@ ENTRY(copy_user_generic_unrolled)
30: shll $6,%ecx
addl %ecx,%edx
jmp 60f
-40: lea (%rdx,%rcx,8),%rdx
+40: leal (%rdx,%rcx,8),%edx
jmp 60f
50: movl %ecx,%edx
60: jmp copy_user_handle_tail /* ecx is zerorest also */
@@ -236,8 +236,6 @@ ENDPROC(copy_user_generic_unrolled)
ENTRY(copy_user_generic_string)
CFI_STARTPROC
ASM_STAC
- andl %edx,%edx
- jz 4f
cmpl $8,%edx
jb 2f /* less than 8 bytes, go to byte copy loop */
ALIGN_DESTINATION
@@ -249,12 +247,12 @@ ENTRY(copy_user_generic_string)
2: movl %edx,%ecx
3: rep
movsb
-4: xorl %eax,%eax
+ xorl %eax,%eax
ASM_CLAC
ret
.section .fixup,"ax"
-11: lea (%rdx,%rcx,8),%rcx
+11: leal (%rdx,%rcx,8),%ecx
12: movl %ecx,%edx /* ecx is zerorest also */
jmp copy_user_handle_tail
.previous
@@ -279,12 +277,10 @@ ENDPROC(copy_user_generic_string)
ENTRY(copy_user_enhanced_fast_string)
CFI_STARTPROC
ASM_STAC
- andl %edx,%edx
- jz 2f
movl %edx,%ecx
1: rep
movsb
-2: xorl %eax,%eax
+ xorl %eax,%eax
ASM_CLAC
ret
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 7c3bee6..39d6a3d 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -16,7 +16,6 @@
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <asm/processor.h>
#include <asm/delay.h>
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 533a85e..1a2be7c 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -346,8 +346,8 @@ AVXcode: 1
17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
18: Grp16 (1A)
19:
-1a:
-1b:
+1a: BNDCL Ev,Gv | BNDCU Ev,Gv | BNDMOV Gv,Ev | BNDLDX Gv,Ev,Gv
+1b: BNDCN Ev,Gv | BNDMOV Ev,Gv | BNDMK Gv,Ev | BNDSTX Ev,GV,Gv
1c:
1d:
1e:
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9ff85bb..9d591c8 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -641,6 +641,20 @@ no_context(struct pt_regs *regs, unsigned long error_code,
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs)) {
+ /*
+ * Any interrupt that takes a fault gets the fixup. This makes
+ * the below recursive fault logic only apply to faults from
+ * task context.
+ */
+ if (in_interrupt())
+ return;
+
+ /*
+ * Per the above we're !in_interrupt(), aka. task context.
+ *
+ * In this case we need to make sure we're not recursively
+ * faulting through the emulate_vsyscall() logic.
+ */
if (current_thread_info()->sig_on_uaccess_error && signal) {
tsk->thread.trap_nr = X86_TRAP_PF;
tsk->thread.error_code = error_code | PF_USER;
@@ -649,6 +663,10 @@ no_context(struct pt_regs *regs, unsigned long error_code,
/* XXX: hwpoison faults will set the wrong code. */
force_sig_info_fault(signal, si_code, address, tsk, 0);
}
+
+ /*
+ * Barring that, we can do the fixup and be happy.
+ */
return;
}
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 9d980d8..8c9f647 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -87,9 +87,7 @@ int pmd_huge_support(void)
}
#endif
-/* x86_64 also uses this file */
-
-#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
@@ -99,7 +97,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
info.flags = 0;
info.length = len;
- info.low_limit = TASK_UNMAPPED_BASE;
+ info.low_limit = current->mm->mmap_legacy_base;
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
@@ -172,8 +170,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return hugetlb_get_unmapped_area_topdown(file, addr, len,
pgoff, flags);
}
-
-#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
+#endif /* CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 4287f1f..5bdc543 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -806,6 +806,9 @@ void __init mem_init(void)
BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP
+#ifdef CONFIG_RANDOMIZE_BASE
+ BUILD_BUG_ON(CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE);
+#endif
#ifdef CONFIG_HIGHMEM
BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index e5d5e2c..637ab34 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -11,7 +11,6 @@
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 24aec58..c85da7b 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -211,9 +211,13 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
*/
nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
if (!nd_pa) {
- pr_err("Cannot find %zu bytes in node %d\n",
- nd_size, nid);
- return;
+ nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
+ MEMBLOCK_ALLOC_ACCESSIBLE);
+ if (!nd_pa) {
+ pr_err("Cannot find %zu bytes in node %d\n",
+ nd_size, nid);
+ return;
+ }
}
nd = __va(nd_pa);
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index d0b1773..461bc82 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -8,7 +8,6 @@
#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bb32480..b3b19f4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -30,6 +30,7 @@
*/
struct cpa_data {
unsigned long *vaddr;
+ pgd_t *pgd;
pgprot_t mask_set;
pgprot_t mask_clr;
int numpages;
@@ -322,17 +323,9 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
return prot;
}
-/*
- * Lookup the page table entry for a virtual address. Return a pointer
- * to the entry and the level of the mapping.
- *
- * Note: We return pud and pmd either when the entry is marked large
- * or when the present bit is not set. Otherwise we would return a
- * pointer to a nonexisting mapping.
- */
-pte_t *lookup_address(unsigned long address, unsigned int *level)
+static pte_t *__lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ unsigned int *level)
{
- pgd_t *pgd = pgd_offset_k(address);
pud_t *pud;
pmd_t *pmd;
@@ -361,8 +354,31 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
return pte_offset_kernel(pmd, address);
}
+
+/*
+ * Lookup the page table entry for a virtual address. Return a pointer
+ * to the entry and the level of the mapping.
+ *
+ * Note: We return pud and pmd either when the entry is marked large
+ * or when the present bit is not set. Otherwise we would return a
+ * pointer to a nonexisting mapping.
+ */
+pte_t *lookup_address(unsigned long address, unsigned int *level)
+{
+ return __lookup_address_in_pgd(pgd_offset_k(address), address, level);
+}
EXPORT_SYMBOL_GPL(lookup_address);
+static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
+ unsigned int *level)
+{
+ if (cpa->pgd)
+ return __lookup_address_in_pgd(cpa->pgd + pgd_index(address),
+ address, level);
+
+ return lookup_address(address, level);
+}
+
/*
* This is necessary because __pa() does not work on some
* kinds of memory, like vmalloc() or the alloc_remap()
@@ -437,7 +453,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* Check for races, another CPU might have split this page
* up already:
*/
- tmp = lookup_address(address, &level);
+ tmp = _lookup_address_cpa(cpa, address, &level);
if (tmp != kpte)
goto out_unlock;
@@ -543,7 +559,8 @@ out_unlock:
}
static int
-__split_large_page(pte_t *kpte, unsigned long address, struct page *base)
+__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
+ struct page *base)
{
pte_t *pbase = (pte_t *)page_address(base);
unsigned long pfn, pfninc = 1;
@@ -556,7 +573,7 @@ __split_large_page(pte_t *kpte, unsigned long address, struct page *base)
* Check for races, another CPU might have split this page
* up for us already:
*/
- tmp = lookup_address(address, &level);
+ tmp = _lookup_address_cpa(cpa, address, &level);
if (tmp != kpte) {
spin_unlock(&pgd_lock);
return 1;
@@ -632,7 +649,8 @@ __split_large_page(pte_t *kpte, unsigned long address, struct page *base)
return 0;
}
-static int split_large_page(pte_t *kpte, unsigned long address)
+static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
+ unsigned long address)
{
struct page *base;
@@ -644,15 +662,390 @@ static int split_large_page(pte_t *kpte, unsigned long address)
if (!base)
return -ENOMEM;
- if (__split_large_page(kpte, address, base))
+ if (__split_large_page(cpa, kpte, address, base))
__free_page(base);
return 0;
}
+static bool try_to_free_pte_page(pte_t *pte)
+{
+ int i;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ if (!pte_none(pte[i]))
+ return false;
+
+ free_page((unsigned long)pte);
+ return true;
+}
+
+static bool try_to_free_pmd_page(pmd_t *pmd)
+{
+ int i;
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ if (!pmd_none(pmd[i]))
+ return false;
+
+ free_page((unsigned long)pmd);
+ return true;
+}
+
+static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
+{
+ pte_t *pte = pte_offset_kernel(pmd, start);
+
+ while (start < end) {
+ set_pte(pte, __pte(0));
+
+ start += PAGE_SIZE;
+ pte++;
+ }
+
+ if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
+ pmd_clear(pmd);
+ return true;
+ }
+ return false;
+}
+
+static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
+ unsigned long start, unsigned long end)
+{
+ if (unmap_pte_range(pmd, start, end))
+ if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+ pud_clear(pud);
+}
+
+static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
+{
+ pmd_t *pmd = pmd_offset(pud, start);
+
+ /*
+ * Not on a 2MB page boundary?
+ */
+ if (start & (PMD_SIZE - 1)) {
+ unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
+ unsigned long pre_end = min_t(unsigned long, end, next_page);
+
+ __unmap_pmd_range(pud, pmd, start, pre_end);
+
+ start = pre_end;
+ pmd++;
+ }
+
+ /*
+ * Try to unmap in 2M chunks.
+ */
+ while (end - start >= PMD_SIZE) {
+ if (pmd_large(*pmd))
+ pmd_clear(pmd);
+ else
+ __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
+
+ start += PMD_SIZE;
+ pmd++;
+ }
+
+ /*
+ * 4K leftovers?
+ */
+ if (start < end)
+ return __unmap_pmd_range(pud, pmd, start, end);
+
+ /*
+ * Try again to free the PMD page if we haven't succeeded above.
+ */
+ if (!pud_none(*pud))
+ if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+ pud_clear(pud);
+}
+
+static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
+{
+ pud_t *pud = pud_offset(pgd, start);
+
+ /*
+ * Not on a GB page boundary?
+ */
+ if (start & (PUD_SIZE - 1)) {
+ unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
+ unsigned long pre_end = min_t(unsigned long, end, next_page);
+
+ unmap_pmd_range(pud, start, pre_end);
+
+ start = pre_end;
+ pud++;
+ }
+
+ /*
+ * Try to unmap in 1G chunks.
+ */
+ while (end - start >= PUD_SIZE) {
+
+ if (pud_large(*pud))
+ pud_clear(pud);
+ else
+ unmap_pmd_range(pud, start, start + PUD_SIZE);
+
+ start += PUD_SIZE;
+ pud++;
+ }
+
+ /*
+ * 2M leftovers?
+ */
+ if (start < end)
+ unmap_pmd_range(pud, start, end);
+
+ /*
+ * No need to try to free the PUD page because we'll free it in
+ * populate_pgd's error path
+ */
+}
+
+static int alloc_pte_page(pmd_t *pmd)
+{
+ pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+ if (!pte)
+ return -1;
+
+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
+ return 0;
+}
+
+static int alloc_pmd_page(pud_t *pud)
+{
+ pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+ if (!pmd)
+ return -1;
+
+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+ return 0;
+}
+
+static void populate_pte(struct cpa_data *cpa,
+ unsigned long start, unsigned long end,
+ unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
+{
+ pte_t *pte;
+
+ pte = pte_offset_kernel(pmd, start);
+
+ while (num_pages-- && start < end) {
+
+ /* deal with the NX bit */
+ if (!(pgprot_val(pgprot) & _PAGE_NX))
+ cpa->pfn &= ~_PAGE_NX;
+
+ set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
+
+ start += PAGE_SIZE;
+ cpa->pfn += PAGE_SIZE;
+ pte++;
+ }
+}
+
+static int populate_pmd(struct cpa_data *cpa,
+ unsigned long start, unsigned long end,
+ unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+{
+ unsigned int cur_pages = 0;
+ pmd_t *pmd;
+
+ /*
+ * Not on a 2M boundary?
+ */
+ if (start & (PMD_SIZE - 1)) {
+ unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
+ unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
+
+ pre_end = min_t(unsigned long, pre_end, next_page);
+ cur_pages = (pre_end - start) >> PAGE_SHIFT;
+ cur_pages = min_t(unsigned int, num_pages, cur_pages);
+
+ /*
+ * Need a PTE page?
+ */
+ pmd = pmd_offset(pud, start);
+ if (pmd_none(*pmd))
+ if (alloc_pte_page(pmd))
+ return -1;
+
+ populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
+
+ start = pre_end;
+ }
+
+ /*
+ * We mapped them all?
+ */
+ if (num_pages == cur_pages)
+ return cur_pages;
+
+ while (end - start >= PMD_SIZE) {
+
+ /*
+ * We cannot use a 1G page so allocate a PMD page if needed.
+ */
+ if (pud_none(*pud))
+ if (alloc_pmd_page(pud))
+ return -1;
+
+ pmd = pmd_offset(pud, start);
+
+ set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+
+ start += PMD_SIZE;
+ cpa->pfn += PMD_SIZE;
+ cur_pages += PMD_SIZE >> PAGE_SHIFT;
+ }
+
+ /*
+ * Map trailing 4K pages.
+ */
+ if (start < end) {
+ pmd = pmd_offset(pud, start);
+ if (pmd_none(*pmd))
+ if (alloc_pte_page(pmd))
+ return -1;
+
+ populate_pte(cpa, start, end, num_pages - cur_pages,
+ pmd, pgprot);
+ }
+ return num_pages;
+}
+
+static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
+ pgprot_t pgprot)
+{
+ pud_t *pud;
+ unsigned long end;
+ int cur_pages = 0;
+
+ end = start + (cpa->numpages << PAGE_SHIFT);
+
+ /*
+ * Not on a Gb page boundary? => map everything up to it with
+ * smaller pages.
+ */
+ if (start & (PUD_SIZE - 1)) {
+ unsigned long pre_end;
+ unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
+
+ pre_end = min_t(unsigned long, end, next_page);
+ cur_pages = (pre_end - start) >> PAGE_SHIFT;
+ cur_pages = min_t(int, (int)cpa->numpages, cur_pages);
+
+ pud = pud_offset(pgd, start);
+
+ /*
+ * Need a PMD page?
+ */
+ if (pud_none(*pud))
+ if (alloc_pmd_page(pud))
+ return -1;
+
+ cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
+ pud, pgprot);
+ if (cur_pages < 0)
+ return cur_pages;
+
+ start = pre_end;
+ }
+
+ /* We mapped them all? */
+ if (cpa->numpages == cur_pages)
+ return cur_pages;
+
+ pud = pud_offset(pgd, start);
+
+ /*
+ * Map everything starting from the Gb boundary, possibly with 1G pages
+ */
+ while (end - start >= PUD_SIZE) {
+ set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+
+ start += PUD_SIZE;
+ cpa->pfn += PUD_SIZE;
+ cur_pages += PUD_SIZE >> PAGE_SHIFT;
+ pud++;
+ }
+
+ /* Map trailing leftover */
+ if (start < end) {
+ int tmp;
+
+ pud = pud_offset(pgd, start);
+ if (pud_none(*pud))
+ if (alloc_pmd_page(pud))
+ return -1;
+
+ tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
+ pud, pgprot);
+ if (tmp < 0)
+ return cur_pages;
+
+ cur_pages += tmp;
+ }
+ return cur_pages;
+}
+
+/*
+ * Restrictions for the kernel page table do not necessarily apply when mapping
+ * into an alternate PGD.
+ */
+static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
+{
+ pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
+ bool allocd_pgd = false;
+ pgd_t *pgd_entry;
+ pud_t *pud = NULL; /* shut up gcc */
+ int ret;
+
+ pgd_entry = cpa->pgd + pgd_index(addr);
+
+ /*
+ * Allocate a PUD page and hand it down for mapping.
+ */
+ if (pgd_none(*pgd_entry)) {
+ pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+ if (!pud)
+ return -1;
+
+ set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
+ allocd_pgd = true;
+ }
+
+ pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
+ pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);
+
+ ret = populate_pud(cpa, addr, pgd_entry, pgprot);
+ if (ret < 0) {
+ unmap_pud_range(pgd_entry, addr,
+ addr + (cpa->numpages << PAGE_SHIFT));
+
+ if (allocd_pgd) {
+ /*
+ * If I allocated this PUD page, I can just as well
+ * free it in this error path.
+ */
+ pgd_clear(pgd_entry);
+ free_page((unsigned long)pud);
+ }
+ return ret;
+ }
+ cpa->numpages = ret;
+ return 0;
+}
+
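
The populate_pte()/populate_pmd()/populate_pud() helpers above all apply the same head/large-page/tail split: cover the unaligned head with smaller pages, cover as much of the middle as possible with 2M (or 1G) entries, then finish the tail with small pages again; unmap_pmd_range()/unmap_pud_range() walk ranges the same way in reverse. A standalone sketch of that split, reduced to plain arithmetic (illustrative only, not part of the patch; the PMD_SIZE value is the x86-64 one):

#include <stdio.h>

#define PMD_SIZE	0x200000UL
#define PMD_MASK	(~(PMD_SIZE - 1))

static void split_range(unsigned long start, unsigned long end)
{
	/* Head: anything up to the next 2M boundary gets 4K pages. */
	if (start & (PMD_SIZE - 1)) {
		unsigned long pre_end = (start + PMD_SIZE) & PMD_MASK;

		if (pre_end > end)
			pre_end = end;
		printf("4K head : %#lx - %#lx\n", start, pre_end);
		start = pre_end;
	}

	/* Body: whole 2M chunks get large pages. */
	while (end - start >= PMD_SIZE) {
		printf("2M page : %#lx - %#lx\n", start, start + PMD_SIZE);
		start += PMD_SIZE;
	}

	/* Tail: whatever is left goes back to 4K pages. */
	if (start < end)
		printf("4K tail : %#lx - %#lx\n", start, end);
}

int main(void)
{
	split_range(0x1ff000, 0x601000);
	return 0;
}

For the 0x1ff000-0x601000 range this prints one 4K head page, two 2M chunks and one 4K tail page, which mirrors how populate_pmd() walks such a range.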
static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
int primary)
{
+ if (cpa->pgd)
+ return populate_pgd(cpa, vaddr);
+
/*
* Ignore all non primary paths.
*/
@@ -697,7 +1090,7 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
else
address = *cpa->vaddr;
repeat:
- kpte = lookup_address(address, &level);
+ kpte = _lookup_address_cpa(cpa, address, &level);
if (!kpte)
return __cpa_process_fault(cpa, address, primary);
@@ -761,7 +1154,7 @@ repeat:
/*
* We have to split the large page:
*/
- err = split_large_page(kpte, address);
+ err = split_large_page(cpa, kpte, address);
if (!err) {
/*
* Do a global flush tlb after splitting the large page
@@ -910,6 +1303,8 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
int ret, cache, checkalias;
unsigned long baddr = 0;
+ memset(&cpa, 0, sizeof(cpa));
+
/*
* Check, if we are requested to change a not supported
* feature:
@@ -1356,6 +1751,7 @@ static int __set_pages_p(struct page *page, int numpages)
{
unsigned long tempaddr = (unsigned long) page_address(page);
struct cpa_data cpa = { .vaddr = &tempaddr,
+ .pgd = NULL,
.numpages = numpages,
.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
.mask_clr = __pgprot(0),
@@ -1374,6 +1770,7 @@ static int __set_pages_np(struct page *page, int numpages)
{
unsigned long tempaddr = (unsigned long) page_address(page);
struct cpa_data cpa = { .vaddr = &tempaddr,
+ .pgd = NULL,
.numpages = numpages,
.mask_set = __pgprot(0),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
@@ -1434,6 +1831,36 @@ bool kernel_page_present(struct page *page)
#endif /* CONFIG_DEBUG_PAGEALLOC */
+int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
+ unsigned numpages, unsigned long page_flags)
+{
+ int retval = -EINVAL;
+
+ struct cpa_data cpa = {
+ .vaddr = &address,
+ .pfn = pfn,
+ .pgd = pgd,
+ .numpages = numpages,
+ .mask_set = __pgprot(0),
+ .mask_clr = __pgprot(0),
+ .flags = 0,
+ };
+
+ if (!(__supported_pte_mask & _PAGE_NX))
+ goto out;
+
+ if (!(page_flags & _PAGE_NX))
+ cpa.mask_clr = __pgprot(_PAGE_NX);
+
+ cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
+
+ retval = __change_page_attr_set_clr(&cpa, 0);
+ __flush_tlb_all();
+
+out:
+ return retval;
+}
+
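
kernel_map_pages_in_pgd() is the hook the EFI code further down in this diff uses to populate the trampoline PGD. A minimal sketch of the calling convention (hypothetical caller, not code from the patch; the real in-tree user is __map_region() in arch/x86/platform/efi/efi_64.c below):

static void example_map_into_trampoline_pgd(u64 phys, unsigned long virt,
					    unsigned npages)
{
	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);

	/* page_flags == 0 asks for a plain present, cacheable mapping */
	if (kernel_map_pages_in_pgd(pgd, phys, virt, npages, 0))
		pr_warn("example: mapping %#llx -> %#lx failed\n", phys, virt);
}

Passing _PAGE_PCD in page_flags, as __map_region() does for non-WB regions, would make the mapping uncached instead.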
/*
* The testcases use internal knowledge of the implementation that shouldn't
* be exposed to the rest of the kernel. Include these directly here.
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 26328e8..4ed75dd 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -359,15 +359,21 @@ void bpf_jit_compile(struct sk_filter *fp)
EMIT2(0x89, 0xd0); /* mov %edx,%eax */
break;
case BPF_S_ALU_MOD_K: /* A %= K; */
+ if (K == 1) {
+ CLEAR_A();
+ break;
+ }
EMIT2(0x31, 0xd2); /* xor %edx,%edx */
EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
EMIT2(0xf7, 0xf1); /* div %ecx */
EMIT2(0x89, 0xd0); /* mov %edx,%eax */
break;
- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
- EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
- EMIT(K, 4);
- EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
+ case BPF_S_ALU_DIV_K: /* A /= K */
+ if (K == 1)
+ break;
+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
+ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
+ EMIT2(0xf7, 0xf1); /* div %ecx */
break;
case BPF_S_ALU_AND_X:
seen |= SEEN_XREG;
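
Some context for the BPF_S_ALU_DIV_K change above: the removed imul/shr sequence multiplied by a precomputed reciprocal and shifted right by 32, which only approximates integer division and can be off by one for certain divisor/dividend pairs, while K == 1 needs no code at all (A /= 1 leaves A unchanged and A %= 1 clears it, hence the new special cases). A standalone userspace sketch that hunts for such a mismatch (illustrative only; old_reciprocal_value() follows the historical ((2^32 + k - 1) / k) formula):

#include <stdint.h>
#include <stdio.h>

/* The old-style reciprocal: ceil(2^32 / k), valid for k > 1. */
static uint32_t old_reciprocal_value(uint32_t k)
{
	return (uint32_t)(((1ULL << 32) + k - 1) / k);
}

/* What the removed imul/shr pair computed: (a * r) >> 32. */
static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	for (uint32_t k = 2; k < 1000; k++) {
		uint32_t r = old_reciprocal_value(k);

		for (uint64_t a = UINT32_MAX; a > UINT32_MAX - 1000; a--) {
			uint32_t exact = (uint32_t)a / k;
			uint32_t approx = reciprocal_divide((uint32_t)a, r);

			if (exact != approx) {
				printf("%u / %u = %u, reciprocal gives %u\n",
				       (uint32_t)a, k, exact, approx);
				return 0;
			}
		}
	}
	printf("no mismatch in this small search window\n");
	return 0;
}

The first mismatch shows up almost immediately (4294967294 / 3 is 1431655764, but the reciprocal form yields 1431655765), which is the inaccuracy the switch to a real div avoids.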
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index b046e07..bca9e85 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -5,7 +5,6 @@
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/vgaarb.h>
#include <asm/pci_x86.h>
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 51384ca..84b9d67 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -31,6 +31,7 @@
#include <asm/pci_x86.h>
#include <asm/hw_irq.h>
#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
#define PCIE_CAP_OFFSET 0x100
@@ -219,7 +220,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
irq_attr.ioapic = mp_find_ioapic(dev->irq);
irq_attr.ioapic_pin = dev->irq;
irq_attr.trigger = 1; /* level */
- irq_attr.polarity = 1; /* active low */
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
+ irq_attr.polarity = 0; /* active high */
+ else
+ irq_attr.polarity = 1; /* active low */
io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr);
return 0;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index cceb813..d62ec87 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -12,6 +12,8 @@
* Bibo Mao <bibo.mao@intel.com>
* Chandramouli Narayanan <mouli@linux.intel.com>
* Huang Ying <ying.huang@intel.com>
+ * Copyright (C) 2013 SuSE Labs
+ * Borislav Petkov <bp@suse.de> - runtime services VA mapping
*
* Copied from efi_32.c to eliminate the duplicated code between EFI
* 32/64 support code. --ying 2007-10-26
@@ -51,7 +53,7 @@
#include <asm/x86_init.h>
#include <asm/rtc.h>
-#define EFI_DEBUG 1
+#define EFI_DEBUG
#define EFI_MIN_RESERVE 5120
@@ -74,6 +76,8 @@ static __initdata efi_config_table_type_t arch_tables[] = {
{NULL_GUID, NULL, NULL},
};
+u64 efi_setup; /* efi setup_data physical address */
+
/*
* Returns 1 if 'facility' is enabled, 0 otherwise.
*/
@@ -110,7 +114,6 @@ static int __init setup_storage_paranoia(char *arg)
}
early_param("efi_no_storage_paranoia", setup_storage_paranoia);
-
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
unsigned long flags;
@@ -398,9 +401,9 @@ int __init efi_memblock_x86_reserve_range(void)
return 0;
}
-#if EFI_DEBUG
static void __init print_efi_memmap(void)
{
+#ifdef EFI_DEBUG
efi_memory_desc_t *md;
void *p;
int i;
@@ -415,8 +418,8 @@ static void __init print_efi_memmap(void)
md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
}
-}
#endif /* EFI_DEBUG */
+}
void __init efi_reserve_boot_services(void)
{
@@ -436,7 +439,7 @@ void __init efi_reserve_boot_services(void)
* - Not within any part of the kernel
* - Not the bios reserved area
*/
- if ((start+size >= __pa_symbol(_text)
+ if ((start + size > __pa_symbol(_text)
&& start <= __pa_symbol(_end)) ||
!e820_all_mapped(start, start+size, E820_RAM) ||
memblock_is_region_reserved(start, size)) {
@@ -489,18 +492,27 @@ static int __init efi_systab_init(void *phys)
{
if (efi_enabled(EFI_64BIT)) {
efi_system_table_64_t *systab64;
+ struct efi_setup_data *data = NULL;
u64 tmp = 0;
+ if (efi_setup) {
+ data = early_memremap(efi_setup, sizeof(*data));
+ if (!data)
+ return -ENOMEM;
+ }
systab64 = early_ioremap((unsigned long)phys,
sizeof(*systab64));
if (systab64 == NULL) {
pr_err("Couldn't map the system table!\n");
+ if (data)
+ early_iounmap(data, sizeof(*data));
return -ENOMEM;
}
efi_systab.hdr = systab64->hdr;
- efi_systab.fw_vendor = systab64->fw_vendor;
- tmp |= systab64->fw_vendor;
+ efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor :
+ systab64->fw_vendor;
+ tmp |= data ? data->fw_vendor : systab64->fw_vendor;
efi_systab.fw_revision = systab64->fw_revision;
efi_systab.con_in_handle = systab64->con_in_handle;
tmp |= systab64->con_in_handle;
@@ -514,15 +526,20 @@ static int __init efi_systab_init(void *phys)
tmp |= systab64->stderr_handle;
efi_systab.stderr = systab64->stderr;
tmp |= systab64->stderr;
- efi_systab.runtime = (void *)(unsigned long)systab64->runtime;
- tmp |= systab64->runtime;
+ efi_systab.runtime = data ?
+ (void *)(unsigned long)data->runtime :
+ (void *)(unsigned long)systab64->runtime;
+ tmp |= data ? data->runtime : systab64->runtime;
efi_systab.boottime = (void *)(unsigned long)systab64->boottime;
tmp |= systab64->boottime;
efi_systab.nr_tables = systab64->nr_tables;
- efi_systab.tables = systab64->tables;
- tmp |= systab64->tables;
+ efi_systab.tables = data ? (unsigned long)data->tables :
+ systab64->tables;
+ tmp |= data ? data->tables : systab64->tables;
early_iounmap(systab64, sizeof(*systab64));
+ if (data)
+ early_iounmap(data, sizeof(*data));
#ifdef CONFIG_X86_32
if (tmp >> 32) {
pr_err("EFI data located above 4GB, disabling EFI.\n");
@@ -626,6 +643,62 @@ static int __init efi_memmap_init(void)
return 0;
}
+/*
+ * A number of config table entries get remapped to virtual addresses
+ * after entering EFI virtual mode. However, the kexec kernel requires
+ * their physical addresses, so we pass them via setup_data and
+ * correct those entries to their respective physical addresses here.
+ *
+ * Currently this only handles SMBIOS, which some firmware
+ * implementations require.
+ */
+static int __init efi_reuse_config(u64 tables, int nr_tables)
+{
+ int i, sz, ret = 0;
+ void *p, *tablep;
+ struct efi_setup_data *data;
+
+ if (!efi_setup)
+ return 0;
+
+ if (!efi_enabled(EFI_64BIT))
+ return 0;
+
+ data = early_memremap(efi_setup, sizeof(*data));
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!data->smbios)
+ goto out_memremap;
+
+ sz = sizeof(efi_config_table_64_t);
+
+ p = tablep = early_memremap(tables, nr_tables * sz);
+ if (!p) {
+ pr_err("Could not map Configuration table!\n");
+ ret = -ENOMEM;
+ goto out_memremap;
+ }
+
+ for (i = 0; i < efi.systab->nr_tables; i++) {
+ efi_guid_t guid;
+
+ guid = ((efi_config_table_64_t *)p)->guid;
+
+ if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID))
+ ((efi_config_table_64_t *)p)->table = data->smbios;
+ p += sz;
+ }
+ early_iounmap(tablep, nr_tables * sz);
+
+out_memremap:
+ early_iounmap(data, sizeof(*data));
+out:
+ return ret;
+}
+
void __init efi_init(void)
{
efi_char16_t *c16;
@@ -651,6 +724,10 @@ void __init efi_init(void)
set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
+ efi.config_table = (unsigned long)efi.systab->tables;
+ efi.fw_vendor = (unsigned long)efi.systab->fw_vendor;
+ efi.runtime = (unsigned long)efi.systab->runtime;
+
/*
* Show what we know for posterity
*/
@@ -667,6 +744,9 @@ void __init efi_init(void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff, vendor);
+ if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables))
+ return;
+
if (efi_config_init(arch_tables))
return;
@@ -684,15 +764,12 @@ void __init efi_init(void)
return;
set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
}
-
if (efi_memmap_init())
return;
set_bit(EFI_MEMMAP, &x86_efi_facility);
-#if EFI_DEBUG
print_efi_memmap();
-#endif
}
void __init efi_late_init(void)
@@ -741,36 +818,38 @@ void efi_memory_uc(u64 addr, unsigned long size)
set_memory_uc(addr, npages);
}
-/*
- * This function will switch the EFI runtime services to virtual mode.
- * Essentially, look through the EFI memmap and map every region that
- * has the runtime attribute bit set in its memory descriptor and update
- * that memory descriptor with the virtual address obtained from ioremap().
- * This enables the runtime services to be called without having to
- * thunk back into physical mode for every invocation.
- */
-void __init efi_enter_virtual_mode(void)
+void __init old_map_region(efi_memory_desc_t *md)
{
- efi_memory_desc_t *md, *prev_md = NULL;
- efi_status_t status;
+ u64 start_pfn, end_pfn, end;
unsigned long size;
- u64 end, systab, start_pfn, end_pfn;
- void *p, *va, *new_memmap = NULL;
- int count = 0;
+ void *va;
- efi.systab = NULL;
+ start_pfn = PFN_DOWN(md->phys_addr);
+ size = md->num_pages << PAGE_SHIFT;
+ end = md->phys_addr + size;
+ end_pfn = PFN_UP(end);
- /*
- * We don't do virtual mode, since we don't do runtime services, on
- * non-native EFI
- */
+ if (pfn_range_is_mapped(start_pfn, end_pfn)) {
+ va = __va(md->phys_addr);
- if (!efi_is_native()) {
- efi_unmap_memmap();
- return;
- }
+ if (!(md->attribute & EFI_MEMORY_WB))
+ efi_memory_uc((u64)(unsigned long)va, size);
+ } else
+ va = efi_ioremap(md->phys_addr, size,
+ md->type, md->attribute);
+
+ md->virt_addr = (u64) (unsigned long) va;
+ if (!va)
+ pr_err("ioremap of 0x%llX failed!\n",
+ (unsigned long long)md->phys_addr);
+}
+
+/* Merge contiguous regions of the same type and attribute */
+static void __init efi_merge_regions(void)
+{
+ void *p;
+ efi_memory_desc_t *md, *prev_md = NULL;
- /* Merge contiguous regions of the same type and attribute */
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
u64 prev_size;
md = p;
@@ -796,6 +875,77 @@ void __init efi_enter_virtual_mode(void)
}
prev_md = md;
}
+}
+
+static void __init get_systab_virt_addr(efi_memory_desc_t *md)
+{
+ unsigned long size;
+ u64 end, systab;
+
+ size = md->num_pages << EFI_PAGE_SHIFT;
+ end = md->phys_addr + size;
+ systab = (u64)(unsigned long)efi_phys.systab;
+ if (md->phys_addr <= systab && systab < end) {
+ systab += md->virt_addr - md->phys_addr;
+ efi.systab = (efi_system_table_t *)(unsigned long)systab;
+ }
+}
+
+static int __init save_runtime_map(void)
+{
+ efi_memory_desc_t *md;
+ void *tmp, *p, *q = NULL;
+ int count = 0;
+
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ md = p;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) ||
+ (md->type == EFI_BOOT_SERVICES_CODE) ||
+ (md->type == EFI_BOOT_SERVICES_DATA))
+ continue;
+ tmp = krealloc(q, (count + 1) * memmap.desc_size, GFP_KERNEL);
+ if (!tmp)
+ goto out;
+ q = tmp;
+
+ memcpy(q + count * memmap.desc_size, md, memmap.desc_size);
+ count++;
+ }
+
+ efi_runtime_map_setup(q, count, memmap.desc_size);
+
+ return 0;
+out:
+ kfree(q);
+ return -ENOMEM;
+}
+
+/*
+ * Map EFI regions which were passed via setup_data. The virt_addr is a fixed
+ * address which was used in the first kernel of a kexec boot.
+ */
+static void __init efi_map_regions_fixed(void)
+{
+ void *p;
+ efi_memory_desc_t *md;
+
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ md = p;
+ efi_map_region_fixed(md); /* FIXME: add error handling */
+ get_systab_virt_addr(md);
+ }
+
+}
+
+/*
+ * Map EFI memory ranges for the runtime services and update new_memmap with
+ * their virtual addresses.
+ */
+static void * __init efi_map_regions(int *count)
+{
+ efi_memory_desc_t *md;
+ void *p, *tmp, *new_memmap = NULL;
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
@@ -807,53 +957,95 @@ void __init efi_enter_virtual_mode(void)
continue;
}
- size = md->num_pages << EFI_PAGE_SHIFT;
- end = md->phys_addr + size;
+ efi_map_region(md);
+ get_systab_virt_addr(md);
- start_pfn = PFN_DOWN(md->phys_addr);
- end_pfn = PFN_UP(end);
- if (pfn_range_is_mapped(start_pfn, end_pfn)) {
- va = __va(md->phys_addr);
+ tmp = krealloc(new_memmap, (*count + 1) * memmap.desc_size,
+ GFP_KERNEL);
+ if (!tmp)
+ goto out;
+ new_memmap = tmp;
+ memcpy(new_memmap + (*count * memmap.desc_size), md,
+ memmap.desc_size);
+ (*count)++;
+ }
- if (!(md->attribute & EFI_MEMORY_WB))
- efi_memory_uc((u64)(unsigned long)va, size);
- } else
- va = efi_ioremap(md->phys_addr, size,
- md->type, md->attribute);
+ return new_memmap;
+out:
+ kfree(new_memmap);
+ return NULL;
+}
+
+/*
+ * This function will switch the EFI runtime services to virtual mode.
+ * Essentially, we look through the EFI memmap and map every region that
+ * has the runtime attribute bit set in its memory descriptor into the
+ * ->trampoline_pgd page table using a top-down VA allocation scheme.
+ *
+ * The old method which used to update that memory descriptor with the
+ * virtual address obtained from ioremap() is still supported when the
+ * kernel is booted with efi=old_map on its command line. That old
+ * method enables the runtime services to be called without having to
+ * thunk back into physical mode for every invocation.
+ *
+ * The new method does a pagetable switch in a preemption-safe manner
+ * so that we're in a different address space when calling a runtime
+ * function. To pass function arguments, we copy the PGDs of the
+ * kernel page table into ->trampoline_pgd prior to each call.
+ *
+ * For a kexec boot specifically, the EFI runtime maps of the previous kernel
+ * should be passed in via setup_data. In that case the runtime ranges will be
+ * mapped to the same virtual addresses as in the first kernel.
+ */
+void __init efi_enter_virtual_mode(void)
+{
+ efi_status_t status;
+ void *new_memmap = NULL;
+ int err, count = 0;
- md->virt_addr = (u64) (unsigned long) va;
+ efi.systab = NULL;
- if (!va) {
- pr_err("ioremap of 0x%llX failed!\n",
- (unsigned long long)md->phys_addr);
- continue;
- }
+ /*
+ * We don't do virtual mode, since we don't do runtime services, on
+ * non-native EFI
+ */
+ if (!efi_is_native()) {
+ efi_unmap_memmap();
+ return;
+ }
- systab = (u64) (unsigned long) efi_phys.systab;
- if (md->phys_addr <= systab && systab < end) {
- systab += md->virt_addr - md->phys_addr;
- efi.systab = (efi_system_table_t *) (unsigned long) systab;
+ if (efi_setup) {
+ efi_map_regions_fixed();
+ } else {
+ efi_merge_regions();
+ new_memmap = efi_map_regions(&count);
+ if (!new_memmap) {
+ pr_err("Error reallocating memory, EFI runtime non-functional!\n");
+ return;
}
- new_memmap = krealloc(new_memmap,
- (count + 1) * memmap.desc_size,
- GFP_KERNEL);
- memcpy(new_memmap + (count * memmap.desc_size), md,
- memmap.desc_size);
- count++;
}
+ err = save_runtime_map();
+ if (err)
+ pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
+
BUG_ON(!efi.systab);
- status = phys_efi_set_virtual_address_map(
- memmap.desc_size * count,
- memmap.desc_size,
- memmap.desc_version,
- (efi_memory_desc_t *)__pa(new_memmap));
+ efi_setup_page_tables();
+ efi_sync_low_kernel_mappings();
- if (status != EFI_SUCCESS) {
- pr_alert("Unable to switch EFI into virtual mode "
- "(status=%lx)!\n", status);
- panic("EFI call to SetVirtualAddressMap() failed!");
+ if (!efi_setup) {
+ status = phys_efi_set_virtual_address_map(
+ memmap.desc_size * count,
+ memmap.desc_size,
+ memmap.desc_version,
+ (efi_memory_desc_t *)__pa(new_memmap));
+
+ if (status != EFI_SUCCESS) {
+ pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
+ status);
+ panic("EFI call to SetVirtualAddressMap() failed!");
+ }
}
/*
@@ -876,7 +1068,8 @@ void __init efi_enter_virtual_mode(void)
efi.query_variable_info = virt_efi_query_variable_info;
efi.update_capsule = virt_efi_update_capsule;
efi.query_capsule_caps = virt_efi_query_capsule_caps;
- if (__supported_pte_mask & _PAGE_NX)
+
+ if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
runtime_code_page_mkexec();
kfree(new_memmap);
@@ -1006,3 +1199,15 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
return EFI_SUCCESS;
}
EXPORT_SYMBOL_GPL(efi_query_variable_store);
+
+static int __init parse_efi_cmdline(char *str)
+{
+ if (*str == '=')
+ str++;
+
+ if (!strncmp(str, "old_map", 7))
+ set_bit(EFI_OLD_MEMMAP, &x86_efi_facility);
+
+ return 0;
+}
+early_param("efi", parse_efi_cmdline);
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 40e4469..249b183 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -37,9 +37,19 @@
* claim EFI runtime service handler exclusively and to duplicate a memory in
* low memory space say 0 - 3G.
*/
-
static unsigned long efi_rt_eflags;
+void efi_sync_low_kernel_mappings(void) {}
+void efi_setup_page_tables(void) {}
+
+void __init efi_map_region(efi_memory_desc_t *md)
+{
+ old_map_region(md);
+}
+
+void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
+void __init parse_efi_setup(u64 phys_addr, u32 data_len) {}
+
void efi_call_phys_prelog(void)
{
struct desc_ptr gdt_descr;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 39a0e7f..6284f15 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -38,10 +38,28 @@
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
+#include <asm/realmode.h>
static pgd_t *save_pgd __initdata;
static unsigned long efi_flags __initdata;
+/*
+ * We allocate runtime services regions bottom-up, starting from -4G, i.e.
+ * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
+ */
+static u64 efi_va = -4 * (1UL << 30);
+#define EFI_VA_END (-68 * (1UL << 30))
+
+/*
+ * Scratch space used for switching the pagetable in the EFI stub
+ */
+struct efi_scratch {
+ u64 r15;
+ u64 prev_cr3;
+ pgd_t *efi_pgt;
+ bool use_pgd;
+};
+
static void __init early_code_mapping_set_exec(int executable)
{
efi_memory_desc_t *md;
@@ -65,6 +83,9 @@ void __init efi_call_phys_prelog(void)
int pgd;
int n_pgds;
+ if (!efi_enabled(EFI_OLD_MEMMAP))
+ return;
+
early_code_mapping_set_exec(1);
local_irq_save(efi_flags);
@@ -86,6 +107,10 @@ void __init efi_call_phys_epilog(void)
*/
int pgd;
int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
+
+ if (!efi_enabled(EFI_OLD_MEMMAP))
+ return;
+
for (pgd = 0; pgd < n_pgds; pgd++)
set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
kfree(save_pgd);
@@ -94,6 +119,96 @@ void __init efi_call_phys_epilog(void)
early_code_mapping_set_exec(0);
}
+/*
+ * Add low kernel mappings for passing arguments to EFI functions.
+ */
+void efi_sync_low_kernel_mappings(void)
+{
+ unsigned num_pgds;
+ pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+
+ if (efi_enabled(EFI_OLD_MEMMAP))
+ return;
+
+ num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
+
+ memcpy(pgd + pgd_index(PAGE_OFFSET),
+ init_mm.pgd + pgd_index(PAGE_OFFSET),
+ sizeof(pgd_t) * num_pgds);
+}
+
+void efi_setup_page_tables(void)
+{
+ efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
+
+ if (!efi_enabled(EFI_OLD_MEMMAP))
+ efi_scratch.use_pgd = true;
+}
+
+static void __init __map_region(efi_memory_desc_t *md, u64 va)
+{
+ pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+ unsigned long pf = 0;
+
+ if (!(md->attribute & EFI_MEMORY_WB))
+ pf |= _PAGE_PCD;
+
+ if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
+ pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
+ md->phys_addr, va);
+}
+
+void __init efi_map_region(efi_memory_desc_t *md)
+{
+ unsigned long size = md->num_pages << PAGE_SHIFT;
+ u64 pa = md->phys_addr;
+
+ if (efi_enabled(EFI_OLD_MEMMAP))
+ return old_map_region(md);
+
+ /*
+ * Make sure the 1:1 mappings are present as a catch-all for b0rked
+ * firmware which doesn't update all internal pointers after switching
+ * to virtual mode and would otherwise crap on us.
+ */
+ __map_region(md, md->phys_addr);
+
+ efi_va -= size;
+
+ /* Is PA 2M-aligned? */
+ if (!(pa & (PMD_SIZE - 1))) {
+ efi_va &= PMD_MASK;
+ } else {
+ u64 pa_offset = pa & (PMD_SIZE - 1);
+ u64 prev_va = efi_va;
+
+ /* get us the same offset within this 2M page */
+ efi_va = (efi_va & PMD_MASK) + pa_offset;
+
+ if (efi_va > prev_va)
+ efi_va -= PMD_SIZE;
+ }
+
+ if (efi_va < EFI_VA_END) {
+ pr_warn(FW_WARN "VA address range overflow!\n");
+ return;
+ }
+
+ /* Do the VA map */
+ __map_region(md, efi_va);
+ md->virt_addr = efi_va;
+}
+
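
The VA side of efi_map_region() can be exercised in isolation. A standalone sketch (illustrative only, not part of the patch; the sample physical addresses and sizes are made up) of how efi_va walks down from -4G while preserving each region's offset within its 2M page:

#include <stdio.h>

#define PMD_SIZE	0x200000ULL
#define PMD_MASK	(~(PMD_SIZE - 1))
#define EFI_VA_START	(-4LL * (1LL << 30))	/* 0xffffffff00000000 */
#define EFI_VA_END	(-68LL * (1LL << 30))	/* 64G of mapping space */

static unsigned long long efi_va = (unsigned long long)EFI_VA_START;

static unsigned long long alloc_va(unsigned long long pa, unsigned long long size)
{
	efi_va -= size;

	if (!(pa & (PMD_SIZE - 1))) {
		/* 2M-aligned PA: keep the VA 2M-aligned as well */
		efi_va &= PMD_MASK;
	} else {
		unsigned long long pa_offset = pa & (PMD_SIZE - 1);
		unsigned long long prev_va = efi_va;

		/* keep the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;
		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	if (efi_va < (unsigned long long)EFI_VA_END)
		printf("out of EFI VA space\n");

	return efi_va;
}

int main(void)
{
	printf("region A -> %#llx\n", alloc_va(0x7f000000ULL, 0x100000ULL));
	printf("region B -> %#llx\n", alloc_va(0x7f123000ULL, 0x200000ULL));
	return 0;
}

Region B lands at an address whose low 21 bits match its physical offset (0x123000), which is the property the else branch above preserves.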
+/*
+ * The kexec kernel will use efi_map_region_fixed() to map EFI runtime memory
+ * ranges. md->virt_addr is the original virtual address which had been mapped
+ * in the first kernel of the kexec boot.
+ */
+void __init efi_map_region_fixed(efi_memory_desc_t *md)
+{
+ __map_region(md, md->virt_addr);
+}
+
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
u32 type, u64 attribute)
{
@@ -113,3 +228,8 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
return (void __iomem *)__va(phys_addr);
}
+
+void __init parse_efi_setup(u64 phys_addr, u32 data_len)
+{
+ efi_setup = phys_addr + sizeof(struct setup_data);
+}
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 4c07cca..88073b1 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -34,10 +34,47 @@
mov %rsi, %cr0; \
mov (%rsp), %rsp
+ /* stolen from gcc */
+ .macro FLUSH_TLB_ALL
+ movq %r15, efi_scratch(%rip)
+ movq %r14, efi_scratch+8(%rip)
+ movq %cr4, %r15
+ movq %r15, %r14
+ andb $0x7f, %r14b
+ movq %r14, %cr4
+ movq %r15, %cr4
+ movq efi_scratch+8(%rip), %r14
+ movq efi_scratch(%rip), %r15
+ .endm
+
+ .macro SWITCH_PGT
+ cmpb $0, efi_scratch+24(%rip)
+ je 1f
+ movq %r15, efi_scratch(%rip) # r15
+ # save previous CR3
+ movq %cr3, %r15
+ movq %r15, efi_scratch+8(%rip) # prev_cr3
+ movq efi_scratch+16(%rip), %r15 # EFI pgt
+ movq %r15, %cr3
+ 1:
+ .endm
+
+ .macro RESTORE_PGT
+ cmpb $0, efi_scratch+24(%rip)
+ je 2f
+ movq efi_scratch+8(%rip), %r15
+ movq %r15, %cr3
+ movq efi_scratch(%rip), %r15
+ FLUSH_TLB_ALL
+ 2:
+ .endm
+
ENTRY(efi_call0)
SAVE_XMM
subq $32, %rsp
+ SWITCH_PGT
call *%rdi
+ RESTORE_PGT
addq $32, %rsp
RESTORE_XMM
ret
@@ -47,7 +84,9 @@ ENTRY(efi_call1)
SAVE_XMM
subq $32, %rsp
mov %rsi, %rcx
+ SWITCH_PGT
call *%rdi
+ RESTORE_PGT
addq $32, %rsp
RESTORE_XMM
ret
@@ -57,7 +96,9 @@ ENTRY(efi_call2)
SAVE_XMM
subq $32, %rsp
mov %rsi, %rcx
+ SWITCH_PGT
call *%rdi
+ RESTORE_PGT
addq $32, %rsp
RESTORE_XMM
ret
@@ -68,7 +109,9 @@ ENTRY(efi_call3)
subq $32, %rsp
mov %rcx, %r8
mov %rsi, %rcx
+ SWITCH_PGT
call *%rdi
+ RESTORE_PGT
addq $32, %rsp
RESTORE_XMM
ret
@@ -80,7 +123,9 @@ ENTRY(efi_call4)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
+ SWITCH_PGT
call *%rdi
+ RESTORE_PGT
addq $32, %rsp
RESTORE_XMM
ret
@@ -93,7 +138,9 @@ ENTRY(efi_call5)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
+ SWITCH_PGT
call *%rdi
+ RESTORE_PGT
addq $48, %rsp
RESTORE_XMM
ret
@@ -109,8 +156,15 @@ ENTRY(efi_call6)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
+ SWITCH_PGT
call *%rdi
+ RESTORE_PGT
addq $48, %rsp
RESTORE_XMM
ret
ENDPROC(efi_call6)
+
+ .data
+ENTRY(efi_scratch)
+ .fill 3,8,0
+ .byte 0
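
The hard-coded efi_scratch offsets used by SWITCH_PGT/RESTORE_PGT correspond to the struct efi_scratch declared in efi_64.c earlier in this diff; a sketch of the layout (assuming the natural x86-64 offsets, i.e. no padding before use_pgd):

/*
 *	efi_scratch + 0   -> r15       (caller's %r15, saved by SWITCH_PGT)
 *	efi_scratch + 8   -> prev_cr3  (kernel %cr3 to restore afterwards)
 *	efi_scratch + 16  -> efi_pgt   (trampoline PGD loaded into %cr3)
 *	efi_scratch + 24  -> use_pgd   (bool tested by the cmpb instructions)
 *
 * The ".fill 3,8,0" plus ".byte 0" above reserve exactly that layout:
 * three 8-byte slots followed by a single byte.  FLUSH_TLB_ALL reuses
 * the first two slots as temporary spill space once their contents are
 * no longer needed.
 */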
diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile
index 01cc29e..0a8ee70 100644
--- a/arch/x86/platform/intel-mid/Makefile
+++ b/arch/x86/platform/intel-mid/Makefile
@@ -1,6 +1,6 @@
-obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o
-obj-$(CONFIG_X86_INTEL_MID) += intel_mid_vrtc.o
+obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfl.o
obj-$(CONFIG_EARLY_PRINTK_INTEL_MID) += early_printk_intel_mid.o
+
# SFI specific code
ifdef CONFIG_X86_INTEL_MID
obj-$(CONFIG_SFI) += sfi.o device_libs/
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c
index 0d942c1..69a7836 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c
@@ -22,7 +22,9 @@ static void __init *emc1403_platform_data(void *info)
int intr = get_gpio_by_name("thermal_int");
int intr2nd = get_gpio_by_name("thermal_alert");
- if (intr == -1 || intr2nd == -1)
+ if (intr < 0)
+ return NULL;
+ if (intr2nd < 0)
return NULL;
i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
index a013a48..dccae6b 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
@@ -66,7 +66,7 @@ static int __init pb_keys_init(void)
gb[i].gpio = get_gpio_by_name(gb[i].desc);
pr_debug("info[%2d]: name = %s, gpio = %d\n", i, gb[i].desc,
gb[i].gpio);
- if (gb[i].gpio == -1)
+ if (gb[i].gpio < 0)
continue;
if (i != good)
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c
index 15278c1..54226de 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c
@@ -21,7 +21,9 @@ static void __init *lis331dl_platform_data(void *info)
int intr = get_gpio_by_name("accel_int");
int intr2nd = get_gpio_by_name("accel_2");
- if (intr == -1 || intr2nd == -1)
+ if (intr < 0)
+ return NULL;
+ if (intr2nd < 0)
return NULL;
i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
index 94ade10..2c8acbc 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
@@ -48,7 +48,7 @@ static void __init *max7315_platform_data(void *info)
gpio_base = get_gpio_by_name(base_pin_name);
intr = get_gpio_by_name(intr_pin_name);
- if (gpio_base == -1)
+ if (gpio_base < 0)
return NULL;
max7315->gpio_base = gpio_base;
if (intr != -1) {
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c
index dd28d63..cfe9a47 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c
@@ -19,7 +19,7 @@ static void *mpu3050_platform_data(void *info)
struct i2c_board_info *i2c_info = info;
int intr = get_gpio_by_name("mpu3050_int");
- if (intr == -1)
+ if (intr < 0)
return NULL;
i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
index d87182a..65c2a9a 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
@@ -26,7 +26,7 @@ static void __init *pmic_gpio_platform_data(void *info)
static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
int gpio_base = get_gpio_by_name("pmic_gpio_base");
- if (gpio_base == -1)
+ if (gpio_base < 0)
gpio_base = 64;
pmic_gpio_pdata.gpio_base = gpio_base;
pmic_gpio_pdata.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c
index 22881c9..33be0b3 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c
@@ -34,10 +34,10 @@ static void *tca6416_platform_data(void *info)
gpio_base = get_gpio_by_name(base_pin_name);
intr = get_gpio_by_name(intr_pin_name);
- if (gpio_base == -1)
+ if (gpio_base < 0)
return NULL;
tca6416.gpio_base = gpio_base;
- if (intr != -1) {
+ if (intr >= 0) {
i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
tca6416.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
} else {
diff --git a/arch/x86/platform/intel-mid/early_printk_intel_mid.c b/arch/x86/platform/intel-mid/early_printk_intel_mid.c
index 4f702f5..e0bd082 100644
--- a/arch/x86/platform/intel-mid/early_printk_intel_mid.c
+++ b/arch/x86/platform/intel-mid/early_printk_intel_mid.c
@@ -22,7 +22,6 @@
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <asm/fixmap.h>
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index f90e290..1bbedc4 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -35,6 +35,8 @@
#include <asm/apb_timer.h>
#include <asm/reboot.h>
+#include "intel_mid_weak_decls.h"
+
/*
* the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
* cmdline option x86_intel_mid_timer can be used to override the configuration
@@ -58,12 +60,16 @@
enum intel_mid_timer_options intel_mid_timer_options;
+/* intel_mid_ops to store sub arch ops */
+struct intel_mid_ops *intel_mid_ops;
+/* getter function for sub arch ops*/
+static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
enum intel_mid_cpu_type __intel_mid_cpu_chip;
EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
static void intel_mid_power_off(void)
{
-}
+};
static void intel_mid_reboot(void)
{
@@ -72,32 +78,6 @@ static void intel_mid_reboot(void)
static unsigned long __init intel_mid_calibrate_tsc(void)
{
- unsigned long fast_calibrate;
- u32 lo, hi, ratio, fsb;
-
- rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
- pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
- ratio = (hi >> 8) & 0x1f;
- pr_debug("ratio is %d\n", ratio);
- if (!ratio) {
- pr_err("read a zero ratio, should be incorrect!\n");
- pr_err("force tsc ratio to 16 ...\n");
- ratio = 16;
- }
- rdmsr(MSR_FSB_FREQ, lo, hi);
- if ((lo & 0x7) == 0x7)
- fsb = PENWELL_FSB_FREQ_83SKU;
- else
- fsb = PENWELL_FSB_FREQ_100SKU;
- fast_calibrate = ratio * fsb;
- pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
- lapic_timer_frequency = fsb * 1000 / HZ;
- /* mark tsc clocksource as reliable */
- set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
-
- if (fast_calibrate)
- return fast_calibrate;
-
return 0;
}
@@ -125,13 +105,37 @@ static void __init intel_mid_time_init(void)
static void intel_mid_arch_setup(void)
{
- if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
- __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
- else {
+ if (boot_cpu_data.x86 != 6) {
pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n",
boot_cpu_data.x86, boot_cpu_data.x86_model);
__intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
+ goto out;
}
+
+ switch (boot_cpu_data.x86_model) {
+ case 0x35:
+ __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_CLOVERVIEW;
+ break;
+ case 0x3C:
+ case 0x4A:
+ __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_TANGIER;
+ break;
+ case 0x27:
+ default:
+ __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
+ break;
+ }
+
+ if (__intel_mid_cpu_chip < MAX_CPU_OPS(get_intel_mid_ops))
+ intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
+ else {
+ intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
+ pr_info("ARCH: Unknown SoC, assuming PENWELL!\n");
+ }
+
+out:
+ if (intel_mid_ops->arch_setup)
+ intel_mid_ops->arch_setup();
}
/* MID systems don't have i8042 controller */
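
A sketch of how the new dispatch above ties into the files added below (INTEL_MID_OPS_INIT itself lives in the intel-mid header, which is not part of this hunk, so its exact shape is an assumption here):

/*
 * intel_mid_arch_setup() indexes get_intel_mid_ops[] with the detected
 * INTEL_MID_CPU_CHIP_* value; INTEL_MID_OPS_INIT is assumed to expand
 * to an array of the get_<soc>_ops() getters declared weak in
 * intel_mid_weak_decls.h and overridden below by mfld.c (Penwell,
 * Cloverview) and mrfl.c (Tangier).  Each getter returns a
 * struct intel_mid_ops whose ->arch_setup() hook installs the
 * SoC-specific TSC calibration routine.
 */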
diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
new file mode 100644
index 0000000..a537ffc
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
@@ -0,0 +1,19 @@
+/*
+ * intel_mid_weak_decls.h: Weak declarations of intel-mid.c
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+
+/* __attribute__((weak)) makes these declarations overridable */
+/* For every CPU addition a new get_<cpuname>_ops interface needs
+ * to be added.
+ */
+extern void * __cpuinit get_penwell_ops(void) __attribute__((weak));
+extern void * __cpuinit get_cloverview_ops(void) __attribute__((weak));
+extern void * __init get_tangier_ops(void) __attribute__((weak));
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
new file mode 100644
index 0000000..4f7884e
--- /dev/null
+++ b/arch/x86/platform/intel-mid/mfld.c
@@ -0,0 +1,75 @@
+/*
+ * mfld.c: Intel Medfield platform setup code
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+
+#include <asm/apic.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
+
+#include "intel_mid_weak_decls.h"
+
+static void penwell_arch_setup(void);
+/* penwell arch ops */
+static struct intel_mid_ops penwell_ops = {
+ .arch_setup = penwell_arch_setup,
+};
+
+static void mfld_power_off(void)
+{
+}
+
+static unsigned long __init mfld_calibrate_tsc(void)
+{
+ unsigned long fast_calibrate;
+ u32 lo, hi, ratio, fsb;
+
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
+ ratio = (hi >> 8) & 0x1f;
+ pr_debug("ratio is %d\n", ratio);
+ if (!ratio) {
+ pr_err("read a zero ratio, should be incorrect!\n");
+ pr_err("force tsc ratio to 16 ...\n");
+ ratio = 16;
+ }
+ rdmsr(MSR_FSB_FREQ, lo, hi);
+ if ((lo & 0x7) == 0x7)
+ fsb = FSB_FREQ_83SKU;
+ else
+ fsb = FSB_FREQ_100SKU;
+ fast_calibrate = ratio * fsb;
+ pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
+ lapic_timer_frequency = fsb * 1000 / HZ;
+ /* mark tsc clocksource as reliable */
+ set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+
+ if (fast_calibrate)
+ return fast_calibrate;
+
+ return 0;
+}
+
+static void __init penwell_arch_setup()
+{
+ x86_platform.calibrate_tsc = mfld_calibrate_tsc;
+ pm_power_off = mfld_power_off;
+}
+
+void * __cpuinit get_penwell_ops()
+{
+ return &penwell_ops;
+}
+
+void * __cpuinit get_cloverview_ops()
+{
+ return &penwell_ops;
+}
diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
new file mode 100644
index 0000000..09d1015
--- /dev/null
+++ b/arch/x86/platform/intel-mid/mrfl.c
@@ -0,0 +1,103 @@
+/*
+ * mrfl.c: Intel Merrifield platform specific setup code
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+
+#include <asm/apic.h>
+#include <asm/intel-mid.h>
+
+#include "intel_mid_weak_decls.h"
+
+static unsigned long __init tangier_calibrate_tsc(void)
+{
+ unsigned long fast_calibrate;
+ u32 lo, hi, ratio, fsb, bus_freq;
+
+ /* *********************** */
+ /* Compute TSC:Ratio * FSB */
+ /* *********************** */
+
+ /* Compute Ratio */
+ rdmsr(MSR_PLATFORM_INFO, lo, hi);
+ pr_debug("IA32 PLATFORM_INFO is 0x%x : %x\n", hi, lo);
+
+ ratio = (lo >> 8) & 0xFF;
+ pr_debug("ratio is %d\n", ratio);
+ if (!ratio) {
+ pr_err("Read a zero ratio, force tsc ratio to 4 ...\n");
+ ratio = 4;
+ }
+
+ /* Compute FSB */
+ rdmsr(MSR_FSB_FREQ, lo, hi);
+ pr_debug("Actual FSB frequency detected by SOC 0x%x : %x\n",
+ hi, lo);
+
+ bus_freq = lo & 0x7;
+ pr_debug("bus_freq = 0x%x\n", bus_freq);
+
+ if (bus_freq == 0)
+ fsb = FSB_FREQ_100SKU;
+ else if (bus_freq == 1)
+ fsb = FSB_FREQ_100SKU;
+ else if (bus_freq == 2)
+ fsb = FSB_FREQ_133SKU;
+ else if (bus_freq == 3)
+ fsb = FSB_FREQ_167SKU;
+ else if (bus_freq == 4)
+ fsb = FSB_FREQ_83SKU;
+ else if (bus_freq == 5)
+ fsb = FSB_FREQ_400SKU;
+ else if (bus_freq == 6)
+ fsb = FSB_FREQ_267SKU;
+ else if (bus_freq == 7)
+ fsb = FSB_FREQ_333SKU;
+ else {
+ BUG();
+ pr_err("Invalid bus_freq! Setting to minimal value!\n");
+ fsb = FSB_FREQ_100SKU;
+ }
+
+ /* TSC = FSB Freq * Resolved HFM Ratio */
+ fast_calibrate = ratio * fsb;
+ pr_debug("calculate tangier tsc %lu KHz\n", fast_calibrate);
+
+ /* ************************************ */
+ /* Calculate Local APIC Timer Frequency */
+ /* ************************************ */
+ lapic_timer_frequency = (fsb * 1000) / HZ;
+
+ pr_debug("Setting lapic_timer_frequency = %d\n",
+ lapic_timer_frequency);
+
+ /* mark tsc clocksource as reliable */
+ set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+
+ if (fast_calibrate)
+ return fast_calibrate;
+
+ return 0;
+}
+
+static void __init tangier_arch_setup(void)
+{
+ x86_platform.calibrate_tsc = tangier_calibrate_tsc;
+}
+
+/* tangier arch ops */
+static struct intel_mid_ops tangier_ops = {
+ .arch_setup = tangier_arch_setup,
+};
+
+void * __cpuinit get_tangier_ops()
+{
+ return &tangier_ops;
+}
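
For quick reference, the bus_freq to FSB-SKU mapping that the if/else chain in tangier_calibrate_tsc() encodes (the FSB_FREQ_* names are the constants used above):

/*
 *	bus_freq 0 -> FSB_FREQ_100SKU	bus_freq 4 -> FSB_FREQ_83SKU
 *	bus_freq 1 -> FSB_FREQ_100SKU	bus_freq 5 -> FSB_FREQ_400SKU
 *	bus_freq 2 -> FSB_FREQ_133SKU	bus_freq 6 -> FSB_FREQ_267SKU
 *	bus_freq 3 -> FSB_FREQ_167SKU	bus_freq 7 -> FSB_FREQ_333SKU
 */

Since bus_freq is masked with 0x7, only the values 0-7 can occur, which makes the final BUG()-plus-fallback branch effectively defensive dead code.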
diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c
index c84c1ca..994c40b 100644
--- a/arch/x86/platform/intel-mid/sfi.c
+++ b/arch/x86/platform/intel-mid/sfi.c
@@ -224,7 +224,7 @@ int get_gpio_by_name(const char *name)
if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN))
return pentry->pin_no;
}
- return -1;
+ return -EINVAL;
}
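
This return-value change is what lets the platform_*.c call sites earlier in this diff switch from the magic "== -1" test to a plain negative check; a sketch of the resulting caller pattern ("example_pin" is a hypothetical name):

/*
 *	int gpio = get_gpio_by_name("example_pin");
 *
 *	if (gpio < 0)
 *		return NULL;
 *
 * A negative return now carries -EINVAL rather than a bare -1, so
 * callers simply test for gpio < 0.
 */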
void __init intel_scu_device_register(struct platform_device *pdev)
@@ -250,7 +250,7 @@ static void __init intel_scu_spi_device_register(struct spi_board_info *sdev)
sdev->modalias);
return;
}
- memcpy(new_dev, sdev, sizeof(*sdev));
+ *new_dev = *sdev;
spi_devs[spi_next_dev++] = new_dev;
}
@@ -271,7 +271,7 @@ static void __init intel_scu_i2c_device_register(int bus,
idev->type);
return;
}
- memcpy(new_dev, idev, sizeof(*idev));
+ *new_dev = *idev;
i2c_bus[i2c_next_dev] = bus;
i2c_devs[i2c_next_dev++] = new_dev;
@@ -337,6 +337,8 @@ static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry,
pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n",
pentry->name, pentry->irq);
pdata = intel_mid_sfi_get_pdata(dev, pentry);
+ if (IS_ERR(pdata))
+ return;
pdev = platform_device_alloc(pentry->name, 0);
if (pdev == NULL) {
@@ -370,6 +372,8 @@ static void __init sfi_handle_spi_dev(struct sfi_device_table_entry *pentry,
spi_info.chip_select);
pdata = intel_mid_sfi_get_pdata(dev, &spi_info);
+ if (IS_ERR(pdata))
+ return;
spi_info.platform_data = pdata;
if (dev->delay)
@@ -395,6 +399,8 @@ static void __init sfi_handle_i2c_dev(struct sfi_device_table_entry *pentry,
i2c_info.addr);
pdata = intel_mid_sfi_get_pdata(dev, &i2c_info);
i2c_info.platform_data = pdata;
+ if (IS_ERR(pdata))
+ return;
if (dev->delay)
intel_scu_i2c_device_register(pentry->host_num, &i2c_info);
@@ -443,13 +449,35 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
* so we have to enable them one by one here
*/
ioapic = mp_find_ioapic(irq);
- irq_attr.ioapic = ioapic;
- irq_attr.ioapic_pin = irq;
- irq_attr.trigger = 1;
- irq_attr.polarity = 1;
- io_apic_set_pci_routing(NULL, irq, &irq_attr);
- } else
+ if (ioapic >= 0) {
+ irq_attr.ioapic = ioapic;
+ irq_attr.ioapic_pin = irq;
+ irq_attr.trigger = 1;
+ if (intel_mid_identify_cpu() ==
+ INTEL_MID_CPU_CHIP_TANGIER) {
+ if (!strncmp(pentry->name,
+ "r69001-ts-i2c", 13))
+ /* active low */
+ irq_attr.polarity = 1;
+ else if (!strncmp(pentry->name,
+ "synaptics_3202", 14))
+ /* active low */
+ irq_attr.polarity = 1;
+ else if (irq == 41)
+ /* fast_int_1 */
+ irq_attr.polarity = 1;
+ else
+ /* active high */
+ irq_attr.polarity = 0;
+ } else {
+ /* PNW and CLV go with active low */
+ irq_attr.polarity = 1;
+ }
+ io_apic_set_pci_routing(NULL, irq, &irq_attr);
+ }
+ } else {
irq = 0; /* No irq */
+ }
dev = get_device_id(pentry->type, pentry->name);
diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c
index e6cb80f..4d171e8 100644
--- a/arch/x86/platform/iris/iris.c
+++ b/arch/x86/platform/iris/iris.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/pm.h>
#include <asm/io.h>
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index efe4d72..dfe605a 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -433,15 +433,49 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
return;
}
-static inline unsigned long cycles_2_us(unsigned long long cyc)
+/*
+ * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative
+ * number, not an absolute. It converts a duration in cycles to a duration in
+ * ns.
+ */
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
+ struct cyc2ns_data *data = cyc2ns_read_begin();
unsigned long long ns;
- unsigned long us;
- int cpu = smp_processor_id();
- ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR;
- us = ns / 1000;
- return us;
+ ns = mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
+
+ cyc2ns_read_end(data);
+ return ns;
+}
+
+/*
+ * The reverse of the above; converts a duration in ns to a duration in cycles.
+ */
+static inline unsigned long long ns_2_cycles(unsigned long long ns)
+{
+ struct cyc2ns_data *data = cyc2ns_read_begin();
+ unsigned long long cyc;
+
+ cyc = (ns << data->cyc2ns_shift) / data->cyc2ns_mul;
+
+ cyc2ns_read_end(data);
+ return cyc;
+}
+
+static inline unsigned long cycles_2_us(unsigned long long cyc)
+{
+ return cycles_2_ns(cyc) / NSEC_PER_USEC;
+}
+
+static inline cycles_t sec_2_cycles(unsigned long sec)
+{
+ return ns_2_cycles(sec * NSEC_PER_SEC);
+}
+
+static inline unsigned long long usec_2_cycles(unsigned long usec)
+{
+ return ns_2_cycles(usec * NSEC_PER_USEC);
}
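
The new helpers all funnel through mul_u64_u32_shr(), a 64x32-bit multiply followed by a right shift, fed with the cyc2ns_mul/cyc2ns_shift pair published by the tsc code. A standalone sketch of the arithmetic (illustrative only; the mul/shift values are invented for a 2.5 GHz TSC, and the helper is written out by hand rather than taken from the kernel header):

#include <stdint.h>
#include <stdio.h>

static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
	uint32_t ah = (uint32_t)(a >> 32), al = (uint32_t)a;
	uint64_t ret;

	ret = ((uint64_t)al * mul) >> shift;
	ret += ((uint64_t)ah * mul) << (32 - shift);
	return ret;
}

int main(void)
{
	/* 0.4 ns per cycle ~= 429496730 / 2^30 */
	uint32_t cyc2ns_mul = 429496730;
	unsigned int cyc2ns_shift = 30;
	uint64_t cyc = 2500000000ULL;		/* one second at 2.5 GHz */

	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)cyc,
	       (unsigned long long)mul_u64_u32_shr(cyc, cyc2ns_mul, cyc2ns_shift));
	return 0;
}

With those invented values, one second's worth of cycles converts to 1000000000 ns, as expected for 0.4 ns per cycle; ns_2_cycles() above simply runs the same fixed-point factor in the opposite direction.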
/*
@@ -668,16 +702,6 @@ static int wait_completion(struct bau_desc *bau_desc,
bcp, try);
}
-static inline cycles_t sec_2_cycles(unsigned long sec)
-{
- unsigned long ns;
- cycles_t cyc;
-
- ns = sec * 1000000000;
- cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
- return cyc;
-}
-
/*
* Our retries are blocked by all destination sw ack resources being
* in use, and a timeout is pending. In that case hardware immediately
@@ -1327,16 +1351,6 @@ static void ptc_seq_stop(struct seq_file *file, void *data)
{
}
-static inline unsigned long long usec_2_cycles(unsigned long microsec)
-{
- unsigned long ns;
- unsigned long long cyc;
-
- ns = microsec * 1000;
- cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
- return cyc;
-}
-
/*
* Display the statistics thru /proc/sgi_uv/ptc_statistics
* 'data' points to the cpu number
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index a44f457..bad628a 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -29,12 +29,10 @@ void __init reserve_real_mode(void)
void __init setup_real_mode(void)
{
u16 real_mode_seg;
- u32 *rel;
+ const u32 *rel;
u32 count;
- u32 *ptr;
- u16 *seg;
- int i;
unsigned char *base;
+ unsigned long phys_base;
struct trampoline_header *trampoline_header;
size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
@@ -46,23 +44,23 @@ void __init setup_real_mode(void)
memcpy(base, real_mode_blob, size);
- real_mode_seg = __pa(base) >> 4;
+ phys_base = __pa(base);
+ real_mode_seg = phys_base >> 4;
+
rel = (u32 *) real_mode_relocs;
/* 16-bit segment relocations. */
- count = rel[0];
- rel = &rel[1];
- for (i = 0; i < count; i++) {
- seg = (u16 *) (base + rel[i]);
+ count = *rel++;
+ while (count--) {
+ u16 *seg = (u16 *) (base + *rel++);
*seg = real_mode_seg;
}
/* 32-bit linear relocations. */
- count = rel[i];
- rel = &rel[i + 1];
- for (i = 0; i < count; i++) {
- ptr = (u32 *) (base + rel[i]);
- *ptr += __pa(base);
+ count = *rel++;
+ while (count--) {
+ u32 *ptr = (u32 *) (base + *rel++);
+ *ptr += phys_base;
}
/* Must be perfomed *after* relocation. */
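
The rewritten loops make the layout of the real_mode_relocs blob easier to see; as implied by the parsing code above (a reading of the code, not a separately documented format):

/*
 *	u32 count16;		   number of 16-bit segment relocations
 *	u32 offs16[count16];	   byte offsets into the copied blob; each
 *				   names a u16 that receives real_mode_seg
 *	u32 count32;		   number of 32-bit linear relocations
 *	u32 offs32[count32];	   byte offsets; each names a u32 that gets
 *				   phys_base added to it
 */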
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
index f932ea6..d66c607 100644
--- a/arch/x86/realmode/rm/reboot.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -1,5 +1,4 @@
#include <linux/linkage.h>
-#include <linux/init.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/processor-flags.h>
diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
index c1b2791..48ddd76 100644
--- a/arch/x86/realmode/rm/trampoline_32.S
+++ b/arch/x86/realmode/rm/trampoline_32.S
@@ -20,7 +20,6 @@
*/
#include <linux/linkage.h>
-#include <linux/init.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include "realmode.h"
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index bb360dc..dac7b20 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -25,7 +25,6 @@
*/
#include <linux/linkage.h>
-#include <linux/init.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/msr.h>
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index aabfb83..96bc506 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -357,3 +357,5 @@
348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
349 i386 kcmp sys_kcmp
350 i386 finit_module sys_finit_module
+351 i386 sched_setattr sys_sched_setattr
+352 i386 sched_getattr sys_sched_getattr
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 38ae65d..a12bddc 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -320,6 +320,8 @@
311 64 process_vm_writev sys_process_vm_writev
312 common kcmp sys_kcmp
313 common finit_module sys_finit_module
+314 common sched_setattr sys_sched_setattr
+315 common sched_getattr sys_sched_getattr
#
# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index f7bab68..11f9285 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -722,15 +722,25 @@ static void percpu_init(void)
/*
* Check to see if a symbol lies in the .data..percpu section.
- * For some as yet not understood reason the "__init_begin"
- * symbol which immediately preceeds the .data..percpu section
- * also shows up as it it were part of it so we do an explict
- * check for that symbol name and ignore it.
+ *
+ * The linker incorrectly associates some symbols with the
+ * .data..percpu section so we also need to check the symbol
+ * name to make sure that we classify the symbol correctly.
+ *
+ * The GNU linker incorrectly associates:
+ * __init_begin
+ * __per_cpu_load
+ *
+ * The "gold" linker incorrectly associates:
+ * init_per_cpu__irq_stack_union
+ * init_per_cpu__gdt_page
*/
static int is_percpu_sym(ElfW(Sym) *sym, const char *symname)
{
return (sym->st_shndx == per_cpu_shndx) &&
- strcmp(symname, "__init_begin");
+ strcmp(symname, "__init_begin") &&
+ strcmp(symname, "__per_cpu_load") &&
+ strncmp(symname, "init_per_cpu_", 13);
}
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 2ada505..eb5d7a5 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -178,7 +178,7 @@ notrace static int __always_inline do_realtime(struct timespec *ts)
ts->tv_nsec = 0;
do {
- seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+ seq = raw_read_seqcount_begin(&gtod->seq);
mode = gtod->clock.vclock_mode;
ts->tv_sec = gtod->wall_time_sec;
ns = gtod->wall_time_snsec;
@@ -198,7 +198,7 @@ notrace static int do_monotonic(struct timespec *ts)
ts->tv_nsec = 0;
do {
- seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+ seq = raw_read_seqcount_begin(&gtod->seq);
mode = gtod->clock.vclock_mode;
ts->tv_sec = gtod->monotonic_time_sec;
ns = gtod->monotonic_time_snsec;
@@ -214,7 +214,7 @@ notrace static int do_realtime_coarse(struct timespec *ts)
{
unsigned long seq;
do {
- seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+ seq = raw_read_seqcount_begin(&gtod->seq);
ts->tv_sec = gtod->wall_time_coarse.tv_sec;
ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@@ -225,7 +225,7 @@ notrace static int do_monotonic_coarse(struct timespec *ts)
{
unsigned long seq;
do {
- seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+ seq = raw_read_seqcount_begin(&gtod->seq);
ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
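
A brief note on why these call sites use the raw_ spelling (background, not stated in the patch itself): the vDSO executes in user space, where the lockdep machinery behind the non-raw seqcount accessors is unavailable, so the gettime paths rely on the lockdep-free raw_read_seqcount_begin() that replaced the old *_no_lockdep name. The read-side pattern itself is unchanged:

/*
 *	do {
 *		seq = raw_read_seqcount_begin(&gtod->seq);
 *		... copy the snapshot ...
 *	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 */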
diff --git a/arch/x86/vdso/vdso.S b/arch/x86/vdso/vdso.S
index 01f5e3b..1e13eb8 100644
--- a/arch/x86/vdso/vdso.S
+++ b/arch/x86/vdso/vdso.S
@@ -1,6 +1,5 @@
#include <asm/page_types.h>
#include <linux/linkage.h>
-#include <linux/init.h>
__PAGE_ALIGNED_DATA
diff --git a/arch/x86/vdso/vdsox32.S b/arch/x86/vdso/vdsox32.S
index d6b9a7f..295f1c7 100644
--- a/arch/x86/vdso/vdsox32.S
+++ b/arch/x86/vdso/vdsox32.S
@@ -1,6 +1,5 @@
#include <asm/page_types.h>
#include <linux/linkage.h>
-#include <linux/init.h>
__PAGE_ALIGNED_DATA
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index ef02167..e1ee6b5 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -9,21 +9,14 @@
#ifndef _XTENSA_SYSTEM_H
#define _XTENSA_SYSTEM_H
-#define smp_read_barrier_depends() do { } while(0)
-#define read_barrier_depends() do { } while(0)
-
#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
#define rmb() barrier()
#define wmb() mb()
#ifdef CONFIG_SMP
#error smp_* not defined
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
#endif
-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#include <asm-generic/barrier.h>
#endif /* _XTENSA_SYSTEM_H */
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index ba6cf8e..b91ce75 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = {
void blk_mq_unregister_disk(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ int i, j;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ hctx_for_each_ctx(hctx, ctx, j) {
+ kobject_del(&ctx->kobj);
+ kobject_put(&ctx->kobj);
+ }
+ kobject_del(&hctx->kobj);
+ kobject_put(&hctx->kobj);
+ }
kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
kobject_del(&q->mq_kobj);
+ kobject_put(&q->mq_kobj);
kobject_put(&disk_to_dev(disk)->kobj);
}
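
The teardown added here follows the usual two-step kobject rule; a sketch of the convention being applied (general kobject behaviour, not something introduced by this hunk):

/*
 *	kobject_del(kobj);	removes the object from sysfs and breaks
 *				the parent link
 *	kobject_put(kobj);	drops the reference taken when the object
 *				was initialized and added, so release()
 *				can free it once the count reaches zero
 *
 * Both steps are now done per software context and per hardware queue,
 * and the queue's mq_kobj gets the previously missing kobject_put().
 */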
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 5d92485..4770de5 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig"
config ACPI_EXTLOG
tristate "Extended Error Log support"
depends on X86_MCE && X86_LOCAL_APIC
- select EFI
select UEFI_CPER
default n
help
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 8711e37..3c2e4aa 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -207,7 +207,7 @@ static int acpi_ac_probe(struct platform_device *pdev)
goto end;
result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
- ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac);
+ ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
if (result) {
power_supply_unregister(&ac->charger);
goto end;
@@ -255,7 +255,7 @@ static int acpi_ac_remove(struct platform_device *pdev)
return -EINVAL;
acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
- ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler);
+ ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
ac = platform_get_drvdata(pdev);
if (ac->charger.dev)
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index a6869e1..5d33c54 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -12,6 +12,7 @@
#include <acpi/acpi_bus.h>
#include <linux/cper.h>
#include <linux/ratelimit.h>
+#include <linux/edac.h>
#include <asm/cpu.h>
#include <asm/mce.h>
@@ -43,6 +44,8 @@ struct extlog_l1_head {
u8 rev1[12];
};
+static int old_edac_report_status;
+
static u8 extlog_dsm_uuid[] = "663E35AF-CC10-41A4-88EA-5470AF055295";
/* L1 table related physical address */
@@ -150,7 +153,7 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
rc = print_extlog_rcd(NULL, (struct acpi_generic_status *)elog_buf, cpu);
- return NOTIFY_DONE;
+ return NOTIFY_STOP;
}
static int extlog_get_dsm(acpi_handle handle, int rev, int func, u64 *ret)
@@ -231,8 +234,12 @@ static int __init extlog_init(void)
u64 cap;
int rc;
- rc = -ENODEV;
+ if (get_edac_report_status() == EDAC_REPORTING_FORCE) {
+ pr_warn("Not loading eMCA, error reporting force-enabled through EDAC.\n");
+ return -EPERM;
+ }
+ rc = -ENODEV;
rdmsrl(MSR_IA32_MCG_CAP, cap);
if (!(cap & MCG_ELOG_P))
return rc;
@@ -287,6 +294,12 @@ static int __init extlog_init(void)
if (elog_buf == NULL)
goto err_release_elog;
+ /*
+ * The eMCA reporting method takes priority over the EDAC method,
+ * unless EDAC reporting has been forced on by the administrator.
+ */
+ old_edac_report_status = get_edac_report_status();
+ set_edac_report_status(EDAC_REPORTING_DISABLED);
mce_register_decode_chain(&extlog_mce_dec);
/* enable OS to be involved to take over management from BIOS */
((struct extlog_l1_head *)extlog_l1_addr)->flags |= FLAG_OS_OPTIN;
@@ -308,6 +321,7 @@ err:
static void __exit extlog_exit(void)
{
+ set_edac_report_status(old_edac_report_status);
mce_unregister_decode_chain(&extlog_mce_dec);
((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
if (extlog_l1_addr)
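The init/exit hunks above implement a save, override, restore sequence around the EDAC reporting mode so that eMCA decoding wins by default while an administrator-forced EDAC setup is left alone. A condensed sketch of that sequence, assuming only the get_edac_report_status()/set_edac_report_status() helpers used by the patch:

    #include <linux/edac.h>

    static int saved_report_status;

    static int example_claim_error_reporting(void)
    {
        /* Respect a reporting mode that was forced by the administrator. */
        if (get_edac_report_status() == EDAC_REPORTING_FORCE)
            return -EPERM;

        /* Remember the old mode, then mute EDAC while we own decoding. */
        saved_report_status = get_edac_report_status();
        set_edac_report_status(EDAC_REPORTING_DISABLED);
        return 0;
    }

    static void example_release_error_reporting(void)
    {
        set_edac_report_status(saved_report_status);
    }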
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index fc6008f..509452a 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -193,10 +193,7 @@ static int power_saving_thread(void *data)
CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
stop_critical_timings();
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __mwait(power_saving_mwait_eax, 1);
+ mwait_idle_with_hints(power_saving_mwait_eax, 1);
start_critical_timings();
if (lapic_marked_unstable)
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 786294b..3650b21 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -2,7 +2,6 @@ config ACPI_APEI
bool "ACPI Platform Error Interface (APEI)"
select MISC_FILESYSTEMS
select PSTORE
- select EFI
select UEFI_CPER
depends on X86
help
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 6d2c49b..e55584a 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -41,6 +41,7 @@
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
+#include <asm/unaligned.h>
#include "apei-internal.h"
@@ -567,8 +568,7 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
bit_offset = reg->bit_offset;
access_size_code = reg->access_width;
space_id = reg->space_id;
- /* Handle possible alignment issues */
- memcpy(paddr, &reg->address, sizeof(*paddr));
+ *paddr = get_unaligned(&reg->address);
if (!*paddr) {
pr_warning(FW_BUG APEI_PFX
"Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index fb57d03..7dcc8a8 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -34,6 +34,7 @@
#include <linux/delay.h>
#include <linux/mm.h>
#include <acpi/acpi.h>
+#include <asm/unaligned.h>
#include "apei-internal.h"
@@ -216,7 +217,7 @@ static void check_vendor_extension(u64 paddr,
static void *einj_get_parameter_address(void)
{
int i;
- u64 paddrv4 = 0, paddrv5 = 0;
+ u64 pa_v4 = 0, pa_v5 = 0;
struct acpi_whea_header *entry;
entry = EINJ_TAB_ENTRY(einj_tab);
@@ -225,30 +226,28 @@ static void *einj_get_parameter_address(void)
entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
entry->register_region.space_id ==
ACPI_ADR_SPACE_SYSTEM_MEMORY)
- memcpy(&paddrv4, &entry->register_region.address,
- sizeof(paddrv4));
+ pa_v4 = get_unaligned(&entry->register_region.address);
if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS &&
entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
entry->register_region.space_id ==
ACPI_ADR_SPACE_SYSTEM_MEMORY)
- memcpy(&paddrv5, &entry->register_region.address,
- sizeof(paddrv5));
+ pa_v5 = get_unaligned(&entry->register_region.address);
entry++;
}
- if (paddrv5) {
+ if (pa_v5) {
struct set_error_type_with_address *v5param;
- v5param = acpi_os_map_memory(paddrv5, sizeof(*v5param));
+ v5param = acpi_os_map_memory(pa_v5, sizeof(*v5param));
if (v5param) {
acpi5 = 1;
- check_vendor_extension(paddrv5, v5param);
+ check_vendor_extension(pa_v5, v5param);
return v5param;
}
}
- if (param_extension && paddrv4) {
+ if (param_extension && pa_v4) {
struct einj_parameter *v4param;
- v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param));
+ v4param = acpi_os_map_memory(pa_v4, sizeof(*v4param));
if (!v4param)
return NULL;
if (v4param->reserved1 || v4param->reserved2) {
@@ -416,7 +415,8 @@ out:
return rc;
}
-static int __einj_error_inject(u32 type, u64 param1, u64 param2)
+static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
+ u64 param3, u64 param4)
{
struct apei_exec_context ctx;
u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
@@ -446,6 +446,12 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
break;
}
v5param->flags = vendor_flags;
+ } else if (flags) {
+ v5param->flags = flags;
+ v5param->memory_address = param1;
+ v5param->memory_address_range = param2;
+ v5param->apicid = param3;
+ v5param->pcie_sbdf = param4;
} else {
switch (type) {
case ACPI_EINJ_PROCESSOR_CORRECTABLE:
@@ -514,11 +520,17 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
}
/* Inject the specified hardware error */
-static int einj_error_inject(u32 type, u64 param1, u64 param2)
+static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
+ u64 param3, u64 param4)
{
int rc;
unsigned long pfn;
+ /* If user manually set "flags", make sure it is legal */
+ if (flags && (flags &
+ ~(SETWA_FLAGS_APICID|SETWA_FLAGS_MEM|SETWA_FLAGS_PCIE_SBDF)))
+ return -EINVAL;
+
/*
* We need extra sanity checks for memory errors.
* Other types leap directly to injection.
@@ -532,7 +544,7 @@ static int einj_error_inject(u32 type, u64 param1, u64 param2)
if (type & ACPI5_VENDOR_BIT) {
if (vendor_flags != SETWA_FLAGS_MEM)
goto inject;
- } else if (!(type & MEM_ERROR_MASK))
+ } else if (!(type & MEM_ERROR_MASK) && !(flags & SETWA_FLAGS_MEM))
goto inject;
/*
@@ -546,15 +558,18 @@ static int einj_error_inject(u32 type, u64 param1, u64 param2)
inject:
mutex_lock(&einj_mutex);
- rc = __einj_error_inject(type, param1, param2);
+ rc = __einj_error_inject(type, flags, param1, param2, param3, param4);
mutex_unlock(&einj_mutex);
return rc;
}
static u32 error_type;
+static u32 error_flags;
static u64 error_param1;
static u64 error_param2;
+static u64 error_param3;
+static u64 error_param4;
static struct dentry *einj_debug_dir;
static int available_error_type_show(struct seq_file *m, void *v)
@@ -648,7 +663,8 @@ static int error_inject_set(void *data, u64 val)
if (!error_type)
return -EINVAL;
- return einj_error_inject(error_type, error_param1, error_param2);
+ return einj_error_inject(error_type, error_flags, error_param1, error_param2,
+ error_param3, error_param4);
}
DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
@@ -729,6 +745,10 @@ static int __init einj_init(void)
rc = -ENOMEM;
einj_param = einj_get_parameter_address();
if ((param_extension || acpi5) && einj_param) {
+ fentry = debugfs_create_x32("flags", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_flags);
+ if (!fentry)
+ goto err_unmap;
fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
einj_debug_dir, &error_param1);
if (!fentry)
@@ -737,6 +757,14 @@ static int __init einj_init(void)
einj_debug_dir, &error_param2);
if (!fentry)
goto err_unmap;
+ fentry = debugfs_create_x64("param3", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param3);
+ if (!fentry)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param4", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param4);
+ if (!fentry)
+ goto err_unmap;
fentry = debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR,
einj_debug_dir, &notrigger);
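With the new "flags", "param3" and "param4" files the ACPI 5.0 SET_ERROR_TYPE_WITH_ADDRESS interface is fully exposed, so user space can state explicitly which parameters are valid instead of relying on the legacy param1/param2 interpretation. A hedged user-space sketch of a memory-targeted injection; it assumes debugfs is mounted at /sys/kernel/debug, uses 0x2 for the "memory address and range valid" flag, and pokes a purely hypothetical physical address that must be replaced with something safe to inject into on a real system:

    #include <stdio.h>
    #include <stdlib.h>

    /* Write a single value into one of the einj debugfs files. */
    static void einj_write(const char *file, const char *val)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/kernel/debug/apei/einj/%s", file);
        f = fopen(path, "w");
        if (!f) {
            perror(path);
            exit(1);
        }
        fprintf(f, "%s\n", val);
        fclose(f);
    }

    int main(void)
    {
        einj_write("error_type", "0x8");            /* memory correctable */
        einj_write("flags", "0x2");                 /* memory address/range valid */
        einj_write("param1", "0x12345000");         /* hypothetical physical address */
        einj_write("param2", "0xfffffffffffff000"); /* address mask */
        einj_write("error_inject", "1");            /* trigger the injection */
        return 0;
    }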
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index cb1d557..ed65e9c 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -611,7 +611,7 @@ static void __erst_record_id_cache_compact(void)
if (entries[i] == APEI_ERST_INVALID_RECORD_ID)
continue;
if (wpos != i)
- memcpy(&entries[wpos], &entries[i], sizeof(entries[i]));
+ entries[wpos] = entries[i];
wpos++;
}
erst_record_id_cache.len = wpos;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index a30bc31..46766ef 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -413,27 +413,31 @@ static void ghes_handle_memory_failure(struct acpi_generic_data *gdata, int sev)
{
#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
unsigned long pfn;
+ int flags = -1;
int sec_sev = ghes_severity(gdata->error_severity);
struct cper_sec_mem_err *mem_err;
mem_err = (struct cper_sec_mem_err *)(gdata + 1);
- if (sec_sev == GHES_SEV_CORRECTED &&
- (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED) &&
- (mem_err->validation_bits & CPER_MEM_VALID_PA)) {
- pfn = mem_err->physical_addr >> PAGE_SHIFT;
- if (pfn_valid(pfn))
- memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
- else if (printk_ratelimit())
- pr_warn(FW_WARN GHES_PFX
- "Invalid address in generic error data: %#llx\n",
- mem_err->physical_addr);
- }
- if (sev == GHES_SEV_RECOVERABLE &&
- sec_sev == GHES_SEV_RECOVERABLE &&
- mem_err->validation_bits & CPER_MEM_VALID_PA) {
- pfn = mem_err->physical_addr >> PAGE_SHIFT;
- memory_failure_queue(pfn, 0, 0);
+ if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
+ return;
+
+ pfn = mem_err->physical_addr >> PAGE_SHIFT;
+ if (!pfn_valid(pfn)) {
+ pr_warn_ratelimited(FW_WARN GHES_PFX
+ "Invalid address in generic error data: %#llx\n",
+ mem_err->physical_addr);
+ return;
}
+
+ /* only the following two conditions can be handled properly for now */
+ if (sec_sev == GHES_SEV_CORRECTED &&
+ (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
+ flags = MF_SOFT_OFFLINE;
+ if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
+ flags = 0;
+
+ if (flags != -1)
+ memory_failure_queue(pfn, 0, flags);
#endif
}
@@ -453,8 +457,7 @@ static void ghes_do_proc(struct ghes *ghes,
ghes_edac_report_mem_error(ghes, sev, mem_err);
#ifdef CONFIG_X86_MCE
- apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
- mem_err);
+ apei_mce_report_mem_error(sev, mem_err);
#endif
ghes_handle_memory_failure(gdata, sev);
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index fbf1ace..5876a49 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -62,6 +62,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
MODULE_DESCRIPTION("ACPI Battery Driver");
MODULE_LICENSE("GPL");
+static int battery_bix_broken_package;
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -416,7 +417,12 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
return -ENODEV;
}
- if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
+
+ if (battery_bix_broken_package)
+ result = extract_package(battery, buffer.pointer,
+ extended_info_offsets + 1,
+ ARRAY_SIZE(extended_info_offsets) - 1);
+ else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
result = extract_package(battery, buffer.pointer,
extended_info_offsets,
ARRAY_SIZE(extended_info_offsets));
@@ -754,6 +760,17 @@ static int battery_notify(struct notifier_block *nb,
return 0;
}
+static struct dmi_system_id bat_dmi_table[] = {
+ {
+ .ident = "NEC LZ750/LS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
+ },
+ },
+ {},
+};
+
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
@@ -846,6 +863,9 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
{
if (acpi_disabled)
return;
+
+ if (dmi_check_system(bat_dmi_table))
+ battery_bix_broken_package = 1;
acpi_bus_register_driver(&acpi_battery_driver);
}
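The quirk is keyed off DMI strings: dmi_check_system() returns the number of entries in the table that match the running machine, so flipping a module-wide flag in the init path is enough and nothing per-device has to change. The same table pattern in isolation, with a made-up vendor/product pair standing in for a real quirky machine:

    #include <linux/dmi.h>
    #include <linux/init.h>

    static bool example_quirk;

    static const struct dmi_system_id example_dmi_table[] __initconst = {
        {
            .ident = "Hypothetical Laptop 1000",
            .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Example Corp"),
                DMI_MATCH(DMI_PRODUCT_NAME, "Laptop-1000"),
            },
        },
        { }     /* terminator */
    };

    static int __init example_init(void)
    {
        if (dmi_check_system(example_dmi_table))
            example_quirk = true;
        return 0;
    }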
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index bba9b72..0710004 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -156,6 +156,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
}
EXPORT_SYMBOL(acpi_bus_get_private_data);
+void acpi_bus_no_hotplug(acpi_handle handle)
+{
+ struct acpi_device *adev = NULL;
+
+ acpi_bus_get_device(handle, &adev);
+ if (adev)
+ adev->flags.no_hotplug = true;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug);
+
static void acpi_print_osc_error(acpi_handle handle,
struct acpi_osc_context *context, char *error)
{
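acpi_bus_no_hotplug() gives a driver a way to mark the ACPI companion of a device it has claimed as not hot-pluggable, so firmware-initiated ejects are refused while the driver is using the hardware. A hedged sketch of a PCI driver calling it from probe; the probe function itself is hypothetical:

    #include <linux/pci.h>
    #include <linux/acpi.h>

    static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
        acpi_handle handle = ACPI_HANDLE(&pdev->dev);

        /* Not every PCI device has an ACPI companion. */
        if (handle)
            acpi_bus_no_hotplug(handle);

        /* ... regular device setup continues here ... */
        return 0;
    }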
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 644516d..f90c56c 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -727,11 +727,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
if (unlikely(!pr))
return -EINVAL;
- if (cx->entry_method == ACPI_CSTATE_FFH) {
- if (current_set_polling_and_test())
- return -EINVAL;
- }
-
lapic_timer_state_broadcast(pr, cx, 1);
acpi_idle_do_entry(cx);
@@ -785,11 +780,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
if (unlikely(!pr))
return -EINVAL;
- if (cx->entry_method == ACPI_CSTATE_FFH) {
- if (current_set_polling_and_test())
- return -EINVAL;
- }
-
/*
* Must be done before busmaster disable as we might need to
* access HPET !
@@ -841,11 +831,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
}
}
- if (cx->entry_method == ACPI_CSTATE_FFH) {
- if (current_set_polling_and_test())
- return -EINVAL;
- }
-
acpi_unlazy_tlb(smp_processor_id());
/* Tell the scheduler that we are going deep-idle: */
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 14f1e95..e3a92a6 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -427,6 +427,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs }, /* 88se9128 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
.driver_data = board_ahci_yes_fbs }, /* 88se9125 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
+ PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9170 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
@@ -1238,15 +1241,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* AHCI controllers often implement SFF compatible interface.
- * Grab all PCI BARs just in case.
- */
- rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
- if (rc == -EBUSY)
- pcim_pin_device(pdev);
- if (rc)
- return rc;
-
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == 0x2652 || pdev->device == 0x2653)) {
u8 map;
@@ -1263,6 +1257,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ /* AHCI controllers often implement SFF compatible interface.
+ * Grab all PCI BARs just in case.
+ */
+ rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index ae2d73f..3e23e99 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
/*
* set PHY Parameters, two steps to configure the GPR13,
* one write for rest of parameters, mask of first write
- * is 0x07fffffd, and the other one write for setting
+ * is 0x07ffffff, and the other one write for setting
* the mpll_clk_en.
*/
regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
@@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
| IMX6Q_GPR13_SATA_TX_ATTEN_MASK
| IMX6Q_GPR13_SATA_TX_BOOST_MASK
| IMX6Q_GPR13_SATA_TX_LVL_MASK
+ | IMX6Q_GPR13_SATA_MPLL_CLK_EN
| IMX6Q_GPR13_SATA_TX_EDGE_RATE
, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
| IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 75b9367..1393a58 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev,
"failed to get NCQ Send/Recv Log Emask 0x%x\n",
err_mask);
} else {
+ u8 *cmds = dev->ncq_send_recv_cmds;
+
dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
- memcpy(dev->ncq_send_recv_cmds, ap->sector_buf,
- ATA_LOG_NCQ_SEND_RECV_SIZE);
+ memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+
+ if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
+ ata_dev_dbg(dev, "disabling queued TRIM support\n");
+ cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
+ ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
+ }
}
}
@@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
+ /* Seagate Momentus SpinPoint M8 seems to have FPDMA_AA issues */
+ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
{ "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
@@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ /* devices that don't properly handle queued TRIM commands */
+ { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
/* End Marker */
{ }
};
@@ -6519,6 +6533,7 @@ static int __init ata_parse_force_one(char **cur,
{ "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
{ "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
{ "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
+ { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
};
char *start = *cur, *p = *cur;
char *id, *val, *endp;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index ab58556..377eb88 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3872,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work)
return;
}
+ /*
+ * XXX - UGLY HACK
+ *
+ * The block layer suspend/resume path is fundamentally broken due
+ * to freezable kthreads and workqueue and may deadlock if a block
+ * device gets removed while resume is in progress. I don't know
+ * what the solution is short of removing freezable kthreads and
+ * workqueues altogether.
+ *
+ * The following is an ugly hack to avoid kicking off device
+ * removal while freezer is active. This is a joke but does avoid
+ * this particular deadlock scenario.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=62801
+ * http://marc.info/?l=linux-kernel&m=138695698516487
+ */
+#ifdef CONFIG_FREEZER
+ while (pm_freezing)
+ msleep(10);
+#endif
+
DPRINTK("ENTER\n");
mutex_lock(&ap->scsi_scan_mutex);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index fe3ca09..1ad2f62 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -83,6 +83,10 @@ static struct pci_driver sis_pci_driver = {
.id_table = sis_pci_tbl,
.probe = sis_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
};
static struct scsi_host_template sis_sht = {
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 94e8a80..870ecfd 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -1,6 +1,6 @@
# Makefile for the Linux device tree
-obj-y := core.o bus.o dd.o syscore.o \
+obj-y := component.o core.o bus.o dd.o syscore.o \
driver.o class.o platform.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 73f6c29..59dc808 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -146,8 +146,19 @@ void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr)
}
EXPORT_SYMBOL_GPL(bus_remove_file);
+static void bus_release(struct kobject *kobj)
+{
+ struct subsys_private *priv =
+ container_of(kobj, typeof(*priv), subsys.kobj);
+ struct bus_type *bus = priv->bus;
+
+ kfree(priv);
+ bus->p = NULL;
+}
+
static struct kobj_type bus_ktype = {
.sysfs_ops = &bus_sysfs_ops,
+ .release = bus_release,
};
static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
@@ -953,8 +964,6 @@ void bus_unregister(struct bus_type *bus)
kset_unregister(bus->p->devices_kset);
bus_remove_file(bus, &bus_attr_uevent);
kset_unregister(&bus->p->subsys);
- kfree(bus->p);
- bus->p = NULL;
}
EXPORT_SYMBOL_GPL(bus_unregister);
diff --git a/drivers/base/component.c b/drivers/base/component.c
new file mode 100644
index 0000000..c53efe6
--- /dev/null
+++ b/drivers/base/component.c
@@ -0,0 +1,382 @@
+/*
+ * Componentized device handling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is work in progress. We gather up the component devices into a list,
+ * and bind them when instructed. At the moment, we're specific to the DRM
+ * subsystem, and only handle one master device, but this doesn't have to be
+ * the case.
+ */
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+struct master {
+ struct list_head node;
+ struct list_head components;
+ bool bound;
+
+ const struct component_master_ops *ops;
+ struct device *dev;
+};
+
+struct component {
+ struct list_head node;
+ struct list_head master_node;
+ struct master *master;
+ bool bound;
+
+ const struct component_ops *ops;
+ struct device *dev;
+};
+
+static DEFINE_MUTEX(component_mutex);
+static LIST_HEAD(component_list);
+static LIST_HEAD(masters);
+
+static struct master *__master_find(struct device *dev,
+ const struct component_master_ops *ops)
+{
+ struct master *m;
+
+ list_for_each_entry(m, &masters, node)
+ if (m->dev == dev && (!ops || m->ops == ops))
+ return m;
+
+ return NULL;
+}
+
+/* Attach an unattached component to a master. */
+static void component_attach_master(struct master *master, struct component *c)
+{
+ c->master = master;
+
+ list_add_tail(&c->master_node, &master->components);
+}
+
+/* Detach a component from a master. */
+static void component_detach_master(struct master *master, struct component *c)
+{
+ list_del(&c->master_node);
+
+ c->master = NULL;
+}
+
+int component_master_add_child(struct master *master,
+ int (*compare)(struct device *, void *), void *compare_data)
+{
+ struct component *c;
+ int ret = -ENXIO;
+
+ list_for_each_entry(c, &component_list, node) {
+ if (c->master)
+ continue;
+
+ if (compare(c->dev, compare_data)) {
+ component_attach_master(master, c);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(component_master_add_child);
+
+/* Detach all attached components from this master */
+static void master_remove_components(struct master *master)
+{
+ while (!list_empty(&master->components)) {
+ struct component *c = list_first_entry(&master->components,
+ struct component, master_node);
+
+ WARN_ON(c->master != master);
+
+ component_detach_master(master, c);
+ }
+}
+
+/*
+ * Try to bring up a master. If component is NULL, we're interested in
+ * this master, otherwise it's a component which must be present to try
+ * and bring up the master.
+ *
+ * Returns 1 for successful bringup, 0 if not ready, or -ve errno.
+ */
+static int try_to_bring_up_master(struct master *master,
+ struct component *component)
+{
+ int ret = 0;
+
+ if (!master->bound) {
+ /*
+ * Search the list of components, looking for components that
+ * belong to this master, and attach them to the master.
+ */
+ if (master->ops->add_components(master->dev, master)) {
+ /* Failed to find all components */
+ master_remove_components(master);
+ ret = 0;
+ goto out;
+ }
+
+ if (component && component->master != master) {
+ master_remove_components(master);
+ ret = 0;
+ goto out;
+ }
+
+ /* Found all components */
+ ret = master->ops->bind(master->dev);
+ if (ret < 0) {
+ master_remove_components(master);
+ goto out;
+ }
+
+ master->bound = true;
+ ret = 1;
+ }
+out:
+
+ return ret;
+}
+
+static int try_to_bring_up_masters(struct component *component)
+{
+ struct master *m;
+ int ret = 0;
+
+ list_for_each_entry(m, &masters, node) {
+ ret = try_to_bring_up_master(m, component);
+ if (ret != 0)
+ break;
+ }
+
+ return ret;
+}
+
+static void take_down_master(struct master *master)
+{
+ if (master->bound) {
+ master->ops->unbind(master->dev);
+ master->bound = false;
+ }
+
+ master_remove_components(master);
+}
+
+int component_master_add(struct device *dev,
+ const struct component_master_ops *ops)
+{
+ struct master *master;
+ int ret;
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->dev = dev;
+ master->ops = ops;
+ INIT_LIST_HEAD(&master->components);
+
+ /* Add to the list of available masters. */
+ mutex_lock(&component_mutex);
+ list_add(&master->node, &masters);
+
+ ret = try_to_bring_up_master(master, NULL);
+
+ if (ret < 0) {
+ /* Delete off the list if we weren't successful */
+ list_del(&master->node);
+ kfree(master);
+ }
+ mutex_unlock(&component_mutex);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(component_master_add);
+
+void component_master_del(struct device *dev,
+ const struct component_master_ops *ops)
+{
+ struct master *master;
+
+ mutex_lock(&component_mutex);
+ master = __master_find(dev, ops);
+ if (master) {
+ take_down_master(master);
+
+ list_del(&master->node);
+ kfree(master);
+ }
+ mutex_unlock(&component_mutex);
+}
+EXPORT_SYMBOL_GPL(component_master_del);
+
+static void component_unbind(struct component *component,
+ struct master *master, void *data)
+{
+ WARN_ON(!component->bound);
+
+ component->ops->unbind(component->dev, master->dev, data);
+ component->bound = false;
+
+ /* Release all resources claimed in the binding of this component */
+ devres_release_group(component->dev, component);
+}
+
+void component_unbind_all(struct device *master_dev, void *data)
+{
+ struct master *master;
+ struct component *c;
+
+ WARN_ON(!mutex_is_locked(&component_mutex));
+
+ master = __master_find(master_dev, NULL);
+ if (!master)
+ return;
+
+ list_for_each_entry_reverse(c, &master->components, master_node)
+ component_unbind(c, master, data);
+}
+EXPORT_SYMBOL_GPL(component_unbind_all);
+
+static int component_bind(struct component *component, struct master *master,
+ void *data)
+{
+ int ret;
+
+ /*
+ * Each component initialises inside its own devres group.
+ * This allows us to roll back a failed component without
+ * affecting anything else.
+ */
+ if (!devres_open_group(master->dev, NULL, GFP_KERNEL))
+ return -ENOMEM;
+
+ /*
+ * Also open a group for the device itself: this allows us
+ * to release the resources claimed against the sub-device
+ * at the appropriate moment.
+ */
+ if (!devres_open_group(component->dev, component, GFP_KERNEL)) {
+ devres_release_group(master->dev, NULL);
+ return -ENOMEM;
+ }
+
+ dev_dbg(master->dev, "binding %s (ops %ps)\n",
+ dev_name(component->dev), component->ops);
+
+ ret = component->ops->bind(component->dev, master->dev, data);
+ if (!ret) {
+ component->bound = true;
+
+ /*
+ * Close the component device's group so that resources
+ * allocated in the binding are encapsulated for removal
+ * at unbind. Remove the group on the DRM device as we
+ * can clean those resources up independently.
+ */
+ devres_close_group(component->dev, NULL);
+ devres_remove_group(master->dev, NULL);
+
+ dev_info(master->dev, "bound %s (ops %ps)\n",
+ dev_name(component->dev), component->ops);
+ } else {
+ devres_release_group(component->dev, NULL);
+ devres_release_group(master->dev, NULL);
+
+ dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
+ dev_name(component->dev), component->ops, ret);
+ }
+
+ return ret;
+}
+
+int component_bind_all(struct device *master_dev, void *data)
+{
+ struct master *master;
+ struct component *c;
+ int ret = 0;
+
+ WARN_ON(!mutex_is_locked(&component_mutex));
+
+ master = __master_find(master_dev, NULL);
+ if (!master)
+ return -EINVAL;
+
+ list_for_each_entry(c, &master->components, master_node) {
+ ret = component_bind(c, master, data);
+ if (ret)
+ break;
+ }
+
+ if (ret != 0) {
+ list_for_each_entry_continue_reverse(c, &master->components,
+ master_node)
+ component_unbind(c, master, data);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(component_bind_all);
+
+int component_add(struct device *dev, const struct component_ops *ops)
+{
+ struct component *component;
+ int ret;
+
+ component = kzalloc(sizeof(*component), GFP_KERNEL);
+ if (!component)
+ return -ENOMEM;
+
+ component->ops = ops;
+ component->dev = dev;
+
+ dev_dbg(dev, "adding component (ops %ps)\n", ops);
+
+ mutex_lock(&component_mutex);
+ list_add_tail(&component->node, &component_list);
+
+ ret = try_to_bring_up_masters(component);
+ if (ret < 0) {
+ list_del(&component->node);
+
+ kfree(component);
+ }
+ mutex_unlock(&component_mutex);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(component_add);
+
+void component_del(struct device *dev, const struct component_ops *ops)
+{
+ struct component *c, *component = NULL;
+
+ mutex_lock(&component_mutex);
+ list_for_each_entry(c, &component_list, node)
+ if (c->dev == dev && c->ops == ops) {
+ list_del(&c->node);
+ component = c;
+ break;
+ }
+
+ if (component && component->master)
+ take_down_master(component->master);
+
+ mutex_unlock(&component_mutex);
+
+ WARN_ON(!component);
+ kfree(component);
+}
+EXPORT_SYMBOL_GPL(component_del);
+
+MODULE_LICENSE("GPL v2");
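For a driver author the new framework has two halves: each sub-device registers itself with component_add(), and the aggregate driver registers a master with component_master_add(); the master's bind() runs only once add_components() has located every expected child, and inside bind() the master calls component_bind_all() (the framework already holds its internal mutex at that point). A hedged sketch of both sides, using hypothetical example_* names and a device-tree node pointer as the match key:

    #include <linux/component.h>
    #include <linux/device.h>
    #include <linux/of.h>

    /* Set by the master from its DT bindings; purely illustrative. */
    static struct device_node *example_child_node;

    /* ---- component (child) side ---- */

    static int child_bind(struct device *dev, struct device *master, void *data)
    {
        /* claim resources against the master device here */
        return 0;
    }

    static void child_unbind(struct device *dev, struct device *master, void *data)
    {
        /* undo child_bind() */
    }

    static const struct component_ops child_component_ops = {
        .bind   = child_bind,
        .unbind = child_unbind,
    };

    static int child_register(struct device *dev)
    {
        return component_add(dev, &child_component_ops);    /* from probe() */
    }

    /* ---- master side ---- */

    static int match_child(struct device *dev, void *data)
    {
        return dev->of_node == data;
    }

    static int master_add_components(struct device *dev, struct master *m)
    {
        /* one call per expected child; -ENXIO means it has not shown up yet */
        return component_master_add_child(m, match_child, example_child_node);
    }

    static int master_bind(struct device *dev)
    {
        return component_bind_all(dev, NULL);   /* binds every collected child */
    }

    static void master_unbind(struct device *dev)
    {
        component_unbind_all(dev, NULL);
    }

    static const struct component_master_ops example_master_ops = {
        .add_components = master_add_components,
        .bind           = master_bind,
        .unbind         = master_unbind,
    };

    static int master_register(struct device *dev)
    {
        return component_master_add(dev, &example_master_ops);   /* from probe() */
    }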
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 67b180d..2b56717 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -491,11 +491,13 @@ static int device_add_attrs(struct device *dev)
if (device_supports_offline(dev) && !dev->offline_disabled) {
error = device_create_file(dev, &dev_attr_online);
if (error)
- goto err_remove_type_groups;
+ goto err_remove_dev_groups;
}
return 0;
+ err_remove_dev_groups:
+ device_remove_groups(dev, dev->groups);
err_remove_type_groups:
if (type)
device_remove_groups(dev, type->groups);
@@ -1603,6 +1605,7 @@ device_create_groups_vargs(struct class *class, struct device *parent,
goto error;
}
+ device_initialize(dev);
dev->devt = devt;
dev->class = class;
dev->parent = parent;
@@ -1614,7 +1617,7 @@ device_create_groups_vargs(struct class *class, struct device *parent,
if (retval)
goto error;
- retval = device_register(dev);
+ retval = device_add(dev);
if (retval)
goto error;
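Switching device_create_groups_vargs() from device_register() to an explicit device_initialize() plus device_add() means the reference count is live before any name or attribute setup, so every failure path can simply put_device() and let the release callback clean up. The same two-step pattern for a driver-private device, with a hypothetical struct foo_device and release function:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    struct foo_device {
        struct device dev;
        int id;
    };

    static void foo_dev_release(struct device *dev)
    {
        kfree(container_of(dev, struct foo_device, dev));
    }

    static struct foo_device *foo_device_create(struct device *parent, int id)
    {
        struct foo_device *foo;
        int err;

        foo = kzalloc(sizeof(*foo), GFP_KERNEL);
        if (!foo)
            return ERR_PTR(-ENOMEM);

        device_initialize(&foo->dev);   /* refcounting is live from here on */
        foo->dev.parent = parent;
        foo->dev.release = foo_dev_release;
        foo->id = id;

        err = dev_set_name(&foo->dev, "foo%d", id);
        if (err)
            goto err_put;

        err = device_add(&foo->dev);    /* device_register() == initialize + add */
        if (err)
            goto err_put;

        return foo;

    err_put:
        /* after device_initialize(), never kfree() directly */
        put_device(&foo->dev);
        return ERR_PTR(err);
    }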
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 0f38201..25798db 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -299,7 +299,7 @@ static int handle_remove(const char *nodename, struct device *dev)
{
struct path parent;
struct dentry *dentry;
- int deleted = 1;
+ int deleted = 0;
int err;
dentry = kern_path_locked(nodename, &parent);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index eb8fb94..8a97ddf 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -96,6 +96,15 @@ static inline long firmware_loading_timeout(void)
return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
}
+/* firmware behavior options */
+#define FW_OPT_UEVENT (1U << 0)
+#define FW_OPT_NOWAIT (1U << 1)
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+#define FW_OPT_FALLBACK (1U << 2)
+#else
+#define FW_OPT_FALLBACK 0
+#endif
+
struct firmware_cache {
/* firmware_buf instance will be added into the below list */
spinlock_t lock;
@@ -219,6 +228,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name,
}
static void __fw_free_buf(struct kref *ref)
+ __releases(&fwc->lock)
{
struct firmware_buf *buf = to_fwbuf(ref);
struct firmware_cache *fwc = buf->fwc;
@@ -270,21 +280,21 @@ module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
/* Don't inline this: 'struct kstat' is biggish */
-static noinline_for_stack long fw_file_size(struct file *file)
+static noinline_for_stack int fw_file_size(struct file *file)
{
struct kstat st;
if (vfs_getattr(&file->f_path, &st))
return -1;
if (!S_ISREG(st.mode))
return -1;
- if (st.size != (long)st.size)
+ if (st.size != (int)st.size)
return -1;
return st.size;
}
static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
{
- long size;
+ int size;
char *buf;
int rc;
@@ -820,7 +830,7 @@ static void firmware_class_timeout_work(struct work_struct *work)
static struct firmware_priv *
fw_create_instance(struct firmware *firmware, const char *fw_name,
- struct device *device, bool uevent, bool nowait)
+ struct device *device, unsigned int opt_flags)
{
struct firmware_priv *fw_priv;
struct device *f_dev;
@@ -832,7 +842,7 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
goto exit;
}
- fw_priv->nowait = nowait;
+ fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
fw_priv->fw = firmware;
INIT_DELAYED_WORK(&fw_priv->timeout_work,
firmware_class_timeout_work);
@@ -848,8 +858,8 @@ exit:
}
/* load a firmware via user helper */
-static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
- long timeout)
+static int _request_firmware_load(struct firmware_priv *fw_priv,
+ unsigned int opt_flags, long timeout)
{
int retval = 0;
struct device *f_dev = &fw_priv->dev;
@@ -885,7 +895,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
goto err_del_bin_attr;
}
- if (uevent) {
+ if (opt_flags & FW_OPT_UEVENT) {
buf->need_uevent = true;
dev_set_uevent_suppress(f_dev, false);
dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
@@ -911,16 +921,16 @@ err_put_dev:
static int fw_load_from_user_helper(struct firmware *firmware,
const char *name, struct device *device,
- bool uevent, bool nowait, long timeout)
+ unsigned int opt_flags, long timeout)
{
struct firmware_priv *fw_priv;
- fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
+ fw_priv = fw_create_instance(firmware, name, device, opt_flags);
if (IS_ERR(fw_priv))
return PTR_ERR(fw_priv);
fw_priv->buf = firmware->priv;
- return _request_firmware_load(fw_priv, uevent, timeout);
+ return _request_firmware_load(fw_priv, opt_flags, timeout);
}
#ifdef CONFIG_PM_SLEEP
@@ -942,7 +952,7 @@ static void kill_requests_without_uevent(void)
#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int
fw_load_from_user_helper(struct firmware *firmware, const char *name,
- struct device *device, bool uevent, bool nowait,
+ struct device *device, unsigned int opt_flags,
long timeout)
{
return -ENOENT;
@@ -1023,7 +1033,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
}
static int assign_firmware_buf(struct firmware *fw, struct device *device,
- bool skip_cache)
+ unsigned int opt_flags)
{
struct firmware_buf *buf = fw->priv;
@@ -1040,7 +1050,8 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
* device may have been deleted already, but the problem
* should be fixed in devres or driver core.
*/
- if (device && !skip_cache)
+ /* don't cache firmware handled without uevent */
+ if (device && (opt_flags & FW_OPT_UEVENT))
fw_add_devm_name(device, buf->fw_id);
/*
@@ -1061,7 +1072,7 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
- struct device *device, bool uevent, bool nowait)
+ struct device *device, unsigned int opt_flags)
{
struct firmware *fw;
long timeout;
@@ -1076,7 +1087,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
ret = 0;
timeout = firmware_loading_timeout();
- if (nowait) {
+ if (opt_flags & FW_OPT_NOWAIT) {
timeout = usermodehelper_read_lock_wait(timeout);
if (!timeout) {
dev_dbg(device, "firmware: %s loading timed out\n",
@@ -1095,16 +1106,18 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
ret = fw_get_filesystem_firmware(device, fw->priv);
if (ret) {
- dev_warn(device, "Direct firmware load failed with error %d\n",
- ret);
- dev_warn(device, "Falling back to user helper\n");
- ret = fw_load_from_user_helper(fw, name, device,
- uevent, nowait, timeout);
+ if (opt_flags & FW_OPT_FALLBACK) {
+ dev_warn(device,
+ "Direct firmware load failed with error %d\n",
+ ret);
+ dev_warn(device, "Falling back to user helper\n");
+ ret = fw_load_from_user_helper(fw, name, device,
+ opt_flags, timeout);
+ }
}
- /* don't cache firmware handled without uevent */
if (!ret)
- ret = assign_firmware_buf(fw, device, !uevent);
+ ret = assign_firmware_buf(fw, device, opt_flags);
usermodehelper_read_unlock();
@@ -1146,12 +1159,37 @@ request_firmware(const struct firmware **firmware_p, const char *name,
/* Need to pin this module until return */
__module_get(THIS_MODULE);
- ret = _request_firmware(firmware_p, name, device, true, false);
+ ret = _request_firmware(firmware_p, name, device,
+ FW_OPT_UEVENT | FW_OPT_FALLBACK);
module_put(THIS_MODULE);
return ret;
}
EXPORT_SYMBOL(request_firmware);
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+/**
+ * request_firmware_direct: - load firmware directly without usermode helper
+ * @firmware_p: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ *
+ * This function works pretty much like request_firmware(), but it doesn't
+ * fall back to the usermode helper even if the firmware couldn't be loaded
+ * directly from the filesystem. Hence it's useful for loading optional
+ * firmware images, which aren't always present, without udev's long timeouts.
+ **/
+int request_firmware_direct(const struct firmware **firmware_p,
+ const char *name, struct device *device)
+{
+ int ret;
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware_p, name, device, FW_OPT_UEVENT);
+ module_put(THIS_MODULE);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(request_firmware_direct);
+#endif
+
/**
* release_firmware: - release the resource associated with a firmware image
* @fw: firmware resource to release
@@ -1174,7 +1212,7 @@ struct firmware_work {
struct device *device;
void *context;
void (*cont)(const struct firmware *fw, void *context);
- bool uevent;
+ unsigned int opt_flags;
};
static void request_firmware_work_func(struct work_struct *work)
@@ -1185,7 +1223,7 @@ static void request_firmware_work_func(struct work_struct *work)
fw_work = container_of(work, struct firmware_work, work);
_request_firmware(&fw, fw_work->name, fw_work->device,
- fw_work->uevent, true);
+ fw_work->opt_flags);
fw_work->cont(fw, fw_work->context);
put_device(fw_work->device); /* taken in request_firmware_nowait() */
@@ -1233,7 +1271,8 @@ request_firmware_nowait(
fw_work->device = device;
fw_work->context = context;
fw_work->cont = cont;
- fw_work->uevent = uevent;
+ fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
+ (uevent ? FW_OPT_UEVENT : 0);
if (!try_module_get(module)) {
kfree(fw_work);
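request_firmware_direct() is meant for optional images: it behaves like request_firmware() but never falls back to the usermode helper, so a missing file fails quickly instead of stalling probe for the full loading timeout. A hedged sketch of a driver using it for an optional patch file; the firmware name is made up:

    #include <linux/device.h>
    #include <linux/firmware.h>

    static void example_load_optional_patch(struct device *dev)
    {
        const struct firmware *fw;
        int err;

        /* No usermode-helper fallback: absence is cheap to detect. */
        err = request_firmware_direct(&fw, "example/optional-patch.bin", dev);
        if (err) {
            dev_dbg(dev, "no optional patch available (%d)\n", err);
            return;
        }

        /* ... push fw->data / fw->size to the hardware ... */

        release_firmware(fw);
    }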
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index f370fc1..83a598e 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,4 +1,5 @@
#include <linux/module.h>
+
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
@@ -65,7 +66,7 @@ enum {
NULL_Q_MQ = 2,
};
-static int submit_queues = 1;
+static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
@@ -101,9 +102,9 @@ static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
-static bool use_per_node_hctx = true;
+static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
-MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
{
- return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
- hctx_index);
+ int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
+ int tip = (reg->nr_hw_queues % nr_online_nodes);
+ int node = 0, i, n;
+
+ /*
+ * Split submit queues evenly wrt the number of nodes. If uneven,
+ * fill the first buckets with one extra, until the rest is filled with
+ * no extra.
+ */
+ for (i = 0, n = 1; i < hctx_index; i++, n++) {
+ if (n % b_size == 0) {
+ n = 0;
+ node++;
+
+ tip--;
+ if (!tip)
+ b_size = reg->nr_hw_queues / nr_online_nodes;
+ }
+ }
+
+ /*
+ * A node might not be online, so map the relative node id to the
+ * real node id.
+ */
+ for_each_online_node(n) {
+ if (!node)
+ break;
+ node--;
+ }
+
+ return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
}
static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
@@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
kfree(hctx);
}
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+ BUG_ON(!nullb);
+ BUG_ON(!nq);
+
+ init_waitqueue_head(&nq->wait);
+ nq->queue_depth = nullb->queue_depth;
+}
+
static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int index)
{
struct nullb *nullb = data;
struct nullb_queue *nq = &nullb->queues[index];
- init_waitqueue_head(&nq->wait);
- nq->queue_depth = nullb->queue_depth;
- nullb->nr_queues++;
hctx->driver_data = nq;
+ null_init_queue(nullb, nq);
+ nullb->nr_queues++;
return 0;
}
@@ -387,10 +425,7 @@ static void null_del_dev(struct nullb *nullb)
list_del_init(&nullb->list);
del_gendisk(nullb->disk);
- if (queue_mode == NULL_Q_MQ)
- blk_mq_free_queue(nullb->q);
- else
- blk_cleanup_queue(nullb->q);
+ blk_cleanup_queue(nullb->q);
put_disk(nullb->disk);
kfree(nullb);
}
@@ -417,13 +452,13 @@ static int setup_commands(struct nullb_queue *nq)
nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
if (!nq->cmds)
- return 1;
+ return -ENOMEM;
tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
if (!nq->tag_map) {
kfree(nq->cmds);
- return 1;
+ return -ENOMEM;
}
for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +489,37 @@ static void cleanup_queues(struct nullb *nullb)
static int setup_queues(struct nullb *nullb)
{
- struct nullb_queue *nq;
- int i;
-
- nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+ nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
+ GFP_KERNEL);
if (!nullb->queues)
- return 1;
+ return -ENOMEM;
nullb->nr_queues = 0;
nullb->queue_depth = hw_queue_depth;
- if (queue_mode == NULL_Q_MQ)
- return 0;
+ return 0;
+}
+
+static int init_driver_queues(struct nullb *nullb)
+{
+ struct nullb_queue *nq;
+ int i, ret = 0;
for (i = 0; i < submit_queues; i++) {
nq = &nullb->queues[i];
- init_waitqueue_head(&nq->wait);
- nq->queue_depth = hw_queue_depth;
- if (setup_commands(nq))
- break;
+
+ null_init_queue(nullb, nq);
+
+ ret = setup_commands(nq);
+ if (ret)
+ goto err_queue;
nullb->nr_queues++;
}
- if (i == submit_queues)
- return 0;
-
+ return 0;
+err_queue:
cleanup_queues(nullb);
- return 1;
+ return ret;
}
static int null_add_dev(void)
@@ -518,11 +557,13 @@ static int null_add_dev(void)
} else if (queue_mode == NULL_Q_BIO) {
nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
blk_queue_make_request(nullb->q, null_queue_bio);
+ init_driver_queues(nullb);
} else {
nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
if (nullb->q)
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+ init_driver_queues(nullb);
}
if (!nullb->q)
@@ -534,10 +575,7 @@ static int null_add_dev(void)
disk = nullb->disk = alloc_disk_node(1, home_node);
if (!disk) {
queue_fail:
- if (queue_mode == NULL_Q_MQ)
- blk_mq_free_queue(nullb->q);
- else
- blk_cleanup_queue(nullb->q);
+ blk_cleanup_queue(nullb->q);
cleanup_queues(nullb);
err:
kfree(nullb);
@@ -579,7 +617,13 @@ static int __init null_init(void)
}
#endif
- if (submit_queues > nr_cpu_ids)
+ if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
+ if (submit_queues < nr_online_nodes) {
+ pr_warn("null_blk: submit_queues param is set to %u.",
+ nr_online_nodes);
+ submit_queues = nr_online_nodes;
+ }
+ } else if (submit_queues > nr_cpu_ids)
submit_queues = nr_cpu_ids;
else if (!submit_queues)
submit_queues = 1;
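The per-node path above distributes submit_queues across the online nodes as evenly as possible, handing the first (nr_hw_queues % nr_nodes) nodes one extra queue, and only then maps the relative node index to a real online node id. The distribution itself can be stated more directly than the driver's incremental loop; a simplified stand-alone sketch of the arithmetic, not the driver's code:

    #include <stdio.h>

    /* Number of hardware queues that land on a given node when nr_queues
     * is split as evenly as possible across nr_nodes. */
    static int queues_on_node(int nr_queues, int nr_nodes, int node)
    {
        int base = nr_queues / nr_nodes;
        int extra = nr_queues % nr_nodes;

        return base + (node < extra ? 1 : 0);
    }

    int main(void)
    {
        int node;

        /* e.g. 10 submit queues over 4 nodes -> 3, 3, 2, 2 */
        for (node = 0; node < 4; node++)
            printf("node %d: %d queues\n", node, queues_on_node(10, 4, node));
        return 0;
    }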
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 9199c93..eb6e1e0 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
}
}
-const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
+static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
switch (state) {
case SKD_MSG_STATE_IDLE:
@@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
}
}
-const char *skd_skreq_state_to_str(enum skd_req_state state)
+static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
switch (state) {
case SKD_REQ_STATE_IDLE:
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 5a95baf..27de5046 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -43,9 +43,6 @@
#include <linux/zorro.h>
-extern int m68k_realnum_memory;
-extern struct mem_info m68k_memory[NUM_MEMINFO];
-
#define Z2MINOR_COMBINED (0)
#define Z2MINOR_Z2ONLY (1)
#define Z2MINOR_CHIPONLY (2)
@@ -116,8 +113,8 @@ get_z2ram( void )
if ( test_bit( i, zorro_unused_z2ram ) )
{
z2_count++;
- z2ram_map[ z2ram_size++ ] =
- ZTWO_VADDR( Z2RAM_START ) + ( i << Z2RAM_CHUNKSHIFT );
+ z2ram_map[z2ram_size++] = (unsigned long)ZTWO_VADDR(Z2RAM_START) +
+ (i << Z2RAM_CHUNKSHIFT);
clear_bit( i, zorro_unused_z2ram );
}
}
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 6bfc1bb..dceb85f 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0CF3, 0xE004) },
{ USB_DEVICE(0x0CF3, 0xE005) },
{ USB_DEVICE(0x0930, 0x0219) },
+ { USB_DEVICE(0x0930, 0x0220) },
{ USB_DEVICE(0x0489, 0xe057) },
{ USB_DEVICE(0x13d3, 0x3393) },
{ USB_DEVICE(0x0489, 0xe04e) },
@@ -129,6 +130,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c0ff34f..3980fd1 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -154,6 +154,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index d79d692..896413b 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -735,7 +735,7 @@ static struct pci_device_id agp_amd64_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
-static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
+static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
{ PCI_DEVICE_CLASS(0, 0) },
{ }
};
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index e6939e1..e210f85 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -1,12 +1,11 @@
/*
* i8k.c -- Linux driver for accessing the SMM BIOS on Dell laptops.
- * See http://www.debian.org/~dz/i8k/ for more information
- * and for latest version of this driver.
*
* Copyright (C) 2001 Massimo Dal Zotto <dz@debian.org>
*
* Hwmon integration:
* Copyright (C) 2011 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2013 Guenter Roeck <linux@roeck-us.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -19,6 +18,8 @@
* General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -29,13 +30,12 @@
#include <linux/mutex.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/sched.h>
#include <linux/i8k.h>
-#define I8K_VERSION "1.14 21/02/2005"
-
#define I8K_SMM_FN_STATUS 0x0025
#define I8K_SMM_POWER_STATUS 0x0069
#define I8K_SMM_SET_FAN 0x01a3
@@ -44,7 +44,6 @@
#define I8K_SMM_GET_TEMP 0x10a3
#define I8K_SMM_GET_DELL_SIG1 0xfea3
#define I8K_SMM_GET_DELL_SIG2 0xffa3
-#define I8K_SMM_BIOS_VERSION 0x00a6
#define I8K_FAN_MULT 30
#define I8K_MAX_TEMP 127
@@ -64,6 +63,15 @@
static DEFINE_MUTEX(i8k_mutex);
static char bios_version[4];
static struct device *i8k_hwmon_dev;
+static u32 i8k_hwmon_flags;
+static int i8k_fan_mult;
+
+#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
+#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
+#define I8K_HWMON_HAVE_TEMP3 (1 << 2)
+#define I8K_HWMON_HAVE_TEMP4 (1 << 3)
+#define I8K_HWMON_HAVE_FAN1 (1 << 4)
+#define I8K_HWMON_HAVE_FAN2 (1 << 5)
MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops");
@@ -103,11 +111,11 @@ static const struct file_operations i8k_fops = {
struct smm_regs {
unsigned int eax;
- unsigned int ebx __attribute__ ((packed));
- unsigned int ecx __attribute__ ((packed));
- unsigned int edx __attribute__ ((packed));
- unsigned int esi __attribute__ ((packed));
- unsigned int edi __attribute__ ((packed));
+ unsigned int ebx __packed;
+ unsigned int ecx __packed;
+ unsigned int edx __packed;
+ unsigned int esi __packed;
+ unsigned int edi __packed;
};
static inline const char *i8k_get_dmi_data(int field)
@@ -124,6 +132,17 @@ static int i8k_smm(struct smm_regs *regs)
{
int rc;
int eax = regs->eax;
+ cpumask_var_t old_mask;
+
+ /* SMM requires CPU 0 */
+ if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_copy(old_mask, &current->cpus_allowed);
+ set_cpus_allowed_ptr(current, cpumask_of(0));
+ if (smp_processor_id() != 0) {
+ rc = -EBUSY;
+ goto out;
+ }
#if defined(CONFIG_X86_64)
asm volatile("pushq %%rax\n\t"
@@ -148,7 +167,7 @@ static int i8k_smm(struct smm_regs *regs)
"pushfq\n\t"
"popq %%rax\n\t"
"andl $1,%%eax\n"
- :"=a"(rc)
+ : "=a"(rc)
: "a"(regs)
: "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
#else
@@ -174,25 +193,17 @@ static int i8k_smm(struct smm_regs *regs)
"lahf\n\t"
"shrl $8,%%eax\n\t"
"andl $1,%%eax\n"
- :"=a"(rc)
+ : "=a"(rc)
: "a"(regs)
: "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
#endif
if (rc != 0 || (regs->eax & 0xffff) == 0xffff || regs->eax == eax)
- return -EINVAL;
+ rc = -EINVAL;
- return 0;
-}
-
-/*
- * Read the bios version. Return the version as an integer corresponding
- * to the ascii value, for example "A17" is returned as 0x00413137.
- */
-static int i8k_get_bios_version(void)
-{
- struct smm_regs regs = { .eax = I8K_SMM_BIOS_VERSION, };
-
- return i8k_smm(&regs) ? : regs.eax;
+out:
+ set_cpus_allowed_ptr(current, old_mask);
+ free_cpumask_var(old_mask);
+ return rc;
}
/*
@@ -203,7 +214,8 @@ static int i8k_get_fn_status(void)
struct smm_regs regs = { .eax = I8K_SMM_FN_STATUS, };
int rc;
- if ((rc = i8k_smm(&regs)) < 0)
+ rc = i8k_smm(&regs);
+ if (rc < 0)
return rc;
switch ((regs.eax >> I8K_FN_SHIFT) & I8K_FN_MASK) {
@@ -226,7 +238,8 @@ static int i8k_get_power_status(void)
struct smm_regs regs = { .eax = I8K_SMM_POWER_STATUS, };
int rc;
- if ((rc = i8k_smm(&regs)) < 0)
+ rc = i8k_smm(&regs);
+ if (rc < 0)
return rc;
return (regs.eax & 0xff) == I8K_POWER_AC ? I8K_AC : I8K_BATTERY;
@@ -251,7 +264,7 @@ static int i8k_get_fan_speed(int fan)
struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
regs.ebx = fan & 0xff;
- return i8k_smm(&regs) ? : (regs.eax & 0xffff) * fan_mult;
+ return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
}
/*
@@ -277,10 +290,11 @@ static int i8k_get_temp(int sensor)
int temp;
#ifdef I8K_TEMPERATURE_BUG
- static int prev;
+ static int prev[4];
#endif
regs.ebx = sensor & 0xff;
- if ((rc = i8k_smm(&regs)) < 0)
+ rc = i8k_smm(&regs);
+ if (rc < 0)
return rc;
temp = regs.eax & 0xff;
@@ -294,10 +308,10 @@ static int i8k_get_temp(int sensor)
# 1003655139 00000054 00005c52
*/
if (temp > I8K_MAX_TEMP) {
- temp = prev;
- prev = I8K_MAX_TEMP;
+ temp = prev[sensor];
+ prev[sensor] = I8K_MAX_TEMP;
} else {
- prev = temp;
+ prev[sensor] = temp;
}
#endif
@@ -309,7 +323,8 @@ static int i8k_get_dell_signature(int req_fn)
struct smm_regs regs = { .eax = req_fn, };
int rc;
- if ((rc = i8k_smm(&regs)) < 0)
+ rc = i8k_smm(&regs);
+ if (rc < 0)
return rc;
return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1;
@@ -328,12 +343,14 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case I8K_BIOS_VERSION:
- val = i8k_get_bios_version();
+ val = (bios_version[0] << 16) |
+ (bios_version[1] << 8) | bios_version[2];
break;
case I8K_MACHINE_ID:
memset(buff, 0, 16);
- strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), sizeof(buff));
+ strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+ sizeof(buff));
break;
case I8K_FN_STATUS:
@@ -470,12 +487,13 @@ static ssize_t i8k_hwmon_show_temp(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
- int cpu_temp;
+ int index = to_sensor_dev_attr(devattr)->index;
+ int temp;
- cpu_temp = i8k_get_temp(0);
- if (cpu_temp < 0)
- return cpu_temp;
- return sprintf(buf, "%d\n", cpu_temp * 1000);
+ temp = i8k_get_temp(index);
+ if (temp < 0)
+ return temp;
+ return sprintf(buf, "%d\n", temp * 1000);
}
static ssize_t i8k_hwmon_show_fan(struct device *dev,
@@ -491,12 +509,44 @@ static ssize_t i8k_hwmon_show_fan(struct device *dev,
return sprintf(buf, "%d\n", fan_speed);
}
+static ssize_t i8k_hwmon_show_pwm(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ int status;
+
+ status = i8k_get_fan_status(index);
+ if (status < 0)
+ return -EIO;
+ return sprintf(buf, "%d\n", clamp_val(status * 128, 0, 255));
+}
+
+static ssize_t i8k_hwmon_set_pwm(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int index = to_sensor_dev_attr(attr)->index;
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 128), 0, 2);
+
+ mutex_lock(&i8k_mutex);
+ err = i8k_set_fan(index, val);
+ mutex_unlock(&i8k_mutex);
+
+ return err < 0 ? -EIO : count;
+}
+
static ssize_t i8k_hwmon_show_label(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
- static const char *labels[4] = {
- "i8k",
+ static const char *labels[3] = {
"CPU",
"Left Fan",
"Right Fan",
@@ -506,108 +556,108 @@ static ssize_t i8k_hwmon_show_label(struct device *dev,
return sprintf(buf, "%s\n", labels[index]);
}
-static DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 3);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
I8K_FAN_LEFT);
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
+ i8k_hwmon_set_pwm, I8K_FAN_LEFT);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
I8K_FAN_RIGHT);
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, i8k_hwmon_show_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
+ i8k_hwmon_set_pwm, I8K_FAN_RIGHT);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_label, NULL, 2);
+
+static struct attribute *i8k_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr, /* 0 */
+ &sensor_dev_attr_temp1_label.dev_attr.attr, /* 1 */
+ &sensor_dev_attr_temp2_input.dev_attr.attr, /* 2 */
+ &sensor_dev_attr_temp3_input.dev_attr.attr, /* 3 */
+ &sensor_dev_attr_temp4_input.dev_attr.attr, /* 4 */
+ &sensor_dev_attr_fan1_input.dev_attr.attr, /* 5 */
+ &sensor_dev_attr_pwm1.dev_attr.attr, /* 6 */
+ &sensor_dev_attr_fan1_label.dev_attr.attr, /* 7 */
+ &sensor_dev_attr_fan2_input.dev_attr.attr, /* 8 */
+ &sensor_dev_attr_pwm2.dev_attr.attr, /* 9 */
+ &sensor_dev_attr_fan2_label.dev_attr.attr, /* 10 */
+ NULL
+};
-static void i8k_hwmon_remove_files(struct device *dev)
+static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
+ int index)
{
- device_remove_file(dev, &dev_attr_temp1_input);
- device_remove_file(dev, &sensor_dev_attr_fan1_input.dev_attr);
- device_remove_file(dev, &sensor_dev_attr_fan2_input.dev_attr);
- device_remove_file(dev, &sensor_dev_attr_temp1_label.dev_attr);
- device_remove_file(dev, &sensor_dev_attr_fan1_label.dev_attr);
- device_remove_file(dev, &sensor_dev_attr_fan2_label.dev_attr);
- device_remove_file(dev, &sensor_dev_attr_name.dev_attr);
+ if ((index == 0 || index == 1) &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
+ return 0;
+ if (index == 2 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP2))
+ return 0;
+ if (index == 3 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP3))
+ return 0;
+ if (index == 4 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP4))
+ return 0;
+ if (index >= 5 && index <= 7 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN1))
+ return 0;
+ if (index >= 8 && index <= 10 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2))
+ return 0;
+
+ return attr->mode;
}
+static const struct attribute_group i8k_group = {
+ .attrs = i8k_attrs,
+ .is_visible = i8k_is_visible,
+};
+__ATTRIBUTE_GROUPS(i8k);
+
static int __init i8k_init_hwmon(void)
{
int err;
- i8k_hwmon_dev = hwmon_device_register(NULL);
- if (IS_ERR(i8k_hwmon_dev)) {
- err = PTR_ERR(i8k_hwmon_dev);
- i8k_hwmon_dev = NULL;
- printk(KERN_ERR "i8k: hwmon registration failed (%d)\n", err);
- return err;
- }
-
- /* Required name attribute */
- err = device_create_file(i8k_hwmon_dev,
- &sensor_dev_attr_name.dev_attr);
- if (err)
- goto exit_unregister;
+ i8k_hwmon_flags = 0;
/* CPU temperature attributes, if temperature reading is OK */
err = i8k_get_temp(0);
- if (err < 0) {
- dev_dbg(i8k_hwmon_dev,
- "Not creating temperature attributes (%d)\n", err);
- } else {
- err = device_create_file(i8k_hwmon_dev, &dev_attr_temp1_input);
- if (err)
- goto exit_remove_files;
- err = device_create_file(i8k_hwmon_dev,
- &sensor_dev_attr_temp1_label.dev_attr);
- if (err)
- goto exit_remove_files;
- }
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP1;
+ /* check for additional temperature sensors */
+ err = i8k_get_temp(1);
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP2;
+ err = i8k_get_temp(2);
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP3;
+ err = i8k_get_temp(3);
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
/* Left fan attributes, if left fan is present */
err = i8k_get_fan_status(I8K_FAN_LEFT);
- if (err < 0) {
- dev_dbg(i8k_hwmon_dev,
- "Not creating %s fan attributes (%d)\n", "left", err);
- } else {
- err = device_create_file(i8k_hwmon_dev,
- &sensor_dev_attr_fan1_input.dev_attr);
- if (err)
- goto exit_remove_files;
- err = device_create_file(i8k_hwmon_dev,
- &sensor_dev_attr_fan1_label.dev_attr);
- if (err)
- goto exit_remove_files;
- }
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
/* Right fan attributes, if right fan is present */
err = i8k_get_fan_status(I8K_FAN_RIGHT);
- if (err < 0) {
- dev_dbg(i8k_hwmon_dev,
- "Not creating %s fan attributes (%d)\n", "right", err);
- } else {
- err = device_create_file(i8k_hwmon_dev,
- &sensor_dev_attr_fan2_input.dev_attr);
- if (err)
- goto exit_remove_files;
- err = device_create_file(i8k_hwmon_dev,
- &sensor_dev_attr_fan2_label.dev_attr);
- if (err)
- goto exit_remove_files;
- }
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
+ i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "i8k", NULL,
+ i8k_groups);
+ if (IS_ERR(i8k_hwmon_dev)) {
+ err = PTR_ERR(i8k_hwmon_dev);
+ i8k_hwmon_dev = NULL;
+ pr_err("hwmon registration failed (%d)\n", err);
+ return err;
+ }
return 0;
-
- exit_remove_files:
- i8k_hwmon_remove_files(i8k_hwmon_dev);
- exit_unregister:
- hwmon_device_unregister(i8k_hwmon_dev);
- return err;
}
-static void __exit i8k_exit_hwmon(void)
-{
- i8k_hwmon_remove_files(i8k_hwmon_dev);
- hwmon_device_unregister(i8k_hwmon_dev);
-}
-
-static struct dmi_system_id __initdata i8k_dmi_table[] = {
+static struct dmi_system_id i8k_dmi_table[] __initdata = {
{
.ident = "Dell Inspiron",
.matches = {
@@ -671,7 +721,23 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
},
},
- { }
+ {
+ .ident = "Dell Studio",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Studio"),
+ },
+ .driver_data = (void *)1, /* fan multiplier override */
+ },
+ {
+ .ident = "Dell XPS M140",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MXC051"),
+ },
+ .driver_data = (void *)1, /* fan multiplier override */
+ },
+ { }
};
/*
@@ -679,8 +745,7 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
*/
static int __init i8k_probe(void)
{
- char buff[4];
- int version;
+ const struct dmi_system_id *id;
/*
* Get DMI information
@@ -689,49 +754,30 @@ static int __init i8k_probe(void)
if (!ignore_dmi && !force)
return -ENODEV;
- printk(KERN_INFO "i8k: not running on a supported Dell system.\n");
- printk(KERN_INFO "i8k: vendor=%s, model=%s, version=%s\n",
+ pr_info("not running on a supported Dell system.\n");
+ pr_info("vendor=%s, model=%s, version=%s\n",
i8k_get_dmi_data(DMI_SYS_VENDOR),
i8k_get_dmi_data(DMI_PRODUCT_NAME),
i8k_get_dmi_data(DMI_BIOS_VERSION));
}
- strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION), sizeof(bios_version));
+ strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
+ sizeof(bios_version));
/*
* Get SMM Dell signature
*/
if (i8k_get_dell_signature(I8K_SMM_GET_DELL_SIG1) &&
i8k_get_dell_signature(I8K_SMM_GET_DELL_SIG2)) {
- printk(KERN_ERR "i8k: unable to get SMM Dell signature\n");
+ pr_err("unable to get SMM Dell signature\n");
if (!force)
return -ENODEV;
}
- /*
- * Get SMM BIOS version.
- */
- version = i8k_get_bios_version();
- if (version <= 0) {
- printk(KERN_WARNING "i8k: unable to get SMM BIOS version\n");
- } else {
- buff[0] = (version >> 16) & 0xff;
- buff[1] = (version >> 8) & 0xff;
- buff[2] = (version) & 0xff;
- buff[3] = '\0';
- /*
- * If DMI BIOS version is unknown use SMM BIOS version.
- */
- if (!dmi_get_system_info(DMI_BIOS_VERSION))
- strlcpy(bios_version, buff, sizeof(bios_version));
-
- /*
- * Check if the two versions match.
- */
- if (strncmp(buff, bios_version, sizeof(bios_version)) != 0)
- printk(KERN_WARNING "i8k: BIOS version mismatch: %s != %s\n",
- buff, bios_version);
- }
+ i8k_fan_mult = fan_mult;
+ id = dmi_first_match(i8k_dmi_table);
+ if (id && fan_mult == I8K_FAN_MULT && id->driver_data)
+ i8k_fan_mult = (unsigned long)id->driver_data;
return 0;
}
@@ -754,10 +800,6 @@ static int __init i8k_init(void)
if (err)
goto exit_remove_proc;
- printk(KERN_INFO
- "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n",
- I8K_VERSION);
-
return 0;
exit_remove_proc:
@@ -767,7 +809,7 @@ static int __init i8k_init(void)
static void __exit i8k_exit(void)
{
- i8k_exit_hwmon();
+ hwmon_device_unregister(i8k_hwmon_dev);
remove_proc_entry("i8k", NULL);
}
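
The i8k hunks above replace per-attribute device_create_file() calls with a single attribute group, an is_visible() callback and hwmon_device_register_with_groups(). A minimal sketch of that registration pattern, assuming a hypothetical foo driver (every foo_* name is illustrative, not from the patch):

/* One attribute, exposed through a group whose visibility is decided once,
 * at registration time, instead of creating files by hand.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

static bool foo_has_temp;		/* probed elsewhere; assumed for the sketch */

static ssize_t foo_show_temp(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42000);	/* placeholder reading, millidegrees */
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, foo_show_temp, NULL, 0);

static struct attribute *foo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

static umode_t foo_is_visible(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return foo_has_temp ? attr->mode : 0;	/* hide what the hardware lacks */
}

static const struct attribute_group foo_group = {
	.attrs = foo_attrs,
	.is_visible = foo_is_visible,
};
__ATTRIBUTE_GROUPS(foo);		/* defines the NULL-terminated foo_groups[] */

/* Registration then becomes a single call; the core only creates the
 * attributes that is_visible() reports as visible:
 *
 *	hwmon_dev = hwmon_device_register_with_groups(parent, "foo",
 *						       NULL, foo_groups);
 */

The name string passed to the registration call is what replaces the hand-made "name" attribute that the old i8k code created and removed itself.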
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 0913d79..c4094c4 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -587,6 +587,8 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
return -ENODEV;
switch ( cmd ) {
case LPTIME:
+ if (arg > UINT_MAX / HZ)
+ return -EINVAL;
LP_TIME(minor) = arg * HZ/100;
break;
case LPCHAR:
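
The LPTIME check above rejects arguments whose conversion to ticks would overflow. A stand-alone sketch of the same guard, with HZ fixed at 100 purely for illustration:

#include <limits.h>
#include <stdio.h>

#define HZ 100	/* assumed tick rate for the sketch */

/* Convert hundredths of a second to ticks, refusing values for which
 * arg * HZ no longer fits in an unsigned int, mirroring the new ioctl check.
 */
static int hundredths_to_ticks(unsigned long arg, unsigned int *ticks)
{
	if (arg > UINT_MAX / HZ)
		return -1;
	*ticks = arg * HZ / 100;
	return 0;
}

int main(void)
{
	unsigned int t;

	if (hundredths_to_ticks(150, &t) == 0)
		printf("1.5 s -> %u ticks\n", t);	/* 150 ticks at HZ=100 */
	return 0;
}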
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index 1fd00dc0..76c490f 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -168,7 +168,10 @@ static irqreturn_t button_handler (int irq, void *dev_id)
static int button_read (struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
- interruptible_sleep_on (&button_wait_queue);
+ DEFINE_WAIT(wait);
+ prepare_to_wait(&button_wait_queue, &wait, TASK_INTERRUPTIBLE);
+ schedule();
+ finish_wait(&button_wait_queue, &wait);
return (copy_to_user (buffer, &button_output_buffer, bcount))
? -EFAULT : bcount;
}
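
The nwbutton hunk above open-codes prepare_to_wait()/schedule()/finish_wait() as a drop-in replacement for the racy interruptible_sleep_on(); when there is a testable wakeup condition, the usual form is wait_event_interruptible(). A minimal sketch with a hypothetical foo_data_ready flag, not what the patch itself uses:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(foo_wait_queue);
static int foo_data_ready;	/* set by the producer; assumed for the sketch */

static int foo_wait_for_data(void)
{
	/* Sleeps until foo_data_ready is non-zero or a signal arrives; the
	 * condition is re-checked after every wakeup, so there is no
	 * lost-wakeup window of the kind interruptible_sleep_on() had.
	 */
	return wait_event_interruptible(foo_wait_queue, foo_data_ready);
}

/* Producer side, e.g. from an interrupt handler:
 *
 *	foo_data_ready = 1;
 *	wake_up_interruptible(&foo_wait_queue);
 */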
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index d39cca6..8320abd 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2511,8 +2511,8 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
/* If port is closing, signal caller to try again */
if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
- if (port->flags & ASYNC_CLOSING)
- interruptible_sleep_on(&port->close_wait);
+ wait_event_interruptible_tty(tty, port->close_wait,
+ !(port->flags & ASYNC_CLOSING));
retval = ((port->flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS);
goto cleanup;
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 8e562dc..e1f3337 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -27,15 +27,18 @@ static char *tpm_device_name = "TPM";
static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context,
void **return_value)
{
- acpi_status status;
+ acpi_status status = AE_OK;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
- if (strstr(buffer.pointer, context) != NULL) {
- *return_value = handle;
+
+ if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) {
+ if (strstr(buffer.pointer, context) != NULL) {
+ *return_value = handle;
+ status = AE_CTRL_TERMINATE;
+ }
kfree(buffer.pointer);
- return AE_CTRL_TERMINATE;
}
- return AE_OK;
+
+ return status;
}
static inline void ppi_assign_params(union acpi_object params[4],
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index d5d2e4a..daea84c 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -216,4 +216,4 @@ error:
ttyprintk_driver = NULL;
return ret;
}
-module_init(ttyprintk_init);
+device_initcall(ttyprintk_init);
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 8d3009e..5543b7d 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -87,7 +87,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
return 0;
}
-static unsigned int _get_val(struct clk_divider *divider, u8 div)
+static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
{
if (divider->flags & CLK_DIVIDER_ONE_BASED)
return div;
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 39b40aa..68e515d 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -26,17 +26,17 @@ static struct clk_onecell_data clk_data;
#define ASS_CLK_DIV 0x4
#define ASS_CLK_GATE 0x8
+/* lists of parent clock names */
+static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
+static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
+
+#ifdef CONFIG_PM_SLEEP
static unsigned long reg_save[][2] = {
{ASS_CLK_SRC, 0},
{ASS_CLK_DIV, 0},
{ASS_CLK_GATE, 0},
};
-/* list of all parent clock list */
-static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
-static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
-
-#ifdef CONFIG_PM_SLEEP
static int exynos_audss_clk_suspend(void)
{
int i;
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index ad5ff50..1a7c1b9 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -39,7 +39,7 @@
#define SRC_TOP1 0xc214
#define SRC_CAM 0xc220
#define SRC_TV 0xc224
-#define SRC_MFC 0xcc28
+#define SRC_MFC 0xc228
#define SRC_G3D 0xc22c
#define E4210_SRC_IMAGE 0xc230
#define SRC_LCD0 0xc234
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index adf3234..e52359c 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -25,6 +25,7 @@
#define MPLL_LOCK 0x4000
#define MPLL_CON0 0x4100
#define SRC_CORE1 0x4204
+#define GATE_IP_ACP 0x8800
#define CPLL_LOCK 0x10020
#define EPLL_LOCK 0x10030
#define VPLL_LOCK 0x10040
@@ -75,7 +76,6 @@
#define SRC_CDREX 0x20200
#define PLL_DIV2_SEL 0x20a24
#define GATE_IP_DISP1 0x10928
-#define GATE_IP_ACP 0x10000
/* list of PLLs to be registered */
enum exynos5250_plls {
@@ -120,7 +120,8 @@ enum exynos5250_clks {
spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
- wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d,
+ wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, mdma0,
+ smmu_mdma0,
/* mux clocks */
mout_hdmi = 1024,
@@ -354,8 +355,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0),
GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0),
GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
- GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
- GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
+ GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 2, 0, 0),
+ GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 1, 0, 0),
GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0),
GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
@@ -406,7 +407,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0),
GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
- GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
+ GATE(sysreg, "sysreg", "aclk66",
+ GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0),
GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
@@ -492,6 +494,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0),
+ GATE(mdma0, "mdma0", "aclk266", GATE_IP_ACP, 1, 0, 0),
+ GATE(smmu_mdma0, "smmu_mdma0", "aclk266", GATE_IP_ACP, 5, 0, 0),
};
static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 634c4d6..cd6950f 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -37,6 +37,10 @@ config SUN4I_TIMER
select CLKSRC_MMIO
bool
+config SUN5I_HSTIMER
+ select CLKSRC_MMIO
+ bool
+
config VT8500_TIMER
bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 33621ef..358358d 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o
obj-$(CONFIG_ARCH_MXS) += mxs_timer.o
obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o
obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
+obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o
obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index c639b1a..0fc31d0 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -202,7 +202,7 @@ static struct clocksource gt_clocksource = {
};
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
-static u32 notrace gt_sched_clock_read(void)
+static u64 notrace gt_sched_clock_read(void)
{
return gt_counter_read();
}
@@ -217,7 +217,7 @@ static void __init gt_clocksource_init(void)
writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
- setup_sched_clock(gt_sched_clock_read, 32, gt_clk_rate);
+ sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);
#endif
clocksource_register_hz(&gt_clocksource, gt_clk_rate);
}
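
Several hunks in this series move from setup_sched_clock(), which took a 32-bit read callback, to sched_clock_register(), which takes a u64 callback plus the counter width in bits. A minimal sketch of the new registration, with a hypothetical memory-mapped up-counter:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/sched_clock.h>

static void __iomem *foo_counter;	/* mapped elsewhere; assumed for the sketch */

/* The read callback must be notrace and now returns u64 even for a
 * 32-bit counter; the counter width is passed separately at registration.
 */
static u64 notrace foo_sched_clock_read(void)
{
	return readl(foo_counter);
}

static void __init foo_sched_clock_init(unsigned long rate)
{
	sched_clock_register(foo_sched_clock_read, 32, rate);
}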
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 0d7d8c3..5176e76 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -98,12 +98,6 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
return;
}
-static const struct of_device_id bcm_timer_ids[] __initconst = {
- {.compatible = "brcm,kona-timer"},
- {.compatible = "bcm,kona-timer"}, /* deprecated name */
- {},
-};
-
static void __init kona_timers_init(struct device_node *node)
{
u32 freq;
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index b2bb3a4b..63f176d 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -67,11 +67,13 @@
* struct ttc_timer - This definition defines local timer structure
*
* @base_addr: Base address of timer
+ * @freq: Timer input clock frequency
* @clk: Associated clock source
* @clk_rate_change_nb Notifier block for clock rate changes
*/
struct ttc_timer {
void __iomem *base_addr;
+ unsigned long freq;
struct clk *clk;
struct notifier_block clk_rate_change_nb;
};
@@ -158,7 +160,7 @@ static cycle_t __ttc_clocksource_read(struct clocksource *cs)
TTC_COUNT_VAL_OFFSET);
}
-static u32 notrace ttc_sched_clock_read(void)
+static u64 notrace ttc_sched_clock_read(void)
{
return __raw_readl(ttc_sched_clock_val_reg);
}
@@ -196,9 +198,8 @@ static void ttc_set_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- ttc_set_interval(timer,
- DIV_ROUND_CLOSEST(clk_get_rate(ttce->ttc.clk),
- PRESCALE * HZ));
+ ttc_set_interval(timer, DIV_ROUND_CLOSEST(ttce->ttc.freq,
+ PRESCALE * HZ));
break;
case CLOCK_EVT_MODE_ONESHOT:
case CLOCK_EVT_MODE_UNUSED:
@@ -273,6 +274,8 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
return;
}
+ ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
+
ttccs->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clocksource_cb;
ttccs->ttc.clk_rate_change_nb.next = NULL;
@@ -298,16 +301,14 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
__raw_writel(CNT_CNTRL_RESET,
ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
- err = clocksource_register_hz(&ttccs->cs,
- clk_get_rate(ttccs->ttc.clk) / PRESCALE);
+ err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
if (WARN_ON(err)) {
kfree(ttccs);
return;
}
ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
- setup_sched_clock(ttc_sched_clock_read, 16,
- clk_get_rate(ttccs->ttc.clk) / PRESCALE);
+ sched_clock_register(ttc_sched_clock_read, 16, ttccs->ttc.freq / PRESCALE);
}
static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
@@ -334,6 +335,9 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
ndata->new_rate / PRESCALE);
local_irq_restore(flags);
+ /* update cached frequency */
+ ttc->freq = ndata->new_rate;
+
/* fall through */
}
case PRE_RATE_CHANGE:
@@ -367,6 +371,7 @@ static void __init ttc_setup_clockevent(struct clk *clk,
if (clk_notifier_register(ttcce->ttc.clk,
&ttcce->ttc.clk_rate_change_nb))
pr_warn("Unable to register clock notifier.\n");
+ ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
ttcce->ttc.base_addr = base;
ttcce->ce.name = "ttc_clockevent";
@@ -388,15 +393,14 @@ static void __init ttc_setup_clockevent(struct clk *clk,
__raw_writel(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET);
err = request_irq(irq, ttc_clock_event_interrupt,
- IRQF_DISABLED | IRQF_TIMER,
- ttcce->ce.name, ttcce);
+ IRQF_TIMER, ttcce->ce.name, ttcce);
if (WARN_ON(err)) {
kfree(ttcce);
return;
}
clockevents_config_and_register(&ttcce->ce,
- clk_get_rate(ttcce->ttc.clk) / PRESCALE, 1, 0xfffe);
+ ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
}
/**
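
The cadence_ttc hunks above cache the clock rate in ttc->freq and refresh it from the rate-change notifier instead of calling clk_get_rate() on every use. A minimal sketch of that caching pattern, assuming a hypothetical foo_timer structure:

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct foo_timer {
	struct clk *clk;
	unsigned long freq;		/* cached rate, refreshed by the notifier */
	struct notifier_block nb;
};

static int foo_rate_change_cb(struct notifier_block *nb, unsigned long event,
			      void *data)
{
	struct clk_notifier_data *ndata = data;
	struct foo_timer *timer = container_of(nb, struct foo_timer, nb);

	if (event == POST_RATE_CHANGE)
		timer->freq = ndata->new_rate;	/* keep the cache current */

	return NOTIFY_DONE;
}

static int foo_timer_setup(struct foo_timer *timer)
{
	timer->freq = clk_get_rate(timer->clk);
	timer->nb.notifier_call = foo_rate_change_cb;
	return clk_notifier_register(timer->clk, &timer->nb);
}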
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index b9ddd9e..ae2e427 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -28,6 +28,7 @@ void __init clocksource_of_init(void)
struct device_node *np;
const struct of_device_id *match;
clocksource_of_init_fn init_func;
+ unsigned clocksources = 0;
for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
if (!of_device_is_available(np))
@@ -35,5 +36,8 @@ void __init clocksource_of_init(void)
init_func = match->data;
init_func(np);
+ clocksources++;
}
+ if (!clocksources)
+ pr_crit("%s: no matching clocksources found\n", __func__);
}
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index ea21048..db21052 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -131,7 +131,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
static struct irqaction mfgptirq = {
.handler = mfgpt_tick,
- .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED,
+ .flags = IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED,
.name = DRV_NAME,
};
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index e54ca10..f3656a6 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -243,8 +243,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
dw_ced->irqaction.dev_id = &dw_ced->ced;
dw_ced->irqaction.irq = irq;
dw_ced->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL |
- IRQF_NOBALANCING |
- IRQF_DISABLED;
+ IRQF_NOBALANCING;
dw_ced->eoi = apbt_eoi;
err = setup_irq(irq, &dw_ced->irqaction);
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index ed7b73b..152a3f3 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -187,7 +187,7 @@ static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
static struct irqaction nmdk_timer_irq = {
.name = "Nomadik Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER,
+ .flags = IRQF_TIMER,
.handler = nmdk_timer_interrupt,
.dev_id = &nmdk_clkevt,
};
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 85082e8..5645cfc 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -264,7 +264,7 @@ static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id)
static struct irqaction samsung_clock_event_irq = {
.name = "samsung_time_irq",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = samsung_clock_event_isr,
.dev_id = &time_event_device,
};
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 0965e98..0b1836a 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -634,12 +634,18 @@ static int sh_cmt_clock_event_next(unsigned long delta,
static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
- pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev);
+ struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
+
+ pm_genpd_syscore_poweroff(&p->pdev->dev);
+ clk_unprepare(p->clk);
}
static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
- pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev);
+ struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
+
+ clk_prepare(p->clk);
+ pm_genpd_syscore_poweron(&p->pdev->dev);
}
static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
@@ -726,8 +732,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_cmt_interrupt;
p->irqaction.dev_id = p;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
- IRQF_IRQPOLL | IRQF_NOBALANCING;
+ p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "cmt_fck");
@@ -737,6 +742,10 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
goto err2;
}
+ ret = clk_prepare(p->clk);
+ if (ret < 0)
+ goto err3;
+
if (res2 && (resource_size(res2) == 4)) {
/* assume both CMSTR and CMCSR to be 32-bit */
p->read_control = sh_cmt_read32;
@@ -773,19 +782,21 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
cfg->clocksource_rating);
if (ret) {
dev_err(&p->pdev->dev, "registration failed\n");
- goto err3;
+ goto err4;
}
p->cs_enabled = false;
ret = setup_irq(irq, &p->irqaction);
if (ret) {
dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
- goto err3;
+ goto err4;
}
platform_set_drvdata(pdev, p);
return 0;
+err4:
+ clk_unprepare(p->clk);
err3:
clk_put(p->clk);
err2:
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 3cf1283..e30d76e 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -302,8 +302,7 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
p->irqaction.handler = sh_mtu2_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
- IRQF_IRQPOLL | IRQF_NOBALANCING;
+ p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
@@ -358,7 +357,6 @@ static int sh_mtu2_probe(struct platform_device *pdev)
ret = sh_mtu2_setup(p, pdev);
if (ret) {
kfree(p);
- platform_set_drvdata(pdev, NULL);
pm_runtime_idle(&pdev->dev);
return ret;
}
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 63557cd..ecd7b60 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -462,8 +462,7 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
p->irqaction.handler = sh_tmu_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
- IRQF_IRQPOLL | IRQF_NOBALANCING;
+ p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "tmu_fck");
@@ -523,7 +522,6 @@ static int sh_tmu_probe(struct platform_device *pdev)
ret = sh_tmu_setup(p, pdev);
if (ret) {
kfree(p);
- platform_set_drvdata(pdev, NULL);
pm_runtime_idle(&pdev->dev);
return ret;
}
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index a4f6119..bf497af 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -114,7 +114,7 @@ static int sun4i_clkevt_next_event(unsigned long evt,
static struct clock_event_device sun4i_clockevent = {
.name = "sun4i_tick",
- .rating = 300,
+ .rating = 350,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_mode = sun4i_clkevt_mode,
.set_next_event = sun4i_clkevt_next_event,
@@ -138,7 +138,7 @@ static struct irqaction sun4i_timer_irq = {
.dev_id = &sun4i_clockevent,
};
-static u32 sun4i_timer_sched_read(void)
+static u64 notrace sun4i_timer_sched_read(void)
{
return ~readl(timer_base + TIMER_CNTVAL_REG(1));
}
@@ -170,9 +170,9 @@ static void __init sun4i_timer_init(struct device_node *node)
TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
timer_base + TIMER_CTL_REG(1));
- setup_sched_clock(sun4i_timer_sched_read, 32, rate);
+ sched_clock_register(sun4i_timer_sched_read, 32, rate);
clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
- rate, 300, 32, clocksource_mmio_readl_down);
+ rate, 350, 32, clocksource_mmio_readl_down);
ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
@@ -190,7 +190,8 @@ static void __init sun4i_timer_init(struct device_node *node)
val = readl(timer_base + TIMER_IRQ_EN_REG);
writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
- sun4i_clockevent.cpumask = cpumask_of(0);
+ sun4i_clockevent.cpumask = cpu_possible_mask;
+ sun4i_clockevent.irq = irq;
clockevents_config_and_register(&sun4i_clockevent, rate,
TIMER_SYNC_TICKS, 0xffffffff);
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 6428492..d1869f0 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -149,7 +149,7 @@ static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id)
static struct irqaction tegra_timer_irq = {
.name = "timer0",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_HIGH,
+ .flags = IRQF_TIMER | IRQF_TRIGGER_HIGH,
.handler = tegra_timer_interrupt,
.dev_id = &tegra_clockevent,
};
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 4e7f680..ee8691b 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -76,6 +76,7 @@
static void __iomem *timer_base, *local_base;
static unsigned int timer_clk;
static bool timer25Mhz = true;
+static u32 enable_mask;
/*
* Number of timer ticks per jiffy.
@@ -121,8 +122,7 @@ armada_370_xp_clkevt_next_event(unsigned long delta,
/*
* Enable the timer.
*/
- local_timer_ctrl_clrset(TIMER0_RELOAD_EN,
- TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT));
+ local_timer_ctrl_clrset(TIMER0_RELOAD_EN, enable_mask);
return 0;
}
@@ -141,9 +141,7 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,
/*
* Enable timer.
*/
- local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN |
- TIMER0_EN |
- TIMER0_DIV(TIMER_DIVIDER_SHIFT));
+ local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
} else {
/*
* Disable timer.
@@ -240,10 +238,13 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
WARN_ON(!timer_base);
local_base = of_iomap(np, 1);
- if (timer25Mhz)
+ if (timer25Mhz) {
set = TIMER0_25MHZ;
- else
+ enable_mask = TIMER0_EN;
+ } else {
clr = TIMER0_25MHZ;
+ enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
+ }
timer_ctrl_clrset(clr, set);
local_timer_ctrl_clrset(clr, set);
@@ -262,8 +263,7 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
- timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
- TIMER0_DIV(TIMER_DIVIDER_SHIFT));
+ timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
/*
* Set scale and timer for sched_clock.
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c
index 9c7f018..2006622 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/time-orion.c
@@ -53,7 +53,7 @@ EXPORT_SYMBOL(orion_timer_ctrl_clrset);
/*
* Free-running clocksource handling.
*/
-static u32 notrace orion_read_sched_clock(void)
+static u64 notrace orion_read_sched_clock(void)
{
return ~readl(timer_base + TIMER0_VAL);
}
@@ -135,7 +135,7 @@ static void __init orion_timer_init(struct device_node *np)
clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
clk_get_rate(clk), 300, 32,
clocksource_mmio_readl_down);
- setup_sched_clock(orion_read_sched_clock, 32, clk_get_rate(clk));
+ sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk));
/* setup timer1 as clockevent timer */
if (setup_irq(irq, &orion_clkevt_irq))
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
new file mode 100644
index 0000000..deebcd6
--- /dev/null
+++ b/drivers/clocksource/timer-sun5i.c
@@ -0,0 +1,192 @@
+/*
+ * Allwinner SoCs hstimer driver.
+ *
+ * Copyright (C) 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define TIMER_IRQ_EN_REG 0x00
+#define TIMER_IRQ_EN(val) BIT(val)
+#define TIMER_IRQ_ST_REG 0x04
+#define TIMER_CTL_REG(val) (0x20 * (val) + 0x10)
+#define TIMER_CTL_ENABLE BIT(0)
+#define TIMER_CTL_RELOAD BIT(1)
+#define TIMER_CTL_CLK_PRES(val) (((val) & 0x7) << 4)
+#define TIMER_CTL_ONESHOT BIT(7)
+#define TIMER_INTVAL_LO_REG(val) (0x20 * (val) + 0x14)
+#define TIMER_INTVAL_HI_REG(val) (0x20 * (val) + 0x18)
+#define TIMER_CNTVAL_LO_REG(val) (0x20 * (val) + 0x1c)
+#define TIMER_CNTVAL_HI_REG(val) (0x20 * (val) + 0x20)
+
+#define TIMER_SYNC_TICKS 3
+
+static void __iomem *timer_base;
+static u32 ticks_per_jiffy;
+
+/*
+ * When we disable a timer, we need to wait for at least 2 cycles of
+ * the timer source clock. For that we use the clocksource timer that
+ * is already set up and runs at the same frequency as the other
+ * timers, and which is never disabled.
+ */
+static void sun5i_clkevt_sync(void)
+{
+ u32 old = readl(timer_base + TIMER_CNTVAL_LO_REG(1));
+
+ while ((old - readl(timer_base + TIMER_CNTVAL_LO_REG(1))) < TIMER_SYNC_TICKS)
+ cpu_relax();
+}
+
+static void sun5i_clkevt_time_stop(u8 timer)
+{
+ u32 val = readl(timer_base + TIMER_CTL_REG(timer));
+ writel(val & ~TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(timer));
+
+ sun5i_clkevt_sync();
+}
+
+static void sun5i_clkevt_time_setup(u8 timer, u32 delay)
+{
+ writel(delay, timer_base + TIMER_INTVAL_LO_REG(timer));
+}
+
+static void sun5i_clkevt_time_start(u8 timer, bool periodic)
+{
+ u32 val = readl(timer_base + TIMER_CTL_REG(timer));
+
+ if (periodic)
+ val &= ~TIMER_CTL_ONESHOT;
+ else
+ val |= TIMER_CTL_ONESHOT;
+
+ writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
+ timer_base + TIMER_CTL_REG(timer));
+}
+
+static void sun5i_clkevt_mode(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ sun5i_clkevt_time_stop(0);
+ sun5i_clkevt_time_setup(0, ticks_per_jiffy);
+ sun5i_clkevt_time_start(0, true);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ sun5i_clkevt_time_stop(0);
+ sun5i_clkevt_time_start(0, false);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ sun5i_clkevt_time_stop(0);
+ break;
+ }
+}
+
+static int sun5i_clkevt_next_event(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ sun5i_clkevt_time_stop(0);
+ sun5i_clkevt_time_setup(0, evt - TIMER_SYNC_TICKS);
+ sun5i_clkevt_time_start(0, false);
+
+ return 0;
+}
+
+static struct clock_event_device sun5i_clockevent = {
+ .name = "sun5i_tick",
+ .rating = 340,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = sun5i_clkevt_mode,
+ .set_next_event = sun5i_clkevt_next_event,
+};
+
+
+static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+
+ writel(0x1, timer_base + TIMER_IRQ_ST_REG);
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction sun5i_timer_irq = {
+ .name = "sun5i_timer0",
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = sun5i_timer_interrupt,
+ .dev_id = &sun5i_clockevent,
+};
+
+static u64 notrace sun5i_timer_sched_read(void)
+{
+ return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
+}
+
+static void __init sun5i_timer_init(struct device_node *node)
+{
+ unsigned long rate;
+ struct clk *clk;
+ int ret, irq;
+ u32 val;
+
+ timer_base = of_iomap(node, 0);
+ if (!timer_base)
+ panic("Can't map registers");
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0)
+ panic("Can't parse IRQ");
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk))
+ panic("Can't get timer clock");
+ clk_prepare_enable(clk);
+ rate = clk_get_rate(clk);
+
+ writel(~0, timer_base + TIMER_INTVAL_LO_REG(1));
+ writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
+ timer_base + TIMER_CTL_REG(1));
+
+ sched_clock_register(sun5i_timer_sched_read, 32, rate);
+ clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
+ rate, 340, 32, clocksource_mmio_readl_down);
+
+ ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
+
+ ret = setup_irq(irq, &sun5i_timer_irq);
+ if (ret)
+ pr_warn("failed to setup irq %d\n", irq);
+
+ /* Enable timer0 interrupt */
+ val = readl(timer_base + TIMER_IRQ_EN_REG);
+ writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
+
+ sun5i_clockevent.cpumask = cpu_possible_mask;
+ sun5i_clockevent.irq = irq;
+
+ clockevents_config_and_register(&sun5i_clockevent, rate,
+ TIMER_SYNC_TICKS, 0xffffffff);
+}
+CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
+ sun5i_timer_init);
+CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer",
+ sun5i_timer_init);
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index ad3c0e8..1098ed3 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -124,7 +124,7 @@ static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
static struct irqaction irq = {
.name = "vt8500_timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = vt8500_timer_interrupt,
.dev_id = &clockevent,
};
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 02d534d..8d19f7c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -828,14 +828,17 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
int ret = 0;
memcpy(&new_policy, policy, sizeof(*policy));
+
+ /* Use the default policy if it is valid. */
+ if (cpufreq_driver->setpolicy)
+ cpufreq_parse_governor(policy->governor->name,
+ &new_policy.policy, NULL);
+
/* assure that the starting sequence is run in cpufreq_set_policy */
policy->governor = NULL;
/* set default policy */
ret = cpufreq_set_policy(policy, &new_policy);
- policy->user_policy.policy = policy->policy;
- policy->user_policy.governor = policy->governor;
-
if (ret) {
pr_debug("setting policy failed\n");
if (cpufreq_driver->exit)
@@ -845,8 +848,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
- unsigned int cpu, struct device *dev,
- bool frozen)
+ unsigned int cpu, struct device *dev)
{
int ret = 0;
unsigned long flags;
@@ -877,11 +879,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
}
}
- /* Don't touch sysfs links during light-weight init */
- if (!frozen)
- ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
-
- return ret;
+ return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
#endif
@@ -926,6 +924,27 @@ err_free_policy:
return NULL;
}
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+{
+ struct kobject *kobj;
+ struct completion *cmp;
+
+ down_read(&policy->rwsem);
+ kobj = &policy->kobj;
+ cmp = &policy->kobj_unregister;
+ up_read(&policy->rwsem);
+ kobject_put(kobj);
+
+ /*
+ * We need to make sure that the underlying kobj is
+ * actually not referenced anymore by anybody before we
+ * proceed with unloading.
+ */
+ pr_debug("waiting for dropping of refcount\n");
+ wait_for_completion(cmp);
+ pr_debug("wait complete\n");
+}
+
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
free_cpumask_var(policy->related_cpus);
@@ -986,7 +1005,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
+ ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
up_read(&cpufreq_rwsem);
return ret;
}
@@ -994,15 +1013,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
- if (frozen)
- /* Restore the saved policy when doing light-weight init */
- policy = cpufreq_policy_restore(cpu);
- else
+ /*
+ * Restore the saved policy when doing light-weight init and fall back
+ * to the full init if that fails.
+ */
+ policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+ if (!policy) {
+ frozen = false;
policy = cpufreq_policy_alloc();
-
- if (!policy)
- goto nomem_out;
-
+ if (!policy)
+ goto nomem_out;
+ }
/*
* In the resume path, since we restore a saved policy, the assignment
@@ -1047,8 +1068,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
*/
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
- policy->user_policy.min = policy->min;
- policy->user_policy.max = policy->max;
+ if (!frozen) {
+ policy->user_policy.min = policy->min;
+ policy->user_policy.max = policy->max;
+ }
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
@@ -1079,6 +1102,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
cpufreq_init_policy(policy);
+ if (!frozen) {
+ policy->user_policy.policy = policy->policy;
+ policy->user_policy.governor = policy->governor;
+ }
+
kobject_uevent(&policy->kobj, KOBJ_ADD);
up_read(&cpufreq_rwsem);
@@ -1096,7 +1124,13 @@ err_get_freq:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
err_set_policy_cpu:
+ if (frozen) {
+ /* Do not leave stale fallback data behind. */
+ per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
+ cpufreq_policy_put_kobj(policy);
+ }
cpufreq_policy_free(policy);
+
nomem_out:
up_read(&cpufreq_rwsem);
@@ -1118,7 +1152,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
}
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
- unsigned int old_cpu, bool frozen)
+ unsigned int old_cpu)
{
struct device *cpu_dev;
int ret;
@@ -1126,10 +1160,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
/* first sibling now owns the new sysfs dir */
cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
- /* Don't touch sysfs files during light-weight tear-down */
- if (frozen)
- return cpu_dev->id;
-
sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
if (ret) {
@@ -1196,7 +1226,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
if (!frozen)
sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
- new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
+ new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
if (new_cpu >= 0) {
update_policy_cpu(policy, new_cpu);
@@ -1218,8 +1248,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
int ret;
unsigned long flags;
struct cpufreq_policy *policy;
- struct kobject *kobj;
- struct completion *cmp;
read_lock_irqsave(&cpufreq_driver_lock, flags);
policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1249,22 +1277,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
}
- if (!frozen) {
- down_read(&policy->rwsem);
- kobj = &policy->kobj;
- cmp = &policy->kobj_unregister;
- up_read(&policy->rwsem);
- kobject_put(kobj);
-
- /*
- * We need to make sure that the underlying kobj is
- * actually not referenced anymore by anybody before we
- * proceed with unloading.
- */
- pr_debug("waiting for dropping of refcount\n");
- wait_for_completion(cmp);
- pr_debug("wait complete\n");
- }
+ if (!frozen)
+ cpufreq_policy_put_kobj(policy);
/*
* Perform the ->exit() even during light-weight tear-down,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 5f1cbae..d51f17ed 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -581,7 +581,8 @@ static void intel_pstate_timer_func(unsigned long __data)
}
#define ICPU(model, policy) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
+ (unsigned long)&policy }
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(0x2a, core_params),
@@ -614,6 +615,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
cpu = all_cpu_data[cpunum];
intel_pstate_get_cpu_pstates(cpu);
+ if (!cpu->pstate.current_pstate) {
+ all_cpu_data[cpunum] = NULL;
+ kfree(cpu);
+ return -ENODATA;
+ }
cpu->cpu = cpunum;
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index 3679563..6e51114 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -65,7 +65,7 @@ static struct cpuidle_driver calxeda_idle_driver = {
.state_count = 2,
};
-static int __init calxeda_cpuidle_probe(struct platform_device *pdev)
+static int calxeda_cpuidle_probe(struct platform_device *pdev)
{
return cpuidle_register(&calxeda_idle_driver, NULL);
}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 9dd6e01..f757a0f 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1410,14 +1410,12 @@ static const struct platform_device_info ixp_dev_info __initdata = {
static int __init ixp_module_init(void)
{
int num = ARRAY_SIZE(ixp4xx_algos);
- int i, err ;
+ int i, err;
pdev = platform_device_register_full(&ixp_dev_info);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
- dev = &pdev->dev;
-
spin_lock_init(&desc_lock);
spin_lock_init(&emerg_lock);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1a49c7776..8752918 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -817,7 +817,15 @@ int ioat_dma_self_test(struct ioatdma_device *device)
}
dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_src)) {
+ dev_err(dev, "mapping src buffer failed\n");
+ goto free_resources;
+ }
dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_dest)) {
+ dev_err(dev, "mapping dest buffer failed\n");
+ goto unmap_src;
+ }
flags = DMA_PREP_INTERRUPT;
tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
IOAT_TEST_SIZE, flags);
@@ -855,8 +863,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
}
unmap_dma:
- dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+unmap_src:
+ dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
dma->device_free_chan_resources(dma_chan);
out:
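
The ioat hunk above adds the dma_mapping_error() checks the DMA API requires after every dma_map_single(), with an extra unwind label so a failed second mapping still releases the first. A minimal sketch of the same pattern, with hypothetical buffer names:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int foo_map_buffers(struct device *dev, void *src, void *dst,
			   size_t len, dma_addr_t *dma_src, dma_addr_t *dma_dst)
{
	*dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma_src))
		return -ENOMEM;			/* nothing mapped yet */

	*dma_dst = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_dst)) {
		dma_unmap_single(dev, *dma_src, len, DMA_TO_DEVICE);
		return -ENOMEM;			/* unwound the one that succeeded */
	}

	return 0;
}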
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index b53d0de..98e14ee 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,7 +1,7 @@
#include "amd64_edac.h"
#include <asm/amd_nb.h>
-static struct edac_pci_ctl_info *amd64_ctl_pci;
+static struct edac_pci_ctl_info *pci_ctl;
static int report_gart_errors;
module_param(report_gart_errors, int, 0644);
@@ -162,7 +162,7 @@ static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
* scan the scrub rate mapping table for a close or matching bandwidth value to
* issue. If requested is too big, then use last maximum value found.
*/
-static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
+static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
{
u32 scrubval;
int i;
@@ -198,7 +198,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
return 0;
}
-static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
+static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 min_scrubrate = 0x5;
@@ -210,10 +210,10 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
if (pvt->fam == 0x15 && pvt->model < 0x10)
f15h_select_dct(pvt, 0);
- return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
+ return __set_scrub_rate(pvt->F3, bw, min_scrubrate);
}
-static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
+static int get_scrub_rate(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 scrubval = 0;
@@ -240,8 +240,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
* returns true if the SysAddr given by sys_addr matches the
* DRAM base/limit associated with node_id
*/
-static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
- u8 nid)
+static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
u64 addr;
@@ -285,7 +284,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
if (intlv_en == 0) {
for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
- if (amd64_base_limit_match(pvt, sys_addr, node_id))
+ if (base_limit_match(pvt, sys_addr, node_id))
goto found;
}
goto err_no_match;
@@ -309,7 +308,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
}
/* sanity test for sys_addr */
- if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
+ if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
"range for node %d with node interleaving enabled.\n",
__func__, sys_addr, node_id);
@@ -660,7 +659,7 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
* Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
* are ECC capable.
*/
-static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
+static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
u8 bit;
unsigned long edac_cap = EDAC_FLAG_NONE;
@@ -675,9 +674,9 @@ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
return edac_cap;
}
-static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
+static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
-static void amd64_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
+static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
@@ -711,7 +710,7 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
(pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
(pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
- amd64_dump_dramcfg_low(pvt, pvt->dclr0, 0);
+ debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
@@ -722,19 +721,19 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
- amd64_debug_display_dimm_sizes(pvt, 0);
+ debug_display_dimm_sizes(pvt, 0);
/* everything below this point is Fam10h and above */
if (pvt->fam == 0xf)
return;
- amd64_debug_display_dimm_sizes(pvt, 1);
+ debug_display_dimm_sizes(pvt, 1);
amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
- amd64_dump_dramcfg_low(pvt, pvt->dclr1, 1);
+ debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}
/*
@@ -800,7 +799,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
}
}
-static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
+static enum mem_type determine_memory_type(struct amd64_pvt *pvt, int cs)
{
enum mem_type type;
@@ -1578,7 +1577,7 @@ static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
num_dcts_intlv, dct_sel);
/* Verify we stay within the MAX number of channels allowed */
- if (channel > 4 || channel < 0)
+ if (channel > 3)
return -EINVAL;
leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
@@ -1702,7 +1701,7 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
* debug routine to display the memory sizes of all logical DIMMs and its
* CSROWs
*/
-static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
+static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
int dimm, size0, size1;
u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
@@ -1744,7 +1743,7 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
}
}
-static struct amd64_family_type amd64_family_types[] = {
+static struct amd64_family_type family_types[] = {
[K8_CPUS] = {
.ctl_name = "K8",
.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
@@ -2005,9 +2004,9 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
string, "");
}
-static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
- struct mce *m)
+static inline void decode_bus_error(int node_id, struct mce *m)
{
+ struct mem_ctl_info *mci = mcis[node_id];
struct amd64_pvt *pvt = mci->pvt_info;
u8 ecc_type = (m->status >> 45) & 0x3;
u8 xec = XEC(m->status, 0x1f);
@@ -2035,11 +2034,6 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
__log_bus_error(mci, &err, ecc_type);
}
-void amd64_decode_bus_error(int node_id, struct mce *m)
-{
- __amd64_decode_bus_error(mcis[node_id], m);
-}
-
/*
* Use pvt->F2 which contains the F2 CPU PCI device to get the related
* F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
@@ -2196,7 +2190,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
* encompasses
*
*/
-static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
+static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
u32 cs_mode, nr_pages;
u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
@@ -2263,19 +2257,19 @@ static int init_csrows(struct mem_ctl_info *mci)
pvt->mc_node_id, i);
if (row_dct0) {
- nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
+ nr_pages = get_csrow_nr_pages(pvt, 0, i);
csrow->channels[0]->dimm->nr_pages = nr_pages;
}
/* K8 has only one DCT */
if (pvt->fam != 0xf && row_dct1) {
- int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i);
+ int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
nr_pages += row_dct1_pages;
}
- mtype = amd64_determine_memory_type(pvt, i);
+ mtype = determine_memory_type(pvt, i);
edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
@@ -2309,7 +2303,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
}
/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(u16 nid)
+static bool nb_mce_bank_enabled_on_node(u16 nid)
{
cpumask_var_t mask;
int cpu, nbe;
@@ -2482,7 +2476,7 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
ecc_en = !!(value & NBCFG_ECC_ENABLE);
amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
- nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
+ nb_mce_en = nb_mce_bank_enabled_on_node(nid);
if (!nb_mce_en)
amd64_notice("NB MCE bank disabled, set MSR "
"0x%08x[4] on node %d to enable.\n",
@@ -2537,7 +2531,7 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
if (pvt->nbcap & NBCAP_CHIPKILL)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
- mci->edac_cap = amd64_determine_edac_cap(pvt);
+ mci->edac_cap = determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = EDAC_AMD64_VERSION;
mci->ctl_name = fam->ctl_name;
@@ -2545,14 +2539,14 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
mci->ctl_page_to_phys = NULL;
/* memory scrubber interface */
- mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
- mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
+ mci->set_sdram_scrub_rate = set_scrub_rate;
+ mci->get_sdram_scrub_rate = get_scrub_rate;
}
/*
* returns a pointer to the family descriptor on success, NULL otherwise.
*/
-static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
+static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
struct amd64_family_type *fam_type = NULL;
@@ -2563,29 +2557,29 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
switch (pvt->fam) {
case 0xf:
- fam_type = &amd64_family_types[K8_CPUS];
- pvt->ops = &amd64_family_types[K8_CPUS].ops;
+ fam_type = &family_types[K8_CPUS];
+ pvt->ops = &family_types[K8_CPUS].ops;
break;
case 0x10:
- fam_type = &amd64_family_types[F10_CPUS];
- pvt->ops = &amd64_family_types[F10_CPUS].ops;
+ fam_type = &family_types[F10_CPUS];
+ pvt->ops = &family_types[F10_CPUS].ops;
break;
case 0x15:
if (pvt->model == 0x30) {
- fam_type = &amd64_family_types[F15_M30H_CPUS];
- pvt->ops = &amd64_family_types[F15_M30H_CPUS].ops;
+ fam_type = &family_types[F15_M30H_CPUS];
+ pvt->ops = &family_types[F15_M30H_CPUS].ops;
break;
}
- fam_type = &amd64_family_types[F15_CPUS];
- pvt->ops = &amd64_family_types[F15_CPUS].ops;
+ fam_type = &family_types[F15_CPUS];
+ pvt->ops = &family_types[F15_CPUS].ops;
break;
case 0x16:
- fam_type = &amd64_family_types[F16_CPUS];
- pvt->ops = &amd64_family_types[F16_CPUS].ops;
+ fam_type = &family_types[F16_CPUS];
+ pvt->ops = &family_types[F16_CPUS].ops;
break;
default:
@@ -2601,7 +2595,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
return fam_type;
}
-static int amd64_init_one_instance(struct pci_dev *F2)
+static int init_one_instance(struct pci_dev *F2)
{
struct amd64_pvt *pvt = NULL;
struct amd64_family_type *fam_type = NULL;
@@ -2619,7 +2613,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
pvt->F2 = F2;
ret = -EINVAL;
- fam_type = amd64_per_family_init(pvt);
+ fam_type = per_family_init(pvt);
if (!fam_type)
goto err_free;
@@ -2680,7 +2674,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
if (report_gart_errors)
amd_report_gart_errors(true);
- amd_register_ecc_decoder(amd64_decode_bus_error);
+ amd_register_ecc_decoder(decode_bus_error);
mcis[nid] = mci;
@@ -2703,8 +2697,8 @@ err_ret:
return ret;
}
-static int amd64_probe_one_instance(struct pci_dev *pdev,
- const struct pci_device_id *mc_type)
+static int probe_one_instance(struct pci_dev *pdev,
+ const struct pci_device_id *mc_type)
{
u16 nid = amd_get_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
@@ -2736,7 +2730,7 @@ static int amd64_probe_one_instance(struct pci_dev *pdev,
goto err_enable;
}
- ret = amd64_init_one_instance(pdev);
+ ret = init_one_instance(pdev);
if (ret < 0) {
amd64_err("Error probing instance: %d\n", nid);
restore_ecc_error_reporting(s, nid, F3);
@@ -2752,7 +2746,7 @@ err_out:
return ret;
}
-static void amd64_remove_one_instance(struct pci_dev *pdev)
+static void remove_one_instance(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
@@ -2777,7 +2771,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev)
/* unregister from EDAC MCE */
amd_report_gart_errors(false);
- amd_unregister_ecc_decoder(amd64_decode_bus_error);
+ amd_unregister_ecc_decoder(decode_bus_error);
kfree(ecc_stngs[nid]);
ecc_stngs[nid] = NULL;
@@ -2795,7 +2789,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev)
* PCI core identifies what devices are on a system during boot, and then
* queries this table to see if this driver is for a given device found.
*/
-static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
+static const struct pci_device_id amd64_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_AMD,
.device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
@@ -2843,8 +2837,8 @@ MODULE_DEVICE_TABLE(pci, amd64_pci_table);
static struct pci_driver amd64_pci_driver = {
.name = EDAC_MOD_STR,
- .probe = amd64_probe_one_instance,
- .remove = amd64_remove_one_instance,
+ .probe = probe_one_instance,
+ .remove = remove_one_instance,
.id_table = amd64_pci_table,
};
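The DEFINE_PCI_DEVICE_TABLE conversions repeated throughout this patch are mechanical: the macro was (approximately, from memory of include/linux/pci.h of this era) just a thin wrapper, so both spellings declare the same const table:

/* Approximate definition of the macro being removed; with it,
 *     static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = { ... };
 * and the open-coded
 *     static const struct pci_device_id amd64_pci_table[] = { ... };
 * are equivalent declarations. */
#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[]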
@@ -2853,23 +2847,18 @@ static void setup_pci_device(void)
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- if (amd64_ctl_pci)
+ if (pci_ctl)
return;
mci = mcis[0];
- if (mci) {
-
- pvt = mci->pvt_info;
- amd64_ctl_pci =
- edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
-
- if (!amd64_ctl_pci) {
- pr_warning("%s(): Unable to create PCI control\n",
- __func__);
+ if (!mci)
+ return;
- pr_warning("%s(): PCI error report via EDAC not set\n",
- __func__);
- }
+ pvt = mci->pvt_info;
+ pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
+ if (!pci_ctl) {
+ pr_warn("%s(): Unable to create PCI control\n", __func__);
+ pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
}
}
@@ -2925,8 +2914,8 @@ err_ret:
static void __exit amd64_edac_exit(void)
{
- if (amd64_ctl_pci)
- edac_pci_release_generic_ctl(amd64_ctl_pci);
+ if (pci_ctl)
+ edac_pci_release_generic_ctl(pci_ctl);
pci_unregister_driver(&amd64_pci_driver);
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 96e3ee3..3a501b5 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -333,7 +333,7 @@ static void amd76x_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(amd76x_pci_tbl) = {
+static const struct pci_device_id amd76x_pci_tbl[] = {
{
PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
AMD762},
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 644fec5..92d54fa 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -1182,9 +1182,11 @@ static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
pvt->dev_info->err_dev, pvt->bridge_ck);
- if (pvt->bridge_ck == NULL)
+ if (pvt->bridge_ck == NULL) {
pvt->bridge_ck = pci_scan_single_device(pdev->bus,
PCI_DEVFN(0, 1));
+ pci_dev_get(pvt->bridge_ck);
+ }
if (pvt->bridge_ck == NULL) {
e752x_printk(KERN_ERR, "error reporting device not found:"
@@ -1421,7 +1423,7 @@ static void e752x_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(e752x_pci_tbl) = {
+static const struct pci_device_id e752x_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
E7520},
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 1c4056a..3cda79b 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -555,7 +555,7 @@ static void e7xxx_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(e7xxx_pci_tbl) = {
+static const struct pci_device_id e7xxx_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
E7205},
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 1026743..592af5f 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -437,6 +437,9 @@ void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{
int status;
+ if (!edac_dev->edac_check)
+ return;
+
status = cancel_delayed_work(&edac_dev->work);
if (status == 0) {
/* workq instance might be running, wait for it */
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 9f7e0e60..51c0362 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -914,7 +914,7 @@ void __exit edac_debugfs_exit(void)
debugfs_remove(edac_debugfs);
}
-int edac_create_debug_nodes(struct mem_ctl_info *mci)
+static int edac_create_debug_nodes(struct mem_ctl_info *mci)
{
struct dentry *d, *parent;
char name[80];
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c
index 351945f..9d9e18a 100644
--- a/drivers/edac/edac_stub.c
+++ b/drivers/edac/edac_stub.c
@@ -29,6 +29,25 @@ EXPORT_SYMBOL_GPL(edac_err_assert);
static atomic_t edac_subsys_valid = ATOMIC_INIT(0);
+int edac_report_status = EDAC_REPORTING_ENABLED;
+EXPORT_SYMBOL_GPL(edac_report_status);
+
+static int __init edac_report_setup(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strncmp(str, "on", 2))
+ set_edac_report_status(EDAC_REPORTING_ENABLED);
+ else if (!strncmp(str, "off", 3))
+ set_edac_report_status(EDAC_REPORTING_DISABLED);
+ else if (!strncmp(str, "force", 5))
+ set_edac_report_status(EDAC_REPORTING_FORCE);
+
+ return 0;
+}
+__setup("edac_report=", edac_report_setup);
+
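For reference, the handler above accepts edac_report=on, edac_report=off or edac_report=force on the kernel command line; a sketch of the consumer side, mirroring the sb_edac.c hunk later in this patch:

	/* Skip decoding machine-check events entirely when error
	 * reporting has been disabled via edac_report=off. */
	if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;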
/*
* called to determine if there is an EDAC driver interested in
* knowing an event (such as NMI) occurred
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 694efcb..cd28b96 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -487,7 +487,7 @@ static void i3000_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(i3000_pci_tbl) = {
+static const struct pci_device_id i3000_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I3000},
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index be10a74..fa1326e 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -466,7 +466,7 @@ static void i3200_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(i3200_pci_tbl) = {
+static const struct pci_device_id i3200_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I3200},
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 63b2194..72e07e3 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1530,7 +1530,7 @@ static void i5000_remove_one(struct pci_dev *pdev)
*
* The "E500P" device is the first device supported.
*/
-static DEFINE_PCI_DEVICE_TABLE(i5000_pci_tbl) = {
+static const struct pci_device_id i5000_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
.driver_data = I5000P},
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 157b934..36a38ee 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -1213,7 +1213,7 @@ static void i5100_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(i5100_pci_tbl) = {
+static const struct pci_device_id i5100_pci_tbl[] = {
/* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
{ 0, }
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 0a05bbc..e080cbf 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -1416,7 +1416,7 @@ static void i5400_remove_one(struct pci_dev *pdev)
*
* The "E500P" device is the first device supported.
*/
-static DEFINE_PCI_DEVICE_TABLE(i5400_pci_tbl) = {
+static const struct pci_device_id i5400_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
{0,} /* 0 terminated list. */
};
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 9004c64..d63f479 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -1160,7 +1160,7 @@ static void i7300_remove_one(struct pci_dev *pdev)
*
* Has only 8086:360c PCI ID
*/
-static DEFINE_PCI_DEVICE_TABLE(i7300_pci_tbl) = {
+static const struct pci_device_id i7300_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
{0,} /* 0 terminated list. */
};
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 80a963d..87533ca 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -394,7 +394,7 @@ static const struct pci_id_table pci_dev_table[] = {
/*
* pci_device_id table for which devices we are looking for
*/
-static DEFINE_PCI_DEVICE_TABLE(i7core_pci_tbl) = {
+static const struct pci_device_id i7core_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
{0,} /* 0 terminated list. */
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 57fdb77..d730e276 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -386,7 +386,7 @@ static void i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
-static DEFINE_PCI_DEVICE_TABLE(i82443bxgx_pci_tbl) = {
+static const struct pci_device_id i82443bxgx_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 3e3e431..3382f63 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -288,7 +288,7 @@ static void i82860_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(i82860_pci_tbl) = {
+static const struct pci_device_id i82860_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I82860},
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 2f8535f..80573df 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -527,7 +527,7 @@ static void i82875p_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(i82875p_pci_tbl) = {
+static const struct pci_device_id i82875p_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I82875P},
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 0c8d4b0..10b1052 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -628,7 +628,7 @@ static void i82975x_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(i82975x_pci_tbl) = {
+static const struct pci_device_id i82975x_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I82975X
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index fd46b0b..8f91821 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1,6 +1,8 @@
/*
* Freescale MPC85xx Memory Controller kernel module
*
+ * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
+ *
* Author: Dave Jiang <djiang@mvista.com>
*
* 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
@@ -196,6 +198,42 @@ static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
edac_pci_handle_npe(pci, pci->ctl_name);
}
+static void mpc85xx_pcie_check(struct edac_pci_ctl_info *pci)
+{
+ struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
+
+ pr_err("PCIe error(s) detected\n");
+ pr_err("PCIe ERR_DR register: 0x%08x\n", err_detect);
+ pr_err("PCIe ERR_CAP_STAT register: 0x%08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_GAS_TIMR));
+ pr_err("PCIe ERR_CAP_R0 register: 0x%08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R0));
+ pr_err("PCIe ERR_CAP_R1 register: 0x%08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R1));
+ pr_err("PCIe ERR_CAP_R2 register: 0x%08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R2));
+ pr_err("PCIe ERR_CAP_R3 register: 0x%08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R3));
+
+ /* clear error bits */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
+}
+
+static int mpc85xx_pcie_find_capability(struct device_node *np)
+{
+ struct pci_controller *hose;
+
+ if (!np)
+ return -EINVAL;
+
+ hose = pci_find_hose_for_OF_device(np);
+
+ return early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
+}
+
static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
{
struct edac_pci_ctl_info *pci = dev_id;
@@ -207,7 +245,10 @@ static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
if (!err_detect)
return IRQ_NONE;
- mpc85xx_pci_check(pci);
+ if (pdata->is_pcie)
+ mpc85xx_pcie_check(pci);
+ else
+ mpc85xx_pci_check(pci);
return IRQ_HANDLED;
}
@@ -239,14 +280,22 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
pdata = pci->pvt_info;
pdata->name = "mpc85xx_pci_err";
pdata->irq = NO_IRQ;
+
+ if (mpc85xx_pcie_find_capability(op->dev.of_node) > 0)
+ pdata->is_pcie = true;
+
dev_set_drvdata(&op->dev, pci);
pci->dev = &op->dev;
pci->mod_name = EDAC_MOD_STR;
pci->ctl_name = pdata->name;
pci->dev_name = dev_name(&op->dev);
- if (edac_op_state == EDAC_OPSTATE_POLL)
- pci->edac_check = mpc85xx_pci_check;
+ if (edac_op_state == EDAC_OPSTATE_POLL) {
+ if (pdata->is_pcie)
+ pci->edac_check = mpc85xx_pcie_check;
+ else
+ pci->edac_check = mpc85xx_pci_check;
+ }
pdata->edac_idx = edac_pci_idx++;
@@ -275,16 +324,26 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
goto err;
}
- orig_pci_err_cap_dr =
- in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
+ if (pdata->is_pcie) {
+ orig_pci_err_cap_dr =
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR);
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, ~0);
+ orig_pci_err_en =
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, 0);
+ } else {
+ orig_pci_err_cap_dr =
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
- /* PCI master abort is expected during config cycles */
- out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
+ /* PCI master abort is expected during config cycles */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
- orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
+ orig_pci_err_en =
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
- /* disable master abort reporting */
- out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
+ /* disable master abort reporting */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
+ }
/* clear error bits */
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
@@ -297,7 +356,8 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
if (edac_op_state == EDAC_OPSTATE_INT) {
pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
- mpc85xx_pci_isr, IRQF_DISABLED,
+ mpc85xx_pci_isr,
+ IRQF_DISABLED | IRQF_SHARED,
"[EDAC] PCI err", pci);
if (res < 0) {
printk(KERN_ERR
@@ -312,6 +372,22 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
pdata->irq);
}
+ if (pdata->is_pcie) {
+ /*
+ * Enable all PCIe error interrupts and error detection, except for
+ * the "invalid PEX_CONFIG_ADDR/PEX_CONFIG_DATA access" interrupt
+ * generation and detection enable bits. The PCIe bus code issues
+ * some invalid PEX_CONFIG_ADDR/PEX_CONFIG_DATA accesses while
+ * initializing and configuring PCIe devices at boot, so leaving that
+ * detection enabled would make the EDAC driver print a flood of
+ * harmless notices. Disable it to avoid the noise.
+ */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0
+ & ~PEX_ERR_ICCAIE_EN_BIT);
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, 0
+ | PEX_ERR_ICCAD_DISR_BIT);
+ }
+
devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
edac_dbg(3, "success\n");
printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
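For clarity, with the constants added to mpc85xx_edac.h below, the two writes above resolve to:

/*   MPC85XX_PCI_ERR_EN   <- ~0 & ~PEX_ERR_ICCAIE_EN_BIT  == 0xfffdffff
 *   MPC85XX_PCI_ERR_ADDR <-  0 |  PEX_ERR_ICCAD_DISR_BIT == 0x00020000
 *
 * i.e. every PCIe error interrupt stays enabled except the invalid
 * PEX_CONFIG_ADDR/PEX_CONFIG_DATA access one, and detection of those
 * accesses is disabled as well.
 */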
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index 932016f..8c62564 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -134,13 +134,19 @@
#define MPC85XX_PCI_ERR_DR 0x0000
#define MPC85XX_PCI_ERR_CAP_DR 0x0004
#define MPC85XX_PCI_ERR_EN 0x0008
+#define PEX_ERR_ICCAIE_EN_BIT 0x00020000
#define MPC85XX_PCI_ERR_ATTRIB 0x000c
#define MPC85XX_PCI_ERR_ADDR 0x0010
+#define PEX_ERR_ICCAD_DISR_BIT 0x00020000
#define MPC85XX_PCI_ERR_EXT_ADDR 0x0014
#define MPC85XX_PCI_ERR_DL 0x0018
#define MPC85XX_PCI_ERR_DH 0x001c
#define MPC85XX_PCI_GAS_TIMR 0x0020
#define MPC85XX_PCI_PCIX_TIMR 0x0024
+#define MPC85XX_PCIE_ERR_CAP_R0 0x0028
+#define MPC85XX_PCIE_ERR_CAP_R1 0x002c
+#define MPC85XX_PCIE_ERR_CAP_R2 0x0030
+#define MPC85XX_PCIE_ERR_CAP_R3 0x0034
struct mpc85xx_mc_pdata {
char *name;
@@ -158,6 +164,7 @@ struct mpc85xx_l2_pdata {
struct mpc85xx_pci_pdata {
char *name;
+ bool is_pcie;
int edac_idx;
void __iomem *pci_vbase;
int irq;
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 2fd6a54..8f936bc 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -383,7 +383,7 @@ static void r82600_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(r82600_pci_tbl) = {
+static const struct pci_device_id r82600_pci_tbl[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
},
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index d7f1b57..54e2abe 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -461,7 +461,7 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
/*
* pci_device_id table for which devices we are looking for
*/
-static DEFINE_PCI_DEVICE_TABLE(sbridge_pci_tbl) = {
+static const struct pci_device_id sbridge_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
{0,} /* 0 terminated list. */
@@ -915,7 +915,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
}
}
-struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
+static struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
{
struct sbridge_dev *sbridge_dev;
@@ -1829,6 +1829,9 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
struct mem_ctl_info *mci;
struct sbridge_pvt *pvt;
+ if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
+ return NOTIFY_DONE;
+
mci = get_mci_for_node_id(mce->socketid);
if (!mci)
return NOTIFY_BAD;
@@ -2142,9 +2145,10 @@ static int __init sbridge_init(void)
opstate_init();
pci_rc = pci_register_driver(&sbridge_driver);
-
if (pci_rc >= 0) {
mce_register_decode_chain(&sbridge_mce_dec);
+ if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
+ sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
return 0;
}
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 1a4df82..4891b45 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -448,7 +448,7 @@ static void x38_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(x38_pci_tbl) = {
+static const struct pci_device_id x38_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
X38},
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index f1d54a3..bdb5a00 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -31,6 +31,16 @@ config EXTCON_ADC_JACK
help
Say Y here to enable extcon device driver based on ADC values.
+config EXTCON_MAX14577
+ tristate "MAX14577 EXTCON Support"
+ depends on MFD_MAX14577
+ select IRQ_DOMAIN
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the MUIC device of
+ Maxim MAX14577 PMIC. The MAX14577 MUIC is a USB port accessory
+ detector and switch.
+
config EXTCON_MAX77693
tristate "MAX77693 EXTCON Support"
depends on MFD_MAX77693 && INPUT
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 759fdae..43eccc0 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_OF_EXTCON) += of_extcon.o
obj-$(CONFIG_EXTCON) += extcon-class.o
obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
+obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index a287cec..c20602f 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -44,6 +44,15 @@
#define HPDET_DEBOUNCE 500
#define DEFAULT_MICD_TIMEOUT 2000
+#define MICD_LVL_1_TO_7 (ARIZONA_MICD_LVL_1 | ARIZONA_MICD_LVL_2 | \
+ ARIZONA_MICD_LVL_3 | ARIZONA_MICD_LVL_4 | \
+ ARIZONA_MICD_LVL_5 | ARIZONA_MICD_LVL_6 | \
+ ARIZONA_MICD_LVL_7)
+
+#define MICD_LVL_0_TO_7 (ARIZONA_MICD_LVL_0 | MICD_LVL_1_TO_7)
+
+#define MICD_LVL_0_TO_8 (MICD_LVL_0_TO_7 | ARIZONA_MICD_LVL_8)
+
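These macros name the magic masks that the hunks below replace; the numeric correspondence, derivable from this patch alone, is:

/*   MICD_LVL_0_TO_8    == 0x7fc   (old "val & 0x7fc" checks)
 *   MICD_LVL_0_TO_7    == 0x3fc   (old "val & 0x3fc" check)
 *   MICD_LVL_1_TO_7    == 0x3f8   (old "val & 0x3f8" check)
 *   ARIZONA_MICD_LVL_8 == 0x400   (old "val & 0x400" check)
 * so ARIZONA_MICD_LVL_n appears to be bit (n + 2) of the MICD status.
 */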
struct arizona_extcon_info {
struct device *dev;
struct arizona *arizona;
@@ -426,26 +435,15 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
}
val &= ARIZONA_HP_LVL_B_MASK;
+ /* Convert to ohms, the value is in 0.5 ohm increments */
+ val /= 2;
regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
&range);
range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
>> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
- /* Skip up or down a range? */
- if (range && (val < arizona_hpdet_c_ranges[range].min)) {
- range--;
- dev_dbg(arizona->dev, "Moving to HPDET range %d-%d\n",
- arizona_hpdet_c_ranges[range].min,
- arizona_hpdet_c_ranges[range].max);
- regmap_update_bits(arizona->regmap,
- ARIZONA_HEADPHONE_DETECT_1,
- ARIZONA_HP_IMPEDANCE_RANGE_MASK,
- range <<
- ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
- return -EAGAIN;
- }
-
+ /* Skip up a range, or report? */
if (range < ARRAY_SIZE(arizona_hpdet_c_ranges) - 1 &&
(val >= arizona_hpdet_c_ranges[range].max)) {
range++;
@@ -459,6 +457,12 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
return -EAGAIN;
}
+
+ if (range && (val < arizona_hpdet_c_ranges[range].min)) {
+ dev_dbg(arizona->dev, "Reporting range boundary %d\n",
+ arizona_hpdet_c_ranges[range].min);
+ val = arizona_hpdet_c_ranges[range].min;
+ }
}
dev_dbg(arizona->dev, "HP impedance %d ohms\n", val);
@@ -594,9 +598,15 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
dev_err(arizona->dev, "Failed to report HP/line: %d\n",
ret);
+done:
+ /* Reset back to starting range */
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_IMPEDANCE_RANGE_MASK | ARIZONA_HP_POLL,
+ 0);
+
arizona_extcon_do_magic(info, 0);
-done:
if (id_gpio)
gpio_set_value_cansleep(id_gpio, 0);
@@ -765,7 +775,20 @@ static void arizona_micd_detect(struct work_struct *work)
mutex_lock(&info->lock);
- for (i = 0; i < 10 && !(val & 0x7fc); i++) {
+ /* If the cable was removed while measuring ignore the result */
+ ret = extcon_get_cable_state_(&info->edev, ARIZONA_CABLE_MECHANICAL);
+ if (ret < 0) {
+ dev_err(arizona->dev, "Failed to check cable state: %d\n",
+ ret);
+ mutex_unlock(&info->lock);
+ return;
+ } else if (!ret) {
+ dev_dbg(arizona->dev, "Ignoring MICDET for removed cable\n");
+ mutex_unlock(&info->lock);
+ return;
+ }
+
+ for (i = 0; i < 10 && !(val & MICD_LVL_0_TO_8); i++) {
ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
if (ret != 0) {
dev_err(arizona->dev,
@@ -784,7 +807,7 @@ static void arizona_micd_detect(struct work_struct *work)
}
}
- if (i == 10 && !(val & 0x7fc)) {
+ if (i == 10 && !(val & MICD_LVL_0_TO_8)) {
dev_err(arizona->dev, "Failed to get valid MICDET value\n");
mutex_unlock(&info->lock);
return;
@@ -798,7 +821,7 @@ static void arizona_micd_detect(struct work_struct *work)
}
/* If we got a high impedance we should have a headset, report it. */
- if (info->detecting && (val & 0x400)) {
+ if (info->detecting && (val & ARIZONA_MICD_LVL_8)) {
arizona_identify_headphone(info);
ret = extcon_update_state(&info->edev,
@@ -827,7 +850,7 @@ static void arizona_micd_detect(struct work_struct *work)
* plain headphones. If both polarities report a low
* impedance then give up and report headphones.
*/
- if (info->detecting && (val & 0x3f8)) {
+ if (info->detecting && (val & MICD_LVL_1_TO_7)) {
if (info->jack_flips >= info->micd_num_modes * 10) {
dev_dbg(arizona->dev, "Detected HP/line\n");
arizona_identify_headphone(info);
@@ -851,7 +874,7 @@ static void arizona_micd_detect(struct work_struct *work)
* If we're still detecting and we detect a short then we've
* got a headphone. Otherwise it's a button press.
*/
- if (val & 0x3fc) {
+ if (val & MICD_LVL_0_TO_7) {
if (info->mic) {
dev_dbg(arizona->dev, "Mic button detected\n");
@@ -1126,6 +1149,16 @@ static int arizona_extcon_probe(struct platform_device *pdev)
break;
}
break;
+ case WM5110:
+ switch (arizona->rev) {
+ case 0 ... 2:
+ break;
+ default:
+ info->micd_clamp = true;
+ info->hpdet_ip = 2;
+ break;
+ }
+ break;
default:
break;
}
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 7e0dff5..a63a6b2 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -40,6 +40,7 @@ struct gpio_extcon_data {
int irq;
struct delayed_work work;
unsigned long debounce_jiffies;
+ bool check_on_resume;
};
static void gpio_extcon_work(struct work_struct *work)
@@ -103,8 +104,15 @@ static int gpio_extcon_probe(struct platform_device *pdev)
extcon_data->gpio_active_low = pdata->gpio_active_low;
extcon_data->state_on = pdata->state_on;
extcon_data->state_off = pdata->state_off;
+ extcon_data->check_on_resume = pdata->check_on_resume;
if (pdata->state_on && pdata->state_off)
extcon_data->edev.print_state = extcon_gpio_print_state;
+
+ ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
+ pdev->name);
+ if (ret < 0)
+ return ret;
+
if (pdata->debounce) {
ret = gpio_set_debounce(extcon_data->gpio,
pdata->debounce * 1000);
@@ -117,11 +125,6 @@ static int gpio_extcon_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
- pdev->name);
- if (ret < 0)
- goto err;
-
INIT_DELAYED_WORK(&extcon_data->work, gpio_extcon_work);
extcon_data->irq = gpio_to_irq(extcon_data->gpio);
@@ -159,12 +162,31 @@ static int gpio_extcon_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int gpio_extcon_resume(struct device *dev)
+{
+ struct gpio_extcon_data *extcon_data;
+
+ extcon_data = dev_get_drvdata(dev);
+ if (extcon_data->check_on_resume)
+ queue_delayed_work(system_power_efficient_wq,
+ &extcon_data->work, extcon_data->debounce_jiffies);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops gpio_extcon_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, gpio_extcon_resume)
+};
+
static struct platform_driver gpio_extcon_driver = {
.probe = gpio_extcon_probe,
.remove = gpio_extcon_remove,
.driver = {
.name = "extcon-gpio",
.owner = THIS_MODULE,
+ .pm = &gpio_extcon_pm_ops,
},
};
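A minimal sketch of board code using the new flag, assuming the gpio_extcon_platform_data layout this probe function dereferences (the device name and GPIO number below are made up):

	static struct gpio_extcon_platform_data my_cable_pdata = {
		.name            = "my-cable",	/* hypothetical name */
		.gpio            = 42,		/* hypothetical GPIO */
		.debounce        = 20,		/* ms */
		.check_on_resume = true,	/* re-check cable state on resume */
	};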
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
new file mode 100644
index 0000000..3846941
--- /dev/null
+++ b/drivers/extcon/extcon-max14577.c
@@ -0,0 +1,752 @@
+/*
+ * extcon-max14577.c - MAX14577 extcon driver to support MAX14577 MUIC
+ *
+ * Copyright (C) 2013 Samsung Electronics
+ * Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/max14577.h>
+#include <linux/mfd/max14577-private.h>
+#include <linux/extcon.h>
+
+#define DEV_NAME "max14577-muic"
+#define DELAY_MS_DEFAULT 17000 /* unit: millisecond */
+
+enum max14577_muic_adc_debounce_time {
+ ADC_DEBOUNCE_TIME_5MS = 0,
+ ADC_DEBOUNCE_TIME_10MS,
+ ADC_DEBOUNCE_TIME_25MS,
+ ADC_DEBOUNCE_TIME_38_62MS,
+};
+
+enum max14577_muic_status {
+ MAX14577_MUIC_STATUS1 = 0,
+ MAX14577_MUIC_STATUS2 = 1,
+ MAX14577_MUIC_STATUS_END,
+};
+
+struct max14577_muic_info {
+ struct device *dev;
+ struct max14577 *max14577;
+ struct extcon_dev *edev;
+ int prev_cable_type;
+ int prev_chg_type;
+ u8 status[MAX14577_MUIC_STATUS_END];
+
+ bool irq_adc;
+ bool irq_chg;
+ struct work_struct irq_work;
+ struct mutex mutex;
+
+ /*
+ * Use a delayed workqueue to detect the cable state and then
+ * notify it to the consumers/platform through a uevent.
+ * After the platform has finished booting, the extcon provider
+ * driver should report the cable state to the upper layer.
+ */
+ struct delayed_work wq_detcable;
+
+ /*
+ * Default USB/UART path, either UART/USB or AUX_UART/AUX_USB:
+ * the h/w path of COMP2/COMN1 in the CONTROL1 register.
+ */
+ int path_usb;
+ int path_uart;
+};
+
+enum max14577_muic_cable_group {
+ MAX14577_CABLE_GROUP_ADC = 0,
+ MAX14577_CABLE_GROUP_CHG,
+};
+
+/**
+ * struct max14577_muic_irq
+ * @irq: the index into the irq list of the MUIC device.
+ * @name: the name of the irq.
+ * @virq: the virtual irq obtained from the irq domain.
+ */
+struct max14577_muic_irq {
+ unsigned int irq;
+ const char *name;
+ unsigned int virq;
+};
+
+static struct max14577_muic_irq muic_irqs[] = {
+ { MAX14577_IRQ_INT1_ADC, "muic-ADC" },
+ { MAX14577_IRQ_INT1_ADCLOW, "muic-ADCLOW" },
+ { MAX14577_IRQ_INT1_ADCERR, "muic-ADCError" },
+ { MAX14577_IRQ_INT2_CHGTYP, "muic-CHGTYP" },
+ { MAX14577_IRQ_INT2_CHGDETRUN, "muic-CHGDETRUN" },
+ { MAX14577_IRQ_INT2_DCDTMR, "muic-DCDTMR" },
+ { MAX14577_IRQ_INT2_DBCHG, "muic-DBCHG" },
+ { MAX14577_IRQ_INT2_VBVOLT, "muic-VBVOLT" },
+};
+
+/* Define supported accessory type */
+enum max14577_muic_acc_type {
+ MAX14577_MUIC_ADC_GROUND = 0x0,
+ MAX14577_MUIC_ADC_SEND_END_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S1_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S2_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S3_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S4_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S5_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S6_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S7_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S8_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S9_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S10_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S11_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S12_BUTTON,
+ MAX14577_MUIC_ADC_RESERVED_ACC_1,
+ MAX14577_MUIC_ADC_RESERVED_ACC_2,
+ MAX14577_MUIC_ADC_RESERVED_ACC_3,
+ MAX14577_MUIC_ADC_RESERVED_ACC_4,
+ MAX14577_MUIC_ADC_RESERVED_ACC_5,
+ MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE2,
+ MAX14577_MUIC_ADC_PHONE_POWERED_DEV,
+ MAX14577_MUIC_ADC_TTY_CONVERTER,
+ MAX14577_MUIC_ADC_UART_CABLE,
+ MAX14577_MUIC_ADC_CEA936A_TYPE1_CHG,
+ MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF,
+ MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON,
+ MAX14577_MUIC_ADC_AV_CABLE_NOLOAD,
+ MAX14577_MUIC_ADC_CEA936A_TYPE2_CHG,
+ MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF,
+ MAX14577_MUIC_ADC_FACTORY_MODE_UART_ON,
+ MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE1, /* with Remote and Simple Ctrl */
+ MAX14577_MUIC_ADC_OPEN,
+};
+
+/* The max14577 MUIC device supports the following accessories (external connectors) */
+enum {
+ EXTCON_CABLE_USB = 0,
+ EXTCON_CABLE_TA,
+ EXTCON_CABLE_FAST_CHARGER,
+ EXTCON_CABLE_SLOW_CHARGER,
+ EXTCON_CABLE_CHARGE_DOWNSTREAM,
+ EXTCON_CABLE_JIG_USB_ON,
+ EXTCON_CABLE_JIG_USB_OFF,
+ EXTCON_CABLE_JIG_UART_OFF,
+ EXTCON_CABLE_JIG_UART_ON,
+
+ _EXTCON_CABLE_NUM,
+};
+
+static const char *max14577_extcon_cable[] = {
+ [EXTCON_CABLE_USB] = "USB",
+ [EXTCON_CABLE_TA] = "TA",
+ [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
+ [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
+ [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
+ [EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON",
+ [EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
+ [EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
+ [EXTCON_CABLE_JIG_UART_ON] = "JIG-UART-ON",
+
+ NULL,
+};
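A minimal consumer-side sketch (not part of this patch), assuming the extcon consumer API of this kernel version; "max14577-muic" is DEV_NAME from this file and "USB" is one of the cable names above:

	struct extcon_dev *edev = extcon_get_extcon_dev("max14577-muic");

	/* True while the MUIC reports an attached USB cable. */
	if (edev && extcon_get_cable_state(edev, "USB") > 0)
		start_usb_charging();	/* hypothetical consumer action */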
+
+/*
+ * max14577_muic_set_debounce_time - Set the debounce time of ADC
+ * @info: the instance including private data of max14577 MUIC
+ * @time: the debounce time of ADC
+ */
+static int max14577_muic_set_debounce_time(struct max14577_muic_info *info,
+ enum max14577_muic_adc_debounce_time time)
+{
+ u8 ret;
+
+ switch (time) {
+ case ADC_DEBOUNCE_TIME_5MS:
+ case ADC_DEBOUNCE_TIME_10MS:
+ case ADC_DEBOUNCE_TIME_25MS:
+ case ADC_DEBOUNCE_TIME_38_62MS:
+ ret = max14577_update_reg(info->max14577->regmap,
+ MAX14577_MUIC_REG_CONTROL3,
+ CTRL3_ADCDBSET_MASK,
+ time << CTRL3_ADCDBSET_SHIFT);
+ if (ret) {
+ dev_err(info->dev, "failed to set ADC debounce time\n");
+ return ret;
+ }
+ break;
+ default:
+ dev_err(info->dev, "invalid ADC debounce time\n");
+ return -EINVAL;
+ }
+
+ return 0;
+};
+
+/*
+ * max14577_muic_set_path - Set hardware line according to attached cable
+ * @info: the instance including private data of max14577 MUIC
+ * @val: the path value according to the attached cable
+ * @attached: the state of the cable (true: attached, false: detached)
+ *
+ * The max14577 MUIC device shares its external H/W lines among a variety of
+ * cables, so this function sets the internal H/W line path according to the
+ * type of the attached cable.
+ */
+static int max14577_muic_set_path(struct max14577_muic_info *info,
+ u8 val, bool attached)
+{
+ int ret = 0;
+ u8 ctrl1, ctrl2 = 0;
+
+ /* Set open state to path before changing hw path */
+ ret = max14577_update_reg(info->max14577->regmap,
+ MAX14577_MUIC_REG_CONTROL1,
+ CLEAR_IDBEN_MICEN_MASK, CTRL1_SW_OPEN);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to update MUIC register\n");
+ return ret;
+ }
+
+ if (attached)
+ ctrl1 = val;
+ else
+ ctrl1 = CTRL1_SW_OPEN;
+
+ ret = max14577_update_reg(info->max14577->regmap,
+ MAX14577_MUIC_REG_CONTROL1,
+ CLEAR_IDBEN_MICEN_MASK, ctrl1);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to update MUIC register\n");
+ return ret;
+ }
+
+ if (attached)
+ ctrl2 |= CTRL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
+ else
+ ctrl2 |= CTRL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
+
+ ret = max14577_update_reg(info->max14577->regmap,
+ MAX14577_REG_CONTROL2,
+ CTRL2_LOWPWR_MASK | CTRL2_CPEN_MASK, ctrl2);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to update MUIC register\n");
+ return ret;
+ }
+
+ dev_dbg(info->dev,
+ "CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
+ ctrl1, ctrl2, attached ? "attached" : "detached");
+
+ return 0;
+}
+
+/*
+ * max14577_muic_get_cable_type - Return cable type and check cable state
+ * @info: the instance including private data of max14577 MUIC
+ * @group: the cable group (ADC or charger) to inspect
+ * @attached: used to return whether the cable is attached or detached
+ *
+ * This function checks whether the cable is attached or detached, and then
+ * determines the precise cable type according to the cable group:
+ * - MAX14577_CABLE_GROUP_ADC
+ * - MAX14577_CABLE_GROUP_CHG
+ */
+static int max14577_muic_get_cable_type(struct max14577_muic_info *info,
+ enum max14577_muic_cable_group group, bool *attached)
+{
+ int cable_type = 0;
+ int adc;
+ int chg_type;
+
+ switch (group) {
+ case MAX14577_CABLE_GROUP_ADC:
+ /*
+ * Read ADC value to check cable type and decide cable state
+ * according to cable type
+ */
+ adc = info->status[MAX14577_MUIC_STATUS1] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ /*
+ * Check the current cable state/type and store the cable type
+ * (info->prev_cable_type) so it can be handled when the cable
+ * is detached.
+ */
+ if (adc == MAX14577_MUIC_ADC_OPEN) {
+ *attached = false;
+
+ cable_type = info->prev_cable_type;
+ info->prev_cable_type = MAX14577_MUIC_ADC_OPEN;
+ } else {
+ *attached = true;
+
+ cable_type = info->prev_cable_type = adc;
+ }
+ break;
+ case MAX14577_CABLE_GROUP_CHG:
+ /*
+ * Read charger type to check cable type and decide cable state
+ * according to type of charger cable.
+ */
+ chg_type = info->status[MAX14577_MUIC_STATUS2] &
+ STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ if (chg_type == MAX14577_CHARGER_TYPE_NONE) {
+ *attached = false;
+
+ cable_type = info->prev_chg_type;
+ info->prev_chg_type = MAX14577_CHARGER_TYPE_NONE;
+ } else {
+ *attached = true;
+
+ /*
+ * Check the current cable state/type and store the cable
+ * type (info->prev_chg_type) so it can be handled when the
+ * charger cable is detached.
+ */
+ cable_type = info->prev_chg_type = chg_type;
+ }
+
+ break;
+ default:
+ dev_err(info->dev, "Unknown cable group (%d)\n", group);
+ cable_type = -EINVAL;
+ break;
+ }
+
+ return cable_type;
+}
+
+static int max14577_muic_jig_handler(struct max14577_muic_info *info,
+ int cable_type, bool attached)
+{
+ char cable_name[32];
+ int ret = 0;
+ u8 path = CTRL1_SW_OPEN;
+
+ dev_dbg(info->dev,
+ "external connector is %s (adc:0x%02x)\n",
+ attached ? "attached" : "detached", cable_type);
+
+ switch (cable_type) {
+ case MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
+ /* PATH:AP_USB */
+ strcpy(cable_name, "JIG-USB-OFF");
+ path = CTRL1_SW_USB;
+ break;
+ case MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
+ /* PATH:AP_USB */
+ strcpy(cable_name, "JIG-USB-ON");
+ path = CTRL1_SW_USB;
+ break;
+ case MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
+ /* PATH:AP_UART */
+ strcpy(cable_name, "JIG-UART-OFF");
+ path = CTRL1_SW_UART;
+ break;
+ default:
+ dev_err(info->dev, "failed to detect %s jig cable\n",
+ attached ? "attached" : "detached");
+ return -EINVAL;
+ }
+
+ ret = max14577_muic_set_path(info, path, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, cable_name, attached);
+
+ return 0;
+}
+
+static int max14577_muic_adc_handler(struct max14577_muic_info *info)
+{
+ int cable_type;
+ bool attached;
+ int ret = 0;
+
+ /* Check accessory state which is either detached or attached */
+ cable_type = max14577_muic_get_cable_type(info,
+ MAX14577_CABLE_GROUP_ADC, &attached);
+
+ dev_dbg(info->dev,
+ "external connector is %s (adc:0x%02x, prev_adc:0x%x)\n",
+ attached ? "attached" : "detached", cable_type,
+ info->prev_cable_type);
+
+ switch (cable_type) {
+ case MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF:
+ case MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON:
+ case MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF:
+ /* JIG */
+ ret = max14577_muic_jig_handler(info, cable_type, attached);
+ if (ret < 0)
+ return ret;
+ break;
+ case MAX14577_MUIC_ADC_GROUND:
+ case MAX14577_MUIC_ADC_SEND_END_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S1_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S2_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S3_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S4_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S5_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S6_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S7_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S8_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S9_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S10_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S11_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S12_BUTTON:
+ case MAX14577_MUIC_ADC_RESERVED_ACC_1:
+ case MAX14577_MUIC_ADC_RESERVED_ACC_2:
+ case MAX14577_MUIC_ADC_RESERVED_ACC_3:
+ case MAX14577_MUIC_ADC_RESERVED_ACC_4:
+ case MAX14577_MUIC_ADC_RESERVED_ACC_5:
+ case MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE2:
+ case MAX14577_MUIC_ADC_PHONE_POWERED_DEV:
+ case MAX14577_MUIC_ADC_TTY_CONVERTER:
+ case MAX14577_MUIC_ADC_UART_CABLE:
+ case MAX14577_MUIC_ADC_CEA936A_TYPE1_CHG:
+ case MAX14577_MUIC_ADC_AV_CABLE_NOLOAD:
+ case MAX14577_MUIC_ADC_CEA936A_TYPE2_CHG:
+ case MAX14577_MUIC_ADC_FACTORY_MODE_UART_ON:
+ case MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE1:
+ /*
+ * These accessories aren't used in the general case. If one of
+ * them specifically needs to be detected, implement the proper
+ * handling for its attach/detach here.
+ */
+ dev_info(info->dev,
+ "accessory is %s but it isn't used (adc:0x%x)\n",
+ attached ? "attached" : "detached", cable_type);
+ return -EAGAIN;
+ default:
+ dev_err(info->dev,
+ "failed to detect %s accessory (adc:0x%x)\n",
+ attached ? "attached" : "detached", cable_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int max14577_muic_chg_handler(struct max14577_muic_info *info)
+{
+ int chg_type;
+ bool attached;
+ int ret = 0;
+
+ chg_type = max14577_muic_get_cable_type(info,
+ MAX14577_CABLE_GROUP_CHG, &attached);
+
+ dev_dbg(info->dev,
+ "external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n",
+ attached ? "attached" : "detached",
+ chg_type, info->prev_chg_type);
+
+ switch (chg_type) {
+ case MAX14577_CHARGER_TYPE_USB:
+ /* PATH:AP_USB */
+ ret = max14577_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "USB", attached);
+ break;
+ case MAX14577_CHARGER_TYPE_DEDICATED_CHG:
+ extcon_set_cable_state(info->edev, "TA", attached);
+ break;
+ case MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT:
+ extcon_set_cable_state(info->edev,
+ "Charge-downstream", attached);
+ break;
+ case MAX14577_CHARGER_TYPE_SPECIAL_500MA:
+ extcon_set_cable_state(info->edev, "Slow-charger", attached);
+ break;
+ case MAX14577_CHARGER_TYPE_SPECIAL_1A:
+ extcon_set_cable_state(info->edev, "Fast-charger", attached);
+ break;
+ case MAX14577_CHARGER_TYPE_NONE:
+ case MAX14577_CHARGER_TYPE_DEAD_BATTERY:
+ break;
+ default:
+ dev_err(info->dev,
+ "failed to detect %s accessory (chg_type:0x%x)\n",
+ attached ? "attached" : "detached", chg_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void max14577_muic_irq_work(struct work_struct *work)
+{
+ struct max14577_muic_info *info = container_of(work,
+ struct max14577_muic_info, irq_work);
+ int ret = 0;
+
+ if (!info->edev)
+ return;
+
+ mutex_lock(&info->mutex);
+
+ ret = max14577_bulk_read(info->max14577->regmap,
+ MAX14577_MUIC_REG_STATUS1, info->status, 2);
+ if (ret) {
+ dev_err(info->dev, "failed to read MUIC register\n");
+ mutex_unlock(&info->mutex);
+ return;
+ }
+
+ if (info->irq_adc) {
+ ret = max14577_muic_adc_handler(info);
+ info->irq_adc = false;
+ }
+ if (info->irq_chg) {
+ ret = max14577_muic_chg_handler(info);
+ info->irq_chg = false;
+ }
+
+ if (ret < 0)
+ dev_err(info->dev, "failed to handle MUIC interrupt\n");
+
+ mutex_unlock(&info->mutex);
+
+ return;
+}
+
+static irqreturn_t max14577_muic_irq_handler(int irq, void *data)
+{
+ struct max14577_muic_info *info = data;
+ int i, irq_type = -1;
+
+ /*
+ * We may be called multiple times for different nested IRQs,
+ * including changes in INT1_ADC and INT2_CHGTYP at once.
+ * However, we only need to know whether it was an ADC or a charger
+ * interrupt (or both), so decode the IRQ and set the proper flags.
+ */
+ for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
+ if (irq == muic_irqs[i].virq)
+ irq_type = muic_irqs[i].irq;
+
+ switch (irq_type) {
+ case MAX14577_IRQ_INT1_ADC:
+ case MAX14577_IRQ_INT1_ADCLOW:
+ case MAX14577_IRQ_INT1_ADCERR:
+ /* Handle all accessory types except chargers */
+ info->irq_adc = true;
+ break;
+ case MAX14577_IRQ_INT2_CHGTYP:
+ case MAX14577_IRQ_INT2_CHGDETRUN:
+ case MAX14577_IRQ_INT2_DCDTMR:
+ case MAX14577_IRQ_INT2_DBCHG:
+ case MAX14577_IRQ_INT2_VBVOLT:
+ /* Handle charger accessory */
+ info->irq_chg = true;
+ break;
+ default:
+ dev_err(info->dev, "muic interrupt: irq %d occurred, skipped\n",
+ irq_type);
+ return IRQ_HANDLED;
+ }
+ schedule_work(&info->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static int max14577_muic_detect_accessory(struct max14577_muic_info *info)
+{
+ int ret = 0;
+ int adc;
+ int chg_type;
+ bool attached;
+
+ mutex_lock(&info->mutex);
+
+ /* Read STATUSx register to detect accessory */
+ ret = max14577_bulk_read(info->max14577->regmap,
+ MAX14577_MUIC_REG_STATUS1, info->status, 2);
+ if (ret) {
+ dev_err(info->dev, "failed to read MUIC register\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+
+ adc = max14577_muic_get_cable_type(info, MAX14577_CABLE_GROUP_ADC,
+ &attached);
+ if (attached && adc != MAX14577_MUIC_ADC_OPEN) {
+ ret = max14577_muic_adc_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect accessory\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ }
+
+ chg_type = max14577_muic_get_cable_type(info, MAX14577_CABLE_GROUP_CHG,
+ &attached);
+ if (attached && chg_type != MAX14577_CHARGER_TYPE_NONE) {
+ ret = max14577_muic_chg_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect charger accessory\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&info->mutex);
+
+ return 0;
+}
+
+static void max14577_muic_detect_cable_wq(struct work_struct *work)
+{
+ struct max14577_muic_info *info = container_of(to_delayed_work(work),
+ struct max14577_muic_info, wq_detcable);
+
+ max14577_muic_detect_accessory(info);
+}
+
+static int max14577_muic_probe(struct platform_device *pdev)
+{
+ struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
+ struct max14577_muic_info *info;
+ int delay_jiffies;
+ int ret;
+ int i;
+ u8 id;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+ info->dev = &pdev->dev;
+ info->max14577 = max14577;
+
+ platform_set_drvdata(pdev, info);
+ mutex_init(&info->mutex);
+
+ INIT_WORK(&info->irq_work, max14577_muic_irq_work);
+
+ /* Support irq domain for max14577 MUIC device */
+ for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
+ struct max14577_muic_irq *muic_irq = &muic_irqs[i];
+ unsigned int virq = 0;
+
+ virq = regmap_irq_get_virq(max14577->irq_data, muic_irq->irq);
+ if (!virq)
+ return -EINVAL;
+ muic_irq->virq = virq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
+ max14577_muic_irq_handler,
+ IRQF_NO_SUSPEND,
+ muic_irq->name, info);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed: irq request (IRQ: %d,"
+ " error: %d)\n",
+ muic_irq->irq, ret);
+ return ret;
+ }
+ }
+
+ /* Initialize extcon device */
+ info->edev = devm_kzalloc(&pdev->dev, sizeof(*info->edev), GFP_KERNEL);
+ if (!info->edev) {
+ dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
+ return -ENOMEM;
+ }
+ info->edev->name = DEV_NAME;
+ info->edev->supported_cable = max14577_extcon_cable;
+ ret = extcon_dev_register(info->edev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register extcon device\n");
+ return ret;
+ }
+
+ /* Default h/w line path */
+ info->path_usb = CTRL1_SW_USB;
+ info->path_uart = CTRL1_SW_UART;
+ delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+
+ /* Set initial path for UART */
+ max14577_muic_set_path(info, info->path_uart, true);
+
+ /* Check the revision number of the MUIC device */
+ ret = max14577_read_reg(info->max14577->regmap,
+ MAX14577_REG_DEVICEID, &id);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to read revision number\n");
+ goto err_extcon;
+ }
+ dev_info(info->dev, "device ID : 0x%x\n", id);
+
+ /* Set ADC debounce time */
+ max14577_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
+
+ /*
+ * Detect the accessory after the platform has finished initializing.
+ *
+ * - Use a delayed workqueue to detect the cable state and then
+ *   notify it to the consumers/platform through a uevent.
+ *   After the platform has finished booting, the extcon provider
+ *   driver should report the cable state to the upper layer.
+ */
+ INIT_DELAYED_WORK(&info->wq_detcable, max14577_muic_detect_cable_wq);
+ ret = queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
+ delay_jiffies);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to schedule delayed work for cable detect\n");
+ goto err_extcon;
+ }
+
+ return ret;
+
+err_extcon:
+ extcon_dev_unregister(info->edev);
+ return ret;
+}
+
+static int max14577_muic_remove(struct platform_device *pdev)
+{
+ struct max14577_muic_info *info = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&info->irq_work);
+ extcon_dev_unregister(info->edev);
+
+ return 0;
+}
+
+static struct platform_driver max14577_muic_driver = {
+ .driver = {
+ .name = DEV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = max14577_muic_probe,
+ .remove = max14577_muic_remove,
+};
+
+module_platform_driver(max14577_muic_driver);
+
+MODULE_DESCRIPTION("MAXIM 14577 Extcon driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:extcon-max14577");
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 6c91976..2aea4bc 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -78,20 +78,24 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
{
- unsigned int set;
+ unsigned int set, id_src;
struct palmas_usb *palmas_usb = _palmas_usb;
palmas_read(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_LATCH_SET, &set);
+ palmas_read(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
+ PALMAS_USB_ID_INT_SRC, &id_src);
- if (set & PALMAS_USB_ID_INT_SRC_ID_GND) {
+ if ((set & PALMAS_USB_ID_INT_SRC_ID_GND) &&
+ (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_LATCH_CLR,
PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND);
palmas_usb->linkstat = PALMAS_USB_STATE_ID;
extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true);
dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
- } else if (set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) {
+ } else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) &&
+ (id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) {
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_LATCH_CLR,
PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT);
@@ -103,6 +107,11 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", false);
dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+ } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) &&
+ (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
+ palmas_usb->linkstat = PALMAS_USB_STATE_ID;
+ extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true);
+ dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
}
return IRQ_HANDLED;
@@ -269,7 +278,9 @@ static const struct dev_pm_ops palmas_pm_ops = {
static struct of_device_id of_palmas_match_tbl[] = {
{ .compatible = "ti,palmas-usb", },
+ { .compatible = "ti,palmas-usb-vid", },
{ .compatible = "ti,twl6035-usb", },
+ { .compatible = "ti,twl6035-usb-vid", },
{ /* end */ }
};
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 299fad6b5..5373dc5 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
+obj-$(CONFIG_UEFI_CPER) += efi/
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index eb26d62..e0f1cb3 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -553,7 +553,7 @@ static const struct bin_attribute dmi_entry_raw_attr = {
static void dmi_sysfs_entry_release(struct kobject *kobj)
{
struct dmi_sysfs_entry *entry = to_entry(kobj);
- sysfs_remove_bin_file(&entry->kobj, &dmi_entry_raw_attr);
+
spin_lock(&entry_list_lock);
list_del(&entry->list);
spin_unlock(&entry_list_lock);
@@ -685,6 +685,7 @@ static void __exit dmi_sysfs_exit(void)
pr_debug("dmi-sysfs: unloading.\n");
cleanup_entry_list();
kset_unregister(dmi_kset);
+ kobject_del(dmi_kobj);
kobject_put(dmi_kobj);
}
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 3150aa4..1e75f48 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -36,7 +36,18 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
backend for pstore by default. This setting can be overridden
using the efivars module's pstore_disable parameter.
-config UEFI_CPER
- def_bool n
+config EFI_RUNTIME_MAP
+ bool "Export efi runtime maps to sysfs"
+ depends on X86 && EFI && KEXEC
+ default y
+ help
+ Export efi runtime memory maps to /sys/firmware/efi/runtime-map.
+ That memory map is used, for example, by kexec to set up the EFI
+ virtual mapping for the 2nd kernel, but it can also be used for
+ debugging purposes.
+
+ See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map.
endmenu
+
+config UEFI_CPER
+ bool
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 9ba156d..9553496 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -1,7 +1,8 @@
#
# Makefile for linux kernel
#
-obj-y += efi.o vars.o
+obj-$(CONFIG_EFI) += efi.o vars.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
obj-$(CONFIG_UEFI_CPER) += cper.o
+obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 2e2fbde..4753bac 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -32,6 +32,9 @@ struct efi __read_mostly efi = {
.hcdp = EFI_INVALID_TABLE_ADDR,
.uga = EFI_INVALID_TABLE_ADDR,
.uv_systab = EFI_INVALID_TABLE_ADDR,
+ .fw_vendor = EFI_INVALID_TABLE_ADDR,
+ .runtime = EFI_INVALID_TABLE_ADDR,
+ .config_table = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);
@@ -71,13 +74,49 @@ static ssize_t systab_show(struct kobject *kobj,
static struct kobj_attribute efi_attr_systab =
__ATTR(systab, 0400, systab_show, NULL);
+#define EFI_FIELD(var) efi.var
+
+#define EFI_ATTR_SHOW(name) \
+static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "0x%lx\n", EFI_FIELD(name)); \
+}
+
+EFI_ATTR_SHOW(fw_vendor);
+EFI_ATTR_SHOW(runtime);
+EFI_ATTR_SHOW(config_table);
+
+static struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor);
+static struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime);
+static struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table);
+
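For readers unfamiliar with the pattern, EFI_ATTR_SHOW(fw_vendor) above expands to (approximately):

static ssize_t fw_vendor_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%lx\n", efi.fw_vendor);
}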
static struct attribute *efi_subsys_attrs[] = {
&efi_attr_systab.attr,
- NULL, /* maybe more in the future? */
+ &efi_attr_fw_vendor.attr,
+ &efi_attr_runtime.attr,
+ &efi_attr_config_table.attr,
+ NULL,
};
+static umode_t efi_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ umode_t mode = attr->mode;
+
+ if (attr == &efi_attr_fw_vendor.attr)
+ return (efi.fw_vendor == EFI_INVALID_TABLE_ADDR) ? 0 : mode;
+ else if (attr == &efi_attr_runtime.attr)
+ return (efi.runtime == EFI_INVALID_TABLE_ADDR) ? 0 : mode;
+ else if (attr == &efi_attr_config_table.attr)
+ return (efi.config_table == EFI_INVALID_TABLE_ADDR) ? 0 : mode;
+
+ return mode;
+}
+
static struct attribute_group efi_subsys_attr_group = {
.attrs = efi_subsys_attrs,
+ .is_visible = efi_attr_is_visible,
};
static struct efivars generic_efivars;
@@ -128,6 +167,10 @@ static int __init efisubsys_init(void)
goto err_unregister;
}
+ error = efi_runtime_map_init(efi_kobj);
+ if (error)
+ goto err_remove_group;
+
/* and the standard mountpoint for efivarfs */
efivars_kobj = kobject_create_and_add("efivars", efi_kobj);
if (!efivars_kobj) {
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
new file mode 100644
index 0000000..97cdd16
--- /dev/null
+++ b/drivers/firmware/efi/runtime-map.c
@@ -0,0 +1,181 @@
+/*
+ * linux/drivers/firmware/efi/runtime-map.c
+ * Copyright (C) 2013 Red Hat, Inc., Dave Young <dyoung@redhat.com>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/efi.h>
+#include <linux/slab.h>
+
+#include <asm/setup.h>
+
+static void *efi_runtime_map;
+static int nr_efi_runtime_map;
+static u32 efi_memdesc_size;
+
+struct efi_runtime_map_entry {
+ efi_memory_desc_t md;
+ struct kobject kobj; /* kobject for each entry */
+};
+
+static struct efi_runtime_map_entry **map_entries;
+
+struct map_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct efi_runtime_map_entry *entry, char *buf);
+};
+
+static inline struct map_attribute *to_map_attr(struct attribute *attr)
+{
+ return container_of(attr, struct map_attribute, attr);
+}
+
+static ssize_t type_show(struct efi_runtime_map_entry *entry, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type);
+}
+
+#define EFI_RUNTIME_FIELD(var) entry->md.var
+
+#define EFI_RUNTIME_U64_ATTR_SHOW(name) \
+static ssize_t name##_show(struct efi_runtime_map_entry *entry, char *buf) \
+{ \
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n", EFI_RUNTIME_FIELD(name)); \
+}
+
+EFI_RUNTIME_U64_ATTR_SHOW(phys_addr);
+EFI_RUNTIME_U64_ATTR_SHOW(virt_addr);
+EFI_RUNTIME_U64_ATTR_SHOW(num_pages);
+EFI_RUNTIME_U64_ATTR_SHOW(attribute);
+
+static inline struct efi_runtime_map_entry *to_map_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct efi_runtime_map_entry, kobj);
+}
+
+static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct efi_runtime_map_entry *entry = to_map_entry(kobj);
+ struct map_attribute *map_attr = to_map_attr(attr);
+
+ return map_attr->show(entry, buf);
+}
+
+static struct map_attribute map_type_attr = __ATTR_RO(type);
+static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr);
+static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr);
+static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages);
+static struct map_attribute map_attribute_attr = __ATTR_RO(attribute);
+
+/*
+ * These are default attributes that are added for every memmap entry.
+ */
+static struct attribute *def_attrs[] = {
+ &map_type_attr.attr,
+ &map_phys_addr_attr.attr,
+ &map_virt_addr_attr.attr,
+ &map_num_pages_attr.attr,
+ &map_attribute_attr.attr,
+ NULL
+};
+
+static const struct sysfs_ops map_attr_ops = {
+ .show = map_attr_show,
+};
+
+static void map_release(struct kobject *kobj)
+{
+ struct efi_runtime_map_entry *entry;
+
+ entry = to_map_entry(kobj);
+ kfree(entry);
+}
+
+static struct kobj_type __refdata map_ktype = {
+ .sysfs_ops = &map_attr_ops,
+ .default_attrs = def_attrs,
+ .release = map_release,
+};
+
+static struct kset *map_kset;
+
+static struct efi_runtime_map_entry *
+add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
+{
+ int ret;
+ struct efi_runtime_map_entry *entry;
+
+ if (!map_kset) {
+ map_kset = kset_create_and_add("runtime-map", NULL, kobj);
+ if (!map_kset)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ kset_unregister(map_kset);
+ return entry;
+ }
+
+ memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size,
+ sizeof(efi_memory_desc_t));
+
+ kobject_init(&entry->kobj, &map_ktype);
+ entry->kobj.kset = map_kset;
+ ret = kobject_add(&entry->kobj, NULL, "%d", nr);
+ if (ret) {
+ kobject_put(&entry->kobj);
+ kset_unregister(map_kset);
+ return ERR_PTR(ret);
+ }
+
+ return entry;
+}
+
+void efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size)
+{
+ efi_runtime_map = map;
+ nr_efi_runtime_map = nr_entries;
+ efi_memdesc_size = desc_size;
+}
+
+int __init efi_runtime_map_init(struct kobject *efi_kobj)
+{
+ int i, j, ret = 0;
+ struct efi_runtime_map_entry *entry;
+
+ if (!efi_runtime_map)
+ return 0;
+
+ map_entries = kzalloc(nr_efi_runtime_map * sizeof(entry), GFP_KERNEL);
+ if (!map_entries) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < nr_efi_runtime_map; i++) {
+ entry = add_sysfs_runtime_map_entry(efi_kobj, i);
+ if (IS_ERR(entry)) {
+ ret = PTR_ERR(entry);
+ goto out_add_entry;
+ }
+ *(map_entries + i) = entry;
+ }
+
+ return 0;
+out_add_entry:
+ for (j = i - 1; j > 0; j--) {
+ entry = *(map_entries + j);
+ kobject_put(&entry->kobj);
+ }
+ if (map_kset)
+ kset_unregister(map_kset);
+out:
+ return ret;
+}
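Once the kset above is populated, each map entry shows up as /sys/firmware/efi/runtime-map/<nr>/{type,phys_addr,virt_addr,num_pages,attribute}. A rough userspace sketch of walking those files (error handling trimmed; the layout is the one described in the sysfs-firmware-efi-runtime-map ABI document):

#include <stdio.h>

int main(void)
{
	static const char *files[] = {
		"type", "phys_addr", "virt_addr", "num_pages", "attribute"
	};
	char path[128], line[64];
	int i;

	for (i = 0; ; i++) {
		unsigned int j;

		for (j = 0; j < sizeof(files) / sizeof(files[0]); j++) {
			FILE *f;

			snprintf(path, sizeof(path),
				 "/sys/firmware/efi/runtime-map/%d/%s",
				 i, files[j]);
			f = fopen(path, "r");
			if (!f)
				return 0;	/* no more entries */
			if (fgets(line, sizeof(line), f))
				printf("%d %-10s %s", i, files[j], line);
			fclose(f);
		}
	}
}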
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 85f772c..c8a7c81 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -393,7 +393,7 @@ static const DEVICE_ATTR(value, 0644,
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
{
- struct sysfs_dirent *value_sd = priv;
+ struct kernfs_node *value_sd = priv;
sysfs_notify_dirent(value_sd);
return IRQ_HANDLED;
@@ -402,7 +402,7 @@ static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
unsigned long gpio_flags)
{
- struct sysfs_dirent *value_sd;
+ struct kernfs_node *value_sd;
unsigned long irq_flags;
int ret, irq, id;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 85071a1..b073315 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1041,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
/* if equal delete the probed mode */
mode->status = pmode->status;
/* Merge type bits together */
- mode->type = pmode->type;
+ mode->type |= pmode->type;
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 621c7c6..76d3d1a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
kfree(request);
}
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
{
- u32 completed_seqno;
- u32 acthd;
+ u32 completed_seqno = ring->get_seqno(ring, false);
+ u32 acthd = intel_ring_get_active_head(ring);
+ struct drm_i915_gem_request *request;
+
+ list_for_each_entry(request, &ring->request_list, list) {
+ if (i915_seqno_passed(completed_seqno, request->seqno))
+ continue;
- acthd = intel_ring_get_active_head(ring);
- completed_seqno = ring->get_seqno(ring, false);
+ i915_set_reset_status(ring, request, acthd);
+ }
+}
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct drm_i915_gem_request,
list);
- if (request->seqno > completed_seqno)
- i915_set_reset_status(ring, request, acthd);
-
i915_gem_free_request(request);
}
@@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev)
struct intel_ring_buffer *ring;
int i;
+ /*
+ * Before we free the objects from the requests, we need to inspect
+ * them for finding the guilty party. As the requests only borrow
+ * their reference to the objects, the inspection must be done first.
+ */
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_reset_ring_status(dev_priv, ring);
+
for_each_ring(ring, dev_priv, i)
- i915_gem_reset_ring_lists(dev_priv, ring);
+ i915_gem_reset_ring_cleanup(dev_priv, ring);
i915_gem_cleanup_ringbuffer(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b7e787f..a3ba9a8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -93,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
{
struct drm_i915_gem_object *obj;
struct list_head objects;
- int i, ret = 0;
+ int i, ret;
INIT_LIST_HEAD(&objects);
spin_lock(&file->table_lock);
@@ -106,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
- goto out;
+ goto err;
}
if (!list_empty(&obj->obj_exec_link)) {
@@ -114,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
- goto out;
+ goto err;
}
drm_gem_object_reference(&obj->base);
@@ -123,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
spin_unlock(&file->table_lock);
i = 0;
- list_for_each_entry(obj, &objects, obj_exec_link) {
+ while (!list_empty(&objects)) {
struct i915_vma *vma;
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ obj_exec_link);
+
/*
* NOTE: We can leak any vmas created here when something fails
* later on. But that's no issue since vma_unbind can deal with
@@ -138,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
if (IS_ERR(vma)) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
- goto out;
+ goto err;
}
+ /* Transfer ownership from the objects list to the vmas list. */
list_add_tail(&vma->exec_list, &eb->vmas);
+ list_del_init(&obj->obj_exec_link);
vma->exec_entry = &exec[i];
if (eb->and < 0) {
@@ -155,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
++i;
}
+ return 0;
+
-out:
+err:
while (!list_empty(&objects)) {
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
obj_exec_link);
list_del_init(&obj->obj_exec_link);
- if (ret)
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference(&obj->base);
}
+ /*
+	 * Objects already transferred to the vmas list will be unreferenced by
+ * eb_destroy.
+ */
+
return ret;
}
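The eb_lookup_vmas() rework above deliberately consumes the temporary objects list with list_first_entry() and moves each object onto the vmas list, so the err: path only has to drop references for whatever is still sitting on the temporary list. A stripped-down sketch of that hand-over pattern, with invented item/validate_item names rather than the driver's own:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/types.h>

struct item {
	struct list_head link;
	/* ... payload ... */
};

static bool validate_item(struct item *it)
{
	return it != NULL;	/* placeholder check */
}

/*
 * Move every item from 'pending' onto 'owned'.  On failure the caller
 * only needs to clean up what is still left on 'pending'; items that
 * already moved are released through the owner's normal teardown.
 */
static int adopt_items(struct list_head *pending, struct list_head *owned)
{
	while (!list_empty(pending)) {
		struct item *it = list_first_entry(pending, struct item, link);

		if (!validate_item(it))
			return -EINVAL;		/* 'it' stays on 'pending' */

		list_move_tail(&it->link, owned);	/* ownership transferred */
	}
	return 0;
}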
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c79dd2b..d3c3b5b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -906,14 +906,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
WARN_ON(readq(&gtt_entries[i-1])
!= gen8_pte_encode(addr, level, true));
-#if 0 /* TODO: Still needed on GEN8? */
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-#endif
}
/*
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5d1dedc..f13d5ed 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2713,6 +2713,8 @@ static void gen8_irq_preinstall(struct drm_device *dev)
#undef GEN8_IRQ_INIT_NDX
POSTING_READ(GEN8_PCU_IIR);
+
+ ibx_irq_preinstall(dev);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 526c8de..b69dc3e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1057,12 +1057,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *intel_crtc;
+ dev_priv->ddi_plls.spll_refcount = 0;
+ dev_priv->ddi_plls.wrpll1_refcount = 0;
+ dev_priv->ddi_plls.wrpll2_refcount = 0;
+
for_each_pipe(pipe) {
intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- if (!intel_crtc->active)
+ if (!intel_crtc->active) {
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
continue;
+ }
intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
pipe);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8b8bde7..2bde35d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6303,7 +6303,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
uint32_t val;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
- WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
+ WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
@@ -10541,11 +10541,20 @@ static struct intel_quirk intel_quirks[] = {
/* Sony Vaio Y cannot use SSC on LVDS */
{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
- /*
- * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
- * seem to use inverted backlight PWM.
- */
- { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer/eMachines G725 */
+ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+ /* Acer/eMachines e725 */
+ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
/* Dell XPS13 HD Sandy Bridge */
{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
@@ -11044,10 +11053,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_setup_overlay(dev);
- drm_modeset_lock_all(dev);
+ mutex_lock(&dev->mode_config.mutex);
drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, false);
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&dev->mode_config.mutex);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -11126,14 +11135,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+ pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
if (state)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
- pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+ pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3657ab4..26c29c1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5688,6 +5688,8 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
unsigned long irqflags;
uint32_t tmp;
+ WARN_ON(dev_priv->pc8.enabled);
+
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5747,16 +5749,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
static void __intel_power_well_get(struct drm_device *dev,
struct i915_power_well *power_well)
{
- if (!power_well->count++)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!power_well->count++) {
+ hsw_disable_package_c8(dev_priv);
__intel_set_power_well(dev, true);
+ }
}
static void __intel_power_well_put(struct drm_device *dev,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
WARN_ON(!power_well->count);
- if (!--power_well->count && i915_disable_power_well)
+ if (!--power_well->count && i915_disable_power_well) {
__intel_set_power_well(dev, false);
+ hsw_enable_package_c8(dev_priv);
+ }
}
void intel_display_power_get(struct drm_device *dev,
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
index 48f0637..2ea5568 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -104,11 +104,8 @@ nouveau_subdev_create_(struct nouveau_object *parent,
if (parent) {
struct nouveau_device *device = nv_device(parent);
- int subidx = nv_hclass(subdev) & 0xff;
-
subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
subdev->mmio = nv_subdev(device)->mmio;
- device->subdev[subidx] = *pobject;
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 9135b25a..dd01c6c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -268,6 +268,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ device->subdev[i] = devobj->subdev[i];
+
/* note: can't init *any* subdevs until devinit has been run
* due to not knowing exactly what the vbios init tables will
* mess with. devinit also can't be run until all of its
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 8d06eef..dbc5e33 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -161,7 +161,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 434bb4b..5c8a63d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -334,7 +334,7 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds)
while ((mthd = &mthds[i++]) && (init = mthd->init)) {
u32 addr = 0x80000000 | mthd->oclass;
for (data = 0; init->count; init++) {
- if (data != init->data) {
+ if (init == mthd->init || data != init->data) {
nv_wr32(priv, 0x40448c, init->data);
data = init->data;
}
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 8541aa3..d89dbdf 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -75,6 +75,11 @@ struct nouveau_fb {
static inline struct nouveau_fb *
nouveau_fb(void *obj)
{
+ /* fbram uses this before device subdev pointer is valid */
+ if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
+ nv_subidx(obj) == NVDEV_SUBDEV_FB)
+ return obj;
+
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
}
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 9fa5da7..7f50a85 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -73,7 +73,7 @@ struct nouveau_i2c {
int (*identify)(struct nouveau_i2c *, int index,
const char *what, struct nouveau_i2c_board_info *,
bool (*match)(struct nouveau_i2c_port *,
- struct i2c_board_info *));
+ struct i2c_board_info *, void *), void *);
struct list_head ports;
};
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
index ec7a54e..4aca338 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -50,6 +50,13 @@ struct nouveau_instmem {
static inline struct nouveau_instmem *
nouveau_instmem(void *obj)
{
+ /* nv04/nv40 impls need to create objects in their constructor,
+ * which is before the subdev pointer is valid
+ */
+ if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
+ nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
+ return obj;
+
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 420908c..df1b1b4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -365,13 +365,13 @@ static u16
init_script(struct nouveau_bios *bios, int index)
{
struct nvbios_init init = { .bios = bios };
- u16 data;
+ u16 bmp_ver = bmp_version(bios), data;
- if (bmp_version(bios) && bmp_version(bios) < 0x0510) {
- if (index > 1)
+ if (bmp_ver && bmp_ver < 0x0510) {
+ if (index > 1 || bmp_ver < 0x0100)
return 0x0000;
- data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18);
+ data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
return nv_ro16(bios, data + (index * 2));
}
@@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init)
u16 offset = nv_ro16(bios, init->offset + 1);
trace("JUMP\t0x%04x\n", offset);
- init->offset = offset;
+
+ if (init_exec(init))
+ init->offset = offset;
+ else
+ init->offset += 3;
}
/**
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 041fd5e..c33c03d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -197,7 +197,7 @@ static int
nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
struct nouveau_i2c_board_info *info,
bool (*match)(struct nouveau_i2c_port *,
- struct i2c_board_info *))
+ struct i2c_board_info *, void *), void *data)
{
struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index);
int i;
@@ -221,7 +221,7 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
}
if (nv_probe_i2c(port, info[i].dev.addr) &&
- (!match || match(port, &info[i].dev))) {
+ (!match || match(port, &info[i].dev, data))) {
nv_info(i2c, "detected %s: %s\n", what,
info[i].dev.type);
return i;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
index af129c2..64f8b47 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
@@ -100,7 +100,7 @@ mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info)
static int
mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb)
{
- struct nouveau_mxm *mxm = nouveau_mxm(bios);
+ struct nouveau_mxm *mxm = data;
struct context ctx = { .outp = (u32 *)(bios->data + pdcb) };
u8 type, i2cidx, link, ver, len;
u8 *conn;
@@ -199,7 +199,7 @@ mxm_dcb_sanitise(struct nouveau_mxm *mxm)
return;
}
- dcb_outp_foreach(bios, NULL, mxm_dcb_sanitise_entry);
+ dcb_outp_foreach(bios, mxm, mxm_dcb_sanitise_entry);
mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index e44ed7b..7610fc5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -29,9 +29,9 @@
static bool
probe_monitoring_device(struct nouveau_i2c_port *i2c,
- struct i2c_board_info *info)
+ struct i2c_board_info *info, void *data)
{
- struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c);
+ struct nouveau_therm_priv *priv = data;
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
struct i2c_client *client;
@@ -96,7 +96,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
};
i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
- board, probe_monitoring_device);
+ board, probe_monitoring_device, therm);
if (priv->ic)
return;
}
@@ -108,7 +108,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
};
i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
- board, probe_monitoring_device);
+ board, probe_monitoring_device, therm);
if (priv->ic)
return;
}
@@ -117,5 +117,5 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
device. Let's try our static list.
*/
i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
- nv_board_infos, probe_monitoring_device);
+ nv_board_infos, probe_monitoring_device, therm);
}
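The identify() signature change running through i2c.h, i2c/base.c and therm/ic.c above is the usual "callback plus opaque cookie" pattern: the caller passes a void *data that is handed back to the match callback, so probe_monitoring_device() no longer has to reconstruct its therm context from the port. A generic sketch of that shape, with illustrative names only:

#include <stdbool.h>

typedef bool (*match_fn)(int candidate, void *data);

/* Walk 'candidates' and return the index of the first accepted entry. */
static int find_first_match(const int *candidates, int n,
			    match_fn match, void *data)
{
	int i;

	for (i = 0; i < n; i++)
		if (!match || match(candidates[i], data))
			return i;
	return -1;
}

struct probe_ctx {
	int wanted;
};

static bool match_wanted(int candidate, void *data)
{
	struct probe_ctx *ctx = data;	/* context recovered from the cookie */

	return candidate == ctx->wanted;
}

/*
 * Caller side:
 *	struct probe_ctx ctx = { .wanted = 0x2d };
 *	idx = find_first_match(addrs, n, match_wanted, &ctx);
 */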
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 936a71c..7fdc51e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -643,7 +643,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
get_tmds_slave(encoder))
return;
- type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL);
+ type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL);
if (type < 0)
return;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index cc4b208..244822d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -59,7 +59,7 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
return i2c->identify(i2c, i2c_index, "TV encoder",
- nv04_tv_encoder_info, NULL);
+ nv04_tv_encoder_info, NULL, NULL);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 6828d81..900fae0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -447,6 +447,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
+ info->offset = ntfy->node->offset;
+
done:
if (ret)
nouveau_abi16_ntfy_fini(chan, ntfy);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 95c7404..ba0183f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -51,6 +51,7 @@ static struct nouveau_dsm_priv {
bool dsm_detected;
bool optimus_detected;
acpi_handle dhandle;
+ acpi_handle other_handle;
acpi_handle rom_handle;
} nouveau_dsm_priv;
@@ -260,9 +261,10 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
if (!dhandle)
return false;
- if (!acpi_has_method(dhandle, "_DSM"))
+ if (!acpi_has_method(dhandle, "_DSM")) {
+ nouveau_dsm_priv.other_handle = dhandle;
return false;
-
+ }
if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
@@ -338,6 +340,16 @@ static bool nouveau_dsm_detect(void)
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
+ /*
+ * On some systems hotplug events are generated for the device
+ * being switched off when _DSM is executed. They cause ACPI
+ * hotplug to trigger and attempt to remove the device from
+ * the system, which causes it to break down. Prevent that from
+ * happening by setting the no_hotplug flag for the involved
+ * ACPI device objects.
+ */
+ acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
+ acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
ret = true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 29c3efd..25ea82f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -610,7 +610,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
ret = nouveau_fence_sync(fence, chan);
nouveau_fence_unref(&fence);
if (ret)
- return ret;
+ goto fail_free;
if (new_bo != old_bo) {
ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 037d324..66ac0ff 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -8,5 +8,6 @@ config DRM_QXL
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
+ select CRC32
help
QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5e827c2..d70aafb 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -24,7 +24,7 @@
*/
-#include "linux/crc32.h"
+#include <linux/crc32.h>
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b197059..0b9621c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
}
if (tiling_flags & RADEON_TILING_MACRO) {
- if (rdev->family >= CHIP_BONAIRE)
- tmp = rdev->config.cik.tile_config;
- else if (rdev->family >= CHIP_TAHITI)
- tmp = rdev->config.si.tile_config;
- else if (rdev->family >= CHIP_CAYMAN)
- tmp = rdev->config.cayman.tile_config;
- else
- tmp = rdev->config.evergreen.tile_config;
+ evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
- switch ((tmp & 0xf0) >> 4) {
- case 0: /* 4 banks */
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
- break;
- case 1: /* 8 banks */
- default:
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
- break;
- case 2: /* 16 banks */
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
- break;
+ /* Set NUM_BANKS. */
+ if (rdev->family >= CHIP_BONAIRE) {
+ unsigned tileb, index, num_banks, tile_split_bytes;
+
+ /* Calculate the macrotile mode index. */
+ tile_split_bytes = 64 << tile_split;
+ tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+ tileb = min(tile_split_bytes, tileb);
+
+ for (index = 0; tileb > 64; index++) {
+ tileb >>= 1;
+ }
+
+ if (index >= 16) {
+ DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+ target_fb->bits_per_pixel, tile_split);
+ return -EINVAL;
+ }
+
+ num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
+ } else {
+ /* SI and older. */
+ if (rdev->family >= CHIP_TAHITI)
+ tmp = rdev->config.si.tile_config;
+ else if (rdev->family >= CHIP_CAYMAN)
+ tmp = rdev->config.cayman.tile_config;
+ else
+ tmp = rdev->config.evergreen.tile_config;
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0: /* 4 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+ break;
+ case 1: /* 8 banks */
+ default:
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+ break;
+ case 2: /* 16 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+ break;
+ }
}
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
-
- evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
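A quick worked example of the macrotile index computation above, using assumed inputs rather than values from a real board:

/* Assumed: bits_per_pixel = 32, tile_split = 4
 *   tile_split_bytes = 64 << 4          = 1024
 *   tileb            = 8 * 8 * 32 / 8   = 256   -> min(1024, 256) = 256
 *   halve while > 64: 256 -> 128 -> 64  => index = 2
 * so macrotile_mode_array[2] supplies the NUM_BANKS field in this case.
 */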
@@ -1180,19 +1202,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
if (rdev->family >= CHIP_BONAIRE) {
- u32 num_pipe_configs = rdev->config.cik.max_tile_pipes;
- u32 num_rb = rdev->config.cik.max_backends_per_se;
- if (num_pipe_configs > 8)
- num_pipe_configs = 8;
- if (num_pipe_configs == 8)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16);
- else if (num_pipe_configs == 4) {
- if (num_rb == 4)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
- else if (num_rb < 4)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
- } else if (num_pipe_configs == 2)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
+ /* Read the pipe config from the 2D TILED SCANOUT mode.
+ * It should be the same for the other modes too, but not all
+ * modes set the pipe config field. */
+ u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
+
+ fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
} else if ((rdev->family == CHIP_TAHITI) ||
(rdev->family == CHIP_PITCAIRN))
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b43a3a3..e950fab 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width)
* Returns the disabled RB bitmask.
*/
static u32 cik_get_rb_disabled(struct radeon_device *rdev,
- u32 max_rb_num, u32 se_num,
+ u32 max_rb_num_per_se,
u32 sh_per_se)
{
u32 data, mask;
@@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
data >>= BACKEND_DISABLE_SHIFT;
- mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
+ mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
return data & mask;
}
@@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
*/
static void cik_setup_rb(struct radeon_device *rdev,
u32 se_num, u32 sh_per_se,
- u32 max_rb_num)
+ u32 max_rb_num_per_se)
{
int i, j;
u32 data, mask;
@@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
- data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+ data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
if (rdev->family == CHIP_HAWAII)
disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
else
@@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev,
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
mask = 1;
- for (i = 0; i < max_rb_num; i++) {
+ for (i = 0; i < max_rb_num_per_se * se_num; i++) {
if (!(disabled_rbs & mask))
enabled_rbs |= mask;
mask <<= 1;
}
+ rdev->config.cik.backend_enable_mask = enabled_rbs;
+
for (i = 0; i < se_num; i++) {
cik_select_se_sh(rdev, i, 0xffffffff);
data = 0;
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index de86493..713a5d3 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -174,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
}
@@ -235,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
@@ -308,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev)
rdev->audio.enabled = true;
if (ASIC_IS_DCE8(rdev))
- rdev->audio.num_pins = 7;
+ rdev->audio.num_pins = 6;
+ else if (ASIC_IS_DCE61(rdev))
+ rdev->audio.num_pins = 4;
else
rdev->audio.num_pins = 6;
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index aa695c4..0c6d5ce 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
}
@@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 11aab2a..f59a9e9 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x999C)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
} else if ((rdev->pdev->device == 0x9903) ||
(rdev->pdev->device == 0x9904) ||
(rdev->pdev->device == 0x990A) ||
@@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x999D)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
} else if ((rdev->pdev->device == 0x9919) ||
(rdev->pdev->device == 0x9990) ||
(rdev->pdev->device == 0x9991) ||
@@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x99A0)) {
rdev->config.cayman.max_simds_per_se = 3;
rdev->config.cayman.max_backends_per_se = 1;
+ rdev->config.cayman.max_hw_contexts = 4;
+ rdev->config.cayman.sx_max_export_size = 128;
+ rdev->config.cayman.sx_max_export_pos_size = 32;
+ rdev->config.cayman.sx_max_export_smx_size = 96;
} else {
rdev->config.cayman.max_simds_per_se = 2;
rdev->config.cayman.max_backends_per_se = 1;
+ rdev->config.cayman.max_hw_contexts = 4;
+ rdev->config.cayman.sx_max_export_size = 128;
+ rdev->config.cayman.sx_max_export_pos_size = 32;
+ rdev->config.cayman.sx_max_export_smx_size = 96;
}
rdev->config.cayman.max_texture_channel_caches = 2;
rdev->config.cayman.max_gprs = 256;
@@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.max_gs_threads = 32;
rdev->config.cayman.max_stack_entries = 512;
rdev->config.cayman.sx_num_of_sets = 8;
- rdev->config.cayman.sx_max_export_size = 256;
- rdev->config.cayman.sx_max_export_pos_size = 64;
- rdev->config.cayman.sx_max_export_smx_size = 192;
- rdev->config.cayman.max_hw_contexts = 8;
rdev->config.cayman.sq_num_cf_insts = 2;
rdev->config.cayman.sc_prim_fifo_size = 0x40;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b1f990d..45e1f44 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1940,7 +1940,7 @@ struct si_asic {
unsigned sc_earlyz_tile_fifo_size;
unsigned num_tile_pipes;
- unsigned num_backends_per_se;
+ unsigned backend_enable_mask;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;
@@ -1970,7 +1970,7 @@ struct cik_asic {
unsigned sc_earlyz_tile_fifo_size;
unsigned num_tile_pipes;
- unsigned num_backends_per_se;
+ unsigned backend_enable_mask;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 9d302ea..485848f 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -33,6 +33,7 @@ static struct radeon_atpx_priv {
bool atpx_detected;
/* handle for device - and atpx */
acpi_handle dhandle;
+ acpi_handle other_handle;
struct radeon_atpx atpx;
} radeon_atpx_priv;
@@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
return false;
status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ radeon_atpx_priv.other_handle = dhandle;
return false;
-
+ }
radeon_atpx_priv.dhandle = dhandle;
radeon_atpx_priv.atpx.handle = atpx_handle;
return true;
@@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void)
printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
+ /*
+ * On some systems hotplug events are generated for the device
+ * being switched off when ATPX is executed. They cause ACPI
+ * hotplug to trigger and attempt to remove the device from
+ * the system, which causes it to break down. Prevent that from
+ * happening by setting the no_hotplug flag for the involved
+ * ACPI device objects.
+ */
+ acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
+ acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
return true;
}
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 1958b36a..db39ea3 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -77,9 +77,10 @@
* 2.33.0 - Add SI tiling mode array query
* 2.34.0 - Add CIK tiling mode array query
* 2.35.0 - Add CIK macrotile mode array query
+ * 2.36.0 - Fix CIK DCE tiling setup
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 35
+#define KMS_DRIVER_MINOR 36
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 55d0b47..21d593c 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -461,6 +461,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case RADEON_INFO_SI_CP_DMA_COMPUTE:
*value = 1;
break;
+ case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
+ if (rdev->family >= CHIP_BONAIRE) {
+ *value = rdev->config.cik.backend_enable_mask;
+ } else if (rdev->family >= CHIP_TAHITI) {
+ *value = rdev->config.si.backend_enable_mask;
+ } else {
+ DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
+ }
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 373d088..b9c0529 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
return -EINVAL;
}
- if ((start >> 28) != (end >> 28)) {
+ if ((start >> 28) != ((end - 1) >> 28)) {
DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
start, end);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 913b025..374499d 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_MEMORY_SS, 0);
+ /* disable ss, causes hangs on some cayman boards */
+ if (rdev->family == CHIP_CAYMAN) {
+ pi->sclk_ss = false;
+ pi->mclk_ss = false;
+ }
+
if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index a36736d..85e1edf 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2811,7 +2811,7 @@ static void si_setup_spi(struct radeon_device *rdev,
}
static u32 si_get_rb_disabled(struct radeon_device *rdev,
- u32 max_rb_num, u32 se_num,
+ u32 max_rb_num_per_se,
u32 sh_per_se)
{
u32 data, mask;
@@ -2825,14 +2825,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
data >>= BACKEND_DISABLE_SHIFT;
- mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+ mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
return data & mask;
}
static void si_setup_rb(struct radeon_device *rdev,
u32 se_num, u32 sh_per_se,
- u32 max_rb_num)
+ u32 max_rb_num_per_se)
{
int i, j;
u32 data, mask;
@@ -2842,19 +2842,21 @@ static void si_setup_rb(struct radeon_device *rdev,
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
si_select_se_sh(rdev, i, j);
- data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+ data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
}
}
si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
mask = 1;
- for (i = 0; i < max_rb_num; i++) {
+ for (i = 0; i < max_rb_num_per_se * se_num; i++) {
if (!(disabled_rbs & mask))
enabled_rbs |= mask;
mask <<= 1;
}
+ rdev->config.si.backend_enable_mask = enabled_rbs;
+
for (i = 0; i < se_num; i++) {
si_select_se_sh(rdev, i, 0xffffffff);
data = 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 15b86a9..4061521 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
* Don't move nonexistent data. Clear destination instead.
*/
if (old_iomap == NULL &&
- (ttm == NULL || ttm->state == tt_unpopulated)) {
+ (ttm == NULL || (ttm->state == tt_unpopulated &&
+ !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
goto out2;
}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index f0c5e07..bcb4950 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -301,7 +301,7 @@ err:
return -ENOMEM;
}
-void hv_synic_free_cpu(int cpu)
+static void hv_synic_free_cpu(int cpu)
{
kfree(hv_context.event_dpc[cpu]);
if (hv_context.synic_event_page[cpu])
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 78be661..bbb0b0d 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,6 +36,7 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/moduleparam.h>
+#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h>
@@ -52,7 +53,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
-#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
+#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
@@ -176,20 +177,33 @@ static ssize_t show_temp(struct device *dev,
/* Check whether the time interval has elapsed */
if (!tdata->valid || time_after(jiffies, tdata->last_updated + HZ)) {
rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
- tdata->valid = 0;
- /* Check whether the data is valid */
- if (eax & 0x80000000) {
- tdata->temp = tdata->tjmax -
- ((eax >> 16) & 0x7f) * 1000;
- tdata->valid = 1;
- }
+ /*
+ * Ignore the valid bit. In all observed cases the register
+ * value is either low or zero if the valid bit is 0.
+ * Return it instead of reporting an error which doesn't
+ * really help at all.
+ */
+ tdata->temp = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
+ tdata->valid = 1;
tdata->last_updated = jiffies;
}
mutex_unlock(&tdata->update_lock);
- return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
+ return sprintf(buf, "%d\n", tdata->temp);
}
+struct tjmax_pci {
+ unsigned int device;
+ int tjmax;
+};
+
+static const struct tjmax_pci tjmax_pci_table[] = {
+	{ 0x0708, 110000 },	/* CE41x0 (Sodaville) */
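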
+ { 0x0c72, 102000 }, /* Atom S1240 (Centerton) */
+ { 0x0c73, 95000 }, /* Atom S1220 (Centerton) */
+ { 0x0c75, 95000 }, /* Atom S1260 (Centerton) */
+};
+
struct tjmax {
char const *id;
int tjmax;
@@ -198,9 +212,6 @@ struct tjmax {
static const struct tjmax tjmax_table[] = {
{ "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */
{ "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */
- { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 Sodaville */
- { "CPU CE4150", 110000 }, /* Model 0x1c, stepping 10 */
- { "CPU CE4170", 110000 }, /* Model 0x1c, stepping 10 */
};
struct tjmax_model {
@@ -222,8 +233,11 @@ static const struct tjmax_model tjmax_model_table[] = {
* is undetectable by software
*/
{ 0x27, ANY, 90000 }, /* Atom Medfield (Z2460) */
- { 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z2760) */
- { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) */
+ { 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z27x0) */
+ { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx)
+ * Also matches S12x0 (stepping 9), covered by
+ * PCI table
+ */
};
static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
@@ -236,8 +250,20 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
int err;
u32 eax, edx;
int i;
+ struct pci_dev *host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+
+ /*
+ * Explicit tjmax table entries override heuristics.
+ * First try PCI host bridge IDs, followed by model ID strings
+ * and model/stepping information.
+ */
+ if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) {
+ for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) {
+ if (host_bridge->device == tjmax_pci_table[i].device)
+ return tjmax_pci_table[i].tjmax;
+ }
+ }
- /* explicit tjmax table entries override heuristics */
for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
if (strstr(c->x86_model_id, tjmax_table[i].id))
return tjmax_table[i].tjmax;
@@ -343,12 +369,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
if (cpu_has_tjmax(c))
dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
} else {
- val = (eax >> 16) & 0xff;
+ val = (eax >> 16) & 0x7f;
/*
* If the TjMax is not plausible, an assumption
* will be used
*/
- if (val) {
+ if (val >= 85) {
dev_dbg(dev, "TjMax is %d degrees C\n", val);
return val * 1000;
}
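The coretemp change above keys TjMax off the PCI host bridge at 0000:00:00.0 before falling back to model-string heuristics. A hedged sketch of that lookup pattern, with placeholder table contents; one general point worth keeping in mind is that pci_get_bus_and_slot() takes a reference, which the sketch drops with pci_dev_put() once the device ID has been read:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/pci.h>

struct id_val {
	unsigned int device;
	int val;
};

static const struct id_val demo_table[] = {
	{ 0x0708, 110000 },	/* placeholder IDs and values */
	{ 0x0c72, 102000 },
};

static int demo_lookup(void)
{
	struct pci_dev *host = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	int i, ret = -ENODEV;

	if (!host)
		return ret;

	if (host->vendor == PCI_VENDOR_ID_INTEL) {
		for (i = 0; i < ARRAY_SIZE(demo_table); i++) {
			if (host->device == demo_table[i].device) {
				ret = demo_table[i].val;
				break;
			}
		}
	}

	pci_dev_put(host);	/* drop the reference taken by the lookup */
	return ret;
}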
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 960fac3..afd3104 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -45,7 +45,7 @@ static const char * const input_names[] = {
/* Conversion function for VDDOUT and VBAT */
static inline int volt_reg_to_mv(int value)
{
- return DIV_ROUND_CLOSEST(value * 1000, 512) + 2500;
+ return DIV_ROUND_CLOSEST(value * 2000, 1023) + 2500;
}
/* Conversion function for ADC channels 4, 5 and 6 */
@@ -57,7 +57,7 @@ static inline int input_reg_to_mv(int value)
/* Conversion function for VBBAT */
static inline int vbbat_reg_to_mv(int value)
{
- return DIV_ROUND_CLOSEST(value * 2500, 512);
+ return DIV_ROUND_CLOSEST(value * 5000, 1023);
}
static inline int da9052_enable_vddout_channel(struct da9052 *da9052)
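The da9052-hwmon conversion fix above can be checked at the end points, assuming the usual 10-bit ADC code range and a 2.5 V offset for VDDOUT/VBAT (the exact measurement range is a datasheet detail not visible in this hunk):

/* Sanity check at full scale, code = 1023:
 *   old: 1023 * 1000 / 512  + 2500 ~= 4498 mV
 *   new: 1023 * 2000 / 1023 + 2500  = 4500 mV
 * i.e. the corrected divisor lands exactly on a 4500 mV end point.
 */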
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index dff8410..6040121 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -249,7 +249,7 @@ static void fam15h_power_remove(struct pci_dev *pdev)
sysfs_remove_group(&dev->kobj, &fam15h_power_attr_group);
}
-static DEFINE_PCI_DEVICE_TABLE(fam15h_power_id_table) = {
+static const struct pci_device_id fam15h_power_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
{}
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index d65f3fd..baf375b 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -204,12 +204,13 @@ static void k10temp_remove(struct pci_dev *pdev)
&sensor_dev_attr_temp1_crit_hyst.dev_attr);
}
-static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = {
+static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{}
};
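Several hwmon conversions in this series (fam15h_power, k10temp, k8temp, sis5595, via686a, vt8231) replace the DEFINE_PCI_DEVICE_TABLE() macro with an open-coded const table. The resulting idiom looks like this, with a placeholder device ID:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_id_table[] = {
	{ PCI_VDEVICE(AMD, 0x1203) },	/* placeholder device ID */
	{ }
};
MODULE_DEVICE_TABLE(pci, demo_id_table);	/* still emits the modalias info */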
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 5b50e9e..734d55d 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -135,7 +135,7 @@ static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 1, 0);
static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1);
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static DEFINE_PCI_DEVICE_TABLE(k8temp_ids) = {
+static const struct pci_device_id k8temp_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ 0 },
};
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index cf811c1..8686e96 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -3936,6 +3936,18 @@ static int nct6775_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
+static void nct6791_enable_io_mapping(int sioaddr)
+{
+ int val;
+
+ val = superio_inb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE);
+ if (val & 0x10) {
+ pr_info("Enabling hardware monitor logical device mappings.\n");
+ superio_outb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE,
+ val & ~0x10);
+ }
+}
+
#ifdef CONFIG_PM
static int nct6775_suspend(struct device *dev)
{
@@ -3955,11 +3967,20 @@ static int nct6775_suspend(struct device *dev)
static int nct6775_resume(struct device *dev)
{
struct nct6775_data *data = dev_get_drvdata(dev);
- int i, j;
+ int i, j, err = 0;
mutex_lock(&data->update_lock);
data->bank = 0xff; /* Force initial bank selection */
+ if (data->kind == nct6791) {
+ err = superio_enter(data->sioreg);
+ if (err)
+ goto abort;
+
+ nct6791_enable_io_mapping(data->sioreg);
+ superio_exit(data->sioreg);
+ }
+
/* Restore limits */
for (i = 0; i < data->in_num; i++) {
if (!(data->have_in & (1 << i)))
@@ -3996,11 +4017,12 @@ static int nct6775_resume(struct device *dev)
nct6775_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
}
+abort:
/* Force re-reading all values */
data->valid = false;
mutex_unlock(&data->update_lock);
- return 0;
+ return err;
}
static const struct dev_pm_ops nct6775_dev_pm_ops = {
@@ -4088,15 +4110,9 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n");
superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
}
- if (sio_data->kind == nct6791) {
- val = superio_inb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE);
- if (val & 0x10) {
- pr_info("Enabling hardware monitor logical device mappings.\n");
- superio_outb(sioaddr,
- NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE,
- val & ~0x10);
- }
- }
+
+ if (sio_data->kind == nct6791)
+ nct6791_enable_io_mapping(sioaddr);
superio_exit(sioaddr);
pr_info("Found %s or compatible chip at %#x:%#x\n",
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 72a8897..e74bd7e 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -754,7 +754,7 @@ static struct sis5595_data *sis5595_update_device(struct device *dev)
return data;
}
-static DEFINE_PCI_DEVICE_TABLE(sis5595_pci_ids) = {
+static const struct pci_device_id sis5595_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ 0, }
};
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index c9dcce8..babd732 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -824,7 +824,7 @@ static struct via686a_data *via686a_update_device(struct device *dev)
return data;
}
-static DEFINE_PCI_DEVICE_TABLE(via686a_pci_ids) = {
+static const struct pci_device_id via686a_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) },
{ }
};
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index aee14e2..b3babe3 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -766,7 +766,7 @@ static struct platform_driver vt8231_driver = {
.remove = vt8231_remove,
};
-static DEFINE_PCI_DEVICE_TABLE(vt8231_pci_ids) = {
+static const struct pci_device_id vt8231_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
{ 0, }
};
diff --git a/drivers/ide/buddha.c b/drivers/ide/buddha.c
index b1d3859..46eaf58 100644
--- a/drivers/ide/buddha.c
+++ b/drivers/ide/buddha.c
@@ -198,7 +198,7 @@ fail_base2:
continue;
}
}
- buddha_board = ZTWO_VADDR(board);
+ buddha_board = (unsigned long)ZTWO_VADDR(board);
/* write to BUDDHA_IRQ_MR to enable the board IRQ */
/* X-Surf doesn't have this. IRQs are always on */
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 92d1206..6c0e045 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table;
* which is also the index into the MWAIT hint array.
* Thus C0 is a dummy.
*/
-static struct cpuidle_state nehalem_cstates[] __initdata = {
+static struct cpuidle_state nehalem_cstates[] = {
{
.name = "C1-NHM",
.desc = "MWAIT 0x00",
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.enter = NULL }
};
-static struct cpuidle_state snb_cstates[] __initdata = {
+static struct cpuidle_state snb_cstates[] = {
{
.name = "C1-SNB",
.desc = "MWAIT 0x00",
@@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.enter = NULL }
};
-static struct cpuidle_state ivb_cstates[] __initdata = {
+static struct cpuidle_state ivb_cstates[] = {
{
.name = "C1-IVB",
.desc = "MWAIT 0x00",
@@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.enter = NULL }
};
-static struct cpuidle_state hsw_cstates[] __initdata = {
+static struct cpuidle_state hsw_cstates[] = {
{
.name = "C1-HSW",
.desc = "MWAIT 0x00",
@@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.enter = NULL }
};
-static struct cpuidle_state atom_cstates[] __initdata = {
+static struct cpuidle_state atom_cstates[] = {
{
.name = "C1E-ATM",
.desc = "MWAIT 0x00",
@@ -329,7 +329,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
{
.enter = NULL }
};
-static struct cpuidle_state avn_cstates[] __initdata = {
+static struct cpuidle_state avn_cstates[] = {
{
.name = "C1-AVN",
.desc = "MWAIT 0x00",
@@ -344,6 +344,8 @@ static struct cpuidle_state avn_cstates[] __initdata = {
.exit_latency = 15,
.target_residency = 45,
.enter = &intel_idle },
+ {
+ .enter = NULL }
};
/**
@@ -375,13 +377,7 @@ static int intel_idle(struct cpuidle_device *dev,
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
- if (!current_set_polling_and_test()) {
-
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __mwait(eax, ecx);
- }
+ mwait_idle_with_hints(eax, ecx);
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
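
The intel_idle hunks drop the __initdata annotation from the C-state tables (presumably because the tables can be referenced after boot, e.g. when a CPU comes online later), add the missing NULL .enter terminator to avn_cstates, and replace the open-coded monitor/mwait sequence with the common mwait_idle_with_hints() helper. Based on the lines removed above, the helper centralizes roughly the following logic (a paraphrase of the removed code, not the helper's exact body):

  static void example_mwait_idle(unsigned long eax, unsigned long ecx)
  {
      if (!current_set_polling_and_test()) {
          __monitor((void *)&current_thread_info()->flags, 0, 0);
          smp_mb();
          if (!need_resched())
              __mwait(eax, ecx);
      }
  }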
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 90cf0cd..5dd0e12 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -65,9 +65,11 @@ source "drivers/iio/common/Kconfig"
source "drivers/iio/dac/Kconfig"
source "drivers/iio/frequency/Kconfig"
source "drivers/iio/gyro/Kconfig"
+source "drivers/iio/humidity/Kconfig"
source "drivers/iio/imu/Kconfig"
source "drivers/iio/light/Kconfig"
source "drivers/iio/magnetometer/Kconfig"
+source "drivers/iio/orientation/Kconfig"
if IIO_TRIGGER
source "drivers/iio/trigger/Kconfig"
endif #IIO_TRIGGER
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index bcf7e9e..887d3909 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -18,9 +18,11 @@ obj-y += common/
obj-y += dac/
obj-y += gyro/
obj-y += frequency/
+obj-y += humidity/
obj-y += imu/
obj-y += light/
obj-y += magnetometer/
+obj-y += orientation/
obj-y += pressure/
obj-y += temperature/
obj-y += trigger/
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 28b3928..3bec922 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -455,7 +455,12 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.scan_index = (_index), \
- .scan_type = IIO_ST('s', 14, 16, 2), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 14, \
+ .storagebits = 16, \
+ .shift = 2, \
+ }, \
.ext_info = bma180_ext_info, \
}
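
This is the first of many conversions in this series away from the IIO_ST() helper macro toward a designated-initializer scan_type. The macro packed the four common fields positionally; if memory serves it expanded to essentially the following (shown for reference only), so the open-coded form is equivalent while making each field explicit and leaving room for drivers to also set .endianness:

  /* Approximate definition of the deprecated helper, for reference. */
  #define IIO_ST(si, rb, sb, sh) \
      { .sign = si, .realbits = rb, .storagebits = sb, .shift = sh }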
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 1cae4e9..3dcdbad 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -262,6 +262,18 @@ static int accel_3d_parse_report(struct platform_device *pdev,
st->accel[1].index, st->accel[1].report_id,
st->accel[2].index, st->accel[2].report_id);
+ /* Set Sensitivity field ids, when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_ACCELERATION,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
+
return ret;
}
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 58e9455..70f78c3 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -43,19 +43,22 @@ struct ad7266_state {
* The buffer needs to be large enough to hold two samples (4 bytes) and
* the naturally aligned timestamp (8 bytes).
*/
- uint8_t data[ALIGN(4, sizeof(s64)) + sizeof(s64)] ____cacheline_aligned;
+ struct {
+ __be16 sample[2];
+ s64 timestamp;
+ } data ____cacheline_aligned;
};
static int ad7266_wakeup(struct ad7266_state *st)
{
/* Any read with >= 2 bytes will wake the device */
- return spi_read(st->spi, st->data, 2);
+ return spi_read(st->spi, &st->data.sample[0], 2);
}
static int ad7266_powerdown(struct ad7266_state *st)
{
/* Any read with < 2 bytes will powerdown the device */
- return spi_read(st->spi, st->data, 1);
+ return spi_read(st->spi, &st->data.sample[0], 1);
}
static int ad7266_preenable(struct iio_dev *indio_dev)
@@ -84,9 +87,9 @@ static irqreturn_t ad7266_trigger_handler(int irq, void *p)
struct ad7266_state *st = iio_priv(indio_dev);
int ret;
- ret = spi_read(st->spi, st->data, 4);
+ ret = spi_read(st->spi, st->data.sample, 4);
if (ret == 0) {
- iio_push_to_buffers_with_timestamp(indio_dev, st->data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->data,
pf->timestamp);
}
@@ -137,7 +140,7 @@ static int ad7266_read_single(struct ad7266_state *st, int *val,
ad7266_select_input(st, address);
ret = spi_sync(st->spi, &st->single_msg);
- *val = be16_to_cpu(st->data[address % 2]);
+ *val = be16_to_cpu(st->data.sample[address % 2]);
return ret;
}
@@ -442,15 +445,15 @@ static int ad7266_probe(struct spi_device *spi)
ad7266_init_channels(indio_dev);
/* wakeup */
- st->single_xfer[0].rx_buf = &st->data;
+ st->single_xfer[0].rx_buf = &st->data.sample[0];
st->single_xfer[0].len = 2;
st->single_xfer[0].cs_change = 1;
/* conversion */
- st->single_xfer[1].rx_buf = &st->data;
+ st->single_xfer[1].rx_buf = st->data.sample;
st->single_xfer[1].len = 4;
st->single_xfer[1].cs_change = 1;
/* powerdown */
- st->single_xfer[2].tx_buf = &st->data;
+ st->single_xfer[2].tx_buf = &st->data.sample[0];
st->single_xfer[2].len = 1;
spi_message_init(&st->single_msg);
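
The ad7266 change replaces an opaque byte array with a struct that spells out the buffer layout expected by iio_push_to_buffers_with_timestamp(): channel samples first, followed by a naturally aligned s64 timestamp, the whole thing ____cacheline_aligned because it doubles as an SPI transfer buffer. The general shape of that pattern, with hypothetical names:

  /* Hypothetical driver state; layout mirrors the ad7266 change above. */
  struct example_state {
      struct spi_device *spi;
      /* ... */
      struct {
          __be16 sample[2];    /* 4 bytes of channel data */
          s64 timestamp;       /* naturally aligned 8-byte timestamp */
      } scan ____cacheline_aligned;
  };

  static void example_push(struct iio_dev *indio_dev, struct example_state *st,
                           s64 timestamp)
  {
      iio_push_to_buffers_with_timestamp(indio_dev, &st->scan, timestamp);
  }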
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 6118dce..e283f2f 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1039,10 +1039,10 @@ static const struct iio_info max1238_info = {
};
static const struct iio_info max1363_info = {
- .read_event_value_new = &max1363_read_thresh,
- .write_event_value_new = &max1363_write_thresh,
- .read_event_config_new = &max1363_read_event_config,
- .write_event_config_new = &max1363_write_event_config,
+ .read_event_value = &max1363_read_thresh,
+ .write_event_value = &max1363_write_thresh,
+ .read_event_config = &max1363_read_event_config,
+ .write_event_config = &max1363_write_event_config,
.read_raw = &max1363_read_raw,
.update_scan_mode = &max1363_update_scan_mode,
.driver_module = THIS_MODULE,
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index c8c1baa..47dcb34 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -362,7 +362,7 @@ static int mcp3422_probe(struct i2c_client *client,
| MCP3422_SAMPLE_RATE_VALUE(MCP3422_SRATE_240));
mcp3422_update_config(adc, config);
- err = iio_device_register(indio_dev);
+ err = devm_iio_device_register(&client->dev, indio_dev);
if (err < 0)
return err;
@@ -371,12 +371,6 @@ static int mcp3422_probe(struct i2c_client *client,
return 0;
}
-static int mcp3422_remove(struct i2c_client *client)
-{
- iio_device_unregister(i2c_get_clientdata(client));
- return 0;
-}
-
static const struct i2c_device_id mcp3422_id[] = {
{ "mcp3422", 2 },
{ "mcp3423", 3 },
@@ -400,7 +394,6 @@ static struct i2c_driver mcp3422_driver = {
.of_match_table = of_match_ptr(mcp3422_of_match),
},
.probe = mcp3422_probe,
- .remove = mcp3422_remove,
.id_table = mcp3422_id,
};
module_i2c_driver(mcp3422_driver);
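
mcp3422 is converted to devm_iio_device_register(), which ties unregistration to driver detach and lets the now-empty .remove callback disappear; the same conversion is applied to several drivers below (viperboard_adc, ad5421, ad5755, adis16130, adxrs450). A minimal probe using the managed calls could look like this (hypothetical driver; example_state is made up for illustration):

  struct example_state {
      struct i2c_client *client;
  };

  static int example_probe(struct i2c_client *client,
                           const struct i2c_device_id *id)
  {
      struct iio_dev *indio_dev;

      indio_dev = devm_iio_device_alloc(&client->dev,
                                        sizeof(struct example_state));
      if (!indio_dev)
          return -ENOMEM;

      /* ... set indio_dev->name, ->info, ->channels, ->num_channels ... */

      /* Unregistered and freed automatically on detach; no .remove needed. */
      return devm_iio_device_register(&client->dev, indio_dev);
  }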
diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c
index 09727a7..d0add8f 100644
--- a/drivers/iio/adc/viperboard_adc.c
+++ b/drivers/iio/adc/viperboard_adc.c
@@ -42,12 +42,6 @@ struct vprbrd_adc {
.indexed = 1, \
.channel = _index, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .scan_index = _index, \
- .scan_type = { \
- .sign = 'u', \
- .realbits = 8, \
- .storagebits = 8, \
- }, \
}
static struct iio_chan_spec const vprbrd_adc_iio_channels[] = {
@@ -73,7 +67,7 @@ static int vprbrd_iio_read_raw(struct iio_dev *iio_dev,
mutex_lock(&vb->lock);
admsg->cmd = VPRBRD_ADC_CMD_GET;
- admsg->chan = chan->scan_index;
+ admsg->chan = chan->channel;
admsg->val = 0x00;
ret = usb_control_msg(vb->usb_dev,
@@ -139,7 +133,7 @@ static int vprbrd_adc_probe(struct platform_device *pdev)
indio_dev->channels = vprbrd_adc_iio_channels;
indio_dev->num_channels = ARRAY_SIZE(vprbrd_adc_iio_channels);
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
if (ret) {
dev_err(&pdev->dev, "could not register iio (adc)");
return ret;
@@ -150,22 +144,12 @@ static int vprbrd_adc_probe(struct platform_device *pdev)
return 0;
}
-static int vprbrd_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
-
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
static struct platform_driver vprbrd_adc_driver = {
.driver = {
.name = "viperboard-adc",
.owner = THIS_MODULE,
},
.probe = vprbrd_adc_probe,
- .remove = vprbrd_adc_remove,
};
module_platform_driver(vprbrd_adc_driver);
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index cb9c636..f03b92f 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -299,7 +299,12 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \
.address = addr, \
- .scan_type = IIO_ST('u', (bits), 16, 20 - (bits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 20 - bits, \
+ }, \
.ext_info = ad5064_ext_info, \
}
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index b968af5..64634d7 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -107,7 +107,12 @@ enum ad5360_type {
BIT(IIO_CHAN_INFO_OFFSET) | \
BIT(IIO_CHAN_INFO_CALIBSCALE) | \
BIT(IIO_CHAN_INFO_CALIBBIAS), \
- .scan_type = IIO_ST('u', (bits), 16, 16 - (bits)) \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 16 - (bits), \
+ }, \
}
static const struct ad5360_chip_info ad5360_chip_info_tbl[] = {
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index a59ff0e..9de4c4d 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -261,7 +261,12 @@ static struct iio_chan_spec_ext_info ad5380_ext_info[] = {
BIT(IIO_CHAN_INFO_CALIBSCALE) | \
BIT(IIO_CHAN_INFO_CALIBBIAS), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .scan_type = IIO_ST('u', (_bits), 16, 14 - (_bits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 14 - (_bits), \
+ }, \
.ext_info = ad5380_ext_info, \
}
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 3eeaa82..787ef1d 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -75,7 +75,7 @@ struct ad5421_state {
* transfer buffers to live in their own cache lines.
*/
union {
- u32 d32;
+ __be32 d32;
u8 d8[4];
} data[2] ____cacheline_aligned;
};
@@ -114,7 +114,11 @@ static const struct iio_chan_spec ad5421_channels[] = {
BIT(IIO_CHAN_INFO_CALIBBIAS),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OFFSET),
- .scan_type = IIO_ST('u', 16, 16, 0),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ },
.event_spec = ad5421_current_event,
.num_event_specs = ARRAY_SIZE(ad5421_current_event),
},
@@ -458,9 +462,9 @@ static int ad5421_read_event_value(struct iio_dev *indio_dev,
static const struct iio_info ad5421_info = {
.read_raw = ad5421_read_raw,
.write_raw = ad5421_write_raw,
- .read_event_config_new = ad5421_read_event_config,
- .write_event_config_new = ad5421_write_event_config,
- .read_event_value_new = ad5421_read_event_value,
+ .read_event_config = ad5421_read_event_config,
+ .write_event_config = ad5421_write_event_config,
+ .read_event_value = ad5421_read_event_value,
.driver_module = THIS_MODULE,
};
@@ -514,16 +518,7 @@ static int ad5421_probe(struct spi_device *spi)
return ret;
}
- return iio_device_register(indio_dev);
-}
-
-static int ad5421_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- iio_device_unregister(indio_dev);
-
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static struct spi_driver ad5421_driver = {
@@ -532,7 +527,6 @@ static struct spi_driver ad5421_driver = {
.owner = THIS_MODULE,
},
.probe = ad5421_probe,
- .remove = ad5421_remove,
};
module_spi_driver(ad5421_driver);
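
Besides the devm conversion, ad5421 (like ad5686, ad5755 and ad5791 further down) changes its transfer-buffer union member from u32 to __be32. The data on the wire is big-endian, and the annotated type lets sparse flag any place that forgets the cpu_to_be32()/be32_to_cpu() conversion. A toy illustration of what the annotation buys:

  /* Toy example: with __be32, sparse warns if the conversion is missed. */
  static void example_pack(__be32 *wire, u32 val)
  {
      *wire = cpu_to_be32(val);   /* correct */
      /* *wire = val;                would trigger a sparse warning */
  }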
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 1263b0e..46bb62a 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -139,14 +139,19 @@ static const struct iio_chan_spec_ext_info ad5446_ext_info_powerdown[] = {
{ },
};
-#define _AD5446_CHANNEL(bits, storage, shift, ext) { \
+#define _AD5446_CHANNEL(bits, storage, _shift, ext) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.output = 1, \
.channel = 0, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .scan_type = IIO_ST('u', (bits), (storage), (shift)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = (storage), \
+ .shift = (_shift), \
+ }, \
.ext_info = (ext), \
}
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index 82e208f..64d7256 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -204,7 +204,12 @@ static const struct iio_info ad5449_info = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \
.address = (chan), \
- .scan_type = IIO_ST('u', (bits), 16, 12 - (bits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 12 - (bits), \
+ }, \
}
#define DECLARE_AD5449_CHANNELS(name, bits) \
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index c0957a9..1e64493 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -47,14 +47,16 @@
* @vref_mv: actual reference voltage used
* @pwr_down_mask power down mask
* @pwr_down_mode current power down mode
+ * @data: transfer buffer
*/
-
struct ad5504_state {
struct spi_device *spi;
struct regulator *reg;
unsigned short vref_mv;
unsigned pwr_down_mask;
unsigned pwr_down_mode;
+
+ __be16 data[2] ____cacheline_aligned;
};
/**
@@ -66,31 +68,29 @@ enum ad5504_supported_device_ids {
ID_AD5501,
};
-static int ad5504_spi_write(struct spi_device *spi, u8 addr, u16 val)
+static int ad5504_spi_write(struct ad5504_state *st, u8 addr, u16 val)
{
- u16 tmp = cpu_to_be16(AD5504_CMD_WRITE |
- AD5504_ADDR(addr) |
+ st->data[0] = cpu_to_be16(AD5504_CMD_WRITE | AD5504_ADDR(addr) |
(val & AD5504_RES_MASK));
- return spi_write(spi, (u8 *)&tmp, 2);
+ return spi_write(st->spi, &st->data[0], 2);
}
-static int ad5504_spi_read(struct spi_device *spi, u8 addr)
+static int ad5504_spi_read(struct ad5504_state *st, u8 addr)
{
- u16 tmp = cpu_to_be16(AD5504_CMD_READ | AD5504_ADDR(addr));
- u16 val;
int ret;
- struct spi_transfer t = {
- .tx_buf = &tmp,
- .rx_buf = &val,
- .len = 2,
- };
- ret = spi_sync_transfer(spi, &t, 1);
-
+ struct spi_transfer t = {
+ .tx_buf = &st->data[0],
+ .rx_buf = &st->data[1],
+ .len = 2,
+ };
+
+ st->data[0] = cpu_to_be16(AD5504_CMD_READ | AD5504_ADDR(addr));
+ ret = spi_sync_transfer(st->spi, &t, 1);
if (ret < 0)
return ret;
- return be16_to_cpu(val) & AD5504_RES_MASK;
+ return be16_to_cpu(st->data[1]) & AD5504_RES_MASK;
}
static int ad5504_read_raw(struct iio_dev *indio_dev,
@@ -104,7 +104,7 @@ static int ad5504_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = ad5504_spi_read(st->spi, chan->address);
+ ret = ad5504_spi_read(st, chan->address);
if (ret < 0)
return ret;
@@ -133,7 +133,7 @@ static int ad5504_write_raw(struct iio_dev *indio_dev,
if (val >= (1 << chan->scan_type.realbits) || val < 0)
return -EINVAL;
- return ad5504_spi_write(st->spi, chan->address, val);
+ return ad5504_spi_write(st, chan->address, val);
default:
ret = -EINVAL;
}
@@ -197,12 +197,12 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
else
st->pwr_down_mask &= ~(1 << chan->channel);
- ret = ad5504_spi_write(st->spi, AD5504_ADDR_CTRL,
+ ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
AD5504_DAC_PWR(st->pwr_down_mask));
/* writes to the CTRL register must be followed by a NOOP */
- ad5504_spi_write(st->spi, AD5504_ADDR_NOOP, 0);
+ ad5504_spi_write(st, AD5504_ADDR_NOOP, 0);
return ret ? ret : len;
}
@@ -261,7 +261,11 @@ static const struct iio_chan_spec_ext_info ad5504_ext_info[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.address = AD5504_ADDR_DAC(_chan), \
- .scan_type = IIO_ST('u', 12, 16, 0), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ }, \
.ext_info = ad5504_ext_info, \
}
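
ad5504 also moves its SPI buffers off the stack and into the device state, as a __be16 data[2] array marked ____cacheline_aligned. SPI transfers may be carried out by DMA, so the buffers must not live on the stack and should not share a cache line with fields the CPU may touch concurrently; putting them last in the state struct, on their own cache line, satisfies both constraints. A sketch of the pattern with hypothetical names:

  struct example_state {
      struct spi_device *spi;
      unsigned int some_cached_setting;

      /*
       * DMA-safe transfer buffers: last in the struct, on their own
       * cache line, in explicit wire (big-endian) format.
       */
      __be16 data[2] ____cacheline_aligned;
  };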
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 774dd96..e8199cc 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -176,7 +176,12 @@ static const struct iio_chan_spec_ext_info ad5624r_ext_info[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.address = (_chan), \
- .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 16 - (_bits), \
+ }, \
.ext_info = ad5624r_ext_info, \
}
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 30e506e..17aca4d 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -78,7 +78,7 @@ struct ad5686_state {
*/
union {
- u32 d32;
+ __be32 d32;
u8 d8[4];
} data[3] ____cacheline_aligned;
};
@@ -267,7 +267,7 @@ static const struct iio_chan_spec_ext_info ad5686_ext_info[] = {
{ },
};
-#define AD5868_CHANNEL(chan, bits, shift) { \
+#define AD5868_CHANNEL(chan, bits, _shift) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.output = 1, \
@@ -275,7 +275,12 @@ static const struct iio_chan_spec_ext_info ad5686_ext_info[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
.address = AD5686_ADDR_DAC(chan), \
- .scan_type = IIO_ST('u', bits, 16, shift), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = (_shift), \
+ }, \
.ext_info = ad5686_ext_info, \
}
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 9a78d5a..a7c851f 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -97,7 +97,7 @@ struct ad5755_state {
*/
union {
- u32 d32;
+ __be32 d32;
u8 d8[4];
} data[2] ____cacheline_aligned;
};
@@ -392,7 +392,12 @@ static const struct iio_chan_spec_ext_info ad5755_ext_info[] = {
BIT(IIO_CHAN_INFO_OFFSET) | \
BIT(IIO_CHAN_INFO_CALIBSCALE) | \
BIT(IIO_CHAN_INFO_CALIBBIAS), \
- .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 16 - (_bits), \
+ }, \
.ext_info = ad5755_ext_info, \
}
@@ -589,16 +594,7 @@ static int ad5755_probe(struct spi_device *spi)
if (ret)
return ret;
- return iio_device_register(indio_dev);
-}
-
-static int ad5755_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- iio_device_unregister(indio_dev);
-
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id ad5755_id[] = {
@@ -617,7 +613,6 @@ static struct spi_driver ad5755_driver = {
.owner = THIS_MODULE,
},
.probe = ad5755_probe,
- .remove = ad5755_remove,
.id_table = ad5755_id,
};
module_spi_driver(ad5755_driver);
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index a8ff5b2..d0d3816 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -83,7 +83,12 @@ enum ad5764_type {
BIT(IIO_CHAN_INFO_CALIBSCALE) | \
BIT(IIO_CHAN_INFO_CALIBBIAS), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET), \
- .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)) \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 16 - (_bits), \
+ }, \
}
#define DECLARE_AD5764_CHANNELS(_name, _bits) \
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index d64acbd..ae49afe 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -91,6 +91,11 @@ struct ad5791_state {
unsigned ctrl;
unsigned pwr_down_mode;
bool pwr_down;
+
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data[3] ____cacheline_aligned;
};
/**
@@ -104,48 +109,39 @@ enum ad5791_supported_device_ids {
ID_AD5791,
};
-static int ad5791_spi_write(struct spi_device *spi, u8 addr, u32 val)
+static int ad5791_spi_write(struct ad5791_state *st, u8 addr, u32 val)
{
- union {
- u32 d32;
- u8 d8[4];
- } data;
-
- data.d32 = cpu_to_be32(AD5791_CMD_WRITE |
+ st->data[0].d32 = cpu_to_be32(AD5791_CMD_WRITE |
AD5791_ADDR(addr) |
(val & AD5791_DAC_MASK));
- return spi_write(spi, &data.d8[1], 3);
+ return spi_write(st->spi, &st->data[0].d8[1], 3);
}
-static int ad5791_spi_read(struct spi_device *spi, u8 addr, u32 *val)
+static int ad5791_spi_read(struct ad5791_state *st, u8 addr, u32 *val)
{
- union {
- u32 d32;
- u8 d8[4];
- } data[3];
int ret;
struct spi_transfer xfers[] = {
{
- .tx_buf = &data[0].d8[1],
+ .tx_buf = &st->data[0].d8[1],
.bits_per_word = 8,
.len = 3,
.cs_change = 1,
}, {
- .tx_buf = &data[1].d8[1],
- .rx_buf = &data[2].d8[1],
+ .tx_buf = &st->data[1].d8[1],
+ .rx_buf = &st->data[2].d8[1],
.bits_per_word = 8,
.len = 3,
},
};
- data[0].d32 = cpu_to_be32(AD5791_CMD_READ |
+ st->data[0].d32 = cpu_to_be32(AD5791_CMD_READ |
AD5791_ADDR(addr));
- data[1].d32 = cpu_to_be32(AD5791_ADDR(AD5791_ADDR_NOOP));
+ st->data[1].d32 = cpu_to_be32(AD5791_ADDR(AD5791_ADDR_NOOP));
- ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
- *val = be32_to_cpu(data[2].d32);
+ *val = be32_to_cpu(st->data[2].d32);
return ret;
}
@@ -210,7 +206,7 @@ static ssize_t ad5791_write_dac_powerdown(struct iio_dev *indio_dev,
}
st->pwr_down = pwr_down;
- ret = ad5791_spi_write(st->spi, AD5791_ADDR_CTRL, st->ctrl);
+ ret = ad5791_spi_write(st, AD5791_ADDR_CTRL, st->ctrl);
return ret ? ret : len;
}
@@ -263,7 +259,7 @@ static int ad5791_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = ad5791_spi_read(st->spi, chan->address, val);
+ ret = ad5791_spi_read(st, chan->address, val);
if (ret)
return ret;
*val &= AD5791_DAC_MASK;
@@ -297,7 +293,7 @@ static const struct iio_chan_spec_ext_info ad5791_ext_info[] = {
{ },
};
-#define AD5791_CHAN(bits, shift) { \
+#define AD5791_CHAN(bits, _shift) { \
.type = IIO_VOLTAGE, \
.output = 1, \
.indexed = 1, \
@@ -306,7 +302,12 @@ static const struct iio_chan_spec_ext_info ad5791_ext_info[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_OFFSET), \
- .scan_type = IIO_ST('u', bits, 24, shift), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 24, \
+ .shift = (_shift), \
+ }, \
.ext_info = ad5791_ext_info, \
}
@@ -330,7 +331,7 @@ static int ad5791_write_raw(struct iio_dev *indio_dev,
val &= AD5791_RES_MASK(chan->scan_type.realbits);
val <<= chan->scan_type.shift;
- return ad5791_spi_write(st->spi, chan->address, val);
+ return ad5791_spi_write(st, chan->address, val);
default:
return -EINVAL;
@@ -393,7 +394,7 @@ static int ad5791_probe(struct spi_device *spi)
dev_warn(&spi->dev, "reference voltage unspecified\n");
}
- ret = ad5791_spi_write(spi, AD5791_ADDR_SW_CTRL, AD5791_SWCTRL_RESET);
+ ret = ad5791_spi_write(st, AD5791_ADDR_SW_CTRL, AD5791_SWCTRL_RESET);
if (ret)
goto error_disable_reg_neg;
@@ -405,7 +406,7 @@ static int ad5791_probe(struct spi_device *spi)
| ((pdata && pdata->use_rbuf_gain2) ? 0 : AD5791_CTRL_RBUF) |
AD5791_CTRL_BIN2SC;
- ret = ad5791_spi_write(spi, AD5791_ADDR_CTRL, st->ctrl |
+ ret = ad5791_spi_write(st, AD5791_ADDR_CTRL, st->ctrl |
AD5791_CTRL_OPGND | AD5791_CTRL_DACTRI);
if (ret)
goto error_disable_reg_neg;
diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c
index 6e19035..de76e6a 100644
--- a/drivers/iio/dac/max517.c
+++ b/drivers/iio/dac/max517.c
@@ -146,7 +146,6 @@ static const struct iio_info max517_info = {
.channel = (chan), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \
- .scan_type = IIO_ST('u', 8, 8, 0), \
}
static const struct iio_chan_spec max517_channels[] = {
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 9f57ae8..7d9f5c3 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -209,7 +209,6 @@ static const struct iio_chan_spec mcp4725_channel = {
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
- .scan_type = IIO_ST('u', 12, 16, 0),
.ext_info = mcp4725_ext_info,
};
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
index 445c2ae..8d08c7e 100644
--- a/drivers/iio/gyro/adis16130.c
+++ b/drivers/iio/gyro/adis16130.c
@@ -161,13 +161,7 @@ static int adis16130_probe(struct spi_device *spi)
indio_dev->info = &adis16130_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- return iio_device_register(indio_dev);
-}
-
-static int adis16130_remove(struct spi_device *spi)
-{
- iio_device_unregister(spi_get_drvdata(spi));
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static struct spi_driver adis16130_driver = {
@@ -176,7 +170,6 @@ static struct spi_driver adis16130_driver = {
.owner = THIS_MODULE,
},
.probe = adis16130_probe,
- .remove = adis16130_remove,
};
module_spi_driver(adis16130_driver);
diff --git a/drivers/iio/gyro/adxrs450.c b/drivers/iio/gyro/adxrs450.c
index 1e546ba..eb0e08e 100644
--- a/drivers/iio/gyro/adxrs450.c
+++ b/drivers/iio/gyro/adxrs450.c
@@ -434,23 +434,14 @@ static int adxrs450_probe(struct spi_device *spi)
indio_dev->num_channels = ARRAY_SIZE(adxrs450_channels);
indio_dev->name = spi->dev.driver->name;
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
return ret;
/* Get the device into a sane initial state */
ret = adxrs450_initial_setup(indio_dev);
if (ret)
- goto error_initial;
- return 0;
-error_initial:
- iio_device_unregister(indio_dev);
- return ret;
-}
-
-static int adxrs450_remove(struct spi_device *spi)
-{
- iio_device_unregister(spi_get_drvdata(spi));
+ return ret;
return 0;
}
@@ -468,7 +459,6 @@ static struct spi_driver adxrs450_driver = {
.owner = THIS_MODULE,
},
.probe = adxrs450_probe,
- .remove = adxrs450_remove,
.id_table = adxrs450_id,
};
module_spi_driver(adxrs450_driver);
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index e54f0f4..59d6bc3 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -262,6 +262,17 @@ static int gyro_3d_parse_report(struct platform_device *pdev,
st->gyro[1].index, st->gyro[1].report_id,
st->gyro[2].index, st->gyro[2].report_id);
+ /* Set Sensitivity field ids, when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_ANGL_VELOCITY,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
return ret;
}
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
new file mode 100644
index 0000000..463c4d9
--- /dev/null
+++ b/drivers/iio/humidity/Kconfig
@@ -0,0 +1,15 @@
+#
+# humidity sensor drivers
+#
+menu "Humidity sensors"
+
+config DHT11
+ tristate "DHT11 (and compatible sensors) driver"
+ depends on GPIOLIB
+ help
+ This driver supports reading data via a single
+ interrupt-generating GPIO line. Currently tested are the DHT11
+ and DHT22. Other sensors should work as well, as long as they
+ speak the same protocol.
+
+endmenu
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
new file mode 100644
index 0000000..d5d36c0
--- /dev/null
+++ b/drivers/iio/humidity/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for IIO humidity sensor drivers
+#
+
+obj-$(CONFIG_DHT11) += dht11.o
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
new file mode 100644
index 0000000..d8771f5
--- /dev/null
+++ b/drivers/iio/humidity/dht11.c
@@ -0,0 +1,294 @@
+/*
+ * DHT11/DHT22 bit banging GPIO driver
+ *
+ * Copyright (c) Harald Geyer <harald@ccbib.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/sysfs.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#include <linux/iio/iio.h>
+
+#define DRIVER_NAME "dht11"
+
+#define DHT11_DATA_VALID_TIME 2000000000 /* 2s in ns */
+
+#define DHT11_EDGES_PREAMBLE 4
+#define DHT11_BITS_PER_READ 40
+#define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1)
+
+/* Data transmission timing (nanoseconds) */
+#define DHT11_START_TRANSMISSION 18 /* ms */
+#define DHT11_SENSOR_RESPONSE 80000
+#define DHT11_START_BIT 50000
+#define DHT11_DATA_BIT_LOW 27000
+#define DHT11_DATA_BIT_HIGH 70000
+
+struct dht11 {
+ struct device *dev;
+
+ int gpio;
+ int irq;
+
+ struct completion completion;
+
+ s64 timestamp;
+ int temperature;
+ int humidity;
+
+ /* num_edges: -1 means "no transmission in progress" */
+ int num_edges;
+ struct {s64 ts; int value; } edges[DHT11_EDGES_PER_READ];
+};
+
+static unsigned char dht11_decode_byte(int *timing, int threshold)
+{
+ unsigned char ret = 0;
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ ret <<= 1;
+ if (timing[i] >= threshold)
+ ++ret;
+ }
+
+ return ret;
+}
+
+static int dht11_decode(struct dht11 *dht11, int offset)
+{
+ int i, t, timing[DHT11_BITS_PER_READ], threshold,
+ timeres = DHT11_SENSOR_RESPONSE;
+ unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum;
+
+ /* Calculate timestamp resolution */
+ for (i = 0; i < dht11->num_edges; ++i) {
+ t = dht11->edges[i].ts - dht11->edges[i-1].ts;
+ if (t > 0 && t < timeres)
+ timeres = t;
+ }
+ if (2*timeres > DHT11_DATA_BIT_HIGH) {
+ pr_err("dht11: timeresolution %d too bad for decoding\n",
+ timeres);
+ return -EIO;
+ }
+ threshold = DHT11_DATA_BIT_HIGH / timeres;
+ if (DHT11_DATA_BIT_LOW/timeres + 1 >= threshold)
+ pr_err("dht11: WARNING: decoding ambiguous\n");
+
+ /* scale down with timeres and check validity */
+ for (i = 0; i < DHT11_BITS_PER_READ; ++i) {
+ t = dht11->edges[offset + 2*i + 2].ts -
+ dht11->edges[offset + 2*i + 1].ts;
+ if (!dht11->edges[offset + 2*i + 1].value)
+ return -EIO; /* lost synchronisation */
+ timing[i] = t / timeres;
+ }
+
+ hum_int = dht11_decode_byte(timing, threshold);
+ hum_dec = dht11_decode_byte(&timing[8], threshold);
+ temp_int = dht11_decode_byte(&timing[16], threshold);
+ temp_dec = dht11_decode_byte(&timing[24], threshold);
+ checksum = dht11_decode_byte(&timing[32], threshold);
+
+ if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
+ return -EIO;
+
+ dht11->timestamp = iio_get_time_ns();
+ if (hum_int < 20) { /* DHT22 */
+ dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) *
+ ((temp_int & 0x80) ? -100 : 100);
+ dht11->humidity = ((hum_int << 8) + hum_dec) * 100;
+ } else if (temp_dec == 0 && hum_dec == 0) { /* DHT11 */
+ dht11->temperature = temp_int * 1000;
+ dht11->humidity = hum_int * 1000;
+ } else {
+ dev_err(dht11->dev,
+ "Don't know how to decode data: %d %d %d %d\n",
+ hum_int, hum_dec, temp_int, temp_dec);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int dht11_read_raw(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long m)
+{
+ struct dht11 *dht11 = iio_priv(iio_dev);
+ int ret;
+
+ if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) {
+ reinit_completion(&dht11->completion);
+
+ dht11->num_edges = 0;
+ ret = gpio_direction_output(dht11->gpio, 0);
+ if (ret)
+ goto err;
+ msleep(DHT11_START_TRANSMISSION);
+ ret = gpio_direction_input(dht11->gpio);
+ if (ret)
+ goto err;
+
+ ret = wait_for_completion_killable_timeout(&dht11->completion,
+ HZ);
+ if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
+ dev_err(&iio_dev->dev,
+ "Only %d signal edges detected\n",
+ dht11->num_edges);
+ ret = -ETIMEDOUT;
+ }
+ if (ret < 0)
+ goto err;
+
+ ret = dht11_decode(dht11,
+ dht11->num_edges == DHT11_EDGES_PER_READ ?
+ DHT11_EDGES_PREAMBLE :
+ DHT11_EDGES_PREAMBLE - 2);
+ if (ret)
+ goto err;
+ }
+
+ ret = IIO_VAL_INT;
+ if (chan->type == IIO_TEMP)
+ *val = dht11->temperature;
+ else if (chan->type == IIO_HUMIDITYRELATIVE)
+ *val = dht11->humidity;
+ else
+ ret = -EINVAL;
+err:
+ dht11->num_edges = -1;
+ return ret;
+}
+
+static const struct iio_info dht11_iio_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = dht11_read_raw,
+};
+
+/*
+ * IRQ handler called on GPIO edges
+ */
+static irqreturn_t dht11_handle_irq(int irq, void *data)
+{
+ struct iio_dev *iio = data;
+ struct dht11 *dht11 = iio_priv(iio);
+
+ /* TODO: Consider making the handler safe for IRQ sharing */
+ if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
+ dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
+ dht11->edges[dht11->num_edges++].value =
+ gpio_get_value(dht11->gpio);
+
+ if (dht11->num_edges >= DHT11_EDGES_PER_READ)
+ complete(&dht11->completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_chan_spec dht11_chan_spec[] = {
+ { .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), },
+ { .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), }
+};
+
+static const struct of_device_id dht11_dt_ids[] = {
+ { .compatible = "dht11", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, dht11_dt_ids);
+
+static int dht11_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct dht11 *dht11;
+ struct iio_dev *iio;
+ int ret;
+
+ iio = devm_iio_device_alloc(dev, sizeof(*dht11));
+ if (!iio) {
+ dev_err(dev, "Failed to allocate IIO device\n");
+ return -ENOMEM;
+ }
+
+ dht11 = iio_priv(iio);
+ dht11->dev = dev;
+
+ dht11->gpio = ret = of_get_gpio(node, 0);
+ if (ret < 0)
+ return ret;
+ ret = devm_gpio_request_one(dev, dht11->gpio, GPIOF_IN, pdev->name);
+ if (ret)
+ return ret;
+
+ dht11->irq = gpio_to_irq(dht11->gpio);
+ if (dht11->irq < 0) {
+ dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio);
+ return -EINVAL;
+ }
+ ret = devm_request_irq(dev, dht11->irq, dht11_handle_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ pdev->name, iio);
+ if (ret)
+ return ret;
+
+ dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1;
+ dht11->num_edges = -1;
+
+ platform_set_drvdata(pdev, iio);
+
+ init_completion(&dht11->completion);
+ iio->name = pdev->name;
+ iio->dev.parent = &pdev->dev;
+ iio->info = &dht11_iio_info;
+ iio->modes = INDIO_DIRECT_MODE;
+ iio->channels = dht11_chan_spec;
+ iio->num_channels = ARRAY_SIZE(dht11_chan_spec);
+
+ return devm_iio_device_register(dev, iio);
+}
+
+static struct platform_driver dht11_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = dht11_dt_ids,
+ },
+ .probe = dht11_probe,
+};
+
+module_platform_driver(dht11_driver);
+
+MODULE_AUTHOR("Harald Geyer <harald@ccbib.org>");
+MODULE_DESCRIPTION("DHT11 humidity/temperature sensor driver");
+MODULE_LICENSE("GPL v2");
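
The new dht11 driver exposes its two processed channels through the usual IIO sysfs interface; dht11_decode() scales the readings to milli-degrees Celsius and milli-percent relative humidity. A user-space sketch for reading them (the device index and the attribute names in_temp_input / in_humidityrelative_input follow the normal IIO naming for processed channels, but treat both as assumptions for your system):

  #include <stdio.h>

  /* Sketch only; adjust the iio:deviceN index for your system. */
  static long read_iio_attr(const char *path)
  {
      FILE *f = fopen(path, "r");
      long val;

      if (!f)
          return -1;
      if (fscanf(f, "%ld", &val) != 1)
          val = -1;
      fclose(f);
      return val;
  }

  int main(void)
  {
      printf("temperature: %ld mdegC\n", read_iio_attr(
          "/sys/bus/iio/devices/iio:device0/in_temp_input"));
      printf("humidity: %ld m%%RH\n", read_iio_attr(
          "/sys/bus/iio/devices/iio:device0/in_humidityrelative_input"));
      return 0;
  }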
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 7f9152c..c67d83b 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -37,6 +37,14 @@ static bool iio_buffer_is_active(struct iio_buffer *buf)
return !list_empty(&buf->buffer_list);
}
+static bool iio_buffer_data_available(struct iio_buffer *buf)
+{
+ if (buf->access->data_available)
+ return buf->access->data_available(buf);
+
+ return buf->stufftoread;
+}
+
/**
* iio_buffer_read_first_n_outer() - chrdev read for buffer access
*
@@ -48,13 +56,34 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
{
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
+ int ret;
if (!indio_dev->info)
return -ENODEV;
if (!rb || !rb->access->read_first_n)
return -EINVAL;
- return rb->access->read_first_n(rb, n, buf);
+
+ do {
+ if (!iio_buffer_data_available(rb)) {
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(rb->pollq,
+ iio_buffer_data_available(rb) ||
+ indio_dev->info == NULL);
+ if (ret)
+ return ret;
+ if (indio_dev->info == NULL)
+ return -ENODEV;
+ }
+
+ ret = rb->access->read_first_n(rb, n, buf);
+ if (ret == 0 && (filp->f_flags & O_NONBLOCK))
+ ret = -EAGAIN;
+ } while (ret == 0);
+
+ return ret;
}
/**
@@ -70,7 +99,7 @@ unsigned int iio_buffer_poll(struct file *filp,
return -ENODEV;
poll_wait(filp, &rb->pollq, wait);
- if (rb->stufftoread)
+ if (iio_buffer_data_available(rb))
return POLLIN | POLLRDNORM;
/* need a way of knowing if there may be enough data... */
return 0;
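
With this change a read() on the buffer chrdev blocks until the new data_available() hook (or the legacy stufftoread flag) reports data, unless the file was opened O_NONBLOCK, and poll() keys off the same condition. From user space the behaviour can be used like this (device node path and scan size are assumptions):

  #include <fcntl.h>
  #include <poll.h>
  #include <unistd.h>

  /* Wait up to 5 s for a scan and read it; returns bytes read or -1. */
  static int example_wait_and_read(char *buf, size_t scan_bytes)
  {
      int fd = open("/dev/iio:device0", O_RDONLY);   /* assumed node */
      struct pollfd pfd;
      int ret = -1;

      if (fd < 0)
          return -1;
      pfd.fd = fd;
      pfd.events = POLLIN;
      if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN))
          ret = read(fd, buf, scan_bytes);
      close(fd);
      return ret;
  }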
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 18f72e3..acc911a 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -69,6 +69,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_ALTVOLTAGE] = "altvoltage",
[IIO_CCT] = "cct",
[IIO_PRESSURE] = "pressure",
+ [IIO_HUMIDITYRELATIVE] = "humidityrelative",
};
static const char * const iio_modifier_names[] = {
@@ -107,6 +108,11 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_INT_TIME] = "integration_time",
};
+/**
+ * iio_find_channel_from_si() - get channel from its scan index
+ * @indio_dev: device
+ * @si: scan index to match
+ */
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
{
@@ -922,6 +928,10 @@ struct device_type iio_device_type = {
.release = iio_dev_release,
};
+/**
+ * iio_device_alloc() - allocate an iio_dev from a driver
+ * @sizeof_priv: Space to allocate for private structure.
+ **/
struct iio_dev *iio_device_alloc(int sizeof_priv)
{
struct iio_dev *dev;
@@ -962,6 +972,10 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
}
EXPORT_SYMBOL(iio_device_alloc);
+/**
+ * iio_device_free() - free an iio_dev from a driver
+ * @dev: the iio_dev associated with the device
+ **/
void iio_device_free(struct iio_dev *dev)
{
if (dev)
@@ -984,6 +998,20 @@ static int devm_iio_device_match(struct device *dev, void *res, void *data)
return *r == data;
}
+/**
+ * devm_iio_device_alloc - Resource-managed iio_device_alloc()
+ * @dev: Device to allocate iio_dev for
+ * @sizeof_priv: Space to allocate for private structure.
+ *
+ * Managed iio_device_alloc. iio_dev allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * If an iio_dev allocated with this function needs to be freed separately,
+ * devm_iio_device_free() must be used.
+ *
+ * RETURNS:
+ * Pointer to allocated iio_dev on success, NULL on failure.
+ */
struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv)
{
struct iio_dev **ptr, *iio_dev;
@@ -1006,6 +1034,13 @@ struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv)
}
EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
+/**
+ * devm_iio_device_free - Resource-managed iio_device_free()
+ * @dev: Device this iio_dev belongs to
+ * @iio_dev: the iio_dev associated with the device
+ *
+ * Free iio_dev allocated with devm_iio_device_alloc().
+ */
void devm_iio_device_free(struct device *dev, struct iio_dev *iio_dev)
{
int rc;
@@ -1080,6 +1115,10 @@ static const struct file_operations iio_buffer_fileops = {
static const struct iio_buffer_setup_ops noop_ring_setup_ops;
+/**
+ * iio_device_register() - register a device with the IIO subsystem
+ * @indio_dev: Device structure filled by the device driver
+ **/
int iio_device_register(struct iio_dev *indio_dev)
{
int ret;
@@ -1141,6 +1180,10 @@ error_ret:
}
EXPORT_SYMBOL(iio_device_register);
+/**
+ * iio_device_unregister() - unregister a device from the IIO subsystem
+ * @indio_dev: Device structure representing the device.
+ **/
void iio_device_unregister(struct iio_dev *indio_dev)
{
mutex_lock(&indio_dev->info_exist_lock);
@@ -1161,6 +1204,65 @@ void iio_device_unregister(struct iio_dev *indio_dev)
mutex_unlock(&indio_dev->info_exist_lock);
}
EXPORT_SYMBOL(iio_device_unregister);
+
+static void devm_iio_device_unreg(struct device *dev, void *res)
+{
+ iio_device_unregister(*(struct iio_dev **)res);
+}
+
+/**
+ * devm_iio_device_register - Resource-managed iio_device_register()
+ * @dev: Device to allocate iio_dev for
+ * @indio_dev: Device structure filled by the device driver
+ *
+ * Managed iio_device_register. The IIO device registered with this
+ * function is automatically unregistered on driver detach. This function
+ * calls iio_device_register() internally. Refer to that function for more
+ * information.
+ *
+ * If an iio_dev registered with this function needs to be unregistered
+ * separately, devm_iio_device_unregister() must be used.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev)
+{
+ struct iio_dev **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_iio_device_unreg, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ *ptr = indio_dev;
+ ret = iio_device_register(indio_dev);
+ if (!ret)
+ devres_add(dev, ptr);
+ else
+ devres_free(ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_iio_device_register);
+
+/**
+ * devm_iio_device_unregister - Resource-managed iio_device_unregister()
+ * @dev: Device this iio_dev belongs to
+ * @indio_dev: the iio_dev associated with the device
+ *
+ * Unregister iio_dev registered with devm_iio_device_register().
+ */
+void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_iio_device_unreg,
+ devm_iio_device_match, indio_dev);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_device_unregister);
+
subsys_initcall(iio_init);
module_exit(iio_exit);
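
The new devm_iio_device_register()/devm_iio_device_unregister() pair follows the standard devres recipe: allocate a small record via devres_alloc() whose release callback undoes the operation, and attach it with devres_add() only if the operation itself succeeded. The same recipe works for any resource; a stripped-down illustration with a hypothetical example_get()/example_put() pair standing in for the managed operation:

  struct example;
  int example_get(struct example *ex);    /* hypothetical acquire */
  void example_put(struct example *ex);   /* hypothetical release */

  static void devm_example_release(struct device *dev, void *res)
  {
      example_put(*(struct example **)res);
  }

  int devm_example_get(struct device *dev, struct example *ex)
  {
      struct example **ptr;
      int ret;

      ptr = devres_alloc(devm_example_release, sizeof(*ptr), GFP_KERNEL);
      if (!ptr)
          return -ENOMEM;

      *ptr = ex;
      ret = example_get(ex);          /* the operation being managed */
      if (!ret)
          devres_add(dev, ptr);       /* release runs on driver detach */
      else
          devres_free(ptr);

      return ret;
  }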
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index c10eab6..c9c1419 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -42,6 +42,12 @@ struct iio_event_interface {
struct attribute_group group;
};
+/**
+ * iio_push_event() - try to add event to the list for userspace reading
+ * @indio_dev: IIO device structure
+ * @ev_code: What event
+ * @timestamp: When the event occurred
+ **/
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
struct iio_event_interface *ev_int = indio_dev->event_interface;
@@ -236,13 +242,9 @@ static ssize_t iio_ev_state_store(struct device *dev,
if (ret < 0)
return ret;
- if (indio_dev->info->write_event_config)
- ret = indio_dev->info->write_event_config(indio_dev,
- this_attr->address, val);
- else
- ret = indio_dev->info->write_event_config_new(indio_dev,
- this_attr->c, iio_ev_attr_type(this_attr),
- iio_ev_attr_dir(this_attr), val);
+ ret = indio_dev->info->write_event_config(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), val);
return (ret < 0) ? ret : len;
}
@@ -255,13 +257,9 @@ static ssize_t iio_ev_state_show(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int val;
- if (indio_dev->info->read_event_config)
- val = indio_dev->info->read_event_config(indio_dev,
- this_attr->address);
- else
- val = indio_dev->info->read_event_config_new(indio_dev,
- this_attr->c, iio_ev_attr_type(this_attr),
- iio_ev_attr_dir(this_attr));
+ val = indio_dev->info->read_event_config(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr));
if (val < 0)
return val;
else
@@ -277,21 +275,13 @@ static ssize_t iio_ev_value_show(struct device *dev,
int val, val2;
int ret;
- if (indio_dev->info->read_event_value) {
- ret = indio_dev->info->read_event_value(indio_dev,
- this_attr->address, &val);
- if (ret < 0)
- return ret;
- return sprintf(buf, "%d\n", val);
- } else {
- ret = indio_dev->info->read_event_value_new(indio_dev,
- this_attr->c, iio_ev_attr_type(this_attr),
- iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
- &val, &val2);
- if (ret < 0)
- return ret;
- return iio_format_value(buf, ret, val, val2);
- }
+ ret = indio_dev->info->read_event_value(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
+ &val, &val2);
+ if (ret < 0)
+ return ret;
+ return iio_format_value(buf, ret, val, val2);
}
static ssize_t iio_ev_value_store(struct device *dev,
@@ -304,25 +294,16 @@ static ssize_t iio_ev_value_store(struct device *dev,
int val, val2;
int ret;
- if (!indio_dev->info->write_event_value &&
- !indio_dev->info->write_event_value_new)
+ if (!indio_dev->info->write_event_value)
return -EINVAL;
- if (indio_dev->info->write_event_value) {
- ret = kstrtoint(buf, 10, &val);
- if (ret)
- return ret;
- ret = indio_dev->info->write_event_value(indio_dev,
- this_attr->address, val);
- } else {
- ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
- if (ret)
- return ret;
- ret = indio_dev->info->write_event_value_new(indio_dev,
- this_attr->c, iio_ev_attr_type(this_attr),
- iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
- val, val2);
- }
+ ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
+ if (ret)
+ return ret;
+ ret = indio_dev->info->write_event_value(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
+ val, val2);
if (ret < 0)
return ret;
@@ -371,7 +352,7 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
return attrcount;
}
-static int iio_device_add_event_sysfs_new(struct iio_dev *indio_dev,
+static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
int ret = 0, i, attrcount = 0;
@@ -414,89 +395,6 @@ error_ret:
return ret;
}
-static int iio_device_add_event_sysfs_old(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan)
-{
- int ret = 0, i, attrcount = 0;
- u64 mask = 0;
- char *postfix;
- if (!chan->event_mask)
- return 0;
-
- for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
- postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
- iio_ev_type_text[i/IIO_EV_DIR_MAX],
- iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
- if (postfix == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- if (chan->modified)
- mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel2,
- i/IIO_EV_DIR_MAX,
- i%IIO_EV_DIR_MAX);
- else if (chan->differential)
- mask = IIO_EVENT_CODE(chan->type,
- 0, 0,
- i%IIO_EV_DIR_MAX,
- i/IIO_EV_DIR_MAX,
- 0,
- chan->channel,
- chan->channel2);
- else
- mask = IIO_UNMOD_EVENT_CODE(chan->type,
- chan->channel,
- i/IIO_EV_DIR_MAX,
- i%IIO_EV_DIR_MAX);
-
- ret = __iio_add_chan_devattr(postfix,
- chan,
- &iio_ev_state_show,
- iio_ev_state_store,
- mask,
- 0,
- &indio_dev->dev,
- &indio_dev->event_interface->
- dev_attr_list);
- kfree(postfix);
- if (ret)
- goto error_ret;
- attrcount++;
- postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
- iio_ev_type_text[i/IIO_EV_DIR_MAX],
- iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
- if (postfix == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = __iio_add_chan_devattr(postfix, chan,
- iio_ev_value_show,
- iio_ev_value_store,
- mask,
- 0,
- &indio_dev->dev,
- &indio_dev->event_interface->
- dev_attr_list);
- kfree(postfix);
- if (ret)
- goto error_ret;
- attrcount++;
- }
- ret = attrcount;
-error_ret:
- return ret;
-}
-
-
-static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan)
-{
- if (chan->event_mask)
- return iio_device_add_event_sysfs_old(indio_dev, chan);
- else
- return iio_device_add_event_sysfs_new(indio_dev, chan);
-}
-
static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
int j, ret, attrcount = 0;
@@ -517,8 +415,6 @@ static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
int j;
for (j = 0; j < indio_dev->num_channels; j++) {
- if (indio_dev->channels[j].event_mask != 0)
- return true;
if (indio_dev->channels[j].num_event_specs != 0)
return true;
}
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index bf5e70a..766fab2 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -55,15 +55,7 @@ static struct attribute *iio_trig_dev_attrs[] = {
&dev_attr_name.attr,
NULL,
};
-
-static struct attribute_group iio_trig_attr_group = {
- .attrs = iio_trig_dev_attrs,
-};
-
-static const struct attribute_group *iio_trig_attr_groups[] = {
- &iio_trig_attr_group,
- NULL
-};
+ATTRIBUTE_GROUPS(iio_trig_dev);
int iio_trigger_register(struct iio_trigger *trig_info)
{
@@ -318,7 +310,7 @@ static ssize_t iio_trigger_read_current(struct device *dev,
* iio_trigger_write_current() - trigger consumer sysfs set current trigger
*
* For trigger consumers the current_trigger interface allows the trigger
- * used for this device to be specified at run time based on the triggers
+ * used for this device to be specified at run time based on the trigger's
* name.
**/
static ssize_t iio_trigger_write_current(struct device *dev,
@@ -356,7 +348,7 @@ static ssize_t iio_trigger_write_current(struct device *dev,
indio_dev->trig = trig;
- if (oldtrig && indio_dev->trig != oldtrig)
+ if (oldtrig)
iio_trigger_put(oldtrig);
if (indio_dev->trig)
iio_trigger_get(indio_dev->trig);
@@ -403,7 +395,7 @@ static void iio_trig_release(struct device *device)
static struct device_type iio_trig_type = {
.release = iio_trig_release,
- .groups = iio_trig_attr_groups,
+ .groups = iio_trig_dev_groups,
};
static void iio_trig_subirqmask(struct irq_data *d)
@@ -506,6 +498,23 @@ static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
return *r == data;
}
+/**
+ * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
+ * @dev: Device to allocate iio_trigger for
+ * @fmt: trigger name format. If it includes format
+ * specifiers, the additional arguments following
+ * format are formatted and inserted in the resulting
+ * string replacing their respective specifiers.
+ *
+ * Managed iio_trigger_alloc. iio_trigger allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * If an iio_trigger allocated with this function needs to be freed separately,
+ * devm_iio_trigger_free() must be used.
+ *
+ * RETURNS:
+ * Pointer to allocated iio_trigger on success, NULL on failure.
+ */
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
const char *fmt, ...)
{
@@ -532,6 +541,13 @@ struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
+/**
+ * devm_iio_trigger_free - Resource-managed iio_trigger_free()
+ * @dev: Device this iio_dev belongs to
+ * @iio_trig: the iio_trigger associated with the device
+ *
+ * Free iio_trigger allocated with devm_iio_trigger_alloc().
+ */
void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
{
int rc;
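
In the trigger code, the open-coded attribute_group plus array-of-groups boilerplate is replaced by the ATTRIBUTE_GROUPS() helper from <linux/sysfs.h>. From the iio_trig_dev_attrs array it generates both the group and the NULL-terminated iio_trig_dev_groups list that the device_type now points at; roughly (paraphrasing the macro, not quoting it):

  /* Approximately what ATTRIBUTE_GROUPS(iio_trig_dev) expands to. */
  static const struct attribute_group iio_trig_dev_group = {
      .attrs = iio_trig_dev_attrs,
  };
  static const struct attribute_group *iio_trig_dev_groups[] = {
      &iio_trig_dev_group,
      NULL,
  };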
diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
index 95c6fc8..7134e8a 100644
--- a/drivers/iio/kfifo_buf.c
+++ b/drivers/iio/kfifo_buf.c
@@ -42,7 +42,6 @@ static int iio_request_update_kfifo(struct iio_buffer *r)
} else {
kfifo_reset_out(&buf->kf);
}
- r->stufftoread = false;
mutex_unlock(&buf->user_lock);
return ret;
@@ -108,7 +107,7 @@ static int iio_store_to_kfifo(struct iio_buffer *r,
ret = kfifo_in(&kf->kf, data, 1);
if (ret != 1)
return -EBUSY;
- r->stufftoread = true;
+
wake_up_interruptible_poll(&r->pollq, POLLIN | POLLRDNORM);
return 0;
@@ -127,13 +126,6 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
ret = -EINVAL;
else
ret = kfifo_to_user(&kf->kf, buf, n, &copied);
-
- if (kfifo_is_empty(&kf->kf))
- r->stufftoread = false;
- /* verify it is still empty to avoid race */
- if (!kfifo_is_empty(&kf->kf))
- r->stufftoread = true;
-
mutex_unlock(&kf->user_lock);
if (ret < 0)
return ret;
@@ -141,6 +133,18 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
return copied;
}
+static bool iio_kfifo_buf_data_available(struct iio_buffer *r)
+{
+ struct iio_kfifo *kf = iio_to_kfifo(r);
+ bool empty;
+
+ mutex_lock(&kf->user_lock);
+ empty = kfifo_is_empty(&kf->kf);
+ mutex_unlock(&kf->user_lock);
+
+ return !empty;
+}
+
static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
{
struct iio_kfifo *kf = iio_to_kfifo(buffer);
@@ -153,6 +157,7 @@ static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
static const struct iio_buffer_access_funcs kfifo_access_funcs = {
.store_to = &iio_store_to_kfifo,
.read_first_n = &iio_read_first_n_kfifo,
+ .data_available = iio_kfifo_buf_data_available,
.request_update = &iio_request_update_kfifo,
.get_bytes_per_datum = &iio_get_bytes_per_datum_kfifo,
.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index a022f27..d12b2a0 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -27,6 +27,17 @@ config APDS9300
To compile this driver as a module, choose M here: the
module will be called apds9300.
+config CM32181
+ depends on I2C
+ tristate "CM32181 driver"
+ help
+ Say Y here if you use cm32181.
+ This option enables the ambient light sensor driver for the
+ Capella cm32181 device.
+
+ To compile this driver as a module, choose M here:
+ the module will be called cm32181.
+
config CM36651
depends on I2C
tristate "CM36651 driver"
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index daa327f..60e35ac 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -5,6 +5,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ADJD_S311) += adjd_s311.o
obj-$(CONFIG_APDS9300) += apds9300.o
+obj-$(CONFIG_CM32181) += cm32181.o
obj-$(CONFIG_CM36651) += cm36651.o
obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o
obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 83d15c5..f306847 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -155,7 +155,12 @@ done:
BIT(IIO_CHAN_INFO_INT_TIME), \
.channel2 = (IIO_MOD_LIGHT_##_color), \
.scan_index = (_scan_idx), \
- .scan_type = IIO_ST('u', 10, 16, 0), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 10, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
}
static const struct iio_chan_spec adjd_s311_channels[] = {
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
index 51097bb..9ddde0c 100644
--- a/drivers/iio/light/apds9300.c
+++ b/drivers/iio/light/apds9300.c
@@ -344,10 +344,10 @@ static const struct iio_info apds9300_info_no_irq = {
static const struct iio_info apds9300_info = {
.driver_module = THIS_MODULE,
.read_raw = apds9300_read_raw,
- .read_event_value_new = apds9300_read_thresh,
- .write_event_value_new = apds9300_write_thresh,
- .read_event_config_new = apds9300_read_interrupt_config,
- .write_event_config_new = apds9300_write_interrupt_config,
+ .read_event_value = apds9300_read_thresh,
+ .write_event_value = apds9300_write_thresh,
+ .read_event_config = apds9300_read_interrupt_config,
+ .write_event_config = apds9300_write_interrupt_config,
};
static const struct iio_event_spec apds9300_event_spec[] = {
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
new file mode 100644
index 0000000..f17b4e6
--- /dev/null
+++ b/drivers/iio/light/cm32181.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright (C) 2013 Capella Microsystems Inc.
+ * Author: Kevin Tsai <ktsai@capellamicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2, as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/init.h>
+
+/* Register addresses */
+#define CM32181_REG_ADDR_CMD 0x00
+#define CM32181_REG_ADDR_ALS 0x04
+#define CM32181_REG_ADDR_STATUS 0x06
+#define CM32181_REG_ADDR_ID 0x07
+
+/* Number of Configurable Registers */
+#define CM32181_CONF_REG_NUM 0x01
+
+/* CMD register */
+#define CM32181_CMD_ALS_ENABLE 0x00
+#define CM32181_CMD_ALS_DISABLE 0x01
+#define CM32181_CMD_ALS_INT_EN 0x02
+
+#define CM32181_CMD_ALS_IT_SHIFT 6
+#define CM32181_CMD_ALS_IT_MASK (0x0F << CM32181_CMD_ALS_IT_SHIFT)
+#define CM32181_CMD_ALS_IT_DEFAULT (0x00 << CM32181_CMD_ALS_IT_SHIFT)
+
+#define CM32181_CMD_ALS_SM_SHIFT 11
+#define CM32181_CMD_ALS_SM_MASK (0x03 << CM32181_CMD_ALS_SM_SHIFT)
+#define CM32181_CMD_ALS_SM_DEFAULT (0x01 << CM32181_CMD_ALS_SM_SHIFT)
+
+#define CM32181_MLUX_PER_BIT 5 /* ALS_SM=01 IT=800ms */
+#define CM32181_MLUX_PER_BIT_BASE_IT 800000 /* Based on IT=800ms */
+#define CM32181_CALIBSCALE_DEFAULT 1000
+#define CM32181_CALIBSCALE_RESOLUTION 1000
+#define MLUX_PER_LUX 1000
+
+static const u8 cm32181_reg[CM32181_CONF_REG_NUM] = {
+ CM32181_REG_ADDR_CMD,
+};
+
+static const int als_it_bits[] = {12, 8, 0, 1, 2, 3};
+static const int als_it_value[] = {25000, 50000, 100000, 200000, 400000,
+ 800000};
+
+struct cm32181_chip {
+ struct i2c_client *client;
+ struct mutex lock;
+ u16 conf_regs[CM32181_CONF_REG_NUM];
+ int calibscale;
+};
+
+/**
+ * cm32181_reg_init() - Initialize CM32181 registers
+ * @cm32181: pointer of struct cm32181.
+ *
+ * Initialize the CM32181 ambient light sensor registers to their default values.
+ *
+ * Return: 0 on success; otherwise an error code.
+ */
+static int cm32181_reg_init(struct cm32181_chip *cm32181)
+{
+ struct i2c_client *client = cm32181->client;
+ int i;
+ s32 ret;
+
+ ret = i2c_smbus_read_word_data(client, CM32181_REG_ADDR_ID);
+ if (ret < 0)
+ return ret;
+
+ /* check device ID */
+ if ((ret & 0xFF) != 0x81)
+ return -ENODEV;
+
+ /* Default Values */
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD] = CM32181_CMD_ALS_ENABLE |
+ CM32181_CMD_ALS_IT_DEFAULT | CM32181_CMD_ALS_SM_DEFAULT;
+ cm32181->calibscale = CM32181_CALIBSCALE_DEFAULT;
+
+ /* Initialize registers */
+ for (i = 0; i < CM32181_CONF_REG_NUM; i++) {
+ ret = i2c_smbus_write_word_data(client, cm32181_reg[i],
+ cm32181->conf_regs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * cm32181_read_als_it() - Get sensor integration time (ms)
+ * @cm32181: pointer of struct cm32181
+ * @val: pointer of int to load the als_it value.
+ *
+ * Report the current integration time in milliseconds.
+ *
+ * Return: IIO_VAL_INT for success, otherwise -EINVAL.
+ */
+static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val)
+{
+ u16 als_it;
+ int i;
+
+ als_it = cm32181->conf_regs[CM32181_REG_ADDR_CMD];
+ als_it &= CM32181_CMD_ALS_IT_MASK;
+ als_it >>= CM32181_CMD_ALS_IT_SHIFT;
+ for (i = 0; i < ARRAY_SIZE(als_it_bits); i++) {
+ if (als_it == als_it_bits[i]) {
+ *val = als_it_value[i];
+ return IIO_VAL_INT;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * cm32181_write_als_it() - Write sensor integration time
+ * @cm32181: pointer of struct cm32181.
+ * @val: integration time in milliseconds.
+ *
+ * Convert integration time (ms) to sensor value.
+ *
+ * Return: i2c_smbus_write_word_data command return value.
+ */
+static int cm32181_write_als_it(struct cm32181_chip *cm32181, int val)
+{
+ struct i2c_client *client = cm32181->client;
+ u16 als_it;
+ int ret, i, n;
+
+ n = ARRAY_SIZE(als_it_value);
+ for (i = 0; i < n; i++)
+ if (val <= als_it_value[i])
+ break;
+ if (i >= n)
+ i = n - 1;
+
+ als_it = als_it_bits[i];
+ als_it <<= CM32181_CMD_ALS_IT_SHIFT;
+
+ mutex_lock(&cm32181->lock);
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD] &=
+ ~CM32181_CMD_ALS_IT_MASK;
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD] |=
+ als_it;
+ ret = i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD]);
+ mutex_unlock(&cm32181->lock);
+
+ return ret;
+}
+
+/**
+ * cm32181_get_lux() - report current lux value
+ * @cm32181: pointer of struct cm32181.
+ *
+ * Convert sensor raw data to lux. It depends on the integration
+ * time and the calibscale variable.
+ *
+ * Return: Positive value is lux, otherwise an error code.
+ */
+static int cm32181_get_lux(struct cm32181_chip *cm32181)
+{
+ struct i2c_client *client = cm32181->client;
+ int ret;
+ int als_it;
+ unsigned long lux;
+
+ ret = cm32181_read_als_it(cm32181, &als_it);
+ if (ret < 0)
+ return -EINVAL;
+
+ lux = CM32181_MLUX_PER_BIT;
+ lux *= CM32181_MLUX_PER_BIT_BASE_IT;
+ lux /= als_it;
+
+ ret = i2c_smbus_read_word_data(client, CM32181_REG_ADDR_ALS);
+ if (ret < 0)
+ return ret;
+
+ lux *= ret;
+ lux *= cm32181->calibscale;
+ lux /= CM32181_CALIBSCALE_RESOLUTION;
+ lux /= MLUX_PER_LUX;
+
+ if (lux > 0xFFFF)
+ lux = 0xFFFF;
+
+ return lux;
+}
+
+static int cm32181_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cm32181_chip *cm32181 = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = cm32181_get_lux(cm32181);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ *val = cm32181->calibscale;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_INT_TIME:
+ ret = cm32181_read_als_it(cm32181, val);
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+static int cm32181_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct cm32181_chip *cm32181 = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ cm32181->calibscale = val;
+ return val;
+ case IIO_CHAN_INFO_INT_TIME:
+ ret = cm32181_write_als_it(cm32181, val);
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * cm32181_get_it_available() - Get available ALS IT value
+ * @dev: pointer of struct device.
+ * @attr: pointer of struct device_attribute.
+ * @buf: pointer of return string buffer.
+ *
+ * Display the available integration time values in milliseconds.
+ *
+ * Return: string length.
+ */
+static ssize_t cm32181_get_it_available(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, n, len;
+
+ n = ARRAY_SIZE(als_it_value);
+ for (i = 0, len = 0; i < n; i++)
+ len += sprintf(buf + len, "%d ", als_it_value[i]);
+ return len + sprintf(buf + len, "\n");
+}
+
+static const struct iio_chan_spec cm32181_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ }
+};
+
+static IIO_DEVICE_ATTR(in_illuminance_integration_time_available,
+ S_IRUGO, cm32181_get_it_available, NULL, 0);
+
+static struct attribute *cm32181_attributes[] = {
+ &iio_dev_attr_in_illuminance_integration_time_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group cm32181_attribute_group = {
+ .attrs = cm32181_attributes
+};
+
+static const struct iio_info cm32181_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &cm32181_read_raw,
+ .write_raw = &cm32181_write_raw,
+ .attrs = &cm32181_attribute_group,
+};
+
+static int cm32181_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct cm32181_chip *cm32181;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*cm32181));
+ if (!indio_dev) {
+ dev_err(&client->dev, "devm_iio_device_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ cm32181 = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ cm32181->client = client;
+
+ mutex_init(&cm32181->lock);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = cm32181_channels;
+ indio_dev->num_channels = ARRAY_SIZE(cm32181_channels);
+ indio_dev->info = &cm32181_info;
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = cm32181_reg_init(cm32181);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: register init failed\n",
+ __func__);
+ return ret;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: regist device failed\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cm32181_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ return 0;
+}
+
+static const struct i2c_device_id cm32181_id[] = {
+ { "cm32181", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, cm32181_id);
+
+static const struct of_device_id cm32181_of_match[] = {
+ { .compatible = "capella,cm32181" },
+ { }
+};
+
+static struct i2c_driver cm32181_driver = {
+ .driver = {
+ .name = "cm32181",
+ .of_match_table = of_match_ptr(cm32181_of_match),
+ .owner = THIS_MODULE,
+ },
+ .id_table = cm32181_id,
+ .probe = cm32181_probe,
+ .remove = cm32181_remove,
+};
+
+module_i2c_driver(cm32181_driver);
+
+MODULE_AUTHOR("Kevin Tsai <ktsai@capellamicro.com>");
+MODULE_DESCRIPTION("CM32181 ambient light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 0922e39..0a142af 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -488,7 +488,11 @@ static int cm36651_write_raw(struct iio_dev *indio_dev,
}
static int cm36651_read_prox_thresh(struct iio_dev *indio_dev,
- u64 event_code, int *val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
{
struct cm36651_data *cm36651 = iio_priv(indio_dev);
@@ -498,7 +502,11 @@ static int cm36651_read_prox_thresh(struct iio_dev *indio_dev,
}
static int cm36651_write_prox_thresh(struct iio_dev *indio_dev,
- u64 event_code, int val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
{
struct cm36651_data *cm36651 = iio_priv(indio_dev);
struct i2c_client *client = cm36651->client;
@@ -520,7 +528,10 @@ static int cm36651_write_prox_thresh(struct iio_dev *indio_dev,
}
static int cm36651_write_prox_event_config(struct iio_dev *indio_dev,
- u64 event_code, int state)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
{
struct cm36651_data *cm36651 = iio_priv(indio_dev);
int cmd, ret = -EINVAL;
@@ -536,7 +547,9 @@ static int cm36651_write_prox_event_config(struct iio_dev *indio_dev,
}
static int cm36651_read_prox_event_config(struct iio_dev *indio_dev,
- u64 event_code)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
{
struct cm36651_data *cm36651 = iio_priv(indio_dev);
int event_en;
@@ -559,12 +572,22 @@ static int cm36651_read_prox_event_config(struct iio_dev *indio_dev,
.channel2 = IIO_MOD_LIGHT_##_color, \
} \
+static const struct iio_event_spec cm36651_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }
+};
+
static const struct iio_chan_spec cm36651_channels[] = {
{
.type = IIO_PROXIMITY,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_INT_TIME),
- .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER)
+ .event_spec = cm36651_event_spec,
+ .num_event_specs = ARRAY_SIZE(cm36651_event_spec),
},
CM36651_LIGHT_CHANNEL(RED, CM36651_LIGHT_CHANNEL_IDX_RED),
CM36651_LIGHT_CHANNEL(GREEN, CM36651_LIGHT_CHANNEL_IDX_GREEN),
@@ -693,7 +716,7 @@ static const struct of_device_id cm36651_of_match[] = {
static struct i2c_driver cm36651_driver = {
.driver = {
.name = "cm36651",
- .of_match_table = of_match_ptr(cm36651_of_match),
+ .of_match_table = cm36651_of_match,
.owner = THIS_MODULE,
},
.probe = cm36651_probe,
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index dc79835..5ea4a03 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -1388,10 +1388,10 @@ static const struct iio_chan_spec gp2ap020a00f_channels[] = {
static const struct iio_info gp2ap020a00f_info = {
.read_raw = &gp2ap020a00f_read_raw,
- .read_event_value_new = &gp2ap020a00f_read_event_val,
- .read_event_config_new = &gp2ap020a00f_read_event_config,
- .write_event_value_new = &gp2ap020a00f_write_event_val,
- .write_event_config_new = &gp2ap020a00f_write_event_config,
+ .read_event_value = &gp2ap020a00f_read_event_val,
+ .read_event_config = &gp2ap020a00f_read_event_config,
+ .write_event_value = &gp2ap020a00f_write_event_val,
+ .write_event_config = &gp2ap020a00f_write_event_config,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 8e8b9d7..621541f 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -229,6 +229,17 @@ static int als_parse_report(struct platform_device *pdev,
dev_dbg(&pdev->dev, "als %x:%x\n", st->als_illum.index,
st->als_illum.report_id);
+ /* Set Sensitivity field ids when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_LIGHT,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
return ret;
}
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 45df220..887fecf 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -67,7 +67,12 @@ struct tcs3472_data {
.channel2 = IIO_MOD_LIGHT_##_color, \
.address = _addr, \
.scan_index = _si, \
- .scan_type = IIO_ST('u', 16, 16, 0), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
}
static const int tcs3472_agains[] = { 1, 4, 16, 60 };
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 5e5d9de..3d81101 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -702,10 +702,10 @@ static const struct iio_info tsl2563_info = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2563_read_raw,
.write_raw = &tsl2563_write_raw,
- .read_event_value_new = &tsl2563_read_thresh,
- .write_event_value_new = &tsl2563_write_thresh,
- .read_event_config_new = &tsl2563_read_interrupt_config,
- .write_event_config_new = &tsl2563_write_interrupt_config,
+ .read_event_value = &tsl2563_read_thresh,
+ .write_event_value = &tsl2563_write_thresh,
+ .read_event_config = &tsl2563_read_interrupt_config,
+ .write_event_config = &tsl2563_write_interrupt_config,
};
static int tsl2563_probe(struct i2c_client *client,
@@ -714,6 +714,7 @@ static int tsl2563_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
struct tsl2563_chip *chip;
struct tsl2563_platform_data *pdata = client->dev.platform_data;
+ struct device_node *np = client->dev.of_node;
int err = 0;
u8 id = 0;
@@ -750,6 +751,9 @@ static int tsl2563_probe(struct i2c_client *client,
if (pdata)
chip->cover_comp_gain = pdata->cover_comp_gain;
+ else if (np)
+ of_property_read_u32(np, "amstaos,cover-comp-gain",
+ &chip->cover_comp_gain);
else
chip->cover_comp_gain = 1;
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index ecb3341..d948c47 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -56,7 +56,7 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
u8 rdy_mask, u8 data_reg, int *val)
{
int tries = 20;
- u16 buf;
+ __be16 buf;
int ret;
ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND,
@@ -179,13 +179,7 @@ static int vcnl4000_probe(struct i2c_client *client,
indio_dev->name = VCNL4000_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
- return iio_device_register(indio_dev);
-}
-
-static int vcnl4000_remove(struct i2c_client *client)
-{
- iio_device_unregister(i2c_get_clientdata(client));
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static struct i2c_driver vcnl4000_driver = {
@@ -194,7 +188,6 @@ static struct i2c_driver vcnl4000_driver = {
.owner = THIS_MODULE,
},
.probe = vcnl4000_probe,
- .remove = vcnl4000_remove,
.id_table = vcnl4000_id,
};
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index b26e102..6d162b7 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -263,6 +263,18 @@ static int magn_3d_parse_report(struct platform_device *pdev,
st->magn[1].index, st->magn[1].report_id,
st->magn[2].index, st->magn[2].report_id);
+ /* Set Sensitivity field ids when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_ORIENTATION,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
+
return ret;
}
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index becf544..4b65b6d 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -266,7 +266,11 @@ static const struct iio_chan_spec mag3110_channels[] = {
.type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.scan_index = 3,
- .scan_type = IIO_ST('s', 8, 8, 0),
+ .scan_type = {
+ .sign = 's',
+ .realbits = 8,
+ .storagebits = 8,
+ },
},
IIO_CHAN_SOFT_TIMESTAMP(4),
};
diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig
new file mode 100644
index 0000000..58c62c8
--- /dev/null
+++ b/drivers/iio/orientation/Kconfig
@@ -0,0 +1,19 @@
+#
+# Inclinometer sensors
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Inclinometer sensors"
+
+config HID_SENSOR_INCLINOMETER_3D
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID Inclinometer 3D"
+ help
+ Say yes here to build support for the HID SENSOR
+ Inclinometer 3D.
+
+endmenu
diff --git a/drivers/iio/orientation/Makefile b/drivers/iio/orientation/Makefile
new file mode 100644
index 0000000..2c97572
--- /dev/null
+++ b/drivers/iio/orientation/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for industrial I/O Inclinometer sensor drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_HID_SENSOR_INCLINOMETER_3D) += hid-sensor-incl-3d.o
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
new file mode 100644
index 0000000..070feab
--- /dev/null
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -0,0 +1,428 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+enum incl_3d_channel {
+ CHANNEL_SCAN_INDEX_X,
+ CHANNEL_SCAN_INDEX_Y,
+ CHANNEL_SCAN_INDEX_Z,
+ INCLI_3D_CHANNEL_MAX,
+};
+
+struct incl_3d_state {
+ struct hid_sensor_hub_callbacks callbacks;
+ struct hid_sensor_common common_attributes;
+ struct hid_sensor_hub_attribute_info incl[INCLI_3D_CHANNEL_MAX];
+ u32 incl_val[INCLI_3D_CHANNEL_MAX];
+};
+
+static const u32 incl_3d_addresses[INCLI_3D_CHANNEL_MAX] = {
+ HID_USAGE_SENSOR_ORIENT_TILT_X,
+ HID_USAGE_SENSOR_ORIENT_TILT_Y,
+ HID_USAGE_SENSOR_ORIENT_TILT_Z
+};
+
+/* Channel definitions */
+static const struct iio_chan_spec incl_3d_channels[] = {
+ {
+ .type = IIO_INCLI,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_X,
+ }, {
+ .type = IIO_INCLI,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_Y,
+ }, {
+ .type = IIO_INCLI,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_Z,
+ }
+};
+
+/* Adjust channel real bits based on report descriptor */
+static void incl_3d_adjust_channel_bit_mask(struct iio_chan_spec *chan,
+ int size)
+{
+ chan->scan_type.sign = 's';
+ /* Real storage bits will change based on the report desc. */
+ chan->scan_type.realbits = size * 8;
+ /* Maximum size of a sample to capture is u32 */
+ chan->scan_type.storagebits = sizeof(u32) * 8;
+}
+
+/* Channel read_raw handler */
+static int incl_3d_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+ int report_id = -1;
+ u32 address;
+ int ret_type;
+
+ *val = 0;
+ *val2 = 0;
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ report_id =
+ incl_state->incl[chan->scan_index].report_id;
+ address = incl_3d_addresses[chan->scan_index];
+ if (report_id >= 0)
+ *val = sensor_hub_input_attr_get_raw_value(
+ incl_state->common_attributes.hsdev,
+ HID_USAGE_SENSOR_INCLINOMETER_3D, address,
+ report_id);
+ else {
+ return -EINVAL;
+ }
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = incl_state->incl[CHANNEL_SCAN_INDEX_X].units;
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = hid_sensor_convert_exponent(
+ incl_state->incl[CHANNEL_SCAN_INDEX_X].unit_expo);
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret_type = hid_sensor_read_samp_freq_value(
+ &incl_state->common_attributes, val, val2);
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret_type = hid_sensor_read_raw_hyst_value(
+ &incl_state->common_attributes, val, val2);
+ break;
+ default:
+ ret_type = -EINVAL;
+ break;
+ }
+
+ return ret_type;
+}
+
+/* Channel write_raw handler */
+static int incl_3d_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hid_sensor_write_samp_freq_value(
+ &incl_state->common_attributes, val, val2);
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret = hid_sensor_write_raw_hyst_value(
+ &incl_state->common_attributes, val, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct iio_info incl_3d_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &incl_3d_read_raw,
+ .write_raw = &incl_3d_write_raw,
+};
+
+/* Function to push data to buffer */
+static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
+{
+ dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
+ iio_push_to_buffers(indio_dev, (u8 *)data);
+}
+
+/* Callback handler to send event after all samples are received and captured */
+static int incl_3d_proc_event(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+
+ dev_dbg(&indio_dev->dev, "incl_3d_proc_event [%d]\n",
+ incl_state->common_attributes.data_ready);
+ if (incl_state->common_attributes.data_ready)
+ hid_sensor_push_data(indio_dev,
+ (u8 *)incl_state->incl_val,
+ sizeof(incl_state->incl_val));
+
+ return 0;
+}
+
+/* Capture samples in local storage */
+static int incl_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ size_t raw_len, char *raw_data,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+ int ret = 0;
+
+ switch (usage_id) {
+ case HID_USAGE_SENSOR_ORIENT_TILT_X:
+ incl_state->incl_val[CHANNEL_SCAN_INDEX_X] = *(u32 *)raw_data;
+ break;
+ case HID_USAGE_SENSOR_ORIENT_TILT_Y:
+ incl_state->incl_val[CHANNEL_SCAN_INDEX_Y] = *(u32 *)raw_data;
+ break;
+ case HID_USAGE_SENSOR_ORIENT_TILT_Z:
+ incl_state->incl_val[CHANNEL_SCAN_INDEX_Z] = *(u32 *)raw_data;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Parse report which is specific to a usage id */
+static int incl_3d_parse_report(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ struct iio_chan_spec *channels,
+ unsigned usage_id,
+ struct incl_3d_state *st)
+{
+ int ret;
+
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_INPUT_REPORT,
+ usage_id,
+ HID_USAGE_SENSOR_ORIENT_TILT_X,
+ &st->incl[CHANNEL_SCAN_INDEX_X]);
+ if (ret)
+ return ret;
+ incl_3d_adjust_channel_bit_mask(&channels[CHANNEL_SCAN_INDEX_X],
+ st->incl[CHANNEL_SCAN_INDEX_X].size);
+
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_INPUT_REPORT,
+ usage_id,
+ HID_USAGE_SENSOR_ORIENT_TILT_Y,
+ &st->incl[CHANNEL_SCAN_INDEX_Y]);
+ if (ret)
+ return ret;
+ incl_3d_adjust_channel_bit_mask(&channels[CHANNEL_SCAN_INDEX_Y],
+ st->incl[CHANNEL_SCAN_INDEX_Y].size);
+
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_INPUT_REPORT,
+ usage_id,
+ HID_USAGE_SENSOR_ORIENT_TILT_Z,
+ &st->incl[CHANNEL_SCAN_INDEX_Z]);
+ if (ret)
+ return ret;
+ incl_3d_adjust_channel_bit_mask(&channels[CHANNEL_SCAN_INDEX_Z],
+ st->incl[CHANNEL_SCAN_INDEX_Z].size);
+
+ dev_dbg(&pdev->dev, "incl_3d %x:%x, %x:%x, %x:%x\n",
+ st->incl[0].index,
+ st->incl[0].report_id,
+ st->incl[1].index, st->incl[1].report_id,
+ st->incl[2].index, st->incl[2].report_id);
+
+ /* Set Sensitivity field ids when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_ORIENTATION,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
+ return ret;
+}
+
+/* Function to initialize the processing for usage id */
+static int hid_incl_3d_probe(struct platform_device *pdev)
+{
+ int ret;
+ static char *name = "incli_3d";
+ struct iio_dev *indio_dev;
+ struct incl_3d_state *incl_state;
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_chan_spec *channels;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct incl_3d_state));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ incl_state = iio_priv(indio_dev);
+ incl_state->common_attributes.hsdev = hsdev;
+ incl_state->common_attributes.pdev = pdev;
+
+ ret = hid_sensor_parse_common_attributes(hsdev,
+ HID_USAGE_SENSOR_INCLINOMETER_3D,
+ &incl_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup common attributes\n");
+ return ret;
+ }
+
+ channels = kmemdup(incl_3d_channels, sizeof(incl_3d_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ dev_err(&pdev->dev, "failed to duplicate channels\n");
+ return -ENOMEM;
+ }
+
+ ret = incl_3d_parse_report(pdev, hsdev, channels,
+ HID_USAGE_SENSOR_INCLINOMETER_3D, incl_state);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup attributes\n");
+ goto error_free_dev_mem;
+ }
+
+ indio_dev->channels = channels;
+ indio_dev->num_channels = ARRAY_SIZE(incl_3d_channels);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &incl_3d_info;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
+ goto error_free_dev_mem;
+ }
+ incl_state->common_attributes.data_ready = false;
+ ret = hid_sensor_setup_trigger(indio_dev, name,
+ &incl_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "trigger setup failed\n");
+ goto error_unreg_buffer_funcs;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "device register failed\n");
+ goto error_remove_trigger;
+ }
+
+ incl_state->callbacks.send_event = incl_3d_proc_event;
+ incl_state->callbacks.capture_sample = incl_3d_capture_sample;
+ incl_state->callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev,
+ HID_USAGE_SENSOR_INCLINOMETER_3D,
+ &incl_state->callbacks);
+ if (ret) {
+ dev_err(&pdev->dev, "callback reg failed\n");
+ goto error_iio_unreg;
+ }
+
+ return 0;
+
+error_iio_unreg:
+ iio_device_unregister(indio_dev);
+error_remove_trigger:
+ hid_sensor_remove_trigger(&incl_state->common_attributes);
+error_unreg_buffer_funcs:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_free_dev_mem:
+ kfree(indio_dev->channels);
+ return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+static int hid_incl_3d_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_INCLINOMETER_3D);
+ iio_device_unregister(indio_dev);
+ hid_sensor_remove_trigger(&incl_state->common_attributes);
+ iio_triggered_buffer_cleanup(indio_dev);
+ kfree(indio_dev->channels);
+
+ return 0;
+}
+
+static struct platform_device_id hid_incl_3d_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200086",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_incl_3d_ids);
+
+static struct platform_driver hid_incl_3d_platform_driver = {
+ .id_table = hid_incl_3d_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = hid_incl_3d_probe,
+ .remove = hid_incl_3d_remove,
+};
+module_platform_driver(hid_incl_3d_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor Inclinometer 3D");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 4f2e0f9..a8b9cae 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -5,6 +5,18 @@
menu "Pressure sensors"
+config MPL3115
+ tristate "Freescale MPL3115A2 pressure sensor driver"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the Freescale MPL3115A2
+ pressure sensor / altimeter.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mpl3115.
+
config IIO_ST_PRESS
tristate "STMicroelectronics pressure sensor Driver"
depends on (I2C || SPI_MASTER) && SYSFS
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index be71464..42bb9fc 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -3,6 +3,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_MPL3115) += mpl3115.o
obj-$(CONFIG_IIO_ST_PRESS) += st_pressure.o
st_pressure-y := st_pressure_core.o
st_pressure-$(CONFIG_IIO_BUFFER) += st_pressure_buffer.o
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
new file mode 100644
index 0000000..ac8c8ab
--- /dev/null
+++ b/drivers/iio/pressure/mpl3115.c
@@ -0,0 +1,329 @@
+/*
+ * mpl3115.c - Support for Freescale MPL3115A2 pressure/temperature sensor
+ *
+ * Copyright (c) 2013 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x60)
+ *
+ * TODO: FIFO buffer, altimeter mode, oversampling, continuous mode,
+ * interrupts, user offset correction, raw mode
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/delay.h>
+
+#define MPL3115_STATUS 0x00
+#define MPL3115_OUT_PRESS 0x01 /* MSB first, 20 bit */
+#define MPL3115_OUT_TEMP 0x04 /* MSB first, 12 bit */
+#define MPL3115_WHO_AM_I 0x0c
+#define MPL3115_CTRL_REG1 0x26
+
+#define MPL3115_DEVICE_ID 0xc4
+
+#define MPL3115_STATUS_PRESS_RDY BIT(2)
+#define MPL3115_STATUS_TEMP_RDY BIT(1)
+
+#define MPL3115_CTRL_RESET BIT(2) /* software reset */
+#define MPL3115_CTRL_OST BIT(1) /* initiate measurement */
+#define MPL3115_CTRL_ACTIVE BIT(0) /* continuous measurement */
+#define MPL3115_CTRL_OS_258MS (BIT(5) | BIT(4)) /* 64x oversampling */
+
+struct mpl3115_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 ctrl_reg1;
+};
+
+static int mpl3115_request(struct mpl3115_data *data)
+{
+ int ret, tries = 15;
+
+ /* trigger measurement */
+ ret = i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1,
+ data->ctrl_reg1 | MPL3115_CTRL_OST);
+ if (ret < 0)
+ return ret;
+
+ while (tries-- > 0) {
+ ret = i2c_smbus_read_byte_data(data->client, MPL3115_CTRL_REG1);
+ if (ret < 0)
+ return ret;
+ /* wait for data ready, i.e. OST cleared */
+ if (!(ret & MPL3115_CTRL_OST))
+ break;
+ msleep(20);
+ }
+
+ if (tries < 0) {
+ dev_err(&data->client->dev, "data not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mpl3115_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mpl3115_data *data = iio_priv(indio_dev);
+ s32 tmp = 0;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ switch (chan->type) {
+ case IIO_PRESSURE: /* in 0.25 pascal / LSB */
+ mutex_lock(&data->lock);
+ ret = mpl3115_request(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_PRESS, 3, (u8 *) &tmp);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(be32_to_cpu(tmp) >> 12, 23);
+ return IIO_VAL_INT;
+ case IIO_TEMP: /* in 0.0625 celsius / LSB */
+ mutex_lock(&data->lock);
+ ret = mpl3115_request(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_TEMP, 2, (u8 *) &tmp);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(be32_to_cpu(tmp) >> 20, 15);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ *val = 0;
+ *val2 = 250; /* want kilopascal */
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = 0;
+ *val2 = 62500;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ }
+ return -EINVAL;
+}
+
+static irqreturn_t mpl3115_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct mpl3115_data *data = iio_priv(indio_dev);
+ u8 buffer[16]; /* 32-bit channel + 16-bit channel + padding + ts */
+ int ret, pos = 0;
+
+ mutex_lock(&data->lock);
+ ret = mpl3115_request(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto done;
+ }
+
+ memset(buffer, 0, sizeof(buffer));
+ if (test_bit(0, indio_dev->active_scan_mask)) {
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_PRESS, 3, &buffer[pos]);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto done;
+ }
+ pos += 4;
+ }
+
+ if (test_bit(1, indio_dev->active_scan_mask)) {
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_TEMP, 2, &buffer[pos]);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto done;
+ }
+ }
+ mutex_unlock(&data->lock);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_get_time_ns());
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static const struct iio_chan_spec mpl3115_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 20,
+ .storagebits = 32,
+ .shift = 12,
+ .endianness = IIO_BE,
+ }
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 12,
+ .storagebits = 16,
+ .shift = 4,
+ .endianness = IIO_BE,
+ }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static const struct iio_info mpl3115_info = {
+ .read_raw = &mpl3115_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int mpl3115_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mpl3115_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, MPL3115_WHO_AM_I);
+ if (ret < 0)
+ return ret;
+ if (ret != MPL3115_DEVICE_ID)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ i2c_set_clientdata(client, indio_dev);
+ indio_dev->info = &mpl3115_info;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = mpl3115_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mpl3115_channels);
+
+ /* software reset, I2C transfer is aborted (fails) */
+ i2c_smbus_write_byte_data(client, MPL3115_CTRL_REG1,
+ MPL3115_CTRL_RESET);
+ msleep(50);
+
+ data->ctrl_reg1 = MPL3115_CTRL_OS_258MS;
+ ret = i2c_smbus_write_byte_data(client, MPL3115_CTRL_REG1,
+ data->ctrl_reg1);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ mpl3115_trigger_handler, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto buffer_cleanup;
+ return 0;
+
+buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+
+static int mpl3115_standby(struct mpl3115_data *data)
+{
+ return i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1,
+ data->ctrl_reg1 & ~MPL3115_CTRL_ACTIVE);
+}
+
+static int mpl3115_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ mpl3115_standby(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mpl3115_suspend(struct device *dev)
+{
+ return mpl3115_standby(iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev))));
+}
+
+static int mpl3115_resume(struct device *dev)
+{
+ struct mpl3115_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+
+ return i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1,
+ data->ctrl_reg1);
+}
+
+static SIMPLE_DEV_PM_OPS(mpl3115_pm_ops, mpl3115_suspend, mpl3115_resume);
+#define MPL3115_PM_OPS (&mpl3115_pm_ops)
+#else
+#define MPL3115_PM_OPS NULL
+#endif
+
+static const struct i2c_device_id mpl3115_id[] = {
+ { "mpl3115", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mpl3115_id);
+
+static struct i2c_driver mpl3115_driver = {
+ .driver = {
+ .name = "mpl3115",
+ .pm = MPL3115_PM_OPS,
+ },
+ .probe = mpl3115_probe,
+ .remove = mpl3115_remove,
+ .id_table = mpl3115_id,
+};
+module_i2c_driver(mpl3115_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Freescale MPL3115 pressure/temperature driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c47c203..0717940 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -181,9 +181,16 @@ static void add_ref(struct iw_cm_id *cm_id)
static void rem_ref(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
+ int cb_destroy;
+
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
- if (iwcm_deref_id(cm_id_priv) &&
- test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
+
+ /*
+ * Test bit before deref in case the cm_id gets freed on another
+ * thread.
+ */
+ cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+ if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
BUG_ON(!list_empty(&cm_id_priv->work_list));
free_cm_id(cm_id_priv);
}
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index bdc842e..a283274 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -49,12 +49,20 @@
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
do { \
- (udata)->inbuf = (void __user *) (ibuf); \
+ (udata)->inbuf = (const void __user *) (ibuf); \
(udata)->outbuf = (void __user *) (obuf); \
(udata)->inlen = (ilen); \
(udata)->outlen = (olen); \
} while (0)
+#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen) \
+ do { \
+ (udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL; \
+ (udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL; \
+ (udata)->inlen = (ilen); \
+ (udata)->outlen = (olen); \
+ } while (0)
+
/*
* Our lifetime rules for these structs are the following:
*
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 65f6e7d..f1cc838 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2593,6 +2593,9 @@ out_put:
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec)
{
+ if (kern_spec->reserved)
+ return -EINVAL;
+
ib_spec->type = kern_spec->type;
switch (ib_spec->type) {
@@ -2646,6 +2649,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
void *ib_spec;
int i;
+ if (ucore->inlen < sizeof(cmd))
+ return -EINVAL;
+
if (ucore->outlen < sizeof(resp))
return -ENOSPC;
@@ -2671,6 +2677,10 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
(cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
return -EINVAL;
+ if (cmd.flow_attr.reserved[0] ||
+ cmd.flow_attr.reserved[1])
+ return -EINVAL;
+
if (cmd.flow_attr.num_of_specs) {
kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
GFP_KERNEL);
@@ -2731,6 +2741,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
i, cmd.flow_attr.size);
+ err = -EINVAL;
goto err_free;
}
flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
@@ -2791,10 +2802,16 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
struct ib_uobject *uobj;
int ret;
+ if (ucore->inlen < sizeof(cmd))
+ return -EINVAL;
+
ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
if (ret)
return ret;
+ if (cmd.comp_mask)
+ return -EINVAL;
+
uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
file->ucontext);
if (!uobj)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 3438694..08219fb 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -668,25 +668,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count)
return -EINVAL;
+ if (ex_hdr.cmd_hdr_reserved)
+ return -EINVAL;
+
if (ex_hdr.response) {
if (!hdr.out_words && !ex_hdr.provider_out_words)
return -EINVAL;
+
+ if (!access_ok(VERIFY_WRITE,
+ (void __user *) (unsigned long) ex_hdr.response,
+ (hdr.out_words + ex_hdr.provider_out_words) * 8))
+ return -EFAULT;
} else {
if (hdr.out_words || ex_hdr.provider_out_words)
return -EINVAL;
}
- INIT_UDATA(&ucore,
- (hdr.in_words) ? buf : 0,
- (unsigned long)ex_hdr.response,
- hdr.in_words * 8,
- hdr.out_words * 8);
-
- INIT_UDATA(&uhw,
- (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0,
- (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0,
- ex_hdr.provider_in_words * 8,
- ex_hdr.provider_out_words * 8);
+ INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
+ hdr.in_words * 8, hdr.out_words * 8);
+
+ INIT_UDATA_BUF_OR_NULL(&uhw,
+ buf + ucore.inlen,
+ (unsigned long) ex_hdr.response + ucore.outlen,
+ ex_hdr.provider_in_words * 8,
+ ex_hdr.provider_out_words * 8);
err = uverbs_ex_cmd_table[command](file,
&ucore,
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 12fef76..4512687 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
- (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
-#define FILTER_SEL_WIDTH_TAG_P_FC \
- (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
- struct l2t_entry *l2t)
-{
- unsigned int ntuple = 0;
- u32 viid;
-
- switch (dev->rdev.lldi.filt_mode) {
-
- /* default filter mode */
- case HW_TPL_FR_MT_PR_IV_P_FC:
- if (l2t->vlan == VLAN_NONE)
- ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
- else {
- ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
- ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
- }
- ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
- FILTER_SEL_WIDTH_VLD_TAG_P_FC;
- break;
- case HW_TPL_FR_MT_PR_OV_P_FC: {
- viid = cxgb4_port_viid(l2t->neigh->dev);
-
- ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
- ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
- ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
- ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
- FILTER_SEL_WIDTH_VLD_TAG_P_FC;
- break;
- }
- default:
- break;
- }
- return ntuple;
-}
-
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req;
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep)
req->local_ip = la->sin_addr.s_addr;
req->peer_ip = ra->sin_addr.s_addr;
req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be32(select_ntuple(ep->com.dev,
- ep->dst, ep->l2t));
+ req->params = cpu_to_be32(cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t));
req->opt2 = cpu_to_be32(opt2);
} else {
req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep)
req6->peer_ip_lo = *((__be64 *)
(ra6->sin6_addr.s6_addr + 8));
req6->opt0 = cpu_to_be64(opt0);
- req6->params = cpu_to_be32(
- select_ntuple(ep->com.dev, ep->dst,
- ep->l2t));
+ req6->params = cpu_to_be32(cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t));
req6->opt2 = cpu_to_be32(opt2);
}
} else {
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep)
t5_req->peer_ip = ra->sin_addr.s_addr;
t5_req->opt0 = cpu_to_be64(opt0);
t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
- select_ntuple(ep->com.dev,
- ep->dst, ep->l2t)));
+ cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t)));
t5_req->opt2 = cpu_to_be32(opt2);
} else {
t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep)
(ra6->sin6_addr.s6_addr + 8));
t5_req6->opt0 = cpu_to_be64(opt0);
t5_req6->params = (__force __be64)cpu_to_be32(
- select_ntuple(ep->com.dev, ep->dst, ep->l2t));
+ cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t));
t5_req6->opt2 = cpu_to_be32(opt2);
}
}
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
memset(req, 0, sizeof(*req));
req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
- req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
+ req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
sin = (struct sockaddr_in *)&ep->com.local_addr;
req->le.lport = sin->sin_port;
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
/*
* Allocate a server TID.
*/
- if (dev->rdev.lldi.enable_fw_ofld_conn)
+ if (dev->rdev.lldi.enable_fw_ofld_conn &&
+ ep->com.local_addr.ss_family == AF_INET)
ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
cm_id->local_addr.ss_family, ep);
else
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
/*
* Calculate the server tid from filter hit index from cpl_rx_pkt.
*/
- stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
- - dev->rdev.lldi.tids->sftid_base
- + dev->rdev.lldi.tids->nstids;
+ stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
if (!lep) {
@@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
window = (__force u16) htons((__force u16)tcph->window);
/* Calculate filter portion for LE region. */
- filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
+ filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
+ dev->rdev.lldi.ports[0],
+ e));
/*
* Synthesize the cpl_pass_accept_req. We have everything except the
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 4cb8eb2..84e4500 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -173,7 +173,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
return ret;
}
-int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
u32 remain = len;
u32 dmalen;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index c29b5c8..cdc7df4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -31,6 +31,7 @@
*/
#include <linux/netdevice.h>
+#include <linux/if_arp.h> /* For ARPHRD_xxx */
#include <linux/module.h>
#include <net/rtnetlink.h>
#include "ipoib.h"
@@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
return -EINVAL;
pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
- if (!pdev)
+ if (!pdev || pdev->type != ARPHRD_INFINIBAND)
return -ENODEV;
ppriv = netdev_priv(pdev);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 846ccdd..d2965e4 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1871,6 +1871,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
break;
case EV_ABS:
+ input_alloc_absinfo(dev);
+ if (!dev->absinfo)
+ return;
+
__set_bit(code, dev->absbit);
break;
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 8755f5f..0cb7ef5 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -124,7 +124,7 @@ static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *c
{
struct serport *serport = (struct serport*) tty->disc_data;
unsigned long flags;
- unsigned int ch_flags;
+ unsigned int ch_flags = 0;
int i;
spin_lock_irqsave(&serport->lock, flags);
@@ -133,18 +133,20 @@ static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *c
goto out;
for (i = 0; i < count; i++) {
- switch (fp[i]) {
- case TTY_FRAME:
- ch_flags = SERIO_FRAME;
- break;
-
- case TTY_PARITY:
- ch_flags = SERIO_PARITY;
- break;
-
- default:
- ch_flags = 0;
- break;
+ if (fp) {
+ switch (fp[i]) {
+ case TTY_FRAME:
+ ch_flags = SERIO_FRAME;
+ break;
+
+ case TTY_PARITY:
+ ch_flags = SERIO_PARITY;
+ break;
+
+ default:
+ ch_flags = 0;
+ break;
+ }
}
serio_interrupt(serport->serio, cp[i], ch_flags);
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 75762d6..aa127ba 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -455,7 +455,18 @@ static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
}
}
-static irqreturn_t zforce_interrupt(int irq, void *dev_id)
+static irqreturn_t zforce_irq(int irq, void *dev_id)
+{
+ struct zforce_ts *ts = dev_id;
+ struct i2c_client *client = ts->client;
+
+ if (ts->suspended && device_may_wakeup(&client->dev))
+ pm_wakeup_event(&client->dev, 500);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
{
struct zforce_ts *ts = dev_id;
struct i2c_client *client = ts->client;
@@ -465,12 +476,10 @@ static irqreturn_t zforce_interrupt(int irq, void *dev_id)
u8 *payload;
/*
- * When suspended, emit a wakeup signal if necessary and return.
+ * When still suspended, return.
* Due to the level-interrupt we will get re-triggered later.
*/
if (ts->suspended) {
- if (device_may_wakeup(&client->dev))
- pm_wakeup_event(&client->dev, 500);
msleep(20);
return IRQ_HANDLED;
}
@@ -763,8 +772,8 @@ static int zforce_probe(struct i2c_client *client,
* Therefore we can trigger the interrupt anytime it is low and do
* not need to limit it to the interrupt edge.
*/
- ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
- zforce_interrupt,
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ zforce_irq, zforce_irq_thread,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
input_dev->name, ts);
if (ret) {
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 497bd02..4a48255 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card)
int i;
struct pci_dev *tmp_hfcpci = NULL;
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
strcpy(tmp, hfcpci_revision);
printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
index f6ab63a..33eeb46 100644
--- a/drivers/isdn/hisax/telespci.c
+++ b/drivers/isdn/hisax/telespci.c
@@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card)
struct IsdnCardState *cs = card->cs;
char tmp[64];
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
strcpy(tmp, telespci_revision);
printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
if (cs->typ != ISDN_CTYPE_TELESPCI)
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 0518835..a97263e 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -244,18 +244,12 @@ static int lp5521_update_program_memory(struct lp55xx_chip *chip,
if (i % 2)
goto err;
- mutex_lock(&chip->lock);
-
for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) {
ret = lp55xx_write(chip, addr[idx] + i, pattern[i]);
- if (ret) {
- mutex_unlock(&chip->lock);
+ if (ret)
return -EINVAL;
- }
}
- mutex_unlock(&chip->lock);
-
return size;
err:
@@ -427,15 +421,17 @@ static ssize_t store_engine_load(struct device *dev,
{
struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
struct lp55xx_chip *chip = led->chip;
+ int ret;
mutex_lock(&chip->lock);
chip->engine_idx = nr;
lp5521_load_engine(chip);
+ ret = lp5521_update_program_memory(chip, buf, len);
mutex_unlock(&chip->lock);
- return lp5521_update_program_memory(chip, buf, len);
+ return ret;
}
store_load(1)
store_load(2)
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 6b553d9..fd9ab5f 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -337,18 +337,12 @@ static int lp5523_update_program_memory(struct lp55xx_chip *chip,
if (i % 2)
goto err;
- mutex_lock(&chip->lock);
-
for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) {
ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]);
- if (ret) {
- mutex_unlock(&chip->lock);
+ if (ret)
return -EINVAL;
- }
}
- mutex_unlock(&chip->lock);
-
return size;
err:
@@ -548,15 +542,17 @@ static ssize_t store_engine_load(struct device *dev,
{
struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
struct lp55xx_chip *chip = led->chip;
+ int ret;
mutex_lock(&chip->lock);
chip->engine_idx = nr;
lp5523_load_engine_and_select_page(chip);
+ ret = lp5523_update_program_memory(chip, buf, len);
mutex_unlock(&chip->lock);
- return lp5523_update_program_memory(chip, buf, len);
+ return ret;
}
store_load(1)
store_load(2)
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index d26a312..3067d56 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -32,7 +32,7 @@ config ADB_MACII
config ADB_MACIISI
bool "Include Mac IIsi ADB driver"
- depends on ADB && MAC
+ depends on ADB && MAC && BROKEN
help
Say Y here if want your kernel to support Macintosh systems that use
the Mac IIsi style ADB. This includes the IIsi, IIvi, IIvx, Classic
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 2b46bf1..4c9852d 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -421,9 +421,11 @@ out:
if (watermark <= WATERMARK_METADATA) {
SET_GC_MARK(b, GC_MARK_METADATA);
+ SET_GC_MOVE(b, 0);
b->prio = BTREE_PRIO;
} else {
SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ SET_GC_MOVE(b, 0);
b->prio = INITIAL_PRIO;
}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 4beb55a..754f4317 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -197,7 +197,7 @@ struct bucket {
uint8_t disk_gen;
uint8_t last_gc; /* Most out of date gen in the btree */
uint8_t gc_gen;
- uint16_t gc_mark;
+ uint16_t gc_mark; /* Bitfield used by GC. See below for field */
};
/*
@@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE 0
#define GC_MARK_DIRTY 1
#define GC_MARK_METADATA 2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
#include "journal.h"
#include "stats.h"
@@ -372,14 +373,14 @@ struct cached_dev {
unsigned char writeback_percent;
unsigned writeback_delay;
- int writeback_rate_change;
- int64_t writeback_rate_derivative;
uint64_t writeback_rate_target;
+ int64_t writeback_rate_proportional;
+ int64_t writeback_rate_derivative;
+ int64_t writeback_rate_change;
unsigned writeback_rate_update_seconds;
unsigned writeback_rate_d_term;
unsigned writeback_rate_p_term_inverse;
- unsigned writeback_rate_d_smooth;
};
enum alloc_watermarks {
@@ -445,7 +446,6 @@ struct cache {
* call prio_write() to keep gens from wrapping.
*/
uint8_t need_save_prio;
- unsigned gc_move_threshold;
/*
* If nonzero, we know we aren't going to find any buckets to invalidate
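
The bcache.h hunk above shrinks GC_SECTORS_USED from 14 to 13 bits and hands the freed top bit of the 16-bit gc_mark word to the new GC_MOVE flag. A stand-alone sketch of that packing, using plain shift/mask helpers instead of the kernel's BITMASK() macro:

#include <assert.h>
#include <stdint.h>

/* gc_mark layout used above: bits 0-1 GC mark, bits 2-14 sectors used,
 * bit 15 the "move" flag. These helpers are illustrative, not kernel API. */
static uint16_t get_field(uint16_t word, unsigned offset, unsigned bits)
{
	return (word >> offset) & ((1u << bits) - 1);
}

static uint16_t set_field(uint16_t word, unsigned offset, unsigned bits,
			  uint16_t val)
{
	uint16_t mask = ((1u << bits) - 1) << offset;

	return (word & ~mask) | ((val << offset) & mask);
}

int main(void)
{
	uint16_t gc_mark = 0;

	gc_mark = set_field(gc_mark, 0, 2, 2);		/* GC_MARK_METADATA */
	gc_mark = set_field(gc_mark, 2, 13, 1000);	/* GC_SECTORS_USED */
	gc_mark = set_field(gc_mark, 15, 1, 1);		/* GC_MOVE */

	assert(get_field(gc_mark, 0, 2) == 2);
	assert(get_field(gc_mark, 2, 13) == 1000);
	assert(get_field(gc_mark, 15, 1) == 1);
	return 0;
}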
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5e2765a..31bb53f 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c)
SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
GC_MARK_METADATA);
+ /* don't reclaim buckets to which writeback keys point */
+ rcu_read_lock();
+ for (i = 0; i < c->nr_uuids; i++) {
+ struct bcache_device *d = c->devices[i];
+ struct cached_dev *dc;
+ struct keybuf_key *w, *n;
+ unsigned j;
+
+ if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+ dc = container_of(d, struct cached_dev, disk);
+
+ spin_lock(&dc->writeback_keys.lock);
+ rbtree_postorder_for_each_entry_safe(w, n,
+ &dc->writeback_keys.keys, node)
+ for (j = 0; j < KEY_PTRS(&w->key); j++)
+ SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
+ GC_MARK_DIRTY);
+ spin_unlock(&dc->writeback_keys.lock);
+ }
+ rcu_read_unlock();
+
for_each_cache(ca, c, i) {
uint64_t *i;
@@ -1817,7 +1839,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
if (KEY_START(k) > KEY_START(insert) + sectors_found)
goto check_failed;
- if (KEY_PTRS(replace_key) != KEY_PTRS(k))
+ if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
+ KEY_DIRTY(k) != KEY_DIRTY(replace_key))
goto check_failed;
/* skip past gen */
@@ -2217,7 +2240,7 @@ struct btree_insert_op {
struct bkey *replace_key;
};
-int btree_insert_fn(struct btree_op *b_op, struct btree *b)
+static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
struct btree_insert_op *op = container_of(b_op,
struct btree_insert_op, op);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 7c1275e..f2f0998 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
unsigned i;
for (i = 0; i < KEY_PTRS(k); i++) {
- struct cache *ca = PTR_CACHE(c, k, i);
struct bucket *g = PTR_BUCKET(c, k, i);
- if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
+ if (GC_MOVE(g))
return true;
}
@@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl)
static void read_moving_endio(struct bio *bio, int error)
{
+ struct bbio *b = container_of(bio, struct bbio, bio);
struct moving_io *io = container_of(bio->bi_private,
struct moving_io, cl);
if (error)
io->op.error = error;
+ else if (!KEY_DIRTY(&b->key) &&
+ ptr_stale(io->op.c, &b->key, 0)) {
+ io->op.error = -EINTR;
+ }
bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}
@@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c)
if (!w)
break;
+ if (ptr_stale(c, &w->key, 0)) {
+ bch_keybuf_del(&c->moving_gc_keys, w);
+ continue;
+ }
+
io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
* DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
GFP_KERNEL);
@@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
static unsigned bucket_heap_top(struct cache *ca)
{
- return GC_SECTORS_USED(heap_peek(&ca->heap));
+ struct bucket *b;
+ return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}
void bch_moving_gc(struct cache_set *c)
@@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c)
sectors_to_move -= GC_SECTORS_USED(b);
}
- ca->gc_move_threshold = bucket_heap_top(ca);
-
- pr_debug("threshold %u", ca->gc_move_threshold);
+ while (heap_pop(&ca->heap, b, bucket_cmp))
+ SET_GC_MOVE(b, 1);
}
mutex_unlock(&c->bucket_lock);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index dec15cd..c57bfa0 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1676,7 +1676,7 @@ err:
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
return ca->sb.block_size == c->sb.block_size &&
- ca->sb.bucket_size == c->sb.block_size &&
+ ca->sb.bucket_size == c->sb.bucket_size &&
ca->sb.nr_in_set == c->sb.nr_in_set;
}
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 80d4c2b..a1f8561 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -83,7 +83,6 @@ rw_attribute(writeback_rate);
rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
-rw_attribute(writeback_rate_d_smooth);
read_attribute(writeback_rate_debug);
read_attribute(stripe_size);
@@ -129,31 +128,41 @@ SHOW(__bch_cached_dev)
var_printf(writeback_running, "%i");
var_print(writeback_delay);
var_print(writeback_percent);
- sysfs_print(writeback_rate, dc->writeback_rate.rate);
+ sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);
var_print(writeback_rate_update_seconds);
var_print(writeback_rate_d_term);
var_print(writeback_rate_p_term_inverse);
- var_print(writeback_rate_d_smooth);
if (attr == &sysfs_writeback_rate_debug) {
+ char rate[20];
char dirty[20];
- char derivative[20];
char target[20];
- bch_hprint(dirty,
- bcache_dev_sectors_dirty(&dc->disk) << 9);
- bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+ char proportional[20];
+ char derivative[20];
+ char change[20];
+ s64 next_io;
+
+ bch_hprint(rate, dc->writeback_rate.rate << 9);
+ bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
bch_hprint(target, dc->writeback_rate_target << 9);
+ bch_hprint(proportional,dc->writeback_rate_proportional << 9);
+ bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+ bch_hprint(change, dc->writeback_rate_change << 9);
+
+ next_io = div64_s64(dc->writeback_rate.next - local_clock(),
+ NSEC_PER_MSEC);
return sprintf(buf,
- "rate:\t\t%u\n"
- "change:\t\t%i\n"
+ "rate:\t\t%s/sec\n"
"dirty:\t\t%s\n"
+ "target:\t\t%s\n"
+ "proportional:\t%s\n"
"derivative:\t%s\n"
- "target:\t\t%s\n",
- dc->writeback_rate.rate,
- dc->writeback_rate_change,
- dirty, derivative, target);
+ "change:\t\t%s/sec\n"
+ "next io:\t%llims\n",
+ rate, dirty, target, proportional,
+ derivative, change, next_io);
}
sysfs_hprint(dirty_data,
@@ -189,6 +198,7 @@ STORE(__cached_dev)
struct kobj_uevent_env *env;
#define d_strtoul(var) sysfs_strtoul(var, dc->var)
+#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)
sysfs_strtoul(data_csum, dc->disk.data_csum);
@@ -197,16 +207,15 @@ STORE(__cached_dev)
d_strtoul(writeback_metadata);
d_strtoul(writeback_running);
d_strtoul(writeback_delay);
- sysfs_strtoul_clamp(writeback_rate,
- dc->writeback_rate.rate, 1, 1000000);
+
sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
- d_strtoul(writeback_rate_update_seconds);
+ sysfs_strtoul_clamp(writeback_rate,
+ dc->writeback_rate.rate, 1, INT_MAX);
+
+ d_strtoul_nonzero(writeback_rate_update_seconds);
d_strtoul(writeback_rate_d_term);
- d_strtoul(writeback_rate_p_term_inverse);
- sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
- dc->writeback_rate_p_term_inverse, 1, INT_MAX);
- d_strtoul(writeback_rate_d_smooth);
+ d_strtoul_nonzero(writeback_rate_p_term_inverse);
d_strtoi_h(sequential_cutoff);
d_strtoi_h(readahead);
@@ -313,7 +322,6 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_rate_update_seconds,
&sysfs_writeback_rate_d_term,
&sysfs_writeback_rate_p_term_inverse,
- &sysfs_writeback_rate_d_smooth,
&sysfs_writeback_rate_debug,
&sysfs_dirty_data,
&sysfs_stripe_size,
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 462214e..bb37618 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
{
uint64_t now = local_clock();
- d->next += div_u64(done, d->rate);
+ d->next += div_u64(done * NSEC_PER_SEC, d->rate);
+
+ if (time_before64(now + NSEC_PER_SEC, d->next))
+ d->next = now + NSEC_PER_SEC;
+
+ if (time_after64(now - NSEC_PER_SEC * 2, d->next))
+ d->next = now - NSEC_PER_SEC * 2;
return time_after64(d->next, now)
? div_u64(d->next - now, NSEC_PER_SEC / HZ)
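
The bch_next_delay() change above treats d->rate as units per second (sectors per second for writeback) and clamps the schedule so it can run at most one second ahead of, and lag at most two seconds behind, the current time. A minimal user-space model of that arithmetic (the clamp window is taken from the hunk; everything else is illustrative, and the delay is returned in nanoseconds rather than jiffies):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct ratelimit {
	uint64_t next;	/* ns timestamp at which the next unit is "due" */
	uint64_t rate;	/* units (e.g. sectors) per second */
};

/* Returns how many ns the caller should sleep before doing more work. */
static uint64_t next_delay(struct ratelimit *d, uint64_t done, uint64_t now)
{
	d->next += done * NSEC_PER_SEC / d->rate;

	/* Never schedule more than 1s into the future ... */
	if (d->next > now + NSEC_PER_SEC)
		d->next = now + NSEC_PER_SEC;
	/* ... and never fall more than 2s behind. */
	if (now >= 2 * NSEC_PER_SEC && d->next < now - 2 * NSEC_PER_SEC)
		d->next = now - 2 * NSEC_PER_SEC;

	return d->next > now ? d->next - now : 0;
}

int main(void)
{
	struct ratelimit d = { .next = 0, .rate = 1024 };

	/* 512 sectors at 1024 sectors/s -> roughly half a second of delay. */
	printf("%" PRIu64 " ns\n", next_delay(&d, 512, 0));
	return 0;
}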
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 362c4b3..1030c60 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -110,7 +110,7 @@ do { \
_r; \
})
-#define heap_peek(h) ((h)->size ? (h)->data[0] : NULL)
+#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL)
#define heap_full(h) ((h)->used == (h)->size)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 99053b1..6c44fe0 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc)
/* PD controller */
- int change = 0;
- int64_t error;
int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
int64_t derivative = dirty - dc->disk.sectors_dirty_last;
+ int64_t proportional = dirty - target;
+ int64_t change;
dc->disk.sectors_dirty_last = dirty;
- derivative *= dc->writeback_rate_d_term;
- derivative = clamp(derivative, -dirty, dirty);
+ /* Scale to sectors per second */
- derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
- dc->writeback_rate_d_smooth, 0);
+ proportional *= dc->writeback_rate_update_seconds;
+ proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
- /* Avoid divide by zero */
- if (!target)
- goto out;
+ derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
- error = div64_s64((dirty + derivative - target) << 8, target);
+ derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
+ (dc->writeback_rate_d_term /
+ dc->writeback_rate_update_seconds) ?: 1, 0);
+
+ derivative *= dc->writeback_rate_d_term;
+ derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);
- change = div_s64((dc->writeback_rate.rate * error) >> 8,
- dc->writeback_rate_p_term_inverse);
+ change = proportional + derivative;
/* Don't increase writeback rate if the device isn't keeping up */
if (change > 0 &&
time_after64(local_clock(),
- dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
+ dc->writeback_rate.next + NSEC_PER_MSEC))
change = 0;
dc->writeback_rate.rate =
- clamp_t(int64_t, dc->writeback_rate.rate + change,
+ clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
1, NSEC_PER_MSEC);
-out:
+
+ dc->writeback_rate_proportional = proportional;
dc->writeback_rate_derivative = derivative;
dc->writeback_rate_change = change;
dc->writeback_rate_target = target;
@@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work)
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
- uint64_t ret;
-
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
!dc->writeback_percent)
return 0;
- ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
-
- return min_t(uint64_t, ret, HZ);
+ return bch_next_delay(&dc->writeback_rate, sectors);
}
struct dirty_io {
@@ -241,7 +239,7 @@ static void read_dirty(struct cached_dev *dc)
if (KEY_START(&w->key) != dc->last_read ||
jiffies_to_msecs(delay) > 50)
while (!kthread_should_stop() && delay)
- delay = schedule_timeout_interruptible(delay);
+ delay = schedule_timeout_uninterruptible(delay);
dc->last_read = KEY_OFFSET(&w->key);
@@ -438,7 +436,7 @@ static int bch_writeback_thread(void *arg)
while (delay &&
!kthread_should_stop() &&
!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
- delay = schedule_timeout_interruptible(delay);
+ delay = schedule_timeout_uninterruptible(delay);
}
}
@@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
sectors_dirty_init_fn, 0);
+
+ dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
}
int bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -490,18 +490,15 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_delay = 30;
dc->writeback_rate.rate = 1024;
- dc->writeback_rate_update_seconds = 30;
- dc->writeback_rate_d_term = 16;
- dc->writeback_rate_p_term_inverse = 64;
- dc->writeback_rate_d_smooth = 8;
+ dc->writeback_rate_update_seconds = 5;
+ dc->writeback_rate_d_term = 30;
+ dc->writeback_rate_p_term_inverse = 6000;
dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
"bcache_writeback");
if (IS_ERR(dc->writeback_thread))
return PTR_ERR(dc->writeback_thread);
- set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
-
INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);
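
The writeback.c rework above replaces the old error-based controller with an explicit PD computation: the proportional term is (dirty - target) scaled by writeback_rate_update_seconds / writeback_rate_p_term_inverse, the derivative is a smoothed change in dirty sectors scaled by writeback_rate_d_term / writeback_rate_p_term_inverse, and their sum adjusts the rate. A simplified stand-alone model of that calculation, with plain integer arithmetic standing in for ewma_add()/div_s64() and the defaults 5/30/6000 taken from bch_cached_dev_writeback_init() above:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct wb {
	int64_t rate;			/* sectors per second */
	int64_t sectors_dirty_last;
	int64_t derivative_avg;		/* smoothed derivative */
	int64_t update_seconds;		/* 5 */
	int64_t d_term;			/* 30 */
	int64_t p_term_inverse;		/* 6000 */
};

static void update_writeback_rate(struct wb *w, int64_t dirty, int64_t target)
{
	int64_t proportional = dirty - target;
	int64_t derivative = dirty - w->sectors_dirty_last;
	int64_t weight, change;

	w->sectors_dirty_last = dirty;

	proportional = proportional * w->update_seconds / w->p_term_inverse;

	derivative /= w->update_seconds;
	weight = w->d_term / w->update_seconds;
	if (weight < 1)
		weight = 1;
	/* crude EWMA standing in for the kernel's ewma_add() */
	w->derivative_avg += (derivative - w->derivative_avg) / weight;
	derivative = w->derivative_avg * w->d_term / w->p_term_inverse;

	change = proportional + derivative;
	w->rate += change;
	if (w->rate < 1)
		w->rate = 1;
}

int main(void)
{
	struct wb w = { .rate = 1024, .update_seconds = 5,
			.d_term = 30, .p_term_inverse = 6000 };

	/* 1M dirty sectors against an 800k target nudges the rate upward. */
	update_writeback_rate(&w, 1000000, 800000);
	printf("new rate: %" PRId64 " sectors/s\n", w.rate);
	return 0;
}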
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 12dc29b..4195a01 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1635,7 +1635,7 @@ int bitmap_create(struct mddev *mddev)
sector_t blocks = mddev->resync_max_sectors;
struct file *file = mddev->bitmap_info.file;
int err;
- struct sysfs_dirent *bm = NULL;
+ struct kernfs_node *bm = NULL;
BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index df4aeb6..30210b9 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -225,7 +225,7 @@ struct bitmap {
wait_queue_head_t overflow_wait;
wait_queue_head_t behind_wait;
- struct sysfs_dirent *sysfs_can_clear;
+ struct kernfs_node *sysfs_can_clear;
};
/* the bitmap API */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 21f4d7f..369d919 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1077,6 +1077,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
@@ -1155,6 +1156,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
+ if (ev1 < mddev->events)
+ set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
@@ -1563,6 +1566,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
@@ -1645,6 +1649,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
+ if (ev1 < mddev->events)
+ set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
@@ -2788,6 +2794,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
else
rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
err = rdev->mddev->pers->
hot_add_disk(rdev->mddev, rdev);
if (err) {
@@ -5760,6 +5767,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
info->raid_disk < mddev->raid_disks) {
rdev->raid_disk = info->raid_disk;
set_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
} else
rdev->raid_disk = -1;
} else
@@ -7706,7 +7714,8 @@ static int remove_and_add_spares(struct mddev *mddev,
if (test_bit(Faulty, &rdev->flags))
continue;
if (mddev->ro &&
- rdev->saved_raid_disk < 0)
+ ! (rdev->saved_raid_disk >= 0 &&
+ !test_bit(Bitmap_sync, &rdev->flags)))
continue;
rdev->recovery_offset = 0;
@@ -7787,9 +7796,12 @@ void md_check_recovery(struct mddev *mddev)
* As we only add devices that are already in-sync,
* we can activate the spares immediately.
*/
- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
remove_and_add_spares(mddev, NULL);
- mddev->pers->spare_active(mddev);
+ /* There is no thread, but we need to call
+ * ->spare_active and clear saved_raid_disk
+ */
+ md_reap_sync_thread(mddev);
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2f5cc8a..07bba96 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -106,7 +106,7 @@ struct md_rdev {
*/
struct work_struct del_work; /* used for delayed sysfs removal */
- struct sysfs_dirent *sysfs_state; /* handle for 'state'
+ struct kernfs_node *sysfs_state; /* handle for 'state'
* sysfs entry */
struct badblocks {
@@ -129,6 +129,9 @@ struct md_rdev {
enum flag_bits {
Faulty, /* device is known to have a fault */
In_sync, /* device is in_sync with rest of array */
+ Bitmap_sync, /* ..actually, not quite In_sync. Need a
+ * bitmap-based recovery to get fully in sync
+ */
Unmerged, /* device is being added to array and should
* be considerred for bvec_merge_fn but not
* yet for actual IO
@@ -376,10 +379,10 @@ struct mddev {
sector_t resync_max; /* resync should pause
* when it gets here */
- struct sysfs_dirent *sysfs_state; /* handle for 'array_state'
+ struct kernfs_node *sysfs_state; /* handle for 'array_state'
* file in sysfs.
*/
- struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */
+ struct kernfs_node *sysfs_action; /* handle for 'sync_action' */
struct work_struct del_work; /* used for delayed sysfs removal */
@@ -498,13 +501,13 @@ struct md_sysfs_entry {
};
extern struct attribute_group md_bitmap_group;
-static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
+static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
if (sd)
return sysfs_get_dirent(sd, name);
return sd;
}
-static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
+static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
if (sd)
sysfs_notify_dirent(sd);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1e5a540..a49cfcc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -924,9 +924,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
conf->next_window_requests++;
else
conf->current_window_requests++;
- }
- if (bio->bi_sector >= conf->start_next_window)
sector = conf->start_next_window;
+ }
}
conf->nr_pending++;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c504e83..06eeb99 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1319,7 +1319,7 @@ read_again:
/* Could not read all from this device, so we will
* need another r10_bio.
*/
- sectors_handled = (r10_bio->sectors + max_sectors
+ sectors_handled = (r10_bio->sector + max_sectors
- bio->bi_sector);
r10_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
@@ -1327,7 +1327,7 @@ read_again:
bio->bi_phys_segments = 2;
else
bio->bi_phys_segments++;
- spin_unlock(&conf->device_lock);
+ spin_unlock_irq(&conf->device_lock);
/* Cannot call generic_make_request directly
* as that will be queued in __generic_make_request
* and subsequent mempool_alloc might block
@@ -3218,10 +3218,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (j == conf->copies) {
/* Cannot recover, so abort the recovery or
* record a bad block */
- put_buf(r10_bio);
- if (rb2)
- atomic_dec(&rb2->remaining);
- r10_bio = rb2;
if (any_working) {
/* problem is that there are bad blocks
* on other device(s)
@@ -3253,6 +3249,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
mirror->recovery_disabled
= mddev->recovery_disabled;
}
+ put_buf(r10_bio);
+ if (rb2)
+ atomic_dec(&rb2->remaining);
+ r10_bio = rb2;
break;
}
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cc055da..cbb1571 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -687,7 +687,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
- BUG_ON(list_empty(&sh->lru));
+ BUG_ON(list_empty(&sh->lru) &&
+ !test_bit(STRIPE_EXPANDING, &sh->state));
list_del_init(&sh->lru);
if (sh->group) {
sh->group->stripes_cnt--;
@@ -3608,7 +3609,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
*/
set_bit(R5_Insync, &dev->flags);
- if (rdev && test_bit(R5_WriteError, &dev->flags)) {
+ if (test_bit(R5_WriteError, &dev->flags)) {
/* This flag does not apply to '.replacement'
* only to .rdev, so make sure to check that*/
struct md_rdev *rdev2 = rcu_dereference(
@@ -3621,7 +3622,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
} else
clear_bit(R5_WriteError, &dev->flags);
}
- if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
+ if (test_bit(R5_MadeGood, &dev->flags)) {
/* This flag does not apply to '.replacement'
* only to .rdev, so make sure to check that*/
struct md_rdev *rdev2 = rcu_dereference(
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 11e20af..705698f 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -1228,8 +1228,14 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
pcr->remove_pci = true;
- cancel_delayed_work(&pcr->carddet_work);
- cancel_delayed_work(&pcr->idle_work);
+ /* Disable interrupts at the pcr level */
+ spin_lock_irq(&pcr->lock);
+ rtsx_pci_writel(pcr, RTSX_BIER, 0);
+ pcr->bier = 0;
+ spin_unlock_irq(&pcr->lock);
+
+ cancel_delayed_work_sync(&pcr->carddet_work);
+ cancel_delayed_work_sync(&pcr->idle_work);
mfd_remove_devices(&pcidev->dev);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a3e291d..6cb388e 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -525,4 +525,5 @@ source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
source "drivers/misc/mic/Kconfig"
+source "drivers/misc/genwqe/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f45473e..99b9424 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -53,3 +53,4 @@ obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
obj-y += mic/
+obj-$(CONFIG_GENWQE) += genwqe/
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 0daadcf..d3eee11 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -641,7 +641,7 @@ static const struct attribute_group ad525x_group_commands = {
.attrs = ad525x_attributes_commands,
};
-int ad_dpot_add_files(struct device *dev,
+static int ad_dpot_add_files(struct device *dev,
unsigned features, unsigned rdac)
{
int err = sysfs_create_file(&dev->kobj,
@@ -666,7 +666,7 @@ int ad_dpot_add_files(struct device *dev,
return err;
}
-inline void ad_dpot_remove_files(struct device *dev,
+static inline void ad_dpot_remove_files(struct device *dev,
unsigned features, unsigned rdac)
{
sysfs_remove_file(&dev->kobj,
diff --git a/drivers/misc/bmp085-i2c.c b/drivers/misc/bmp085-i2c.c
index 3abfcec..a7c1629 100644
--- a/drivers/misc/bmp085-i2c.c
+++ b/drivers/misc/bmp085-i2c.c
@@ -49,7 +49,7 @@ static int bmp085_i2c_probe(struct i2c_client *client,
return err;
}
- return bmp085_probe(&client->dev, regmap);
+ return bmp085_probe(&client->dev, regmap, client->irq);
}
static int bmp085_i2c_remove(struct i2c_client *client)
diff --git a/drivers/misc/bmp085-spi.c b/drivers/misc/bmp085-spi.c
index d6a5265..864ecac 100644
--- a/drivers/misc/bmp085-spi.c
+++ b/drivers/misc/bmp085-spi.c
@@ -41,7 +41,7 @@ static int bmp085_spi_probe(struct spi_device *client)
return err;
}
- return bmp085_probe(&client->dev, regmap);
+ return bmp085_probe(&client->dev, regmap, client->irq);
}
static int bmp085_spi_remove(struct spi_device *client)
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 2704d88..820e53d 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -49,9 +49,11 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/delay.h>
#include <linux/of.h>
#include "bmp085.h"
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/gpio.h>
#define BMP085_CHIP_ID 0x55
#define BMP085_CALIBRATION_DATA_START 0xAA
@@ -84,8 +86,19 @@ struct bmp085_data {
unsigned long last_temp_measurement;
u8 chip_id;
s32 b6; /* calculated temperature correction coefficient */
+ int irq;
+ struct completion done;
};
+static irqreturn_t bmp085_eoc_isr(int irq, void *devid)
+{
+ struct bmp085_data *data = devid;
+
+ complete(&data->done);
+
+ return IRQ_HANDLED;
+}
+
static s32 bmp085_read_calibration_data(struct bmp085_data *data)
{
u16 tmp[BMP085_CALIBRATION_DATA_LENGTH];
@@ -116,6 +129,9 @@ static s32 bmp085_update_raw_temperature(struct bmp085_data *data)
s32 status;
mutex_lock(&data->lock);
+
+ init_completion(&data->done);
+
status = regmap_write(data->regmap, BMP085_CTRL_REG,
BMP085_TEMP_MEASUREMENT);
if (status < 0) {
@@ -123,7 +139,8 @@ static s32 bmp085_update_raw_temperature(struct bmp085_data *data)
"Error while requesting temperature measurement.\n");
goto exit;
}
- msleep(BMP085_TEMP_CONVERSION_TIME);
+ wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies(
+ BMP085_TEMP_CONVERSION_TIME));
status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB,
&tmp, sizeof(tmp));
@@ -147,6 +164,9 @@ static s32 bmp085_update_raw_pressure(struct bmp085_data *data)
s32 status;
mutex_lock(&data->lock);
+
+ init_completion(&data->done);
+
status = regmap_write(data->regmap, BMP085_CTRL_REG,
BMP085_PRESSURE_MEASUREMENT +
(data->oversampling_setting << 6));
@@ -157,8 +177,8 @@ static s32 bmp085_update_raw_pressure(struct bmp085_data *data)
}
/* wait for the end of conversion */
- msleep(2+(3 << data->oversampling_setting));
-
+ wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies(
+ 2+(3 << data->oversampling_setting)));
/* copy data into a u32 (4 bytes), but skip the first byte. */
status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB,
((u8 *)&tmp)+1, 3);
@@ -420,7 +440,7 @@ struct regmap_config bmp085_regmap_config = {
};
EXPORT_SYMBOL_GPL(bmp085_regmap_config);
-int bmp085_probe(struct device *dev, struct regmap *regmap)
+int bmp085_probe(struct device *dev, struct regmap *regmap, int irq)
{
struct bmp085_data *data;
int err = 0;
@@ -434,6 +454,15 @@ int bmp085_probe(struct device *dev, struct regmap *regmap)
dev_set_drvdata(dev, data);
data->dev = dev;
data->regmap = regmap;
+ data->irq = irq;
+
+ if (data->irq > 0) {
+ err = devm_request_irq(dev, data->irq, bmp085_eoc_isr,
+ IRQF_TRIGGER_RISING, "bmp085",
+ data);
+ if (err < 0)
+ goto exit_free;
+ }
/* Initialize the BMP085 chip */
err = bmp085_init_client(data);
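
The bmp085 changes above replace the fixed msleep() conversion delays with an end-of-conversion interrupt: the measurement path arms a completion, the ISR completes it, and wait_for_completion_timeout() still falls back to the old conversion time when no IRQ fires. A user-space analogue of that pattern, sketched with a POSIX condition variable (illustrative only, not the kernel completion API):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t eoc = PTHREAD_COND_INITIALIZER;
static int done;

/* Stand-in for the end-of-conversion ISR: signal the waiter. */
static void *fake_eoc_isr(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	done = 1;			/* conversion finished */
	pthread_cond_signal(&eoc);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Wait for the signal, but give up after timeout_ms like the old msleep(). */
static void wait_for_eoc(int timeout_ms)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (!done)
		if (pthread_cond_timedwait(&eoc, &lock, &ts))
			break;		/* timed out: fall back silently */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t isr;

	done = 0;
	pthread_create(&isr, NULL, fake_eoc_isr, NULL);
	wait_for_eoc(5);		/* roughly the temperature conversion time */
	pthread_join(isr, NULL);
	printf("conversion complete or timed out\n");
	return 0;
}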
diff --git a/drivers/misc/bmp085.h b/drivers/misc/bmp085.h
index 2b8f615..8b8e3b1 100644
--- a/drivers/misc/bmp085.h
+++ b/drivers/misc/bmp085.h
@@ -26,7 +26,7 @@
extern struct regmap_config bmp085_regmap_config;
-int bmp085_probe(struct device *dev, struct regmap *regmap);
+int bmp085_probe(struct device *dev, struct regmap *regmap, int irq);
int bmp085_remove(struct device *dev);
int bmp085_detect(struct device *dev);
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 3a015ab..78e55b5 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -378,7 +378,6 @@ static int eeprom_93xx46_remove(struct spi_device *spi)
device_remove_file(&spi->dev, &dev_attr_erase);
sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin);
- spi_set_drvdata(spi, NULL);
kfree(edev);
return 0;
}
diff --git a/drivers/misc/genwqe/Kconfig b/drivers/misc/genwqe/Kconfig
new file mode 100644
index 0000000..6069d8c
--- /dev/null
+++ b/drivers/misc/genwqe/Kconfig
@@ -0,0 +1,13 @@
+#
+# IBM Accelerator Family 'GenWQE'
+#
+
+menuconfig GENWQE
+ tristate "GenWQE PCIe Accelerator"
+ depends on PCI && 64BIT
+ select CRC_ITU_T
+ default n
+ help
+ Enables PCIe card driver for IBM GenWQE accelerators.
+ The user-space interface is described in
+ include/linux/genwqe/genwqe_card.h.
diff --git a/drivers/misc/genwqe/Makefile b/drivers/misc/genwqe/Makefile
new file mode 100644
index 0000000..98a2b4f
--- /dev/null
+++ b/drivers/misc/genwqe/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for GenWQE driver
+#
+
+obj-$(CONFIG_GENWQE) := genwqe_card.o
+genwqe_card-objs := card_base.o card_dev.o card_ddcb.o card_sysfs.o \
+ card_debugfs.o card_utils.o
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
new file mode 100644
index 0000000..74d51c9b
--- /dev/null
+++ b/drivers/misc/genwqe/card_base.c
@@ -0,0 +1,1205 @@
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Module initialization and PCIe setup. Card health monitoring and
+ * recovery functionality. Character device creation and deletion are
+ * controlled from here.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/err.h>
+#include <linux/aer.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/device.h>
+#include <linux/log2.h>
+#include <linux/genwqe/genwqe_card.h>
+
+#include "card_base.h"
+#include "card_ddcb.h"
+
+MODULE_AUTHOR("Frank Haverkamp <haver@linux.vnet.ibm.com>");
+MODULE_AUTHOR("Michael Ruettger <michael@ibmra.de>");
+MODULE_AUTHOR("Joerg-Stephan Vogt <jsvogt@de.ibm.com>");
+MODULE_AUTHOR("Michal Jung <mijung@de.ibm.com>");
+
+MODULE_DESCRIPTION("GenWQE Card");
+MODULE_VERSION(DRV_VERS_STRING);
+MODULE_LICENSE("GPL");
+
+static char genwqe_driver_name[] = GENWQE_DEVNAME;
+static struct class *class_genwqe;
+static struct dentry *debugfs_genwqe;
+static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX];
+
+/* PCI structure for identifying device by PCI vendor and device ID */
+static DEFINE_PCI_DEVICE_TABLE(genwqe_device_table) = {
+ { .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_GENWQE,
+ .subvendor = PCI_SUBVENDOR_ID_IBM,
+ .subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
+ .class = (PCI_CLASSCODE_GENWQE5 << 8),
+ .class_mask = ~0,
+ .driver_data = 0 },
+
+ /* Initial SR-IOV bring-up image */
+ { .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_GENWQE,
+ .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
+ .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV,
+ .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
+ .class_mask = ~0,
+ .driver_data = 0 },
+
+ { .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */
+ .device = 0x0000, /* VF Device ID */
+ .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
+ .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV,
+ .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
+ .class_mask = ~0,
+ .driver_data = 0 },
+
+ /* Fixed up image */
+ { .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_GENWQE,
+ .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
+ .subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
+ .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
+ .class_mask = ~0,
+ .driver_data = 0 },
+
+ { .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */
+ .device = 0x0000, /* VF Device ID */
+ .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
+ .subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
+ .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
+ .class_mask = ~0,
+ .driver_data = 0 },
+
+ /* Even one more ... */
+ { .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_GENWQE,
+ .subvendor = PCI_SUBVENDOR_ID_IBM,
+ .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_NEW,
+ .class = (PCI_CLASSCODE_GENWQE5 << 8),
+ .class_mask = ~0,
+ .driver_data = 0 },
+
+ { 0, } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, genwqe_device_table);
+
+/**
+ * genwqe_dev_alloc() - Create and prepare a new card descriptor
+ *
+ * Return: Pointer to card descriptor, or ERR_PTR(err) on error
+ */
+static struct genwqe_dev *genwqe_dev_alloc(void)
+{
+ unsigned int i = 0, j;
+ struct genwqe_dev *cd;
+
+ for (i = 0; i < GENWQE_CARD_NO_MAX; i++) {
+ if (genwqe_devices[i] == NULL)
+ break;
+ }
+ if (i >= GENWQE_CARD_NO_MAX)
+ return ERR_PTR(-ENODEV);
+
+ cd = kzalloc(sizeof(struct genwqe_dev), GFP_KERNEL);
+ if (!cd)
+ return ERR_PTR(-ENOMEM);
+
+ cd->card_idx = i;
+ cd->class_genwqe = class_genwqe;
+ cd->debugfs_genwqe = debugfs_genwqe;
+
+ init_waitqueue_head(&cd->queue_waitq);
+
+ spin_lock_init(&cd->file_lock);
+ INIT_LIST_HEAD(&cd->file_list);
+
+ cd->card_state = GENWQE_CARD_UNUSED;
+ spin_lock_init(&cd->print_lock);
+
+ cd->ddcb_software_timeout = genwqe_ddcb_software_timeout;
+ cd->kill_timeout = genwqe_kill_timeout;
+
+ for (j = 0; j < GENWQE_MAX_VFS; j++)
+ cd->vf_jobtimeout_msec[j] = genwqe_vf_jobtimeout_msec;
+
+ genwqe_devices[i] = cd;
+ return cd;
+}
+
+static void genwqe_dev_free(struct genwqe_dev *cd)
+{
+ if (!cd)
+ return;
+
+ genwqe_devices[cd->card_idx] = NULL;
+ kfree(cd);
+}
+
+/**
+ * genwqe_bus_reset() - Card recovery
+ *
+ * pci_reset_function() will recover the device and ensure that the
+ * registers are accessible again when it completes with success. If
+ * not, the card will stay dead and its registers will remain
+ * inaccessible.
+ */
+static int genwqe_bus_reset(struct genwqe_dev *cd)
+{
+ int bars, rc = 0;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ void __iomem *mmio;
+
+ if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE)
+ return -EIO;
+
+ mmio = cd->mmio;
+ cd->mmio = NULL;
+ pci_iounmap(pci_dev, mmio);
+
+ bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
+ pci_release_selected_regions(pci_dev, bars);
+
+ /*
+ * Firmware/BIOS might change memory mapping during bus reset.
+ * Settings like bus-mastering enable, ... are backed up and
+ * restored by the pci_reset_function().
+ */
+ dev_dbg(&pci_dev->dev, "[%s] pci_reset function ...\n", __func__);
+ rc = pci_reset_function(pci_dev);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: failed reset func (rc %d)\n", __func__, rc);
+ return rc;
+ }
+ dev_dbg(&pci_dev->dev, "[%s] done with rc=%d\n", __func__, rc);
+
+ /*
+ * Here is the right spot to clear the register read
+ * failure. pci_bus_reset() does this job in real systems.
+ */
+ cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
+ GENWQE_INJECT_GFIR_FATAL |
+ GENWQE_INJECT_GFIR_INFO);
+
+ rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: request bars failed (%d)\n", __func__, rc);
+ return -EIO;
+ }
+
+ cd->mmio = pci_iomap(pci_dev, 0, 0);
+ if (cd->mmio == NULL) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: mapping BAR0 failed\n", __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/*
+ * Hardware circumvention section. Certain bitstreams in our test-lab
+ * had different kinds of problems. Here is where we adjust those
+ * bitstreams to function well with this version of our device driver.
+ *
+ * These circumventions are applied to the physical function only.
+ * The magic numbers below identify development/manufacturing
+ * versions of the bitstream used on the card.
+ *
+ * Turn off error reporting for old/manufacturing images.
+ */
+
+bool genwqe_need_err_masking(struct genwqe_dev *cd)
+{
+ return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
+}
+
+static void genwqe_tweak_hardware(struct genwqe_dev *cd)
+{
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ /* Mask FIRs for development images */
+ if (((cd->slu_unitcfg & 0xFFFF0ull) >= 0x32000ull) &&
+ ((cd->slu_unitcfg & 0xFFFF0ull) <= 0x33250ull)) {
+ dev_warn(&pci_dev->dev,
+ "FIRs masked due to bitstream %016llx.%016llx\n",
+ cd->slu_unitcfg, cd->app_unitcfg);
+
+ __genwqe_writeq(cd, IO_APP_SEC_LEM_DEBUG_OVR,
+ 0xFFFFFFFFFFFFFFFFull);
+
+ __genwqe_writeq(cd, IO_APP_ERR_ACT_MASK,
+ 0x0000000000000000ull);
+ }
+}
+
+/**
+ * genwqe_recovery_on_fatal_gfir_required() - Version dependent actions
+ *
+ * Bitstreams older than 2013-02-17 have a bug where fatal GFIRs must
+ * be ignored. This is e.g. true for the bitstream we gave to the card
+ * manufacturer, but also for some old bitstreams we released to our
+ * test-lab.
+ */
+int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd)
+{
+ return (cd->slu_unitcfg & 0xFFFF0ull) >= 0x32170ull;
+}
+
+int genwqe_flash_readback_fails(struct genwqe_dev *cd)
+{
+ return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
+}
+
+/**
+ * genwqe_T_psec() - Calculate PF/VF timeout register content
+ *
+ * Note: From a design perspective it turned out to be a bad idea to
+ * use codes here to specify the frequency/speed values. An old
+ * driver cannot understand new codes and is therefore always a
+ * problem. It is better to measure the value, or to put the
+ * speed/frequency directly into a register, which stays a valid
+ * value for old as well as for new software.
+ */
+/* T = 1/f */
+static int genwqe_T_psec(struct genwqe_dev *cd)
+{
+ u16 speed; /* 1/f -> 250, 200, 166, 175 */
+ static const int T[] = { 4000, 5000, 6000, 5714 };
+
+ speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
+ if (speed >= ARRAY_SIZE(T))
+ return -1; /* illegal value */
+
+ return T[speed];
+}
+
+/**
+ * genwqe_setup_pf_jtimer() - Setup PF hardware timeouts for DDCB execution
+ *
+ * Do this _after_ card_reset() is called. Otherwise the values will
+ * vanish. The settings need to be done when the queues are inactive.
+ *
+ * The max. timeout value is 2^(10+x) * T (6ns for 166MHz) * 15/16.
+ * The min. timeout value is 2^(10+x) * T (6ns for 166MHz) * 14/16.
+ */
+static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd)
+{
+ u32 T = genwqe_T_psec(cd);
+ u64 x;
+
+ if (genwqe_pf_jobtimeout_msec == 0)
+ return false;
+
+ /* PF: large value needed, flash update 2sec per block */
+ x = ilog2(genwqe_pf_jobtimeout_msec *
+ 16000000000uL/(T * 15)) - 10;
+
+ genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
+ 0xff00 | (x & 0xff), 0);
+ return true;
+}
+
+/**
+ * genwqe_setup_vf_jtimer() - Setup VF hardware timeouts for DDCB execution
+ */
+static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd)
+{
+ struct pci_dev *pci_dev = cd->pci_dev;
+ unsigned int vf;
+ u32 T = genwqe_T_psec(cd);
+ u64 x;
+
+ for (vf = 0; vf < pci_sriov_get_totalvfs(pci_dev); vf++) {
+
+ if (cd->vf_jobtimeout_msec[vf] == 0)
+ continue;
+
+ x = ilog2(cd->vf_jobtimeout_msec[vf] *
+ 16000000000uL/(T * 15)) - 10;
+
+ genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
+ 0xff00 | (x & 0xff), vf + 1);
+ }
+ return true;
+}
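
/*
 * Worked example of the timeout encoding used by the two jtimer helpers
 * above (stand-alone sketch, assuming the 166 MHz case where
 * genwqe_T_psec() returns T = 6000 ps). For a requested timeout of
 * t milliseconds, x = ilog2(t * 16e9 / (T * 15)) - 10, and the
 * achievable timeout 2^(10+x) * T * 15/16 is the largest such step not
 * exceeding the request.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static unsigned ilog2_u64(uint64_t v)		/* floor(log2(v)), v > 0 */
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint64_t T = 6000;			/* ps, 166 MHz bitstream */
	uint64_t timeout_msec = 5000;		/* requested job timeout */
	uint64_t x = ilog2_u64(timeout_msec * 16000000000ULL / (T * 15)) - 10;
	uint64_t eff_ms = (1ULL << (10 + x)) * T * 15 / 16 / 1000000000ULL;

	printf("x = %" PRIu64 ", effective timeout ~%" PRIu64 " ms\n",
	       x, eff_ms);
	return 0;
}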
+
+static int genwqe_ffdc_buffs_alloc(struct genwqe_dev *cd)
+{
+ unsigned int type, e = 0;
+
+ for (type = 0; type < GENWQE_DBG_UNITS; type++) {
+ switch (type) {
+ case GENWQE_DBG_UNIT0:
+ e = genwqe_ffdc_buff_size(cd, 0);
+ break;
+ case GENWQE_DBG_UNIT1:
+ e = genwqe_ffdc_buff_size(cd, 1);
+ break;
+ case GENWQE_DBG_UNIT2:
+ e = genwqe_ffdc_buff_size(cd, 2);
+ break;
+ case GENWQE_DBG_REGS:
+ e = GENWQE_FFDC_REGS;
+ break;
+ }
+
+ /* currently support only the debug units mentioned here */
+ cd->ffdc[type].entries = e;
+ cd->ffdc[type].regs = kmalloc(e * sizeof(struct genwqe_reg),
+ GFP_KERNEL);
+ /*
+ * regs == NULL is ok; the code using it treats this as having no
+ * regs. Printing a warning is ok in this case.
+ */
+ }
+ return 0;
+}
+
+static void genwqe_ffdc_buffs_free(struct genwqe_dev *cd)
+{
+ unsigned int type;
+
+ for (type = 0; type < GENWQE_DBG_UNITS; type++) {
+ kfree(cd->ffdc[type].regs);
+ cd->ffdc[type].regs = NULL;
+ }
+}
+
+static int genwqe_read_ids(struct genwqe_dev *cd)
+{
+ int err = 0;
+ int slu_id;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ cd->slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
+ if (cd->slu_unitcfg == IO_ILLEGAL_VALUE) {
+ dev_err(&pci_dev->dev,
+ "err: SLUID=%016llx\n", cd->slu_unitcfg);
+ err = -EIO;
+ goto out_err;
+ }
+
+ slu_id = genwqe_get_slu_id(cd);
+ if (slu_id < GENWQE_SLU_ARCH_REQ || slu_id == 0xff) {
+ dev_err(&pci_dev->dev,
+ "err: incompatible SLU Architecture %u\n", slu_id);
+ err = -ENOENT;
+ goto out_err;
+ }
+
+ cd->app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
+ if (cd->app_unitcfg == IO_ILLEGAL_VALUE) {
+ dev_err(&pci_dev->dev,
+ "err: APPID=%016llx\n", cd->app_unitcfg);
+ err = -EIO;
+ goto out_err;
+ }
+ genwqe_read_app_id(cd, cd->app_name, sizeof(cd->app_name));
+
+ /*
+ * Is access to all registers possible? If we are a VF the
+ * answer is obvious. If we run fully virtualized, we need to
+ * check if we can access all registers. If we do not have
+ * full access we will cause an UR and some informational FIRs
+ * in the PF, but that should not harm.
+ */
+ if (pci_dev->is_virtfn)
+ cd->is_privileged = 0;
+ else
+ cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
+ != IO_ILLEGAL_VALUE);
+
+ out_err:
+ return err;
+}
+
+static int genwqe_start(struct genwqe_dev *cd)
+{
+ int err;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ err = genwqe_read_ids(cd);
+ if (err)
+ return err;
+
+ if (genwqe_is_privileged(cd)) {
+ /* do this after the tweaks. alloc fail is acceptable */
+ genwqe_ffdc_buffs_alloc(cd);
+ genwqe_stop_traps(cd);
+
+ /* Collect registers e.g. FIRs, UNITIDs, traces ... */
+ genwqe_read_ffdc_regs(cd, cd->ffdc[GENWQE_DBG_REGS].regs,
+ cd->ffdc[GENWQE_DBG_REGS].entries, 0);
+
+ genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT0,
+ cd->ffdc[GENWQE_DBG_UNIT0].regs,
+ cd->ffdc[GENWQE_DBG_UNIT0].entries);
+
+ genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT1,
+ cd->ffdc[GENWQE_DBG_UNIT1].regs,
+ cd->ffdc[GENWQE_DBG_UNIT1].entries);
+
+ genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT2,
+ cd->ffdc[GENWQE_DBG_UNIT2].regs,
+ cd->ffdc[GENWQE_DBG_UNIT2].entries);
+
+ genwqe_start_traps(cd);
+
+ if (cd->card_state == GENWQE_CARD_FATAL_ERROR) {
+ dev_warn(&pci_dev->dev,
+ "[%s] chip reload/recovery!\n", __func__);
+
+ /*
+ * Stealth Mode: Reload chip on either hot
+ * reset or PERST.
+ */
+ cd->softreset = 0x7Cull;
+ __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET,
+ cd->softreset);
+
+ err = genwqe_bus_reset(cd);
+ if (err != 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: bus reset failed!\n",
+ __func__);
+ goto out;
+ }
+
+ /*
+ * Re-read the IDs because it could happen that the
+ * bitstream load failed!
+ */
+ err = genwqe_read_ids(cd);
+ if (err)
+ goto out;
+ }
+ }
+
+ err = genwqe_setup_service_layer(cd); /* does a reset to the card */
+ if (err != 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: could not setup servicelayer!\n", __func__);
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (genwqe_is_privileged(cd)) { /* code is running _after_ reset */
+ genwqe_tweak_hardware(cd);
+
+ genwqe_setup_pf_jtimer(cd);
+ genwqe_setup_vf_jtimer(cd);
+ }
+
+ err = genwqe_device_create(cd);
+ if (err < 0) {
+ dev_err(&pci_dev->dev,
+ "err: chdev init failed! (err=%d)\n", err);
+ goto out_release_service_layer;
+ }
+ return 0;
+
+ out_release_service_layer:
+ genwqe_release_service_layer(cd);
+ out:
+ if (genwqe_is_privileged(cd))
+ genwqe_ffdc_buffs_free(cd);
+ return -EIO;
+}
+
+/**
+ * genwqe_stop() - Stop card operation
+ *
+ * Recovery notes:
+ * As long as genwqe_thread runs we might access registers during
+ * error data capture. The same holds for the genwqe_health_thread.
+ * When genwqe_bus_reset() fails this function might be called twice:
+ * first by the genwqe_health_thread() and later by genwqe_remove() to
+ * unbind the device. We must be able to survive that.
+ *
+ * This function must be robust enough to be called twice.
+ */
+static int genwqe_stop(struct genwqe_dev *cd)
+{
+ genwqe_finish_queue(cd); /* no register access */
+ genwqe_device_remove(cd); /* device removed, procs killed */
+ genwqe_release_service_layer(cd); /* here genwqe_thread is stopped */
+
+ if (genwqe_is_privileged(cd)) {
+ pci_disable_sriov(cd->pci_dev); /* access pci config space */
+ genwqe_ffdc_buffs_free(cd);
+ }
+
+ return 0;
+}
+
+/**
+ * genwqe_recover_card() - Try to recover the card if it is possible
+ *
+ * If fatal_err is set no register access is possible anymore. It is
+ * likely that genwqe_start fails in that situation. Proper error
+ * handling is required in this case.
+ *
+ * genwqe_bus_reset() will cause the pci code to call genwqe_remove()
+ * and later genwqe_probe() for all virtual functions.
+ */
+static int genwqe_recover_card(struct genwqe_dev *cd, int fatal_err)
+{
+ int rc;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ genwqe_stop(cd);
+
+ /*
+ * Make sure chip is not reloaded to maintain FFDC. Write SLU
+ * Reset Register, CPLDReset field to 0.
+ */
+ if (!fatal_err) {
+ cd->softreset = 0x70ull;
+ __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset);
+ }
+
+ rc = genwqe_bus_reset(cd);
+ if (rc != 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: card recovery impossible!\n", __func__);
+ return rc;
+ }
+
+ rc = genwqe_start(cd);
+ if (rc < 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: failed to launch device!\n", __func__);
+ return rc;
+ }
+ return 0;
+}
+
+static int genwqe_health_check_cond(struct genwqe_dev *cd, u64 *gfir)
+{
+ *gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ return (*gfir & GFIR_ERR_TRIGGER) &&
+ genwqe_recovery_on_fatal_gfir_required(cd);
+}
+
+/**
+ * genwqe_fir_checking() - Check the fault isolation registers of the card
+ *
+ * If this code works ok, it can be tried out with the help of the genwqe_poke tool:
+ * sudo ./tools/genwqe_poke 0x8 0xfefefefefef
+ *
+ * Now the relevant FIRs/sFIRs should be printed out and the driver should
+ * invoke recovery (devices are removed and re-added).
+ */
+static u64 genwqe_fir_checking(struct genwqe_dev *cd)
+{
+ int j, iterations = 0;
+ u64 mask, fir, fec, uid, gfir, gfir_masked, sfir, sfec;
+ u32 fir_addr, fir_clr_addr, fec_addr, sfir_addr, sfec_addr;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ healthMonitor:
+ iterations++;
+ if (iterations > 16) {
+ dev_err(&pci_dev->dev, "* exit looping after %d times\n",
+ iterations);
+ goto fatal_error;
+ }
+
+ gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ if (gfir != 0x0)
+ dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n",
+ IO_SLC_CFGREG_GFIR, gfir);
+ if (gfir == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ /*
+ * Avoid printing if no GFIR bit is set; this prevents continuous
+ * printouts, e.g. for the following bug:
+ * FIR set without a 2ndary FIR/FIR cannot be cleared
+ * Comment out the following if to get the prints:
+ */
+ if (gfir == 0)
+ return 0;
+
+ gfir_masked = gfir & GFIR_ERR_TRIGGER; /* fatal errors */
+
+ for (uid = 0; uid < GENWQE_MAX_UNITS; uid++) { /* 0..2 in zEDC */
+
+ /* read the primary FIR (pfir) */
+ fir_addr = (uid << 24) + 0x08;
+ fir = __genwqe_readq(cd, fir_addr);
+ if (fir == 0x0)
+ continue; /* no error in this unit */
+
+ dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fir_addr, fir);
+ if (fir == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ /* read primary FEC */
+ fec_addr = (uid << 24) + 0x18;
+ fec = __genwqe_readq(cd, fec_addr);
+
+ dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fec_addr, fec);
+ if (fec == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ for (j = 0, mask = 1ULL; j < 64; j++, mask <<= 1) {
+
+ /* secondary fir empty, skip it */
+ if ((fir & mask) == 0x0)
+ continue;
+
+ sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
+ sfir = __genwqe_readq(cd, sfir_addr);
+
+ if (sfir == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+ dev_err(&pci_dev->dev,
+ "* 0x%08x 0x%016llx\n", sfir_addr, sfir);
+
+ sfec_addr = (uid << 24) + 0x300 + 0x08 * j;
+ sfec = __genwqe_readq(cd, sfec_addr);
+
+ if (sfec == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+ dev_err(&pci_dev->dev,
+ "* 0x%08x 0x%016llx\n", sfec_addr, sfec);
+
+ gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ if (gfir == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ /* gfir turned on during routine! get out and
+ start over. */
+ if ((gfir_masked == 0x0) &&
+ (gfir & GFIR_ERR_TRIGGER)) {
+ goto healthMonitor;
+ }
+
+ /* do not clear if we entered with a fatal gfir */
+ if (gfir_masked == 0x0) {
+
+ /* NEW clear by mask the logged bits */
+ sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
+ __genwqe_writeq(cd, sfir_addr, sfir);
+
+ dev_dbg(&pci_dev->dev,
+ "[HM] Clearing 2ndary FIR 0x%08x "
+ "with 0x%016llx\n", sfir_addr, sfir);
+
+ /*
+ * Note: these cannot be error FIRs
+ * since gfir_masked is 0 after sfir
+ * was read. Also, it is safe to do
+ * this write if sfir=0. Still need to
+ * clear the primary. This just means
+ * there is no secondary FIR.
+ */
+
+ /* clear by mask the logged bit. */
+ fir_clr_addr = (uid << 24) + 0x10;
+ __genwqe_writeq(cd, fir_clr_addr, mask);
+
+ dev_dbg(&pci_dev->dev,
+ "[HM] Clearing primary FIR 0x%08x "
+ "with 0x%016llx\n", fir_clr_addr,
+ mask);
+ }
+ }
+ }
+ gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ if (gfir == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ if ((gfir_masked == 0x0) && (gfir & GFIR_ERR_TRIGGER)) {
+ /*
+ * Check once more that it didn't go on after all the
+ * FIRS were cleared.
+ */
+ dev_dbg(&pci_dev->dev, "ACK! Another FIR! Recursing %d!\n",
+ iterations);
+ goto healthMonitor;
+ }
+ return gfir_masked;
+
+ fatal_error:
+ return IO_ILLEGAL_VALUE;
+}
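
/*
 * Register layout walked by genwqe_fir_checking() above (stand-alone
 * sketch of the address arithmetic only, derived from the offsets in
 * the loop): per-unit FIR at (uid << 24) + 0x08, its clear register at
 * + 0x10, FEC at + 0x18, and secondary FIR/FEC number j at
 * + 0x100 + 8 * j and + 0x300 + 8 * j respectively.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int uid = 1, j = 3;	/* example unit and sFIR index */
	uint32_t base = uid << 24;

	printf("FIR      0x%08" PRIx32 "\n", base + 0x08);
	printf("FIR clr  0x%08" PRIx32 "\n", base + 0x10);
	printf("FEC      0x%08" PRIx32 "\n", base + 0x18);
	printf("sFIR[%u]  0x%08" PRIx32 "\n", j, base + 0x100 + 0x08 * j);
	printf("sFEC[%u]  0x%08" PRIx32 "\n", j, base + 0x300 + 0x08 * j);
	return 0;
}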
+
+/**
+ * genwqe_health_thread() - Health checking thread
+ *
+ * This thread is only started for the PF of the card.
+ *
+ * This thread monitors the health of the card. A critical situation
+ * is when we read registers which contain -1 (IO_ILLEGAL_VALUE). In
+ * this case we need to be recovered from outside. Writing to
+ * registers will very likely not work either.
+ *
+ * This thread must only exit if kthread_should_stop() becomes true.
+ *
+ * Condition for the health-thread to trigger:
+ * a) when a kthread_stop() request comes in or
+ * b) a critical GFIR occurred
+ *
+ * Informational GFIRs are checked and potentially printed every
+ * health_check_interval seconds.
+ */
+static int genwqe_health_thread(void *data)
+{
+ int rc, should_stop = 0;
+ struct genwqe_dev *cd = data;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ u64 gfir, gfir_masked, slu_unitcfg, app_unitcfg;
+
+ while (!kthread_should_stop()) {
+ rc = wait_event_interruptible_timeout(cd->health_waitq,
+ (genwqe_health_check_cond(cd, &gfir) ||
+ (should_stop = kthread_should_stop())),
+ genwqe_health_check_interval * HZ);
+
+ if (should_stop)
+ break;
+
+ if (gfir == IO_ILLEGAL_VALUE) {
+ dev_err(&pci_dev->dev,
+ "[%s] GFIR=%016llx\n", __func__, gfir);
+ goto fatal_error;
+ }
+
+ slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
+ if (slu_unitcfg == IO_ILLEGAL_VALUE) {
+ dev_err(&pci_dev->dev,
+ "[%s] SLU_UNITCFG=%016llx\n",
+ __func__, slu_unitcfg);
+ goto fatal_error;
+ }
+
+ app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
+ if (app_unitcfg == IO_ILLEGAL_VALUE) {
+ dev_err(&pci_dev->dev,
+ "[%s] APP_UNITCFG=%016llx\n",
+ __func__, app_unitcfg);
+ goto fatal_error;
+ }
+
+ gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ if (gfir == IO_ILLEGAL_VALUE) {
+ dev_err(&pci_dev->dev,
+ "[%s] %s: GFIR=%016llx\n", __func__,
+ (gfir & GFIR_ERR_TRIGGER) ? "err" : "info",
+ gfir);
+ goto fatal_error;
+ }
+
+ gfir_masked = genwqe_fir_checking(cd);
+ if (gfir_masked == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ /*
+ * GFIR ErrorTrigger bits set => reset the card!
+ * Never do this for old/manufacturing images!
+ */
+ if ((gfir_masked) && !cd->skip_recovery &&
+ genwqe_recovery_on_fatal_gfir_required(cd)) {
+
+ cd->card_state = GENWQE_CARD_FATAL_ERROR;
+
+ rc = genwqe_recover_card(cd, 0);
+ if (rc < 0) {
+ /* FIXME Card is unusable and needs unbind! */
+ goto fatal_error;
+ }
+ }
+
+ cd->last_gfir = gfir;
+ cond_resched();
+ }
+
+ return 0;
+
+ fatal_error:
+ dev_err(&pci_dev->dev,
+ "[%s] card unusable. Please trigger unbind!\n", __func__);
+
+ /* Bring down logical devices to inform user space via udev remove. */
+ cd->card_state = GENWQE_CARD_FATAL_ERROR;
+ genwqe_stop(cd);
+
+ /* genwqe_bus_reset() failed. Now wait for genwqe_remove(). */
+ while (!kthread_should_stop())
+ cond_resched();
+
+ return -EIO;
+}
+
+static int genwqe_health_check_start(struct genwqe_dev *cd)
+{
+ int rc;
+
+ if (genwqe_health_check_interval <= 0)
+ return 0; /* valid for disabling the service */
+
+ /* moved before request_irq() */
+ /* init_waitqueue_head(&cd->health_waitq); */
+
+ cd->health_thread = kthread_run(genwqe_health_thread, cd,
+ GENWQE_DEVNAME "%d_health",
+ cd->card_idx);
+ if (IS_ERR(cd->health_thread)) {
+ rc = PTR_ERR(cd->health_thread);
+ cd->health_thread = NULL;
+ return rc;
+ }
+ return 0;
+}
+
+static int genwqe_health_thread_running(struct genwqe_dev *cd)
+{
+ return cd->health_thread != NULL;
+}
+
+static int genwqe_health_check_stop(struct genwqe_dev *cd)
+{
+ int rc;
+
+ if (!genwqe_health_thread_running(cd))
+ return -EIO;
+
+ rc = kthread_stop(cd->health_thread);
+ cd->health_thread = NULL;
+ return 0;
+}
+
+/**
+ * genwqe_pci_setup() - Allocate PCIe related resources for our card
+ */
+static int genwqe_pci_setup(struct genwqe_dev *cd)
+{
+ int err, bars;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
+ err = pci_enable_device_mem(pci_dev);
+ if (err) {
+ dev_err(&pci_dev->dev,
+ "err: failed to enable pci memory (err=%d)\n", err);
+ goto err_out;
+ }
+
+ /* Reserve PCI I/O and memory resources */
+ err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
+ if (err) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: request bars failed (%d)\n", __func__, err);
+ err = -EIO;
+ goto err_disable_device;
+ }
+
+ /* check for 64-bit DMA address supported (DAC) */
+ if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
+ err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pci_dev->dev,
+ "err: DMA64 consistent mask error\n");
+ err = -EIO;
+ goto out_release_resources;
+ }
+ /* check for 32-bit DMA address supported (SAC) */
+ } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+ err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pci_dev->dev,
+ "err: DMA32 consistent mask error\n");
+ err = -EIO;
+ goto out_release_resources;
+ }
+ } else {
+ dev_err(&pci_dev->dev,
+ "err: neither DMA32 nor DMA64 supported\n");
+ err = -EIO;
+ goto out_release_resources;
+ }
+
+ pci_set_master(pci_dev);
+ pci_enable_pcie_error_reporting(pci_dev);
+
+ /* request complete BAR-0 space (length = 0) */
+ cd->mmio_len = pci_resource_len(pci_dev, 0);
+ cd->mmio = pci_iomap(pci_dev, 0, 0);
+ if (cd->mmio == NULL) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: mapping BAR0 failed\n", __func__);
+ err = -ENOMEM;
+ goto out_release_resources;
+ }
+
+ cd->num_vfs = pci_sriov_get_totalvfs(pci_dev);
+
+ err = genwqe_read_ids(cd);
+ if (err)
+ goto out_iounmap;
+
+ return 0;
+
+ out_iounmap:
+ pci_iounmap(pci_dev, cd->mmio);
+ out_release_resources:
+ pci_release_selected_regions(pci_dev, bars);
+ err_disable_device:
+ pci_disable_device(pci_dev);
+ err_out:
+ return err;
+}
+
+/**
+ * genwqe_pci_remove() - Free PCIe related resources for our card
+ */
+static void genwqe_pci_remove(struct genwqe_dev *cd)
+{
+ int bars;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (cd->mmio)
+ pci_iounmap(pci_dev, cd->mmio);
+
+ bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
+ pci_release_selected_regions(pci_dev, bars);
+ pci_disable_device(pci_dev);
+}
+
+/**
+ * genwqe_probe() - Device initialization
+ * @pdev: PCI device information struct
+ *
+ * Callable for multiple cards. This function is called on bind.
+ *
+ * Return: 0 if succeeded, < 0 when failed
+ */
+static int genwqe_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ int err;
+ struct genwqe_dev *cd;
+
+ genwqe_init_crc32();
+
+ cd = genwqe_dev_alloc();
+ if (IS_ERR(cd)) {
+ dev_err(&pci_dev->dev, "err: could not alloc mem (err=%d)!\n",
+ (int)PTR_ERR(cd));
+ return PTR_ERR(cd);
+ }
+
+ dev_set_drvdata(&pci_dev->dev, cd);
+ cd->pci_dev = pci_dev;
+
+ err = genwqe_pci_setup(cd);
+ if (err < 0) {
+ dev_err(&pci_dev->dev,
+ "err: problems with PCI setup (err=%d)\n", err);
+ goto out_free_dev;
+ }
+
+ err = genwqe_start(cd);
+ if (err < 0) {
+ dev_err(&pci_dev->dev,
+ "err: cannot start card services! (err=%d)\n", err);
+ goto out_pci_remove;
+ }
+
+ if (genwqe_is_privileged(cd)) {
+ err = genwqe_health_check_start(cd);
+ if (err < 0) {
+ dev_err(&pci_dev->dev,
+ "err: cannot start health checking! "
+ "(err=%d)\n", err);
+ goto out_stop_services;
+ }
+ }
+ return 0;
+
+ out_stop_services:
+ genwqe_stop(cd);
+ out_pci_remove:
+ genwqe_pci_remove(cd);
+ out_free_dev:
+ genwqe_dev_free(cd);
+ return err;
+}
+
+/**
+ * genwqe_remove() - Called when the device is removed (hot-pluggable)
+ *
+ * Also called when the driver is unloaded or when an unbind is done.
+ */
+static void genwqe_remove(struct pci_dev *pci_dev)
+{
+ struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);
+
+ genwqe_health_check_stop(cd);
+
+ /*
+ * genwqe_stop() must survive if it is called twice
+ * sequentially. This happens when the health thread calls it
+ * and fails on genwqe_bus_reset().
+ */
+ genwqe_stop(cd);
+ genwqe_pci_remove(cd);
+ genwqe_dev_free(cd);
+}
+
+/*
+ * genwqe_err_error_detected() - Error detection callback
+ *
+ * This callback is called by the PCI subsystem whenever a PCI bus
+ * error is detected.
+ */
+static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev,
+ enum pci_channel_state state)
+{
+ struct genwqe_dev *cd;
+
+ dev_err(&pci_dev->dev, "[%s] state=%d\n", __func__, state);
+
+ if (pci_dev == NULL)
+ return PCI_ERS_RESULT_NEED_RESET;
+
+ cd = dev_get_drvdata(&pci_dev->dev);
+ if (cd == NULL)
+ return PCI_ERS_RESULT_NEED_RESET;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev)
+{
+ return PCI_ERS_RESULT_NONE;
+}
+
+static void genwqe_err_resume(struct pci_dev *dev)
+{
+}
+
+static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
+{
+ struct genwqe_dev *cd = dev_get_drvdata(&dev->dev);
+
+ if (numvfs > 0) {
+ genwqe_setup_vf_jtimer(cd);
+ pci_enable_sriov(dev, numvfs);
+ return numvfs;
+ }
+ if (numvfs == 0) {
+ pci_disable_sriov(dev);
+ return 0;
+ }
+ return 0;
+}
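+
+/*
+ * Note: genwqe_sriov_configure() above is invoked via the PCI core's
+ * sriov_numvfs sysfs attribute. A minimal usage sketch from user space
+ * (the device address is just an example):
+ *
+ * echo 15 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
+ * echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
+ */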
+
+static struct pci_error_handlers genwqe_err_handler = {
+ .error_detected = genwqe_err_error_detected,
+ .mmio_enabled = genwqe_err_result_none,
+ .link_reset = genwqe_err_result_none,
+ .slot_reset = genwqe_err_result_none,
+ .resume = genwqe_err_resume,
+};
+
+static struct pci_driver genwqe_driver = {
+ .name = genwqe_driver_name,
+ .id_table = genwqe_device_table,
+ .probe = genwqe_probe,
+ .remove = genwqe_remove,
+ .sriov_configure = genwqe_sriov_configure,
+ .err_handler = &genwqe_err_handler,
+};
+
+/**
+ * genwqe_init_module() - Driver registration and initialization
+ */
+static int __init genwqe_init_module(void)
+{
+ int rc;
+
+ class_genwqe = class_create(THIS_MODULE, GENWQE_DEVNAME);
+ if (IS_ERR(class_genwqe)) {
+ pr_err("[%s] create class failed\n", __func__);
+ return PTR_ERR(class_genwqe);
+ }
+
+ debugfs_genwqe = debugfs_create_dir(GENWQE_DEVNAME, NULL);
+ if (!debugfs_genwqe) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ rc = pci_register_driver(&genwqe_driver);
+ if (rc != 0) {
+ pr_err("[%s] pci_reg_driver (rc=%d)\n", __func__, rc);
+ goto err_out0;
+ }
+
+ return rc;
+
+ err_out0:
+ debugfs_remove(debugfs_genwqe);
+ err_out:
+ class_destroy(class_genwqe);
+ return rc;
+}
+
+/**
+ * genwqe_exit_module() - Driver exit
+ */
+static void __exit genwqe_exit_module(void)
+{
+ pci_unregister_driver(&genwqe_driver);
+ debugfs_remove(debugfs_genwqe);
+ class_destroy(class_genwqe);
+}
+
+module_init(genwqe_init_module);
+module_exit(genwqe_exit_module);
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
new file mode 100644
index 0000000..5e4dbd2
--- /dev/null
+++ b/drivers/misc/genwqe/card_base.h
@@ -0,0 +1,557 @@
+#ifndef __CARD_BASE_H__
+#define __CARD_BASE_H__
+
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Interfaces within the GenWQE module. Defines genwqe_card and
+ * ddcb_queue as well as ddcb_requ.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/stringify.h>
+#include <linux/pci.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/version.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#include <linux/genwqe/genwqe_card.h>
+#include "genwqe_driver.h"
+
+#define GENWQE_MSI_IRQS 4 /* Just one supported, no MSIx */
+#define GENWQE_FLAG_MSI_ENABLED (1 << 0)
+
+#define GENWQE_MAX_VFS 15 /* maximum 15 VFs are possible */
+#define GENWQE_MAX_FUNCS 16 /* 1 PF and 15 VFs */
+#define GENWQE_CARD_NO_MAX (16 * GENWQE_MAX_FUNCS)
+
+/* Compile parameters, some of them appear in debugfs for later adjustment */
+#define genwqe_ddcb_max 32 /* DDCBs on the work-queue */
+#define genwqe_polling_enabled 0 /* in case of irqs not working */
+#define genwqe_ddcb_software_timeout 10 /* timeout per DDCB in seconds */
+#define genwqe_kill_timeout 8 /* time until process gets killed */
+#define genwqe_vf_jobtimeout_msec 250 /* 250 msec */
+#define genwqe_pf_jobtimeout_msec 8000 /* 8 sec should be ok */
+#define genwqe_health_check_interval 4 /* <= 0: disabled */
+
+/* Sysfs attribute groups used when we create the genwqe device */
+extern const struct attribute_group *genwqe_attribute_groups[];
+
+/*
+ * Config space for Genwqe5 A7:
+ * 00:[14 10 4b 04]40 00 10 00[00 00 00 12]00 00 00 00
+ * 10: 0c 00 00 f0 07 3c 00 00 00 00 00 00 00 00 00 00
+ * 20: 00 00 00 00 00 00 00 00 00 00 00 00[14 10 4b 04]
+ * 30: 00 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00
+ */
+#define PCI_DEVICE_GENWQE 0x044b /* Genwqe DeviceID */
+
+#define PCI_SUBSYSTEM_ID_GENWQE5 0x035f /* Genwqe A5 Subsystem-ID */
+#define PCI_SUBSYSTEM_ID_GENWQE5_NEW 0x044b /* Genwqe A5 Subsystem-ID */
+#define PCI_CLASSCODE_GENWQE5 0x1200 /* UNKNOWN */
+
+#define PCI_SUBVENDOR_ID_IBM_SRIOV 0x0000
+#define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV 0x0000 /* Genwqe A5 Subsystem-ID */
+#define PCI_CLASSCODE_GENWQE5_SRIOV 0x1200 /* UNKNOWN */
+
+#define GENWQE_SLU_ARCH_REQ 2 /* Required SLU architecture level */
+
+/**
+ * struct genwqe_reg - Genwqe data dump functionality
+ */
+struct genwqe_reg {
+ u32 addr;
+ u32 idx;
+ u64 val;
+};
+
+/*
+ * enum genwqe_dbg_type - Specify chip unit to dump/debug
+ */
+enum genwqe_dbg_type {
+ GENWQE_DBG_UNIT0 = 0, /* captured before prev errs cleared */
+ GENWQE_DBG_UNIT1 = 1,
+ GENWQE_DBG_UNIT2 = 2,
+ GENWQE_DBG_UNIT3 = 3,
+ GENWQE_DBG_UNIT4 = 4,
+ GENWQE_DBG_UNIT5 = 5,
+ GENWQE_DBG_UNIT6 = 6,
+ GENWQE_DBG_UNIT7 = 7,
+ GENWQE_DBG_REGS = 8,
+ GENWQE_DBG_DMA = 9,
+ GENWQE_DBG_UNITS = 10, /* max number of possible debug units */
+};
+
+/* Software error injection to simulate card failures */
+#define GENWQE_INJECT_HARDWARE_FAILURE 0x00000001 /* injects -1 reg reads */
+#define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002 /* pci_bus_reset fail */
+#define GENWQE_INJECT_GFIR_FATAL 0x00000004 /* GFIR = 0x0000ffff */
+#define GENWQE_INJECT_GFIR_INFO 0x00000008 /* GFIR = 0xffff0000 */
+
+/*
+ * Genwqe card description and management data.
+ *
+ * Error-handling in case of card malfunction
+ * ------------------------------------------
+ *
+ * If the card is detected to be defective, the outside environment
+ * will cause the PCI layer to call deinit (the cleanup function for
+ * probe). This has the same effect as doing an unbind/bind operation
+ * on the card.
+ *
+ * The genwqe card driver implements a health checking thread which
+ * verifies the card function. If it detects a problem, the card's
+ * device is shut down and restarted, along with a reset of the card
+ * and queue.
+ *
+ * All functions accessing the card device return either -EIO or -ENODEV
+ * code to indicate the malfunction to the user. The user has to close
+ * the file descriptor and open a new one, once the card becomes
+ * available again.
+ *
+ * If the open file descriptor is set up to receive SIGIO, the signal
+ * is generated for the application, which has to provide a handler to
+ * react to it. If the application does not close the open file
+ * descriptor, a SIGKILL is sent to enforce freeing the card's
+ * resources.
+ *
+ * I did not find a different way to prevent kernel problems due to
+ * reference counters for the card's character devices getting out of
+ * sync. The character device deallocation does not block, even if
+ * there is still an open file descriptor pending. If this pending
+ * descriptor is closed, the data structures used by the character
+ * device are reinstantiated, which will lead to the reference counter
+ * dropping below the allowed values.
+ *
+ * Card recovery
+ * -------------
+ *
+ * To test the internal driver recovery the following command can be used:
+ * sudo sh -c 'echo 0xfffff > /sys/class/genwqe/genwqe0_card/err_inject'
+ */
+
+
+/**
+ * enum dma_mapping_type - Mapping type definition
+ *
+ * To avoid memcpying data around we use user memory directly. To do
+ * this we need to pin/swap-in the memory and request a DMA address
+ * for it.
+ */
+enum dma_mapping_type {
+ GENWQE_MAPPING_RAW = 0, /* contiguous memory buffer */
+ GENWQE_MAPPING_SGL_TEMP, /* sglist dynamically used */
+ GENWQE_MAPPING_SGL_PINNED, /* sglist used with pinning */
+};
+
+/**
+ * struct dma_mapping - Information about memory mappings done by the driver
+ */
+struct dma_mapping {
+ enum dma_mapping_type type;
+
+ void *u_vaddr; /* user-space vaddr/non-aligned */
+ void *k_vaddr; /* kernel-space vaddr/non-aligned */
+ dma_addr_t dma_addr; /* physical DMA address */
+
+ struct page **page_list; /* list of pages used by user buff */
+ dma_addr_t *dma_list; /* list of dma addresses per page */
+ unsigned int nr_pages; /* number of pages */
+ unsigned int size; /* size in bytes */
+
+ struct list_head card_list; /* list of usr_maps for card */
+ struct list_head pin_list; /* list of pinned memory for dev */
+};
+
+static inline void genwqe_mapping_init(struct dma_mapping *m,
+ enum dma_mapping_type type)
+{
+ memset(m, 0, sizeof(*m));
+ m->type = type;
+}
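+
+/*
+ * Typical usage sketch (assumptions: the caller allocates the struct,
+ * holds any required locks and later undoes the mapping again via
+ * genwqe_user_vunmap()):
+ *
+ * struct dma_mapping *m = kzalloc(sizeof(*m), GFP_KERNEL);
+ *
+ * genwqe_mapping_init(m, GENWQE_MAPPING_SGL_TEMP);
+ * rc = genwqe_user_vmap(cd, m, uaddr, size, req);
+ */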
+
+/**
+ * struct ddcb_queue - DDCB queue data
+ * @ddcb_max: Number of DDCBs on the queue
+ * @ddcb_next: Next free DDCB
+ * @ddcb_act: Next DDCB supposed to finish
+ * @ddcb_seq: Sequence number of last DDCB
+ * @ddcbs_in_flight: Currently enqueued DDCBs
+ * @ddcbs_completed: Number of already completed DDCBs
+ * @busy: Number of -EBUSY returns
+ * @ddcb_daddr: DMA address of first DDCB in the queue
+ * @ddcb_vaddr: Kernel virtual address of first DDCB in the queue
+ * @ddcb_req: Associated requests (one per DDCB)
+ * @ddcb_waitqs: Associated wait queues (one per DDCB)
+ * @ddcb_lock: Lock to protect queuing operations
+ * @ddcb_waitq: Wait on next DDCB finishing
+ */
+struct ddcb_queue {
+ int ddcb_max; /* amount of DDCBs */
+ int ddcb_next; /* next available DDCB num */
+ int ddcb_act; /* DDCB to be processed */
+ u16 ddcb_seq; /* slc seq num */
+ unsigned int ddcbs_in_flight; /* number of ddcbs in processing */
+ unsigned int ddcbs_completed;
+ unsigned int ddcbs_max_in_flight;
+ unsigned int busy; /* how many times -EBUSY? */
+
+ dma_addr_t ddcb_daddr; /* DMA address */
+ struct ddcb *ddcb_vaddr; /* kernel virtual addr for DDCBs */
+ struct ddcb_requ **ddcb_req; /* ddcb processing parameter */
+ wait_queue_head_t *ddcb_waitqs; /* waitqueue per ddcb */
+
+ spinlock_t ddcb_lock; /* exclusive access to queue */
+ wait_queue_head_t ddcb_waitq; /* wait for ddcb processing */
+
+ /* registers of the respective queue to be used */
+ u32 IO_QUEUE_CONFIG;
+ u32 IO_QUEUE_STATUS;
+ u32 IO_QUEUE_SEGMENT;
+ u32 IO_QUEUE_INITSQN;
+ u32 IO_QUEUE_WRAP;
+ u32 IO_QUEUE_OFFSET;
+ u32 IO_QUEUE_WTIME;
+ u32 IO_QUEUE_ERRCNTS;
+ u32 IO_QUEUE_LRW;
+};
+
+/*
+ * GFIR, SLU_UNITCFG, APP_UNITCFG
+ * 8 Units with FIR/FEC + 64 * 2ndary FIRS/FEC.
+ */
+#define GENWQE_FFDC_REGS (3 + (8 * (2 + 2 * 64)))
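+/* That is 3 + 8 * (2 + 2 * 64) = 3 + 8 * 130 = 1043 register entries. */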
+
+struct genwqe_ffdc {
+ unsigned int entries;
+ struct genwqe_reg *regs;
+};
+
+/**
+ * struct genwqe_dev - GenWQE device information
+ * @card_state: Card operation state, see above
+ * @ffdc: First Failure Data Capture buffers for each unit
+ * @card_thread: Working thread to operate the DDCB queue
+ * @card_waitq: Wait queue used in card_thread
+ * @queue: DDCB queue
+ * @health_thread: Card monitoring thread (only for PFs)
+ * @health_waitq: Wait queue used in health_thread
+ * @pci_dev: Associated PCI device (function)
+ * @mmio: Base address of 64-bit register space
+ * @mmio_len: Length of register area
+ * @file_lock: Lock to protect access to file_list
+ * @file_list: List of all processes with open GenWQE file descriptors
+ *
+ * This struct contains all information needed to communicate with a
+ * GenWQE card. It is initialized when a GenWQE device is found and
+ * destroyed when it goes away. It holds data to maintain the queue as
+ * well as data needed to feed the user interfaces.
+ */
+struct genwqe_dev {
+ enum genwqe_card_state card_state;
+ spinlock_t print_lock;
+
+ int card_idx; /* card index 0..CARD_NO_MAX-1 */
+ u64 flags; /* general flags */
+
+ /* FFDC data gathering */
+ struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS];
+
+ /* DDCB workqueue */
+ struct task_struct *card_thread;
+ wait_queue_head_t queue_waitq;
+ struct ddcb_queue queue; /* genwqe DDCB queue */
+ unsigned int irqs_processed;
+
+ /* Card health checking thread */
+ struct task_struct *health_thread;
+ wait_queue_head_t health_waitq;
+
+ /* char device */
+ dev_t devnum_genwqe; /* major/minor num card */
+ struct class *class_genwqe; /* reference to class object */
+ struct device *dev; /* for device creation */
+ struct cdev cdev_genwqe; /* char device for card */
+
+ struct dentry *debugfs_root; /* debugfs card root directory */
+ struct dentry *debugfs_genwqe; /* debugfs driver root directory */
+
+ /* pci resources */
+ struct pci_dev *pci_dev; /* PCI device */
+ void __iomem *mmio; /* BAR-0 MMIO start */
+ unsigned long mmio_len;
+ u16 num_vfs;
+ u32 vf_jobtimeout_msec[GENWQE_MAX_VFS];
+ int is_privileged; /* access to all regs possible */
+
+ /* config regs which we need often */
+ u64 slu_unitcfg;
+ u64 app_unitcfg;
+ u64 softreset;
+ u64 err_inject;
+ u64 last_gfir;
+ char app_name[5];
+
+ spinlock_t file_lock; /* lock for open files */
+ struct list_head file_list; /* list of open files */
+
+ /* debugfs parameters */
+ int ddcb_software_timeout; /* wait until DDCB times out */
+ int skip_recovery; /* circumvention if recovery fails */
+ int kill_timeout; /* wait after sending SIGKILL */
+};
+
+/**
+ * enum genwqe_requ_state - State of a DDCB execution request
+ */
+enum genwqe_requ_state {
+ GENWQE_REQU_NEW = 0,
+ GENWQE_REQU_ENQUEUED = 1,
+ GENWQE_REQU_TAPPED = 2,
+ GENWQE_REQU_FINISHED = 3,
+ GENWQE_REQU_STATE_MAX,
+};
+
+/**
+ * struct ddcb_requ - Kernel internal representation of the DDCB request
+ * @cmd: User space representation of the DDCB execution request
+ */
+struct ddcb_requ {
+ /* kernel specific content */
+ enum genwqe_requ_state req_state; /* request status */
+ int num; /* ddcb_no for this request */
+ struct ddcb_queue *queue; /* associated queue */
+
+ struct dma_mapping dma_mappings[DDCB_FIXUPS];
+ struct sg_entry *sgl[DDCB_FIXUPS];
+ dma_addr_t sgl_dma_addr[DDCB_FIXUPS];
+ size_t sgl_size[DDCB_FIXUPS];
+
+ /* kernel/user shared content */
+ struct genwqe_ddcb_cmd cmd; /* user space DDCB command */
+ struct genwqe_debug_data debug_data;
+};
+
+/**
+ * struct genwqe_file - Information for open GenWQE devices
+ */
+struct genwqe_file {
+ struct genwqe_dev *cd;
+ struct genwqe_driver *client;
+ struct file *filp;
+
+ struct fasync_struct *async_queue;
+ struct task_struct *owner;
+ struct list_head list; /* entry in list of open files */
+
+ spinlock_t map_lock; /* lock for dma_mappings */
+ struct list_head map_list; /* list of dma_mappings */
+
+ spinlock_t pin_lock; /* lock for pinned memory */
+ struct list_head pin_list; /* list of pinned memory */
+};
+
+int genwqe_setup_service_layer(struct genwqe_dev *cd); /* for PF only */
+int genwqe_finish_queue(struct genwqe_dev *cd);
+int genwqe_release_service_layer(struct genwqe_dev *cd);
+
+/**
+ * genwqe_get_slu_id() - Read Service Layer Unit Id
+ * Return: 0x00: Development code
+ * 0x01: SLC1 (old)
+ * 0x02: SLC2 (sept2012)
+ * 0x03: SLC2 (feb2013, generic driver)
+ */
+static inline int genwqe_get_slu_id(struct genwqe_dev *cd)
+{
+ return (int)((cd->slu_unitcfg >> 32) & 0xff);
+}
+
+int genwqe_ddcbs_in_flight(struct genwqe_dev *cd);
+
+u8 genwqe_card_type(struct genwqe_dev *cd);
+int genwqe_card_reset(struct genwqe_dev *cd);
+int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count);
+void genwqe_reset_interrupt_capability(struct genwqe_dev *cd);
+
+int genwqe_device_create(struct genwqe_dev *cd);
+int genwqe_device_remove(struct genwqe_dev *cd);
+
+/* debugfs */
+int genwqe_init_debugfs(struct genwqe_dev *cd);
+void genqwe_exit_debugfs(struct genwqe_dev *cd);
+
+int genwqe_read_softreset(struct genwqe_dev *cd);
+
+/* Hardware Circumventions */
+int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd);
+int genwqe_flash_readback_fails(struct genwqe_dev *cd);
+
+/**
+ * genwqe_write_vreg() - Write register in VF window
+ * @cd: genwqe device
+ * @reg: register address
+ * @val: value to write
+ * @func: 0: PF, 1: VF0, ..., 15: VF14
+ */
+int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func);
+
+/**
+ * genwqe_read_vreg() - Read register in VF window
+ * @cd: genwqe device
+ * @reg: register address
+ * @func: 0: PF, 1: VF0, ..., 15: VF14
+ *
+ * Return: content of the register
+ */
+u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func);
+
+/* FFDC Buffer Management */
+int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id);
+int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id,
+ struct genwqe_reg *regs, unsigned int max_regs);
+int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
+ unsigned int max_regs, int all);
+int genwqe_ffdc_dump_dma(struct genwqe_dev *cd,
+ struct genwqe_reg *regs, unsigned int max_regs);
+
+int genwqe_init_debug_data(struct genwqe_dev *cd,
+ struct genwqe_debug_data *d);
+
+void genwqe_init_crc32(void);
+int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len);
+
+/* Memory allocation/deallocation; dma address handling */
+int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
+ void *uaddr, unsigned long size,
+ struct ddcb_requ *req);
+
+int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
+ struct ddcb_requ *req);
+
+struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages,
+ dma_addr_t *dma_addr, size_t *sgl_size);
+
+void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
+ dma_addr_t dma_addr, size_t size);
+
+int genwqe_setup_sgl(struct genwqe_dev *cd,
+ unsigned long offs,
+ unsigned long size,
+ struct sg_entry *sgl, /* genwqe sgl */
+ dma_addr_t dma_addr, size_t sgl_size,
+ dma_addr_t *dma_list, int page_offs, int num_pages);
+
+int genwqe_check_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
+ int size);
+
+static inline bool dma_mapping_used(struct dma_mapping *m)
+{
+ if (!m)
+ return 0;
+ return m->size != 0;
+}
+
+/**
+ * __genwqe_execute_ddcb() - Execute DDCB request with addr translation
+ *
+ * This function will do the address translation changes to the DDCBs
+ * according to the definitions required by the ATS field. It looks up
+ * the memory allocation buffer or does vmap/vunmap for the respective
+ * user-space buffers, inclusive page pinning and scatter gather list
+ * buildup and teardown.
+ */
+int __genwqe_execute_ddcb(struct genwqe_dev *cd,
+ struct genwqe_ddcb_cmd *cmd);
+
+/**
+ * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation
+ *
+ * This version will not do address translation or any modifcation of
+ * the DDCB data. It is used e.g. for the MoveFlash DDCB which is
+ * entirely prepared by the driver itself. That means the appropriate
+ * DMA addresses are already in the DDCB and do not need any
+ * modification.
+ */
+int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
+ struct genwqe_ddcb_cmd *cmd);
+
+int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
+int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
+int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
+
+/* register access */
+int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val);
+u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs);
+int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val);
+u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs);
+
+void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
+ dma_addr_t *dma_handle);
+void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+/* Base clock frequency in MHz */
+int genwqe_base_clock_frequency(struct genwqe_dev *cd);
+
+/* Before FFDC is captured the traps should be stopped. */
+void genwqe_stop_traps(struct genwqe_dev *cd);
+void genwqe_start_traps(struct genwqe_dev *cd);
+
+/* Hardware circumvention */
+bool genwqe_need_err_masking(struct genwqe_dev *cd);
+
+/**
+ * genwqe_is_privileged() - Determine operation mode for PCI function
+ *
+ * On Intel with SRIOV support we see:
+ * PF: is_physfn = 1 is_virtfn = 0
+ * VF: is_physfn = 0 is_virtfn = 1
+ *
+ * On Systems with no SRIOV support _and_ virtualized systems we get:
+ * is_physfn = 0 is_virtfn = 0
+ *
+ * Other vendors have individual pci device ids to distinguish between
+ * virtual function drivers and physical function drivers. GenWQE
+ * unfortunately has just one pci device id for both, VFs and PF.
+ *
+ * The following code is used to distinguish if the card is running in
+ * privileged mode, either as true PF or in a virtualized system with
+ * full register access e.g. currently on PowerPC.
+ *
+ * if (pci_dev->is_virtfn)
+ * cd->is_privileged = 0;
+ * else
+ * cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
+ * != IO_ILLEGAL_VALUE);
+ */
+static inline int genwqe_is_privileged(struct genwqe_dev *cd)
+{
+ return cd->is_privileged;
+}
+
+#endif /* __CARD_BASE_H__ */
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
new file mode 100644
index 0000000..6f1acc0
--- /dev/null
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -0,0 +1,1376 @@
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Device Driver Control Block (DDCB) queue support. Definition of
+ * interrupt handlers for queue support as well as triggering the
+ * health monitor code in case of problems. The current hardware uses
+ * an MSI interrupt which is shared between error handling and
+ * functional code.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/crc-itu-t.h>
+
+#include "card_base.h"
+#include "card_ddcb.h"
+
+/*
+ * N: next DDCB, this is where the next DDCB will be put.
+ * A: active DDCB, this is where the code will look for the next completion.
+ * x: DDCB is enqueued, we are waiting for its completion.
+
+ * Situation (1): Empty queue
+ * +---+---+---+---+---+---+---+---+
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | | | | | | | | |
+ * +---+---+---+---+---+---+---+---+
+ * A/N
+ * enqueued_ddcbs = A - N = 2 - 2 = 0
+ *
+ * Situation (2): Wrapped, N > A
+ * +---+---+---+---+---+---+---+---+
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | | | x | x | | | | |
+ * +---+---+---+---+---+---+---+---+
+ * A N
+ * enqueued_ddcbs = N - A = 4 - 2 = 2
+ *
+ * Situation (3): Queue wrapped, A > N
+ * +---+---+---+---+---+---+---+---+
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | x | x | | | x | x | x | x |
+ * +---+---+---+---+---+---+---+---+
+ * N A
+ * enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 2) = 6
+ *
+ * Situation (4a): Queue full N > A
+ * +---+---+---+---+---+---+---+---+
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | x | x | x | x | x | x | x | |
+ * +---+---+---+---+---+---+---+---+
+ * A N
+ *
+ * enqueued_ddcbs = N - A = 7 - 0 = 7
+ *
+ * Situation (4b): Queue full A > N
+ * +---+---+---+---+---+---+---+---+
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | x | x | x | | x | x | x | x |
+ * +---+---+---+---+---+---+---+---+
+ * N A
+ * enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 3) = 7
+ */
+
+static int queue_empty(struct ddcb_queue *queue)
+{
+ return queue->ddcb_next == queue->ddcb_act;
+}
+
+static int queue_enqueued_ddcbs(struct ddcb_queue *queue)
+{
+ if (queue->ddcb_next >= queue->ddcb_act)
+ return queue->ddcb_next - queue->ddcb_act;
+
+ return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next);
+}
+
+static int queue_free_ddcbs(struct ddcb_queue *queue)
+{
+ int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1;
+
+ if (WARN_ON_ONCE(free_ddcbs < 0)) { /* must never ever happen! */
+ return 0;
+ }
+ return free_ddcbs;
+}
+
+/*
+ * Use of the PRIV field in the DDCB for queue debugging:
+ *
+ * (1) Trying to get rid of a DDCB which saw a timeout:
+ * pddcb->priv[6] = 0xcc; # cleared
+ *
+ * (2) Append a DDCB via NEXT bit:
+ * pddcb->priv[7] = 0xaa; # appended
+ *
+ * (3) DDCB needed tapping:
+ * pddcb->priv[7] = 0xbb; # tapped
+ *
+ * (4) DDCB marked as correctly finished:
+ * pddcb->priv[6] = 0xff; # finished
+ */
+
+static inline void ddcb_mark_tapped(struct ddcb *pddcb)
+{
+ pddcb->priv[7] = 0xbb; /* tapped */
+}
+
+static inline void ddcb_mark_appended(struct ddcb *pddcb)
+{
+ pddcb->priv[7] = 0xaa; /* appended */
+}
+
+static inline void ddcb_mark_cleared(struct ddcb *pddcb)
+{
+ pddcb->priv[6] = 0xcc; /* cleared */
+}
+
+static inline void ddcb_mark_finished(struct ddcb *pddcb)
+{
+ pddcb->priv[6] = 0xff; /* finished */
+}
+
+static inline void ddcb_mark_unused(struct ddcb *pddcb)
+{
+ pddcb->priv_64 = cpu_to_be64(0); /* not tapped */
+}
+
+/**
+ * genwqe_crc16() - Generate 16-bit crc as required for DDCBs
+ * @buff: pointer to data buffer
+ * @len: length of data for calculation
+ * @init: initial crc (0xffff at start)
+ *
+ * Polynomial = x^16 + x^12 + x^5 + 1 (0x1021)
+ * Example: 4 bytes 0x01 0x02 0x03 0x04 with init = 0xffff
+ * should result in a crc16 of 0x89c3
+ *
+ * Return: crc16 checksum in big endian format !
+ */
+static inline u16 genwqe_crc16(const u8 *buff, size_t len, u16 init)
+{
+ return crc_itu_t(init, buff, len);
+}
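+
+/*
+ * Quick check of the documented example above (illustration only, not
+ * wired into the driver):
+ *
+ * static const u8 data[] = { 0x01, 0x02, 0x03, 0x04 };
+ * u16 crc = genwqe_crc16(data, sizeof(data), 0xffff);
+ *
+ * WARN_ON(crc != 0x89c3);
+ */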
+
+static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue)
+{
+ int i;
+ struct ddcb *pddcb;
+ unsigned long flags;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ spin_lock_irqsave(&cd->print_lock, flags);
+
+ dev_info(&pci_dev->dev,
+ "DDCB list for card #%d (ddcb_act=%d / ddcb_next=%d):\n",
+ cd->card_idx, queue->ddcb_act, queue->ddcb_next);
+
+ pddcb = queue->ddcb_vaddr;
+ for (i = 0; i < queue->ddcb_max; i++) {
+ dev_err(&pci_dev->dev,
+ " %c %-3d: RETC=%03x SEQ=%04x "
+ "HSI=%02X SHI=%02x PRIV=%06llx CMD=%03x\n",
+ i == queue->ddcb_act ? '>' : ' ',
+ i,
+ be16_to_cpu(pddcb->retc_16),
+ be16_to_cpu(pddcb->seqnum_16),
+ pddcb->hsi,
+ pddcb->shi,
+ be64_to_cpu(pddcb->priv_64),
+ pddcb->cmd);
+ pddcb++;
+ }
+ spin_unlock_irqrestore(&cd->print_lock, flags);
+}
+
+struct genwqe_ddcb_cmd *ddcb_requ_alloc(void)
+{
+ struct ddcb_requ *req;
+
+ req = kzalloc(sizeof(*req), GFP_ATOMIC);
+ if (!req)
+ return NULL;
+
+ return &req->cmd;
+}
+
+void ddcb_requ_free(struct genwqe_ddcb_cmd *cmd)
+{
+ struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
+ kfree(req);
+}
+
+static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req)
+{
+ return req->req_state;
+}
+
+static inline void ddcb_requ_set_state(struct ddcb_requ *req,
+ enum genwqe_requ_state new_state)
+{
+ req->req_state = new_state;
+}
+
+static inline int ddcb_requ_collect_debug_data(struct ddcb_requ *req)
+{
+ return req->cmd.ddata_addr != 0x0;
+}
+
+/**
+ * ddcb_requ_finished() - Returns the hardware state of the associated DDCB
+ * @cd: pointer to genwqe device descriptor
+ * @req: DDCB work request
+ *
+ * Status of ddcb_requ mirrors this hardware state, but is copied into
+ * the ddcb_requ by the interrupt/polling function. The lowlevel code
+ * should check the hardware state directly, the higher level code
+ * should check the copy.
+ *
+ * This function will also return true if the state of the queue is
+ * not GENWQE_CARD_USED. This enables us to purge all DDCBs in the
+ * shutdown case.
+ */
+static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req)
+{
+ return (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) ||
+ (cd->card_state != GENWQE_CARD_USED);
+}
+
+/**
+ * enqueue_ddcb() - Enqueue a DDCB
+ * @cd: pointer to genwqe device descriptor
+ * @queue: queue this operation should be done on
+ * @ddcb_no: pointer to ddcb number being tapped
+ *
+ * Start execution of DDCB by tapping or append to queue via NEXT
+ * bit. This is done by an atomic 'compare and swap' instruction and
+ * checking SHI and HSI of the previous DDCB.
+ *
+ * This function must only be called with ddcb_lock held.
+ *
+ * Return: 1 if new DDCB is appended to previous
+ * 2 if DDCB queue is tapped via register/simulation
+ */
+#define RET_DDCB_APPENDED 1
+#define RET_DDCB_TAPPED 2
+
+static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
+ struct ddcb *pddcb, int ddcb_no)
+{
+ unsigned int try;
+ int prev_no;
+ struct ddcb *prev_ddcb;
+ __be32 old, new, icrc_hsi_shi;
+ u64 num;
+
+ /*
+ * For performance checks a Dispatch Timestamp can be put into the
+ * DDCB. It is supposed to use the SLU's free running counter, but
+ * this requires PCIe cycles.
+ */
+ ddcb_mark_unused(pddcb);
+
+ /* check previous DDCB if already fetched */
+ prev_no = (ddcb_no == 0) ? queue->ddcb_max - 1 : ddcb_no - 1;
+ prev_ddcb = &queue->ddcb_vaddr[prev_no];
+
+ /*
+ * It might have happened that the HSI.FETCHED bit is
+ * set. Retry in this case. Therefore I expect a maximum
+ * of 2 tries.
+ */
+ ddcb_mark_appended(pddcb);
+ for (try = 0; try < 2; try++) {
+ old = prev_ddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */
+
+ /* try to append via NEXT bit if prev DDCB is not completed */
+ if ((old & DDCB_COMPLETED_BE32) != 0x00000000)
+ break;
+
+ new = (old | DDCB_NEXT_BE32);
+ icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new);
+
+ if (icrc_hsi_shi == old)
+ return RET_DDCB_APPENDED; /* appended to queue */
+ }
+
+ /* Queue must be re-started by updating QUEUE_OFFSET */
+ ddcb_mark_tapped(pddcb);
+ num = (u64)ddcb_no << 8;
+ __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */
+
+ return RET_DDCB_TAPPED;
+}
+
+/**
+ * copy_ddcb_results() - Copy output state from real DDCB to request
+ *
+ * Copy DDCB ASV to request struct. There is no endian
+ * conversion made, since the data structure in the ASV is
+ * still unknown here.
+ *
+ * This is needed by:
+ * - genwqe_purge_ddcb()
+ * - genwqe_check_ddcb_queue()
+ */
+static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no)
+{
+ struct ddcb_queue *queue = req->queue;
+ struct ddcb *pddcb = &queue->ddcb_vaddr[req->num];
+
+ memcpy(&req->cmd.asv[0], &pddcb->asv[0], DDCB_ASV_LENGTH);
+
+ /* copy status flags of the variant part */
+ req->cmd.vcrc = be16_to_cpu(pddcb->vcrc_16);
+ req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64);
+ req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64);
+
+ req->cmd.attn = be16_to_cpu(pddcb->attn_16);
+ req->cmd.progress = be32_to_cpu(pddcb->progress_32);
+ req->cmd.retc = be16_to_cpu(pddcb->retc_16);
+
+ if (ddcb_requ_collect_debug_data(req)) {
+ int prev_no = (ddcb_no == 0) ?
+ queue->ddcb_max - 1 : ddcb_no - 1;
+ struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no];
+
+ memcpy(&req->debug_data.ddcb_finished, pddcb,
+ sizeof(req->debug_data.ddcb_finished));
+ memcpy(&req->debug_data.ddcb_prev, prev_pddcb,
+ sizeof(req->debug_data.ddcb_prev));
+ }
+}
+
+/**
+ * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work requests.
+ * @cd: pointer to genwqe device descriptor
+ * @queue: pointer to DDCB queue to be checked
+ *
+ * Return: Number of DDCBs which were finished
+ */
+static int genwqe_check_ddcb_queue(struct genwqe_dev *cd,
+ struct ddcb_queue *queue)
+{
+ unsigned long flags;
+ int ddcbs_finished = 0;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ spin_lock_irqsave(&queue->ddcb_lock, flags);
+
+ /* FIXME avoid soft locking CPU */
+ while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) {
+
+ struct ddcb *pddcb;
+ struct ddcb_requ *req;
+ u16 vcrc, vcrc_16, retc_16;
+
+ pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
+
+ if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) ==
+ 0x00000000)
+ goto go_home; /* not completed, continue waiting */
+
+ /* Note: DDCB could be purged */
+
+ req = queue->ddcb_req[queue->ddcb_act];
+ if (req == NULL) {
+ /* this occurs if DDCB is purged, not an error */
+ /* Move active DDCB further; Nothing to do anymore. */
+ goto pick_next_one;
+ }
+
+ /*
+ * HSI=0x44 (fetched and completed), but RETC is
+ * 0x101, or even worse 0x000.
+ *
+ * In case of seeing the queue in inconsistent state
+ * we read the errcnts and the queue status to provide
+ * a trigger for our PCIe analyzer stop capturing.
+ */
+ retc_16 = be16_to_cpu(pddcb->retc_16);
+ if ((pddcb->hsi == 0x44) && (retc_16 <= 0x101)) {
+ u64 errcnts, status;
+ u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr;
+
+ errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS);
+ status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
+
+ dev_err(&pci_dev->dev,
+ "[%s] SEQN=%04x HSI=%02x RETC=%03x "
+ " Q_ERRCNTS=%016llx Q_STATUS=%016llx\n"
+ " DDCB_DMA_ADDR=%016llx\n",
+ __func__, be16_to_cpu(pddcb->seqnum_16),
+ pddcb->hsi, retc_16, errcnts, status,
+ queue->ddcb_daddr + ddcb_offs);
+ }
+
+ copy_ddcb_results(req, queue->ddcb_act);
+ queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */
+
+ dev_dbg(&pci_dev->dev, "FINISHED DDCB#%d\n", req->num);
+ genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
+
+ ddcb_mark_finished(pddcb);
+
+ /* calculate CRC_16 to see if VCRC is correct */
+ vcrc = genwqe_crc16(pddcb->asv,
+ VCRC_LENGTH(req->cmd.asv_length),
+ 0xffff);
+ vcrc_16 = be16_to_cpu(pddcb->vcrc_16);
+ if (vcrc != vcrc_16) {
+ printk_ratelimited(KERN_ERR
+ "%s %s: err: wrong VCRC pre=%02x vcrc_len=%d "
+ "bytes vcrc_data=%04x is not vcrc_card=%04x\n",
+ GENWQE_DEVNAME, dev_name(&pci_dev->dev),
+ pddcb->pre, VCRC_LENGTH(req->cmd.asv_length),
+ vcrc, vcrc_16);
+ }
+
+ ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
+ queue->ddcbs_completed++;
+ queue->ddcbs_in_flight--;
+
+ /* wake up process waiting for this DDCB */
+ wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
+
+pick_next_one:
+ queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max;
+ ddcbs_finished++;
+ }
+
+ go_home:
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+ return ddcbs_finished;
+}
+
+/**
+ * __genwqe_wait_ddcb(): Waits until DDCB is completed
+ * @cd: pointer to genwqe device descriptor
+ * @req: pointer to requested DDCB parameters
+ *
+ * The Service Layer will update the RETC in DDCB when processing is
+ * pending or done.
+ *
+ * Return: > 0 remaining jiffies, DDCB completed
+ * -ETIMEDOUT when timeout
+ * -ERESTARTSYS when ^C
+ * -EINVAL when unknown error condition
+ *
+ * When an error is returned the caller needs to ensure that
+ * purge_ddcb() is being called to get the &req removed from the
+ * queue.
+ */
+int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
+{
+ int rc;
+ unsigned int ddcb_no;
+ struct ddcb_queue *queue;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (req == NULL)
+ return -EINVAL;
+
+ queue = req->queue;
+ if (queue == NULL)
+ return -EINVAL;
+
+ ddcb_no = req->num;
+ if (ddcb_no >= queue->ddcb_max)
+ return -EINVAL;
+
+ rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no],
+ ddcb_requ_finished(cd, req),
+ genwqe_ddcb_software_timeout * HZ);
+
+ /*
+ * We need to distinguish 3 cases here:
+ * 1. rc == 0 timeout occurred
+ * 2. rc == -ERESTARTSYS signal received
+ * 3. rc > 0 remaining jiffies condition is true
+ */
+ if (rc == 0) {
+ struct ddcb_queue *queue = req->queue;
+ struct ddcb *pddcb;
+
+ /*
+ * Timeout may be caused by long task switching time.
+ * When timeout happens, check if the request has
+ * meanwhile completed.
+ */
+ genwqe_check_ddcb_queue(cd, req->queue);
+ if (ddcb_requ_finished(cd, req))
+ return rc;
+
+ dev_err(&pci_dev->dev,
+ "[%s] err: DDCB#%d timeout rc=%d state=%d req @ %p\n",
+ __func__, req->num, rc, ddcb_requ_get_state(req),
+ req);
+ dev_err(&pci_dev->dev,
+ "[%s] IO_QUEUE_STATUS=0x%016llx\n", __func__,
+ __genwqe_readq(cd, queue->IO_QUEUE_STATUS));
+
+ pddcb = &queue->ddcb_vaddr[req->num];
+ genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
+
+ print_ddcb_info(cd, req->queue);
+ return -ETIMEDOUT;
+
+ } else if (rc == -ERESTARTSYS) {
+ return rc;
+ /*
+ * EINTR: Stops the application
+ * ERESTARTSYS: Restartable system call; called again
+ */
+
+ } else if (rc < 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: DDCB#%d unknown result (rc=%d) %d!\n",
+ __func__, req->num, rc, ddcb_requ_get_state(req));
+ return -EINVAL;
+ }
+
+ /* Severe error occurred. Driver is forced to stop operation */
+ if (cd->card_state != GENWQE_CARD_USED) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: DDCB#%d forced to stop (rc=%d)\n",
+ __func__, req->num, rc);
+ return -EIO;
+ }
+ return rc;
+}
+
+/**
+ * get_next_ddcb() - Get next available DDCB
+ * @cd: pointer to genwqe device descriptor
+ *
+ * DDCB's content is completely cleared but presets for PRE and
+ * SEQNUM. This function must only be called when ddcb_lock is held.
+ *
+ * Return: NULL if no empty DDCB available otherwise ptr to next DDCB.
+ */
+static struct ddcb *get_next_ddcb(struct genwqe_dev *cd,
+ struct ddcb_queue *queue,
+ int *num)
+{
+ u64 *pu64;
+ struct ddcb *pddcb;
+
+ if (queue_free_ddcbs(queue) == 0) /* queue is full */
+ return NULL;
+
+ /* find new ddcb */
+ pddcb = &queue->ddcb_vaddr[queue->ddcb_next];
+
+ /* if it is not completed, we are not allowed to use it */
+ /* barrier(); */
+ if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == 0x00000000)
+ return NULL;
+
+ *num = queue->ddcb_next; /* internal DDCB number */
+ queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max;
+
+ /* clear important DDCB fields */
+ pu64 = (u64 *)pddcb;
+ pu64[0] = 0ULL; /* offs 0x00 (ICRC,HSI,SHI,...) */
+ pu64[1] = 0ULL; /* offs 0x01 (ACFUNC,CMD...) */
+
+ /* destroy previous results in ASV */
+ pu64[0x80/8] = 0ULL; /* offs 0x80 (ASV + 0) */
+ pu64[0x88/8] = 0ULL; /* offs 0x88 (ASV + 0x08) */
+ pu64[0x90/8] = 0ULL; /* offs 0x90 (ASV + 0x10) */
+ pu64[0x98/8] = 0ULL; /* offs 0x98 (ASV + 0x18) */
+ pu64[0xd0/8] = 0ULL; /* offs 0xd0 (RETC,ATTN...) */
+
+ pddcb->pre = DDCB_PRESET_PRE; /* 128 */
+ pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++);
+ return pddcb;
+}
+
+/**
+ * __genwqe_purge_ddcb() - Remove a DDCB from the workqueue
+ * @cd: genwqe device descriptor
+ * @req: DDCB request
+ *
+ * This will fail when the request was already FETCHED. In this case
+ * we need to wait until it is finished. Else the DDCB can be
+ * reused. This function also ensures that the request data structure
+ * is removed from ddcb_req[].
+ *
+ * Do not forget to call this function when genwqe_wait_ddcb() fails,
+ * such that the request gets really removed from ddcb_req[].
+ *
+ * Return: 0 success
+ */
+int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
+{
+ struct ddcb *pddcb = NULL;
+ unsigned int t;
+ unsigned long flags;
+ struct ddcb_queue *queue = req->queue;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ u64 queue_status;
+ __be32 icrc_hsi_shi = 0x0000;
+ __be32 old, new;
+
+ if (genwqe_ddcb_software_timeout <= 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: software timeout is not set!\n", __func__);
+ return -EFAULT;
+ }
+
+ pddcb = &queue->ddcb_vaddr[req->num];
+
+ for (t = 0; t < genwqe_ddcb_software_timeout * 10; t++) {
+
+ spin_lock_irqsave(&queue->ddcb_lock, flags);
+
+ /* Check if req was meanwhile finished */
+ if (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED)
+ goto go_home;
+
+ /* try to set PURGE bit if FETCHED/COMPLETED are not set */
+ old = pddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */
+ if ((old & DDCB_FETCHED_BE32) == 0x00000000) {
+
+ new = (old | DDCB_PURGE_BE32);
+ icrc_hsi_shi = cmpxchg(&pddcb->icrc_hsi_shi_32,
+ old, new);
+ if (icrc_hsi_shi == old)
+ goto finish_ddcb;
+ }
+
+ /* normal finish with HSI bit */
+ barrier();
+ icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
+ if (icrc_hsi_shi & DDCB_COMPLETED_BE32)
+ goto finish_ddcb;
+
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+
+ /*
+ * Here the check_ddcb() function will most likely
+ * discover this DDCB to be finished some point in
+ * time. It will mark the req finished and free it up
+ * in the list.
+ */
+
+ copy_ddcb_results(req, req->num); /* for the failing case */
+ msleep(100); /* sleep for 1/10 second and try again */
+ continue;
+
+finish_ddcb:
+ copy_ddcb_results(req, req->num);
+ ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
+ queue->ddcbs_in_flight--;
+ queue->ddcb_req[req->num] = NULL; /* delete from array */
+ ddcb_mark_cleared(pddcb);
+
+ /* Move active DDCB further; Nothing to do here anymore. */
+
+ /*
+ * We need to ensure that there is at least one free
+ * DDCB in the queue. To do that, we must update
+ * ddcb_act only if the COMPLETED bit is set for the
+ * DDCB we are working on. Otherwise we treat that DDCB,
+ * even if we PURGED it, as occupied (the hardware still
+ * has to set the COMPLETED bit).
+ */
+ icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
+ if ((icrc_hsi_shi & DDCB_COMPLETED_BE32) &&
+ (queue->ddcb_act == req->num)) {
+ queue->ddcb_act = ((queue->ddcb_act + 1) %
+ queue->ddcb_max);
+ }
+go_home:
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+ return 0;
+ }
+
+ /*
+ * If the card is dead and the queue is forced to stop, we
+ * might see this in the queue status register.
+ */
+ queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
+
+ dev_dbg(&pci_dev->dev, "UN/FINISHED DDCB#%d\n", req->num);
+ genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
+
+ dev_err(&pci_dev->dev,
+ "[%s] err: DDCB#%d not purged and not completed "
+ "after %d seconds QSTAT=%016llx!!\n",
+ __func__, req->num, genwqe_ddcb_software_timeout,
+ queue_status);
+
+ print_ddcb_info(cd, req->queue);
+
+ return -EFAULT;
+}
+
+int genwqe_init_debug_data(struct genwqe_dev *cd, struct genwqe_debug_data *d)
+{
+ int len;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (d == NULL) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: invalid memory for debug data!\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ len = sizeof(d->driver_version);
+ snprintf(d->driver_version, len, "%s", DRV_VERS_STRING);
+ d->slu_unitcfg = cd->slu_unitcfg;
+ d->app_unitcfg = cd->app_unitcfg;
+ return 0;
+}
+
+/**
+ * __genwqe_enqueue_ddcb() - Enqueue a DDCB
+ * @cd: pointer to genwqe device descriptor
+ * @req: pointer to DDCB execution request
+ *
+ * Return: 0 if enqueuing succeeded
+ * -EIO if card is unusable/PCIe problems
+ * -EBUSY if enqueuing failed
+ */
+int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
+{
+ struct ddcb *pddcb;
+ unsigned long flags;
+ struct ddcb_queue *queue;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ u16 icrc;
+
+ if (cd->card_state != GENWQE_CARD_USED) {
+ printk_ratelimited(KERN_ERR
+ "%s %s: [%s] Card is unusable/PCIe problem Req#%d\n",
+ GENWQE_DEVNAME, dev_name(&pci_dev->dev),
+ __func__, req->num);
+ return -EIO;
+ }
+
+ queue = req->queue = &cd->queue;
+
+ /* FIXME circumvention to improve performance when no irq is
+ * there.
+ */
+ if (genwqe_polling_enabled)
+ genwqe_check_ddcb_queue(cd, queue);
+
+ /*
+ * It must be ensured to process all DDCBs in successive
+ * order. Use a lock here in order to prevent nested DDCB
+ * enqueuing.
+ */
+ spin_lock_irqsave(&queue->ddcb_lock, flags);
+
+ pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */
+ if (pddcb == NULL) {
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+ queue->busy++;
+ return -EBUSY;
+ }
+
+ if (queue->ddcb_req[req->num] != NULL) {
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+
+ dev_err(&pci_dev->dev,
+ "[%s] picked DDCB %d with req=%p still in use!!\n",
+ __func__, req->num, req);
+ return -EFAULT;
+ }
+ ddcb_requ_set_state(req, GENWQE_REQU_ENQUEUED);
+ queue->ddcb_req[req->num] = req;
+
+ pddcb->cmdopts_16 = cpu_to_be16(req->cmd.cmdopts);
+ pddcb->cmd = req->cmd.cmd;
+ pddcb->acfunc = req->cmd.acfunc; /* functional unit */
+
+ /*
+ * We know that we can get retc 0x104 with CRC error, do not
+ * stop the queue in those cases for this command. XDIR = 1
+ * does not work for old SLU versions.
+ *
+ * Last bitstream with the old XDIR behavior had SLU_ID
+ * 0x34199.
+ */
+ if ((cd->slu_unitcfg & 0xFFFF0ull) > 0x34199ull)
+ pddcb->xdir = 0x1;
+ else
+ pddcb->xdir = 0x0;
+
+ pddcb->psp = (((req->cmd.asiv_length / 8) << 4) |
+ ((req->cmd.asv_length / 8)));
+ pddcb->disp_ts_64 = cpu_to_be64(req->cmd.disp_ts);
+
+ /*
+ * If copying the whole DDCB_ASIV_LENGTH is impacting
+ * performance we need to change it to
+ * req->cmd.asiv_length. But simulation benefits from some
+ * non-architectured bits behind the architectured content.
+ *
+ * How much data is copied depends on the availability of the
+ * ATS field, which was introduced late. If the ATS field is
+ * supported ASIV is 8 bytes shorter than it used to be. Since
+ * the ATS field is copied too, the code should do exactly
+ * what it did before, but I wanted to make copying of the ATS
+ * field very explicit.
+ */
+ if (genwqe_get_slu_id(cd) <= 0x2) {
+ memcpy(&pddcb->__asiv[0], /* destination */
+ &req->cmd.__asiv[0], /* source */
+ DDCB_ASIV_LENGTH); /* req->cmd.asiv_length */
+ } else {
+ pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats);
+ memcpy(&pddcb->n.asiv[0], /* destination */
+ &req->cmd.asiv[0], /* source */
+ DDCB_ASIV_LENGTH_ATS); /* req->cmd.asiv_length */
+ }
+
+ pddcb->icrc_hsi_shi_32 = cpu_to_be32(0x00000000); /* for crc */
+
+ /*
+ * Calculate CRC_16 for corresponding range PSP(7:4). Include
+ * empty 4 bytes prior to the data.
+ */
+ icrc = genwqe_crc16((const u8 *)pddcb,
+ ICRC_LENGTH(req->cmd.asiv_length), 0xffff);
+ pddcb->icrc_hsi_shi_32 = cpu_to_be32((u32)icrc << 16);
+
+ /* enable DDCB completion irq */
+ if (!genwqe_polling_enabled)
+ pddcb->icrc_hsi_shi_32 |= DDCB_INTR_BE32;
+
+ dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num);
+ genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
+
+ if (ddcb_requ_collect_debug_data(req)) {
+ /* use the kernel copy of debug data. copying back to
+ user buffer happens later */
+
+ genwqe_init_debug_data(cd, &req->debug_data);
+ memcpy(&req->debug_data.ddcb_before, pddcb,
+ sizeof(req->debug_data.ddcb_before));
+ }
+
+ enqueue_ddcb(cd, queue, pddcb, req->num);
+ queue->ddcbs_in_flight++;
+
+ if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight)
+ queue->ddcbs_max_in_flight = queue->ddcbs_in_flight;
+
+ ddcb_requ_set_state(req, GENWQE_REQU_TAPPED);
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+ wake_up_interruptible(&cd->queue_waitq);
+
+ return 0;
+}
+
+/**
+ * __genwqe_execute_raw_ddcb() - Setup and execute DDCB
+ * @cd: pointer to genwqe device descriptor
+ * @cmd: user provided DDCB command
+ */
+int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
+ struct genwqe_ddcb_cmd *cmd)
+{
+ int rc = 0;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
+
+ if (cmd->asiv_length > DDCB_ASIV_LENGTH) {
+ dev_err(&pci_dev->dev, "[%s] err: wrong asiv_length of %d\n",
+ __func__, cmd->asiv_length);
+ return -EINVAL;
+ }
+ if (cmd->asv_length > DDCB_ASV_LENGTH) {
+ dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n",
+ __func__, cmd->asv_length);
+ return -EINVAL;
+ }
+ rc = __genwqe_enqueue_ddcb(cd, req);
+ if (rc != 0)
+ return rc;
+
+ rc = __genwqe_wait_ddcb(cd, req);
+ if (rc < 0) /* error or signal interrupt */
+ goto err_exit;
+
+ if (ddcb_requ_collect_debug_data(req)) {
+ if (copy_to_user((struct genwqe_debug_data __user *)
+ (unsigned long)cmd->ddata_addr,
+ &req->debug_data,
+ sizeof(struct genwqe_debug_data)))
+ return -EFAULT;
+ }
+
+ /*
+ * Higher values than 0x102 indicate completion with faults,
+ * lower values than 0x102 indicate processing faults. Note
+ * that the DDCB might have been purged. E.g. Ctrl+C.
+ */
+ if (cmd->retc != DDCB_RETC_COMPLETE) {
+ /* This might happen e.g. flash read, and needs to be
+ handled by the upper layer code. */
+ rc = -EBADMSG; /* not processed/error retc */
+ }
+
+ return rc;
+
+ err_exit:
+ __genwqe_purge_ddcb(cd, req);
+
+ if (ddcb_requ_collect_debug_data(req)) {
+ if (copy_to_user((struct genwqe_debug_data __user *)
+ (unsigned long)cmd->ddata_addr,
+ &req->debug_data,
+ sizeof(struct genwqe_debug_data)))
+ return -EFAULT;
+ }
+ return rc;
+}
+
+/**
+ * genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished
+ *
+ * We use this as condition for our wait-queue code.
+ */
+static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
+{
+ unsigned long flags;
+ struct ddcb *pddcb;
+ struct ddcb_queue *queue = &cd->queue;
+
+ spin_lock_irqsave(&queue->ddcb_lock, flags);
+
+ if (queue_empty(queue)) { /* empty queue */
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+ return 0;
+ }
+
+ pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
+ if (pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) { /* ddcb ready */
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+ return 1;
+ }
+
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+ return 0;
+}
+
+/**
+ * genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight
+ *
+ * Keep track of the number of DDCBs which are currently in the
+ * queue. This is needed for statistics as well as a condition to
+ * decide whether to wait or rather poll in case no interrupts are
+ * available.
+ */
+int genwqe_ddcbs_in_flight(struct genwqe_dev *cd)
+{
+ unsigned long flags;
+ int ddcbs_in_flight = 0;
+ struct ddcb_queue *queue = &cd->queue;
+
+ spin_lock_irqsave(&queue->ddcb_lock, flags);
+ ddcbs_in_flight += queue->ddcbs_in_flight;
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+
+ return ddcbs_in_flight;
+}
+
+static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
+{
+ int rc, i;
+ struct ddcb *pddcb;
+ u64 val64;
+ unsigned int queue_size;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (genwqe_ddcb_max < 2)
+ return -EINVAL;
+
+ queue_size = roundup(genwqe_ddcb_max * sizeof(struct ddcb), PAGE_SIZE);
+
+ queue->ddcbs_in_flight = 0; /* statistics */
+ queue->ddcbs_max_in_flight = 0;
+ queue->ddcbs_completed = 0;
+ queue->busy = 0;
+
+ queue->ddcb_seq = 0x100; /* start sequence number */
+ queue->ddcb_max = genwqe_ddcb_max; /* module parameter */
+ queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,
+ &queue->ddcb_daddr);
+ if (queue->ddcb_vaddr == NULL) {
+ dev_err(&pci_dev->dev,
+ "[%s] **err: could not allocate DDCB **\n", __func__);
+ return -ENOMEM;
+ }
+ memset(queue->ddcb_vaddr, 0, queue_size);
+
+ queue->ddcb_req = kzalloc(sizeof(struct ddcb_requ *) *
+ queue->ddcb_max, GFP_KERNEL);
+ if (!queue->ddcb_req) {
+ rc = -ENOMEM;
+ goto free_ddcbs;
+ }
+
+ queue->ddcb_waitqs = kzalloc(sizeof(wait_queue_head_t) *
+ queue->ddcb_max, GFP_KERNEL);
+ if (!queue->ddcb_waitqs) {
+ rc = -ENOMEM;
+ goto free_requs;
+ }
+
+ for (i = 0; i < queue->ddcb_max; i++) {
+ pddcb = &queue->ddcb_vaddr[i]; /* DDCBs */
+ pddcb->icrc_hsi_shi_32 = DDCB_COMPLETED_BE32;
+ pddcb->retc_16 = cpu_to_be16(0xfff);
+
+ queue->ddcb_req[i] = NULL; /* requests */
+ init_waitqueue_head(&queue->ddcb_waitqs[i]); /* waitqueues */
+ }
+
+ queue->ddcb_act = 0;
+ queue->ddcb_next = 0; /* queue is empty */
+
+ spin_lock_init(&queue->ddcb_lock);
+ init_waitqueue_head(&queue->ddcb_waitq);
+
+ val64 = ((u64)(queue->ddcb_max - 1) << 8); /* lastptr */
+ __genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07); /* iCRC/vCRC */
+ __genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr);
+ __genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq);
+ __genwqe_writeq(cd, queue->IO_QUEUE_WRAP, val64);
+ return 0;
+
+ free_requs:
+ kfree(queue->ddcb_req);
+ queue->ddcb_req = NULL;
+ free_ddcbs:
+ __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
+ queue->ddcb_daddr);
+ queue->ddcb_vaddr = NULL;
+ queue->ddcb_daddr = 0ull;
+ return -ENODEV;
+
+}
+
+static int ddcb_queue_initialized(struct ddcb_queue *queue)
+{
+ return queue->ddcb_vaddr != NULL;
+}
+
+static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
+{
+ unsigned int queue_size;
+
+ queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE);
+
+ kfree(queue->ddcb_req);
+ queue->ddcb_req = NULL;
+
+ if (queue->ddcb_vaddr) {
+ __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
+ queue->ddcb_daddr);
+ queue->ddcb_vaddr = NULL;
+ queue->ddcb_daddr = 0ull;
+ }
+}
+
+static irqreturn_t genwqe_pf_isr(int irq, void *dev_id)
+{
+ u64 gfir;
+ struct genwqe_dev *cd = (struct genwqe_dev *)dev_id;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ /*
+ * In case of fatal FIR error the queue is stopped, such that
+ * we can safely check it without risking anything.
+ */
+ cd->irqs_processed++;
+ wake_up_interruptible(&cd->queue_waitq);
+
+ /*
+ * Checking for errors before kicking the queue might be
+ * safer, but slower for the good-case ... See above.
+ */
+ gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ if ((gfir & GFIR_ERR_TRIGGER) != 0x0) {
+
+ wake_up_interruptible(&cd->health_waitq);
+
+ /*
+ * By default GFIRs cause recovery actions. This
+ * count is just for debug when recovery is masked.
+ */
+ printk_ratelimited(KERN_ERR
+ "%s %s: [%s] GFIR=%016llx\n",
+ GENWQE_DEVNAME, dev_name(&pci_dev->dev),
+ __func__, gfir);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t genwqe_vf_isr(int irq, void *dev_id)
+{
+ struct genwqe_dev *cd = (struct genwqe_dev *)dev_id;
+
+ cd->irqs_processed++;
+ wake_up_interruptible(&cd->queue_waitq);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * genwqe_card_thread() - Work thread for the DDCB queue
+ *
+ * The idea is to check if there are DDCBs in processing. If there are
+ * some finished DDCBs, we process them and wake up the
+ * requestors. Otherwise we give other processes time using
+ * cond_resched().
+ */
+static int genwqe_card_thread(void *data)
+{
+ int should_stop = 0, rc = 0;
+ struct genwqe_dev *cd = (struct genwqe_dev *)data;
+
+ while (!kthread_should_stop()) {
+
+ genwqe_check_ddcb_queue(cd, &cd->queue);
+
+ if (genwqe_polling_enabled) {
+ rc = wait_event_interruptible_timeout(
+ cd->queue_waitq,
+ genwqe_ddcbs_in_flight(cd) ||
+ (should_stop = kthread_should_stop()), 1);
+ } else {
+ rc = wait_event_interruptible_timeout(
+ cd->queue_waitq,
+ genwqe_next_ddcb_ready(cd) ||
+ (should_stop = kthread_should_stop()), HZ);
+ }
+ if (should_stop)
+ break;
+
+ /*
+ * Avoid soft lockups on heavy loads; we do not want
+ * to disable our interrupts.
+ */
+ cond_resched();
+ }
+ return 0;
+}
+
+/**
+ * genwqe_setup_service_layer() - Setup DDCB queue
+ * @cd: pointer to genwqe device descriptor
+ *
+ * Allocate DDCBs. Configure Service Layer Controller (SLC).
+ *
+ * Return: 0 on success
+ */
+int genwqe_setup_service_layer(struct genwqe_dev *cd)
+{
+ int rc;
+ struct ddcb_queue *queue;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (genwqe_is_privileged(cd)) {
+ rc = genwqe_card_reset(cd);
+ if (rc < 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: reset failed.\n", __func__);
+ return rc;
+ }
+ genwqe_read_softreset(cd);
+ }
+
+ queue = &cd->queue;
+ queue->IO_QUEUE_CONFIG = IO_SLC_QUEUE_CONFIG;
+ queue->IO_QUEUE_STATUS = IO_SLC_QUEUE_STATUS;
+ queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT;
+ queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN;
+ queue->IO_QUEUE_OFFSET = IO_SLC_QUEUE_OFFSET;
+ queue->IO_QUEUE_WRAP = IO_SLC_QUEUE_WRAP;
+ queue->IO_QUEUE_WTIME = IO_SLC_QUEUE_WTIME;
+ queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS;
+ queue->IO_QUEUE_LRW = IO_SLC_QUEUE_LRW;
+
+ rc = setup_ddcb_queue(cd, queue);
+ if (rc != 0) {
+ rc = -ENODEV;
+ goto err_out;
+ }
+
+ init_waitqueue_head(&cd->queue_waitq);
+ cd->card_thread = kthread_run(genwqe_card_thread, cd,
+ GENWQE_DEVNAME "%d_thread",
+ cd->card_idx);
+ if (IS_ERR(cd->card_thread)) {
+ rc = PTR_ERR(cd->card_thread);
+ cd->card_thread = NULL;
+ goto stop_free_queue;
+ }
+
+ rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS);
+ if (rc > 0)
+ rc = genwqe_set_interrupt_capability(cd, rc);
+ if (rc != 0) {
+ rc = -ENODEV;
+ goto stop_kthread;
+ }
+
+ /*
+ * We must have all wait-queues initialized when we enable the
+ * interrupts. Otherwise we might crash if we get an early
+ * irq.
+ */
+ init_waitqueue_head(&cd->health_waitq);
+
+ if (genwqe_is_privileged(cd)) {
+ rc = request_irq(pci_dev->irq, genwqe_pf_isr, IRQF_SHARED,
+ GENWQE_DEVNAME, cd);
+ } else {
+ rc = request_irq(pci_dev->irq, genwqe_vf_isr, IRQF_SHARED,
+ GENWQE_DEVNAME, cd);
+ }
+ if (rc < 0) {
+ dev_err(&pci_dev->dev, "irq %d not free.\n", pci_dev->irq);
+ goto stop_irq_cap;
+ }
+
+ cd->card_state = GENWQE_CARD_USED;
+ return 0;
+
+ stop_irq_cap:
+ genwqe_reset_interrupt_capability(cd);
+ stop_kthread:
+ kthread_stop(cd->card_thread);
+ cd->card_thread = NULL;
+ stop_free_queue:
+ free_ddcb_queue(cd, queue);
+ err_out:
+ return rc;
+}
+
+/**
+ * queue_wake_up_all() - Handles fatal error case
+ *
+ * The PCI device got unusable and we have to stop all pending
+ * requests as fast as we can. The code after this must purge the
+ * DDCBs in question and ensure that all mappings are freed.
+ */
+static int queue_wake_up_all(struct genwqe_dev *cd)
+{
+ unsigned int i;
+ unsigned long flags;
+ struct ddcb_queue *queue = &cd->queue;
+
+ spin_lock_irqsave(&queue->ddcb_lock, flags);
+
+ for (i = 0; i < queue->ddcb_max; i++)
+		wake_up_interruptible(&queue->ddcb_waitqs[i]);
+
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+
+ return 0;
+}
+
+/**
+ * genwqe_finish_queue() - Stop the DDCB queue and wait until it is empty
+ *
+ * Relies on the pre-condition that there are no users of the card
+ * device anymore e.g. with open file-descriptors.
+ *
+ * This function must be robust enough to be called twice.
+ */
+int genwqe_finish_queue(struct genwqe_dev *cd)
+{
+	int i, rc = 0, in_flight;
+ int waitmax = genwqe_ddcb_software_timeout;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ struct ddcb_queue *queue = &cd->queue;
+
+ if (!ddcb_queue_initialized(queue))
+ return 0;
+
+ /* Do not wipe out the error state. */
+ if (cd->card_state == GENWQE_CARD_USED)
+ cd->card_state = GENWQE_CARD_UNUSED;
+
+ /* Wake up all requests in the DDCB queue such that they
+ should be removed nicely. */
+ queue_wake_up_all(cd);
+
+ /* We must wait to get rid of the DDCBs in flight */
+ for (i = 0; i < waitmax; i++) {
+ in_flight = genwqe_ddcbs_in_flight(cd);
+
+ if (in_flight == 0)
+ break;
+
+ dev_dbg(&pci_dev->dev,
+ " DEBUG [%d/%d] waiting for queue to get empty: "
+ "%d requests!\n", i, waitmax, in_flight);
+
+ /*
+		 * Severe error situation: the card itself has
+		 * 16 DDCB queues, each queue has e.g. 32 entries,
+		 * each DDCB has a hardware timeout of currently 250
+		 * msec, but the PFs have a hardware timeout of 8 sec
+		 * ... so we wait generously long here.
+ */
+ msleep(1000);
+ }
+ if (i == waitmax) {
+ dev_err(&pci_dev->dev, " [%s] err: queue is not empty!!\n",
+ __func__);
+ rc = -EIO;
+ }
+ return rc;
+}
+
+/**
+ * genwqe_release_service_layer() - Shutdown DDCB queue
+ * @cd: genwqe device descriptor
+ *
+ * This function must be robust enough to be called twice.
+ */
+int genwqe_release_service_layer(struct genwqe_dev *cd)
+{
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (!ddcb_queue_initialized(&cd->queue))
+ return 1;
+
+ free_irq(pci_dev->irq, cd);
+ genwqe_reset_interrupt_capability(cd);
+
+ if (cd->card_thread != NULL) {
+ kthread_stop(cd->card_thread);
+ cd->card_thread = NULL;
+ }
+
+ free_ddcb_queue(cd, &cd->queue);
+ return 0;
+}
diff --git a/drivers/misc/genwqe/card_ddcb.h b/drivers/misc/genwqe/card_ddcb.h
new file mode 100644
index 0000000..c4f2672
--- /dev/null
+++ b/drivers/misc/genwqe/card_ddcb.h
@@ -0,0 +1,188 @@
+#ifndef __CARD_DDCB_H__
+#define __CARD_DDCB_H__
+
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+#include "genwqe_driver.h"
+#include "card_base.h"
+
+/**
+ * struct ddcb - Device Driver Control Block DDCB
+ * @hsi: Hardware software interlock
+ * @shi: Software hardware interlock. Hsi and shi are used to interlock
+ * software and hardware activities. We are using a compare and
+ * swap operation to ensure that there are no races when
+ * activating new DDCBs on the queue, or when we need to
+ * purge a DDCB from a running queue.
+ * @acfunc: Accelerator function; addresses a unit within the chip
+ * @cmd: Command to work on
+ * @cmdopts_16: Options for the command
+ * @asiv: Input data
+ * @asv: Output data
+ *
+ * The DDCB data format is big endian. Multiple consecutive DDCBs form
+ * a DDCB queue.
+ */
+#define ASIV_LENGTH 104 /* Old specification without ATS field */
+#define ASIV_LENGTH_ATS 96 /* New specification with ATS field */
+#define ASV_LENGTH 64
+
+struct ddcb {
+ union {
+ __be32 icrc_hsi_shi_32; /* iCRC, Hardware/SW interlock */
+ struct {
+ __be16 icrc_16;
+ u8 hsi;
+ u8 shi;
+ };
+ };
+ u8 pre; /* Preamble */
+ u8 xdir; /* Execution Directives */
+ __be16 seqnum_16; /* Sequence Number */
+
+ u8 acfunc; /* Accelerator Function.. */
+ u8 cmd; /* Command. */
+ __be16 cmdopts_16; /* Command Options */
+ u8 sur; /* Status Update Rate */
+ u8 psp; /* Protection Section Pointer */
+ __be16 rsvd_0e_16; /* Reserved invariant */
+
+ __be64 fwiv_64; /* Firmware Invariant. */
+
+ union {
+ struct {
+ __be64 ats_64; /* Address Translation Spec */
+ u8 asiv[ASIV_LENGTH_ATS]; /* New ASIV */
+ } n;
+ u8 __asiv[ASIV_LENGTH]; /* obsolete */
+ };
+ u8 asv[ASV_LENGTH]; /* Appl Spec Variant */
+
+ __be16 rsvd_c0_16; /* Reserved Variant */
+ __be16 vcrc_16; /* Variant CRC */
+ __be32 rsvd_32; /* Reserved unprotected */
+
+ __be64 deque_ts_64; /* Deque Time Stamp. */
+
+ __be16 retc_16; /* Return Code */
+ __be16 attn_16; /* Attention/Extended Error Codes */
+ __be32 progress_32; /* Progress indicator. */
+
+ __be64 cmplt_ts_64; /* Completion Time Stamp. */
+
+ /* The following layout matches the new service layer format */
+ __be32 ibdc_32; /* Inbound Data Count (* 256) */
+ __be32 obdc_32; /* Outbound Data Count (* 256) */
+
+ __be64 rsvd_SLH_64; /* Reserved for hardware */
+ union { /* private data for driver */
+ u8 priv[8];
+ __be64 priv_64;
+ };
+ __be64 disp_ts_64; /* Dispatch TimeStamp */
+} __attribute__((__packed__));
+
+/* CRC polynomials for DDCB */
+#define CRC16_POLYNOMIAL 0x1021
+
+/*
+ * SHI: Software to Hardware Interlock
+ * This 1 byte field is written by software to interlock the
+ * movement of one queue entry to another with the hardware in the
+ * chip.
+ */
+#define DDCB_SHI_INTR 0x04 /* Bit 2 */
+#define DDCB_SHI_PURGE 0x02 /* Bit 1 */
+#define DDCB_SHI_NEXT 0x01 /* Bit 0 */
+
+/*
+ * HSI: Hardware to Software interlock
+ * This 1 byte field is written by hardware to interlock the movement
+ * of one queue entry to another with the driver software.
+ */
+#define DDCB_HSI_COMPLETED 0x40 /* Bit 6 */
+#define DDCB_HSI_FETCHED 0x04 /* Bit 2 */
+
+/*
+ * Accessing HSI/SHI is done 32-bit wide
+ * Normally 16-bit access would work too, but on some platforms the
+ * 16-bit compare-and-swap operation is not supported. Therefore we
+ * switch to 32-bit accesses such that those platforms work too.
+ *
+ * iCRC HSI/SHI
+ */
+#define DDCB_INTR_BE32 cpu_to_be32(0x00000004)
+#define DDCB_PURGE_BE32 cpu_to_be32(0x00000002)
+#define DDCB_NEXT_BE32 cpu_to_be32(0x00000001)
+#define DDCB_COMPLETED_BE32 cpu_to_be32(0x00004000)
+#define DDCB_FETCHED_BE32 cpu_to_be32(0x00000400)
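+
+/*
+ * Hedged illustration (not part of this header): the interlock
+ * described above works by compare-and-swap on the combined 32-bit
+ * iCRC/HSI/SHI word. Tapping the next DDCB could look roughly like
+ * the sketch below; the helper name is made up for the example.
+ *
+ *   static bool genwqe_ddcb_tap_next(struct ddcb *pddcb)
+ *   {
+ *           u32 old, new, icrc_hsi_shi;
+ *
+ *           old = pddcb->icrc_hsi_shi_32;   // current BE32 state
+ *           if (old & DDCB_FETCHED_BE32)
+ *                   return false;           // hardware was faster
+ *
+ *           new = old | DDCB_NEXT_BE32;
+ *           icrc_hsi_shi = cmpxchg(&pddcb->icrc_hsi_shi_32, old, new);
+ *           return icrc_hsi_shi == old;     // true if we won the race
+ *   }
+ */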
+
+/* Definitions of DDCB presets */
+#define DDCB_PRESET_PRE 0x80
+#define ICRC_LENGTH(n) ((n) + 8 + 8 + 8) /* used ASIV + hdr fields */
+#define VCRC_LENGTH(n) ((n)) /* used ASV */
+
+/*
+ * Genwqe Scatter Gather list
+ * Each block holds up to 8 entries.
+ * The chaining element is element 0 because of prefetching needs.
+ */
+
+/*
+ * 0b0110 Chained descriptor. The descriptor is describing the next
+ * descriptor list.
+ */
+#define SG_CHAINED (0x6)
+
+/*
+ * 0b0010 First entry of a descriptor list. Start from a Buffer-Empty
+ * condition.
+ */
+#define SG_DATA (0x2)
+
+/*
+ * 0b0000 Early terminator. This is the last entry on the list
+ * regardless of the length indicated.
+ */
+#define SG_END_LIST (0x0)
+
+/**
+ * struct sg_entry - Scatter gather list entry
+ * @target_addr: Either a dma addr of memory to work on or a
+ * dma addr of a subsequent sglist block.
+ * @len: Length of the data block.
+ * @flags: See above.
+ *
+ * Depending on the command the GenWQE card can use a scatter gather
+ * list to describe the memory it works on. Eight sg_entry structs
+ * always form a block.
+ */
+struct sg_entry {
+ __be64 target_addr;
+ __be32 len;
+ __be32 flags;
+};
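+
+/*
+ * Hedged illustration (the names are made up for the example): a
+ * block describing one data buffer followed by an early terminator
+ * would be filled like this:
+ *
+ *   sg[0].target_addr = cpu_to_be64(dma_addr);
+ *   sg[0].len         = cpu_to_be32(len);
+ *   sg[0].flags       = cpu_to_be32(SG_DATA);
+ *
+ *   sg[1].target_addr = cpu_to_be64(0);
+ *   sg[1].len         = cpu_to_be32(0);
+ *   sg[1].flags       = cpu_to_be32(SG_END_LIST);
+ */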
+
+#endif /* __CARD_DDCB_H__ */
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
new file mode 100644
index 0000000..3bfdc07
--- /dev/null
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -0,0 +1,500 @@
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Debugfs interfaces for the GenWQE card. They help to debug
+ * potential problems and to dump internal chip state for failure
+ * determination.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include "card_base.h"
+#include "card_ddcb.h"
+
+#define GENWQE_DEBUGFS_RO(_name, _showfn) \
+ static int genwqe_debugfs_##_name##_open(struct inode *inode, \
+ struct file *file) \
+ { \
+ return single_open(file, _showfn, inode->i_private); \
+ } \
+ static const struct file_operations genwqe_##_name##_fops = { \
+ .open = genwqe_debugfs_##_name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
+
+static void dbg_uidn_show(struct seq_file *s, struct genwqe_reg *regs,
+ int entries)
+{
+ unsigned int i;
+ u32 v_hi, v_lo;
+
+ for (i = 0; i < entries; i++) {
+ v_hi = (regs[i].val >> 32) & 0xffffffff;
+ v_lo = (regs[i].val) & 0xffffffff;
+
+ seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x EXT_ERR_REC\n",
+ regs[i].addr, regs[i].idx, v_hi, v_lo);
+ }
+}
+
+static int curr_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
+{
+ struct genwqe_dev *cd = s->private;
+ int entries;
+ struct genwqe_reg *regs;
+
+ entries = genwqe_ffdc_buff_size(cd, uid);
+ if (entries < 0)
+ return -EINVAL;
+
+ if (entries == 0)
+ return 0;
+
+ regs = kcalloc(entries, sizeof(*regs), GFP_KERNEL);
+ if (regs == NULL)
+ return -ENOMEM;
+
+ genwqe_stop_traps(cd); /* halt the traps while dumping data */
+ genwqe_ffdc_buff_read(cd, uid, regs, entries);
+ genwqe_start_traps(cd);
+
+ dbg_uidn_show(s, regs, entries);
+ kfree(regs);
+ return 0;
+}
+
+static int genwqe_curr_dbg_uid0_show(struct seq_file *s, void *unused)
+{
+ return curr_dbg_uidn_show(s, unused, 0);
+}
+
+GENWQE_DEBUGFS_RO(curr_dbg_uid0, genwqe_curr_dbg_uid0_show);
+
+static int genwqe_curr_dbg_uid1_show(struct seq_file *s, void *unused)
+{
+ return curr_dbg_uidn_show(s, unused, 1);
+}
+
+GENWQE_DEBUGFS_RO(curr_dbg_uid1, genwqe_curr_dbg_uid1_show);
+
+static int genwqe_curr_dbg_uid2_show(struct seq_file *s, void *unused)
+{
+ return curr_dbg_uidn_show(s, unused, 2);
+}
+
+GENWQE_DEBUGFS_RO(curr_dbg_uid2, genwqe_curr_dbg_uid2_show);
+
+static int prev_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
+{
+ struct genwqe_dev *cd = s->private;
+
+ dbg_uidn_show(s, cd->ffdc[uid].regs, cd->ffdc[uid].entries);
+ return 0;
+}
+
+static int genwqe_prev_dbg_uid0_show(struct seq_file *s, void *unused)
+{
+ return prev_dbg_uidn_show(s, unused, 0);
+}
+
+GENWQE_DEBUGFS_RO(prev_dbg_uid0, genwqe_prev_dbg_uid0_show);
+
+static int genwqe_prev_dbg_uid1_show(struct seq_file *s, void *unused)
+{
+ return prev_dbg_uidn_show(s, unused, 1);
+}
+
+GENWQE_DEBUGFS_RO(prev_dbg_uid1, genwqe_prev_dbg_uid1_show);
+
+static int genwqe_prev_dbg_uid2_show(struct seq_file *s, void *unused)
+{
+ return prev_dbg_uidn_show(s, unused, 2);
+}
+
+GENWQE_DEBUGFS_RO(prev_dbg_uid2, genwqe_prev_dbg_uid2_show);
+
+static int genwqe_curr_regs_show(struct seq_file *s, void *unused)
+{
+ struct genwqe_dev *cd = s->private;
+ unsigned int i;
+ struct genwqe_reg *regs;
+
+ regs = kcalloc(GENWQE_FFDC_REGS, sizeof(*regs), GFP_KERNEL);
+ if (regs == NULL)
+ return -ENOMEM;
+
+ genwqe_stop_traps(cd);
+ genwqe_read_ffdc_regs(cd, regs, GENWQE_FFDC_REGS, 1);
+ genwqe_start_traps(cd);
+
+ for (i = 0; i < GENWQE_FFDC_REGS; i++) {
+ if (regs[i].addr == 0xffffffff)
+ break; /* invalid entries */
+
+ if (regs[i].val == 0x0ull)
+ continue; /* do not print 0x0 FIRs */
+
+ seq_printf(s, " 0x%08x 0x%016llx\n",
+ regs[i].addr, regs[i].val);
+ }
+ return 0;
+}
+
+GENWQE_DEBUGFS_RO(curr_regs, genwqe_curr_regs_show);
+
+static int genwqe_prev_regs_show(struct seq_file *s, void *unused)
+{
+ struct genwqe_dev *cd = s->private;
+ unsigned int i;
+ struct genwqe_reg *regs = cd->ffdc[GENWQE_DBG_REGS].regs;
+
+ if (regs == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < GENWQE_FFDC_REGS; i++) {
+ if (regs[i].addr == 0xffffffff)
+ break; /* invalid entries */
+
+ if (regs[i].val == 0x0ull)
+ continue; /* do not print 0x0 FIRs */
+
+ seq_printf(s, " 0x%08x 0x%016llx\n",
+ regs[i].addr, regs[i].val);
+ }
+ return 0;
+}
+
+GENWQE_DEBUGFS_RO(prev_regs, genwqe_prev_regs_show);
+
+static int genwqe_jtimer_show(struct seq_file *s, void *unused)
+{
+ struct genwqe_dev *cd = s->private;
+ unsigned int vf_num;
+ u64 jtimer;
+
+ jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, 0);
+ seq_printf(s, " PF 0x%016llx %d msec\n", jtimer,
+ genwqe_pf_jobtimeout_msec);
+
+ for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) {
+ jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
+ vf_num + 1);
+ seq_printf(s, " VF%-2d 0x%016llx %d msec\n", vf_num, jtimer,
+ cd->vf_jobtimeout_msec[vf_num]);
+ }
+ return 0;
+}
+
+GENWQE_DEBUGFS_RO(jtimer, genwqe_jtimer_show);
+
+static int genwqe_queue_working_time_show(struct seq_file *s, void *unused)
+{
+ struct genwqe_dev *cd = s->private;
+ unsigned int vf_num;
+ u64 t;
+
+ t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, 0);
+ seq_printf(s, " PF 0x%016llx\n", t);
+
+ for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) {
+ t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, vf_num + 1);
+ seq_printf(s, " VF%-2d 0x%016llx\n", vf_num, t);
+ }
+ return 0;
+}
+
+GENWQE_DEBUGFS_RO(queue_working_time, genwqe_queue_working_time_show);
+
+static int genwqe_ddcb_info_show(struct seq_file *s, void *unused)
+{
+ struct genwqe_dev *cd = s->private;
+ unsigned int i;
+ struct ddcb_queue *queue;
+ struct ddcb *pddcb;
+
+ queue = &cd->queue;
+ seq_puts(s, "DDCB QUEUE:\n");
+ seq_printf(s, " ddcb_max: %d\n"
+ " ddcb_daddr: %016llx - %016llx\n"
+ " ddcb_vaddr: %016llx\n"
+ " ddcbs_in_flight: %u\n"
+ " ddcbs_max_in_flight: %u\n"
+ " ddcbs_completed: %u\n"
+ " busy: %u\n"
+ " irqs_processed: %u\n",
+ queue->ddcb_max, (long long)queue->ddcb_daddr,
+ (long long)queue->ddcb_daddr +
+ (queue->ddcb_max * DDCB_LENGTH),
+ (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight,
+ queue->ddcbs_max_in_flight, queue->ddcbs_completed,
+ queue->busy, cd->irqs_processed);
+
+ /* Hardware State */
+ seq_printf(s, " 0x%08x 0x%016llx IO_QUEUE_CONFIG\n"
+ " 0x%08x 0x%016llx IO_QUEUE_STATUS\n"
+ " 0x%08x 0x%016llx IO_QUEUE_SEGMENT\n"
+ " 0x%08x 0x%016llx IO_QUEUE_INITSQN\n"
+ " 0x%08x 0x%016llx IO_QUEUE_WRAP\n"
+ " 0x%08x 0x%016llx IO_QUEUE_OFFSET\n"
+ " 0x%08x 0x%016llx IO_QUEUE_WTIME\n"
+ " 0x%08x 0x%016llx IO_QUEUE_ERRCNTS\n"
+ " 0x%08x 0x%016llx IO_QUEUE_LRW\n",
+ queue->IO_QUEUE_CONFIG,
+ __genwqe_readq(cd, queue->IO_QUEUE_CONFIG),
+ queue->IO_QUEUE_STATUS,
+ __genwqe_readq(cd, queue->IO_QUEUE_STATUS),
+ queue->IO_QUEUE_SEGMENT,
+ __genwqe_readq(cd, queue->IO_QUEUE_SEGMENT),
+ queue->IO_QUEUE_INITSQN,
+ __genwqe_readq(cd, queue->IO_QUEUE_INITSQN),
+ queue->IO_QUEUE_WRAP,
+ __genwqe_readq(cd, queue->IO_QUEUE_WRAP),
+ queue->IO_QUEUE_OFFSET,
+ __genwqe_readq(cd, queue->IO_QUEUE_OFFSET),
+ queue->IO_QUEUE_WTIME,
+ __genwqe_readq(cd, queue->IO_QUEUE_WTIME),
+ queue->IO_QUEUE_ERRCNTS,
+ __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS),
+ queue->IO_QUEUE_LRW,
+ __genwqe_readq(cd, queue->IO_QUEUE_LRW));
+
+ seq_printf(s, "DDCB list (ddcb_act=%d/ddcb_next=%d):\n",
+ queue->ddcb_act, queue->ddcb_next);
+
+ pddcb = queue->ddcb_vaddr;
+ for (i = 0; i < queue->ddcb_max; i++) {
+ seq_printf(s, " %-3d: RETC=%03x SEQ=%04x HSI/SHI=%02x/%02x ",
+ i, be16_to_cpu(pddcb->retc_16),
+ be16_to_cpu(pddcb->seqnum_16),
+ pddcb->hsi, pddcb->shi);
+ seq_printf(s, "PRIV=%06llx CMD=%02x\n",
+ be64_to_cpu(pddcb->priv_64), pddcb->cmd);
+ pddcb++;
+ }
+ return 0;
+}
+
+GENWQE_DEBUGFS_RO(ddcb_info, genwqe_ddcb_info_show);
+
+static int genwqe_info_show(struct seq_file *s, void *unused)
+{
+ struct genwqe_dev *cd = s->private;
+ u16 val16, type;
+ u64 app_id, slu_id, bitstream = -1;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG);
+ app_id = __genwqe_readq(cd, IO_APP_UNITCFG);
+
+ if (genwqe_is_privileged(cd))
+ bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM);
+
+ val16 = (u16)(slu_id & 0x0fLLU);
+ type = (u16)((slu_id >> 20) & 0xffLLU);
+
+ seq_printf(s, "%s driver version: %s\n"
+ " Device Name/Type: %s %s CardIdx: %d\n"
+ " SLU/APP Config : 0x%016llx/0x%016llx\n"
+ " Build Date : %u/%x/%u\n"
+ " Base Clock : %u MHz\n"
+ " Arch/SVN Release: %u/%llx\n"
+ " Bitstream : %llx\n",
+ GENWQE_DEVNAME, DRV_VERS_STRING, dev_name(&pci_dev->dev),
+ genwqe_is_privileged(cd) ?
+ "Physical" : "Virtual or no SR-IOV",
+ cd->card_idx, slu_id, app_id,
+ (u16)((slu_id >> 12) & 0x0fLLU), /* month */
+ (u16)((slu_id >> 4) & 0xffLLU), /* day */
+ (u16)((slu_id >> 16) & 0x0fLLU) + 2010, /* year */
+ genwqe_base_clock_frequency(cd),
+ (u16)((slu_id >> 32) & 0xffLLU), slu_id >> 40,
+ bitstream);
+
+ return 0;
+}
+
+GENWQE_DEBUGFS_RO(info, genwqe_info_show);
+
+int genwqe_init_debugfs(struct genwqe_dev *cd)
+{
+ struct dentry *root;
+ struct dentry *file;
+ int ret;
+ char card_name[64];
+ char name[64];
+ unsigned int i;
+
+ sprintf(card_name, "%s%u_card", GENWQE_DEVNAME, cd->card_idx);
+
+ root = debugfs_create_dir(card_name, cd->debugfs_genwqe);
+ if (!root) {
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ /* non privileged interfaces are done here */
+ file = debugfs_create_file("ddcb_info", S_IRUGO, root, cd,
+ &genwqe_ddcb_info_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("info", S_IRUGO, root, cd,
+ &genwqe_info_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_x64("err_inject", 0666, root, &cd->err_inject);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_u32("ddcb_software_timeout", 0666, root,
+ &cd->ddcb_software_timeout);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_u32("kill_timeout", 0666, root,
+ &cd->kill_timeout);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ /* privileged interfaces follow here */
+ if (!genwqe_is_privileged(cd)) {
+ cd->debugfs_root = root;
+ return 0;
+ }
+
+ file = debugfs_create_file("curr_regs", S_IRUGO, root, cd,
+ &genwqe_curr_regs_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("curr_dbg_uid0", S_IRUGO, root, cd,
+ &genwqe_curr_dbg_uid0_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("curr_dbg_uid1", S_IRUGO, root, cd,
+ &genwqe_curr_dbg_uid1_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("curr_dbg_uid2", S_IRUGO, root, cd,
+ &genwqe_curr_dbg_uid2_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("prev_regs", S_IRUGO, root, cd,
+ &genwqe_prev_regs_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("prev_dbg_uid0", S_IRUGO, root, cd,
+ &genwqe_prev_dbg_uid0_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("prev_dbg_uid1", S_IRUGO, root, cd,
+ &genwqe_prev_dbg_uid1_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("prev_dbg_uid2", S_IRUGO, root, cd,
+ &genwqe_prev_dbg_uid2_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ for (i = 0; i < GENWQE_MAX_VFS; i++) {
+ sprintf(name, "vf%d_jobtimeout_msec", i);
+
+ file = debugfs_create_u32(name, 0666, root,
+ &cd->vf_jobtimeout_msec[i]);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+ }
+
+ file = debugfs_create_file("jobtimer", S_IRUGO, root, cd,
+ &genwqe_jtimer_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("queue_working_time", S_IRUGO, root, cd,
+ &genwqe_queue_working_time_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_u32("skip_recovery", 0666, root,
+ &cd->skip_recovery);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ cd->debugfs_root = root;
+ return 0;
+err1:
+ debugfs_remove_recursive(root);
+err0:
+ return ret;
+}
+
+void genqwe_exit_debugfs(struct genwqe_dev *cd)
+{
+ debugfs_remove_recursive(cd->debugfs_root);
+}
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
new file mode 100644
index 0000000..8f8a6b3
--- /dev/null
+++ b/drivers/misc/genwqe/card_dev.c
@@ -0,0 +1,1414 @@
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Character device representation of the GenWQE device. This allows
+ * user-space applications to communicate with the card.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+
+#include "card_base.h"
+#include "card_ddcb.h"
+
+static int genwqe_open_files(struct genwqe_dev *cd)
+{
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cd->file_lock, flags);
+ rc = list_empty(&cd->file_list);
+ spin_unlock_irqrestore(&cd->file_lock, flags);
+ return !rc;
+}
+
+static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
+{
+ unsigned long flags;
+
+ cfile->owner = current;
+ spin_lock_irqsave(&cd->file_lock, flags);
+ list_add(&cfile->list, &cd->file_list);
+ spin_unlock_irqrestore(&cd->file_lock, flags);
+}
+
+static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cd->file_lock, flags);
+ list_del(&cfile->list);
+ spin_unlock_irqrestore(&cd->file_lock, flags);
+
+ return 0;
+}
+
+static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfile->pin_lock, flags);
+ list_add(&m->pin_list, &cfile->pin_list);
+ spin_unlock_irqrestore(&cfile->pin_lock, flags);
+}
+
+static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfile->pin_lock, flags);
+ list_del(&m->pin_list);
+ spin_unlock_irqrestore(&cfile->pin_lock, flags);
+
+ return 0;
+}
+
+/**
+ * genwqe_search_pin() - Search for the mapping for a userspace address
+ * @cfile: Descriptor of opened file
+ * @u_addr: User virtual address
+ * @size: Size of buffer
+ * @virt_addr: Kernel virtual address to be updated, if not NULL
+ *
+ * Return: Pointer to the corresponding mapping; NULL if not found
+ */
+static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile,
+ unsigned long u_addr,
+ unsigned int size,
+ void **virt_addr)
+{
+ unsigned long flags;
+ struct dma_mapping *m;
+
+ spin_lock_irqsave(&cfile->pin_lock, flags);
+
+ list_for_each_entry(m, &cfile->pin_list, pin_list) {
+ if ((((u64)m->u_vaddr) <= (u_addr)) &&
+ (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {
+
+ if (virt_addr)
+ *virt_addr = m->k_vaddr +
+ (u_addr - (u64)m->u_vaddr);
+
+ spin_unlock_irqrestore(&cfile->pin_lock, flags);
+ return m;
+ }
+ }
+ spin_unlock_irqrestore(&cfile->pin_lock, flags);
+ return NULL;
+}
+
+static void __genwqe_add_mapping(struct genwqe_file *cfile,
+ struct dma_mapping *dma_map)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfile->map_lock, flags);
+ list_add(&dma_map->card_list, &cfile->map_list);
+ spin_unlock_irqrestore(&cfile->map_lock, flags);
+}
+
+static void __genwqe_del_mapping(struct genwqe_file *cfile,
+ struct dma_mapping *dma_map)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfile->map_lock, flags);
+ list_del(&dma_map->card_list);
+ spin_unlock_irqrestore(&cfile->map_lock, flags);
+}
+
+
+/**
+ * __genwqe_search_mapping() - Search for the mapping for a userspace address
+ * @cfile: descriptor of opened file
+ * @u_addr: user virtual address
+ * @size: size of buffer
+ * @dma_addr: DMA address to be updated, if not NULL
+ * @virt_addr: Kernel virtual address to be updated, if not NULL
+ *
+ * Return: Pointer to the corresponding mapping; NULL if not found
+ */
+static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
+ unsigned long u_addr,
+ unsigned int size,
+ dma_addr_t *dma_addr,
+ void **virt_addr)
+{
+ unsigned long flags;
+ struct dma_mapping *m;
+ struct pci_dev *pci_dev = cfile->cd->pci_dev;
+
+ spin_lock_irqsave(&cfile->map_lock, flags);
+ list_for_each_entry(m, &cfile->map_list, card_list) {
+
+ if ((((u64)m->u_vaddr) <= (u_addr)) &&
+ (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {
+
+ /* match found: current is as expected and
+ addr is in range */
+ if (dma_addr)
+ *dma_addr = m->dma_addr +
+ (u_addr - (u64)m->u_vaddr);
+
+ if (virt_addr)
+ *virt_addr = m->k_vaddr +
+ (u_addr - (u64)m->u_vaddr);
+
+ spin_unlock_irqrestore(&cfile->map_lock, flags);
+ return m;
+ }
+ }
+ spin_unlock_irqrestore(&cfile->map_lock, flags);
+
+ dev_err(&pci_dev->dev,
+ "[%s] Entry not found: u_addr=%lx, size=%x\n",
+ __func__, u_addr, size);
+
+ return NULL;
+}
+
+static void genwqe_remove_mappings(struct genwqe_file *cfile)
+{
+ int i = 0;
+ struct list_head *node, *next;
+ struct dma_mapping *dma_map;
+ struct genwqe_dev *cd = cfile->cd;
+ struct pci_dev *pci_dev = cfile->cd->pci_dev;
+
+ list_for_each_safe(node, next, &cfile->map_list) {
+ dma_map = list_entry(node, struct dma_mapping, card_list);
+
+ list_del_init(&dma_map->card_list);
+
+ /*
+ * This is really a bug, because those things should
+ * have been already tidied up.
+ *
+		 * GENWQE_MAPPING_RAW should have been removed via munmap().
+ * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code.
+ */
+ dev_err(&pci_dev->dev,
+ "[%s] %d. cleanup mapping: u_vaddr=%p "
+ "u_kaddr=%016lx dma_addr=%lx\n", __func__, i++,
+ dma_map->u_vaddr, (unsigned long)dma_map->k_vaddr,
+ (unsigned long)dma_map->dma_addr);
+
+ if (dma_map->type == GENWQE_MAPPING_RAW) {
+ /* we allocated this dynamically */
+ __genwqe_free_consistent(cd, dma_map->size,
+ dma_map->k_vaddr,
+ dma_map->dma_addr);
+ kfree(dma_map);
+ } else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
+ /* we use dma_map statically from the request */
+ genwqe_user_vunmap(cd, dma_map, NULL);
+ }
+ }
+}
+
+static void genwqe_remove_pinnings(struct genwqe_file *cfile)
+{
+ struct list_head *node, *next;
+ struct dma_mapping *dma_map;
+ struct genwqe_dev *cd = cfile->cd;
+
+ list_for_each_safe(node, next, &cfile->pin_list) {
+ dma_map = list_entry(node, struct dma_mapping, pin_list);
+
+ /*
+		 * This is not a bug, because a killed process might
+		 * not call the unpin ioctl, which is supposed to free
+		 * the resources.
+		 *
+		 * Pinnings are dynamically allocated and need to be
+ * deleted.
+ */
+ list_del_init(&dma_map->pin_list);
+ genwqe_user_vunmap(cd, dma_map, NULL);
+ kfree(dma_map);
+ }
+}
+
+/**
+ * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
+ *
+ * E.g. genwqe_kill_fasync(cd, SIGIO);
+ */
+static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
+{
+ unsigned int files = 0;
+ unsigned long flags;
+ struct genwqe_file *cfile;
+
+ spin_lock_irqsave(&cd->file_lock, flags);
+ list_for_each_entry(cfile, &cd->file_list, list) {
+ if (cfile->async_queue)
+ kill_fasync(&cfile->async_queue, sig, POLL_HUP);
+ files++;
+ }
+ spin_unlock_irqrestore(&cd->file_lock, flags);
+ return files;
+}
+
+static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
+{
+ unsigned int files = 0;
+ unsigned long flags;
+ struct genwqe_file *cfile;
+
+ spin_lock_irqsave(&cd->file_lock, flags);
+ list_for_each_entry(cfile, &cd->file_list, list) {
+ force_sig(sig, cfile->owner);
+ files++;
+ }
+ spin_unlock_irqrestore(&cd->file_lock, flags);
+ return files;
+}
+
+/**
+ * genwqe_open() - file open
+ * @inode: file system information
+ * @filp: file handle
+ *
+ * This function is executed whenever an application calls
+ * open("/dev/genwqe",..).
+ *
+ * Return: 0 if successful or <0 if errors
+ */
+static int genwqe_open(struct inode *inode, struct file *filp)
+{
+ struct genwqe_dev *cd;
+ struct genwqe_file *cfile;
+ struct pci_dev *pci_dev;
+
+ cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
+ if (cfile == NULL)
+ return -ENOMEM;
+
+ cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
+ pci_dev = cd->pci_dev;
+ cfile->cd = cd;
+ cfile->filp = filp;
+ cfile->client = NULL;
+
+ spin_lock_init(&cfile->map_lock); /* list of raw memory allocations */
+ INIT_LIST_HEAD(&cfile->map_list);
+
+ spin_lock_init(&cfile->pin_lock); /* list of user pinned memory */
+ INIT_LIST_HEAD(&cfile->pin_list);
+
+ filp->private_data = cfile;
+
+ genwqe_add_file(cd, cfile);
+ return 0;
+}
+
+/**
+ * genwqe_fasync() - Setup process to receive SIGIO.
+ * @fd: file descriptor
+ * @filp: file handle
+ * @mode: file mode
+ *
+ * Sending a signal works as follows:
+ *
+ * if (cdev->async_queue)
+ * kill_fasync(&cdev->async_queue, SIGIO, POLL_IN);
+ *
+ * Some devices also implement asynchronous notification to indicate
+ * when the device can be written; in this case, of course,
+ * kill_fasync must be called with a mode of POLL_OUT.
+ */
+static int genwqe_fasync(int fd, struct file *filp, int mode)
+{
+ struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data;
+ return fasync_helper(fd, filp, mode, &cdev->async_queue);
+}
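+
+/*
+ * Hedged user-space illustration of the notification above (the
+ * device path is an assumption for the example):
+ *
+ *   int fd = open("/dev/genwqe0_card", O_RDWR);
+ *
+ *   signal(SIGIO, my_sigio_handler);
+ *   fcntl(fd, F_SETOWN, getpid());
+ *   fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
+ */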
+
+
+/**
+ * genwqe_release() - file close
+ * @inode: file system information
+ * @filp: file handle
+ *
+ * This function is executed whenever an application calls 'close(fd_genwqe)'
+ *
+ * Return: always 0
+ */
+static int genwqe_release(struct inode *inode, struct file *filp)
+{
+ struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
+ struct genwqe_dev *cd = cfile->cd;
+
+ /* there must be no entries in these lists! */
+ genwqe_remove_mappings(cfile);
+ genwqe_remove_pinnings(cfile);
+
+ /* remove this filp from the asynchronously notified filp's */
+ genwqe_fasync(-1, filp, 0);
+
+ /*
+ * For this to work we must not release cd when this cfile is
+ * not yet released, otherwise the list entry is invalid,
+ * because the list itself gets reinstantiated!
+ */
+ genwqe_del_file(cd, cfile);
+ kfree(cfile);
+ return 0;
+}
+
+static void genwqe_vma_open(struct vm_area_struct *vma)
+{
+ /* nothing ... */
+}
+
+/**
+ * genwqe_vma_close() - Called each time a vma is unmapped
+ *
+ * Free memory which got allocated by GenWQE mmap().
+ */
+static void genwqe_vma_close(struct vm_area_struct *vma)
+{
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ struct dma_mapping *dma_map;
+ struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
+ cdev_genwqe);
+ struct pci_dev *pci_dev = cd->pci_dev;
+ dma_addr_t d_addr = 0;
+ struct genwqe_file *cfile = vma->vm_private_data;
+
+ dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
+ &d_addr, NULL);
+ if (dma_map == NULL) {
+ dev_err(&pci_dev->dev,
+ " [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
+ __func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
+ vsize);
+ return;
+ }
+ __genwqe_del_mapping(cfile, dma_map);
+ __genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
+ dma_map->dma_addr);
+ kfree(dma_map);
+}
+
+static struct vm_operations_struct genwqe_vma_ops = {
+ .open = genwqe_vma_open,
+ .close = genwqe_vma_close,
+};
+
+/**
+ * genwqe_mmap() - Provide contiguous buffers to userspace
+ *
+ * We use mmap() to allocate contiguous buffers used for DMA
+ * transfers. After the buffer is allocated we remap it to user-space
+ * and remember a reference to our dma_mapping data structure, where
+ * we store the associated DMA address and allocated size.
+ *
+ * When we receive a DDCB execution request with the ATS bits set to
+ * plain buffer, we look up our dma_mapping list to find the
+ * corresponding DMA address for the associated user-space address.
+ */
+static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int rc;
+ unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
+ struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
+ struct genwqe_dev *cd = cfile->cd;
+ struct dma_mapping *dma_map;
+
+ if (vsize == 0)
+ return -EINVAL;
+
+ if (get_order(vsize) > MAX_ORDER)
+ return -ENOMEM;
+
+ dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC);
+ if (dma_map == NULL)
+ return -ENOMEM;
+
+ genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
+ dma_map->u_vaddr = (void *)vma->vm_start;
+ dma_map->size = vsize;
+ dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
+ dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
+ &dma_map->dma_addr);
+ if (dma_map->k_vaddr == NULL) {
+ rc = -ENOMEM;
+ goto free_dma_map;
+ }
+
+ if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
+ *(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;
+
+ pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
+ rc = remap_pfn_range(vma,
+ vma->vm_start,
+ pfn,
+ vsize,
+ vma->vm_page_prot);
+ if (rc != 0) {
+ rc = -EFAULT;
+ goto free_dma_mem;
+ }
+
+ vma->vm_private_data = cfile;
+ vma->vm_ops = &genwqe_vma_ops;
+ __genwqe_add_mapping(cfile, dma_map);
+
+ return 0;
+
+ free_dma_mem:
+ __genwqe_free_consistent(cd, dma_map->size,
+ dma_map->k_vaddr,
+ dma_map->dma_addr);
+ free_dma_map:
+ kfree(dma_map);
+ return rc;
+}
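+
+/*
+ * Hedged user-space illustration of the allocation path above (the
+ * buffer size is just an example value):
+ *
+ *   void *buf = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
+ *                    MAP_SHARED, card_fd, 0);
+ *
+ *   if (buf != MAP_FAILED) {
+ *           // ... use buf for DMA transfers ...
+ *           munmap(buf, 2 * 1024 * 1024);   // triggers genwqe_vma_close()
+ *   }
+ */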
+
+/**
+ * do_flash_update() - Execute flash update (write image or CVPD)
+ * @cfile: descriptor of opened file
+ * @load: details about image load
+ *
+ * Return: 0 if successful
+ */
+
+#define FLASH_BLOCK 0x40000 /* we use 256k blocks */
+
+static int do_flash_update(struct genwqe_file *cfile,
+ struct genwqe_bitstream *load)
+{
+ int rc = 0;
+ int blocks_to_flash;
+ dma_addr_t dma_addr;
+ u64 flash = 0;
+ size_t tocopy = 0;
+ u8 __user *buf;
+ u8 *xbuf;
+ u32 crc;
+ u8 cmdopts;
+ struct genwqe_dev *cd = cfile->cd;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if ((load->size & 0x3) != 0)
+ return -EINVAL;
+
+ if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
+ return -EINVAL;
+
+ /* FIXME Bits have changed for new service layer! */
+ switch ((char)load->partition) {
+ case '0':
+ cmdopts = 0x14;
+ break; /* download/erase_first/part_0 */
+ case '1':
+ cmdopts = 0x1C;
+ break; /* download/erase_first/part_1 */
+ case 'v': /* cmdopts = 0x0c (VPD) */
+ default:
+ return -EINVAL;
+ }
+
+ buf = (u8 __user *)load->data_addr;
+ xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
+ if (xbuf == NULL)
+ return -ENOMEM;
+
+ blocks_to_flash = load->size / FLASH_BLOCK;
+ while (load->size) {
+ struct genwqe_ddcb_cmd *req;
+
+ /*
+		 * We must be 4 byte aligned. The buffer must be zero-padded
+		 * to have defined values when calculating the CRC.
+ */
+ tocopy = min_t(size_t, load->size, FLASH_BLOCK);
+
+ rc = copy_from_user(xbuf, buf, tocopy);
+ if (rc) {
+ rc = -EFAULT;
+ goto free_buffer;
+ }
+ crc = genwqe_crc32(xbuf, tocopy, 0xffffffff);
+
+ dev_dbg(&pci_dev->dev,
+ "[%s] DMA: %lx CRC: %08x SZ: %ld %d\n",
+ __func__, (unsigned long)dma_addr, crc, tocopy,
+ blocks_to_flash);
+
+ /* prepare DDCB for SLU process */
+ req = ddcb_requ_alloc();
+ if (req == NULL) {
+ rc = -ENOMEM;
+ goto free_buffer;
+ }
+
+ req->cmd = SLCMD_MOVE_FLASH;
+ req->cmdopts = cmdopts;
+
+ /* prepare invariant values */
+ if (genwqe_get_slu_id(cd) <= 0x2) {
+ *(__be64 *)&req->__asiv[0] = cpu_to_be64(dma_addr);
+ *(__be64 *)&req->__asiv[8] = cpu_to_be64(tocopy);
+ *(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
+ *(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
+ req->__asiv[24] = load->uid;
+ *(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);
+
+ /* for simulation only */
+ *(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
+ *(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
+ req->asiv_length = 32; /* bytes included in crc calc */
+ } else { /* setup DDCB for ATS architecture */
+ *(__be64 *)&req->asiv[0] = cpu_to_be64(dma_addr);
+ *(__be32 *)&req->asiv[8] = cpu_to_be32(tocopy);
+ *(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */
+ *(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
+ *(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24);
+ *(__be32 *)&req->asiv[28] = cpu_to_be32(crc);
+
+ /* for simulation only */
+ *(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
+ *(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);
+
+ /* Rd only */
+ req->ats = 0x4ULL << 44;
+ req->asiv_length = 40; /* bytes included in crc calc */
+ }
+ req->asv_length = 8;
+
+ /* For Genwqe5 we get back the calculated CRC */
+ *(u64 *)&req->asv[0] = 0ULL; /* 0x80 */
+
+ rc = __genwqe_execute_raw_ddcb(cd, req);
+
+ load->retc = req->retc;
+ load->attn = req->attn;
+ load->progress = req->progress;
+
+ if (rc < 0) {
+ ddcb_requ_free(req);
+ goto free_buffer;
+ }
+
+ if (req->retc != DDCB_RETC_COMPLETE) {
+ rc = -EIO;
+ ddcb_requ_free(req);
+ goto free_buffer;
+ }
+
+ load->size -= tocopy;
+ flash += tocopy;
+ buf += tocopy;
+ blocks_to_flash--;
+ ddcb_requ_free(req);
+ }
+
+ free_buffer:
+ __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
+ return rc;
+}
+
+static int do_flash_read(struct genwqe_file *cfile,
+ struct genwqe_bitstream *load)
+{
+ int rc, blocks_to_flash;
+ dma_addr_t dma_addr;
+ u64 flash = 0;
+ size_t tocopy = 0;
+ u8 __user *buf;
+ u8 *xbuf;
+ u8 cmdopts;
+ struct genwqe_dev *cd = cfile->cd;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ struct genwqe_ddcb_cmd *cmd;
+
+ if ((load->size & 0x3) != 0)
+ return -EINVAL;
+
+ if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
+ return -EINVAL;
+
+ /* FIXME Bits have changed for new service layer! */
+ switch ((char)load->partition) {
+ case '0':
+ cmdopts = 0x12;
+ break; /* upload/part_0 */
+ case '1':
+ cmdopts = 0x1A;
+ break; /* upload/part_1 */
+ case 'v':
+ default:
+ return -EINVAL;
+ }
+
+ buf = (u8 __user *)load->data_addr;
+ xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
+ if (xbuf == NULL)
+ return -ENOMEM;
+
+ blocks_to_flash = load->size / FLASH_BLOCK;
+ while (load->size) {
+ /*
+		 * We must be 4 byte aligned. The buffer must be zero-padded
+		 * to have defined values when calculating the CRC.
+ */
+ tocopy = min_t(size_t, load->size, FLASH_BLOCK);
+
+ dev_dbg(&pci_dev->dev,
+ "[%s] DMA: %lx SZ: %ld %d\n",
+ __func__, (unsigned long)dma_addr, tocopy,
+ blocks_to_flash);
+
+ /* prepare DDCB for SLU process */
+ cmd = ddcb_requ_alloc();
+ if (cmd == NULL) {
+ rc = -ENOMEM;
+ goto free_buffer;
+ }
+ cmd->cmd = SLCMD_MOVE_FLASH;
+ cmd->cmdopts = cmdopts;
+
+ /* prepare invariant values */
+ if (genwqe_get_slu_id(cd) <= 0x2) {
+ *(__be64 *)&cmd->__asiv[0] = cpu_to_be64(dma_addr);
+ *(__be64 *)&cmd->__asiv[8] = cpu_to_be64(tocopy);
+ *(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash);
+ *(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0);
+ cmd->__asiv[24] = load->uid;
+			*(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0); /* CRC */
+ cmd->asiv_length = 32; /* bytes included in crc calc */
+ } else { /* setup DDCB for ATS architecture */
+ *(__be64 *)&cmd->asiv[0] = cpu_to_be64(dma_addr);
+ *(__be32 *)&cmd->asiv[8] = cpu_to_be32(tocopy);
+ *(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */
+ *(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash);
+ *(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24);
+ *(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */
+
+ /* rd/wr */
+ cmd->ats = 0x5ULL << 44;
+ cmd->asiv_length = 40; /* bytes included in crc calc */
+ }
+ cmd->asv_length = 8;
+
+ /* we only get back the calculated CRC */
+ *(u64 *)&cmd->asv[0] = 0ULL; /* 0x80 */
+
+ rc = __genwqe_execute_raw_ddcb(cd, cmd);
+
+ load->retc = cmd->retc;
+ load->attn = cmd->attn;
+ load->progress = cmd->progress;
+
+ if ((rc < 0) && (rc != -EBADMSG)) {
+ ddcb_requ_free(cmd);
+ goto free_buffer;
+ }
+
+ rc = copy_to_user(buf, xbuf, tocopy);
+ if (rc) {
+ rc = -EFAULT;
+ ddcb_requ_free(cmd);
+ goto free_buffer;
+ }
+
+ /* We know that we can get retc 0x104 with CRC err */
+ if (((cmd->retc == DDCB_RETC_FAULT) &&
+ (cmd->attn != 0x02)) || /* Normally ignore CRC error */
+ ((cmd->retc == DDCB_RETC_COMPLETE) &&
+ (cmd->attn != 0x00))) { /* Everything was fine */
+ rc = -EIO;
+ ddcb_requ_free(cmd);
+ goto free_buffer;
+ }
+
+ load->size -= tocopy;
+ flash += tocopy;
+ buf += tocopy;
+ blocks_to_flash--;
+ ddcb_requ_free(cmd);
+ }
+ rc = 0;
+
+ free_buffer:
+ __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
+ return rc;
+}
+
+static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
+{
+ int rc;
+ struct genwqe_dev *cd = cfile->cd;
+ struct pci_dev *pci_dev = cfile->cd->pci_dev;
+ struct dma_mapping *dma_map;
+ unsigned long map_addr;
+ unsigned long map_size;
+
+ if ((m->addr == 0x0) || (m->size == 0))
+ return -EINVAL;
+
+ map_addr = (m->addr & PAGE_MASK);
+ map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
+
+ dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC);
+ if (dma_map == NULL)
+ return -ENOMEM;
+
+ genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
+ rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size, NULL);
+ if (rc != 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
+		kfree(dma_map);
+		return rc;
+ }
+
+ genwqe_add_pin(cfile, dma_map);
+ return 0;
+}
+
+static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
+{
+ struct genwqe_dev *cd = cfile->cd;
+ struct dma_mapping *dma_map;
+ unsigned long map_addr;
+ unsigned long map_size;
+
+ if (m->addr == 0x0)
+ return -EINVAL;
+
+ map_addr = (m->addr & PAGE_MASK);
+ map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
+
+ dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL);
+ if (dma_map == NULL)
+ return -ENOENT;
+
+ genwqe_del_pin(cfile, dma_map);
+ genwqe_user_vunmap(cd, dma_map, NULL);
+ kfree(dma_map);
+ return 0;
+}
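+
+/*
+ * Hedged user-space illustration of the pin ioctl served above (only
+ * the addr/size fields used here are shown; the remaining fields of
+ * struct genwqe_mem are left zeroed):
+ *
+ *   struct genwqe_mem m = {
+ *           .addr = (unsigned long)buf,
+ *           .size = buf_size,
+ *   };
+ *
+ *   rc = ioctl(card_fd, GENWQE_PIN_MEM, &m);
+ */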
+
+/**
+ * ddcb_cmd_cleanup() - Remove dynamically created fixup entries
+ *
+ * Only if there are any. Pinnings are not removed.
+ */
+static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
+{
+ unsigned int i;
+ struct dma_mapping *dma_map;
+ struct genwqe_dev *cd = cfile->cd;
+
+ for (i = 0; i < DDCB_FIXUPS; i++) {
+ dma_map = &req->dma_mappings[i];
+
+ if (dma_mapping_used(dma_map)) {
+ __genwqe_del_mapping(cfile, dma_map);
+ genwqe_user_vunmap(cd, dma_map, req);
+ }
+ if (req->sgl[i] != NULL) {
+ genwqe_free_sgl(cd, req->sgl[i],
+ req->sgl_dma_addr[i],
+ req->sgl_size[i]);
+ req->sgl[i] = NULL;
+ req->sgl_dma_addr[i] = 0x0;
+ req->sgl_size[i] = 0;
+ }
+
+ }
+ return 0;
+}
+
+/**
+ * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
+ *
+ * Before the DDCB gets executed we need to handle the fixups. We
+ * replace the user-space addresses with DMA addresses or do
+ * additional setup work e.g. generating a scatter-gather list which
+ * is used to describe the memory referred to in the fixup.
+ */
+static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
+{
+ int rc;
+ unsigned int asiv_offs, i;
+ struct genwqe_dev *cd = cfile->cd;
+ struct genwqe_ddcb_cmd *cmd = &req->cmd;
+ struct dma_mapping *m;
+ const char *type = "UNKNOWN";
+
+ for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
+ i++, asiv_offs += 0x08) {
+
+ u64 u_addr;
+ dma_addr_t d_addr;
+ u32 u_size = 0;
+ u64 ats_flags;
+
+ ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs);
+
+ switch (ats_flags) {
+
+ case ATS_TYPE_DATA:
+ break; /* nothing to do here */
+
+ case ATS_TYPE_FLAT_RDWR:
+ case ATS_TYPE_FLAT_RD: {
+ u_addr = be64_to_cpu(*((__be64 *)&cmd->
+ asiv[asiv_offs]));
+ u_size = be32_to_cpu(*((__be32 *)&cmd->
+ asiv[asiv_offs + 0x08]));
+
+ /*
+ * No data available. Ignore u_addr in this
+ * case and set addr to 0. Hardware must not
+ * fetch the buffer.
+ */
+ if (u_size == 0x0) {
+ *((__be64 *)&cmd->asiv[asiv_offs]) =
+ cpu_to_be64(0x0);
+ break;
+ }
+
+ m = __genwqe_search_mapping(cfile, u_addr, u_size,
+ &d_addr, NULL);
+ if (m == NULL) {
+ rc = -EFAULT;
+ goto err_out;
+ }
+
+ *((__be64 *)&cmd->asiv[asiv_offs]) =
+ cpu_to_be64(d_addr);
+ break;
+ }
+
+ case ATS_TYPE_SGL_RDWR:
+ case ATS_TYPE_SGL_RD: {
+ int page_offs, nr_pages, offs;
+
+ u_addr = be64_to_cpu(*((__be64 *)
+ &cmd->asiv[asiv_offs]));
+ u_size = be32_to_cpu(*((__be32 *)
+ &cmd->asiv[asiv_offs + 0x08]));
+
+ /*
+ * No data available. Ignore u_addr in this
+ * case and set addr to 0. Hardware must not
+ * fetch the empty sgl.
+ */
+ if (u_size == 0x0) {
+ *((__be64 *)&cmd->asiv[asiv_offs]) =
+ cpu_to_be64(0x0);
+ break;
+ }
+
+ m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
+ if (m != NULL) {
+ type = "PINNING";
+ page_offs = (u_addr -
+ (u64)m->u_vaddr)/PAGE_SIZE;
+ } else {
+ type = "MAPPING";
+ m = &req->dma_mappings[i];
+
+ genwqe_mapping_init(m,
+ GENWQE_MAPPING_SGL_TEMP);
+ rc = genwqe_user_vmap(cd, m, (void *)u_addr,
+ u_size, req);
+ if (rc != 0)
+ goto err_out;
+
+ __genwqe_add_mapping(cfile, m);
+ page_offs = 0;
+ }
+
+ offs = offset_in_page(u_addr);
+ nr_pages = DIV_ROUND_UP(offs + u_size, PAGE_SIZE);
+
+ /* create genwqe style scatter gather list */
+ req->sgl[i] = genwqe_alloc_sgl(cd, m->nr_pages,
+ &req->sgl_dma_addr[i],
+ &req->sgl_size[i]);
+ if (req->sgl[i] == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ genwqe_setup_sgl(cd, offs, u_size,
+ req->sgl[i],
+ req->sgl_dma_addr[i],
+ req->sgl_size[i],
+ m->dma_list,
+ page_offs,
+ nr_pages);
+
+ *((__be64 *)&cmd->asiv[asiv_offs]) =
+ cpu_to_be64(req->sgl_dma_addr[i]);
+
+ break;
+ }
+ default:
+ rc = -EINVAL;
+ goto err_out;
+ }
+ }
+ return 0;
+
+ err_out:
+ ddcb_cmd_cleanup(cfile, req);
+ return rc;
+}
+
+/**
+ * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
+ *
+ * The code will build up the translation tables or look up the
+ * contiguous memory allocation table to find the right translations
+ * and DMA addresses.
+ */
+static int genwqe_execute_ddcb(struct genwqe_file *cfile,
+ struct genwqe_ddcb_cmd *cmd)
+{
+ int rc;
+ struct genwqe_dev *cd = cfile->cd;
+ struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
+
+ rc = ddcb_cmd_fixups(cfile, req);
+ if (rc != 0)
+ return rc;
+
+ rc = __genwqe_execute_raw_ddcb(cd, cmd);
+ ddcb_cmd_cleanup(cfile, req);
+ return rc;
+}
+
+static int do_execute_ddcb(struct genwqe_file *cfile,
+ unsigned long arg, int raw)
+{
+ int rc;
+ struct genwqe_ddcb_cmd *cmd;
+ struct ddcb_requ *req;
+ struct genwqe_dev *cd = cfile->cd;
+
+ cmd = ddcb_requ_alloc();
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ req = container_of(cmd, struct ddcb_requ, cmd);
+
+ if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
+ ddcb_requ_free(cmd);
+ return -EFAULT;
+ }
+
+ if (!raw)
+ rc = genwqe_execute_ddcb(cfile, cmd);
+ else
+ rc = __genwqe_execute_raw_ddcb(cd, cmd);
+
+	/* Copy back only the modified fields. Do not copy ASIV
+	   back since the copy got modified by the driver. */
+ if (copy_to_user((void __user *)arg, cmd,
+ sizeof(*cmd) - DDCB_ASIV_LENGTH)) {
+ ddcb_requ_free(cmd);
+ return -EFAULT;
+ }
+
+ ddcb_requ_free(cmd);
+ return rc;
+}
+
+/**
+ * genwqe_ioctl() - IO control
+ * @filp: file handle
+ * @cmd: command identifier (passed from user)
+ * @arg: argument (passed from user)
+ *
+ * Return: 0 on success
+ */
+static long genwqe_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc = 0;
+ struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
+ struct genwqe_dev *cd = cfile->cd;
+ struct genwqe_reg_io __user *io;
+ u64 val;
+ u32 reg_offs;
+
+ if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
+ return -EINVAL;
+
+ switch (cmd) {
+
+ case GENWQE_GET_CARD_STATE:
+ put_user(cd->card_state, (enum genwqe_card_state __user *)arg);
+ return 0;
+
+ /* Register access */
+ case GENWQE_READ_REG64: {
+ io = (struct genwqe_reg_io __user *)arg;
+
+ if (get_user(reg_offs, &io->num))
+ return -EFAULT;
+
+ if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
+ return -EINVAL;
+
+ val = __genwqe_readq(cd, reg_offs);
+ put_user(val, &io->val64);
+ return 0;
+ }
+
+ case GENWQE_WRITE_REG64: {
+ io = (struct genwqe_reg_io __user *)arg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EPERM;
+
+ if (get_user(reg_offs, &io->num))
+ return -EFAULT;
+
+ if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
+ return -EINVAL;
+
+ if (get_user(val, &io->val64))
+ return -EFAULT;
+
+ __genwqe_writeq(cd, reg_offs, val);
+ return 0;
+ }
+
+ case GENWQE_READ_REG32: {
+ io = (struct genwqe_reg_io __user *)arg;
+
+ if (get_user(reg_offs, &io->num))
+ return -EFAULT;
+
+ if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
+ return -EINVAL;
+
+ val = __genwqe_readl(cd, reg_offs);
+ put_user(val, &io->val64);
+ return 0;
+ }
+
+ case GENWQE_WRITE_REG32: {
+ io = (struct genwqe_reg_io __user *)arg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EPERM;
+
+ if (get_user(reg_offs, &io->num))
+ return -EFAULT;
+
+ if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
+ return -EINVAL;
+
+ if (get_user(val, &io->val64))
+ return -EFAULT;
+
+ __genwqe_writel(cd, reg_offs, val);
+ return 0;
+ }
+
+ /* Flash update/reading */
+ case GENWQE_SLU_UPDATE: {
+ struct genwqe_bitstream load;
+
+ if (!genwqe_is_privileged(cd))
+ return -EPERM;
+
+ if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EPERM;
+
+ if (copy_from_user(&load, (void __user *)arg,
+ sizeof(load)))
+ return -EFAULT;
+
+ rc = do_flash_update(cfile, &load);
+
+ if (copy_to_user((void __user *)arg, &load, sizeof(load)))
+ return -EFAULT;
+
+ return rc;
+ }
+
+ case GENWQE_SLU_READ: {
+ struct genwqe_bitstream load;
+
+ if (!genwqe_is_privileged(cd))
+ return -EPERM;
+
+ if (genwqe_flash_readback_fails(cd))
+ return -ENOSPC; /* known to fail for old versions */
+
+ if (copy_from_user(&load, (void __user *)arg, sizeof(load)))
+ return -EFAULT;
+
+ rc = do_flash_read(cfile, &load);
+
+ if (copy_to_user((void __user *)arg, &load, sizeof(load)))
+ return -EFAULT;
+
+ return rc;
+ }
+
+ /* memory pinning and unpinning */
+ case GENWQE_PIN_MEM: {
+ struct genwqe_mem m;
+
+ if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
+ return -EFAULT;
+
+ return genwqe_pin_mem(cfile, &m);
+ }
+
+ case GENWQE_UNPIN_MEM: {
+ struct genwqe_mem m;
+
+ if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
+ return -EFAULT;
+
+ return genwqe_unpin_mem(cfile, &m);
+ }
+
+	/* launch a DDCB and wait for completion */
+ case GENWQE_EXECUTE_DDCB:
+ return do_execute_ddcb(cfile, arg, 0);
+
+ case GENWQE_EXECUTE_RAW_DDCB: {
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return do_execute_ddcb(cfile, arg, 1);
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
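+
+/*
+ * Hedged user-space illustration of the register read path above
+ * (struct genwqe_reg_io and GENWQE_READ_REG64 are assumed to come
+ * from the GenWQE uapi header added elsewhere in this series; the
+ * offset is just an example):
+ *
+ *   struct genwqe_reg_io io = { .num = 0x0 };
+ *
+ *   if (ioctl(card_fd, GENWQE_READ_REG64, &io) == 0)
+ *           printf("reg %llx = %016llx\n", io.num, io.val64);
+ */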
+
+#if defined(CONFIG_COMPAT)
+/**
+ * genwqe_compat_ioctl() - Compatibility ioctl
+ *
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/genwqe<n>_card.
+ *
+ * @filp: file pointer.
+ * @cmd: command.
+ * @arg: user argument.
+ * Return: zero on success or negative number on failure.
+ */
+static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return genwqe_ioctl(filp, cmd, arg);
+}
+#endif /* defined(CONFIG_COMPAT) */
+
+static const struct file_operations genwqe_fops = {
+ .owner = THIS_MODULE,
+ .open = genwqe_open,
+ .fasync = genwqe_fasync,
+ .mmap = genwqe_mmap,
+ .unlocked_ioctl = genwqe_ioctl,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = genwqe_compat_ioctl,
+#endif
+ .release = genwqe_release,
+};
+
+static int genwqe_device_initialized(struct genwqe_dev *cd)
+{
+ return cd->dev != NULL;
+}
+
+/**
+ * genwqe_device_create() - Create and configure genwqe char device
+ * @cd: genwqe device descriptor
+ *
+ * This function must be called before we create any more genwqe
+ * character devices, because it allocates the major and minor
+ * numbers which are supposed to be used by the client drivers.
+ */
+int genwqe_device_create(struct genwqe_dev *cd)
+{
+ int rc;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ /*
+ * Here starts the individual setup per client. It must
+ * initialize its own cdev data structure with its own fops.
+ * The appropriate devnum needs to be created. The ranges must
+ * not overlap.
+ */
+ rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
+ GENWQE_MAX_MINOR, GENWQE_DEVNAME);
+ if (rc < 0) {
+ dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n");
+ goto err_dev;
+ }
+
+ cdev_init(&cd->cdev_genwqe, &genwqe_fops);
+ cd->cdev_genwqe.owner = THIS_MODULE;
+
+ rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1);
+ if (rc < 0) {
+ dev_err(&pci_dev->dev, "err: cdev_add failed\n");
+ goto err_add;
+ }
+
+ /*
+ * Finally the device in /dev/... must be created. The rule is
+ * to use card%d_clientname for each created device.
+ */
+ cd->dev = device_create_with_groups(cd->class_genwqe,
+ &cd->pci_dev->dev,
+ cd->devnum_genwqe, cd,
+ genwqe_attribute_groups,
+ GENWQE_DEVNAME "%u_card",
+ cd->card_idx);
+ if (IS_ERR(cd->dev)) {
+ rc = PTR_ERR(cd->dev);
+ goto err_cdev;
+ }
+
+ rc = genwqe_init_debugfs(cd);
+ if (rc != 0)
+ goto err_debugfs;
+
+ return 0;
+
+ err_debugfs:
+ device_destroy(cd->class_genwqe, cd->devnum_genwqe);
+ err_cdev:
+ cdev_del(&cd->cdev_genwqe);
+ err_add:
+ unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
+ err_dev:
+ cd->dev = NULL;
+ return rc;
+}
+
+static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
+{
+ int rc;
+ unsigned int i;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (!genwqe_open_files(cd))
+ return 0;
+
+ dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__);
+
+ rc = genwqe_kill_fasync(cd, SIGIO);
+ if (rc > 0) {
+ /* give kill_timeout seconds to close file descriptors ... */
+ for (i = 0; (i < genwqe_kill_timeout) &&
+ genwqe_open_files(cd); i++) {
+ dev_info(&pci_dev->dev, " %d sec ...", i);
+
+ cond_resched();
+ msleep(1000);
+ }
+
+ /* if no open files we can safely continue, else ... */
+ if (!genwqe_open_files(cd))
+ return 0;
+
+ dev_warn(&pci_dev->dev,
+ "[%s] send SIGKILL and wait ...\n", __func__);
+
+ rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */
+ if (rc) {
+			/* Give kill_timeout more seconds to end processes */
+ for (i = 0; (i < genwqe_kill_timeout) &&
+ genwqe_open_files(cd); i++) {
+ dev_warn(&pci_dev->dev, " %d sec ...", i);
+
+ cond_resched();
+ msleep(1000);
+ }
+ }
+ }
+ return 0;
+}
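
The SIGIO/SIGKILL escalation above only helps applications that actually receive the SIGIO sent via genwqe_kill_fasync(). A hedged user-space sketch of how a client could register for it through the .fasync handler installed in genwqe_fops below; this is standard fcntl/O_ASYNC usage, nothing here is driver-specific.

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t card_going_away;

static void sigio_handler(int sig)
{
	(void)sig;
	card_going_away = 1;	/* main loop should close the card fd now */
}

/* Open the card and ask for SIGIO, so the driver's escalation can reach us. */
static int open_card_with_sigio(const char *path)
{
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;

	signal(SIGIO, sigio_handler);
	fcntl(fd, F_SETOWN, getpid());			  /* route SIGIO to this process */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC); /* enables genwqe_fasync() */
	return fd;
}
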
+
+/**
+ * genwqe_device_remove() - Remove genwqe's char device
+ *
+ * This function must be called after the client devices are removed
+ * because it will free the major/minor number range for the genwqe
+ * drivers.
+ *
+ * This function must be robust enough to be called twice.
+ */
+int genwqe_device_remove(struct genwqe_dev *cd)
+{
+ int rc;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (!genwqe_device_initialized(cd))
+ return 1;
+
+ genwqe_inform_and_stop_processes(cd);
+
+ /*
+	 * We currently do wait until all file descriptors are
+	 * closed. This leads to a problem when we abort the
+	 * application, which will decrease this reference from
+	 * 1/unused to 0/illegal and not from 2/used to 1/empty.
+ */
+ rc = atomic_read(&cd->cdev_genwqe.kobj.kref.refcount);
+ if (rc != 1) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
+ panic("Fatal err: cannot free resources with pending references!");
+ }
+
+ genqwe_exit_debugfs(cd);
+ device_destroy(cd->class_genwqe, cd->devnum_genwqe);
+ cdev_del(&cd->cdev_genwqe);
+ unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
+ cd->dev = NULL;
+
+ return 0;
+}
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c
new file mode 100644
index 0000000..a72a992
--- /dev/null
+++ b/drivers/misc/genwqe/card_sysfs.c
@@ -0,0 +1,288 @@
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Sysfs interfaces for the GenWQE card. There are attributes to query
+ * the version of the bitstream as well as some for the driver. For
+ * debugging, please also see the debugfs interfaces of this driver.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+
+#include "card_base.h"
+#include "card_ddcb.h"
+
+static const char * const genwqe_types[] = {
+ [GENWQE_TYPE_ALTERA_230] = "GenWQE4-230",
+ [GENWQE_TYPE_ALTERA_530] = "GenWQE4-530",
+ [GENWQE_TYPE_ALTERA_A4] = "GenWQE5-A4",
+ [GENWQE_TYPE_ALTERA_A7] = "GenWQE5-A7",
+};
+
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+ const char *cs[GENWQE_CARD_STATE_MAX] = { "unused", "used", "error" };
+
+ return sprintf(buf, "%s\n", cs[cd->card_state]);
+}
+static DEVICE_ATTR_RO(status);
+
+static ssize_t appid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ char app_name[5];
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ genwqe_read_app_id(cd, app_name, sizeof(app_name));
+ return sprintf(buf, "%s\n", app_name);
+}
+static DEVICE_ATTR_RO(appid);
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u64 slu_id, app_id;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG);
+ app_id = __genwqe_readq(cd, IO_APP_UNITCFG);
+
+ return sprintf(buf, "%016llx.%016llx\n", slu_id, app_id);
+}
+static DEVICE_ATTR_RO(version);
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u8 card_type;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ card_type = genwqe_card_type(cd);
+ return sprintf(buf, "%s\n", (card_type >= ARRAY_SIZE(genwqe_types)) ?
+ "invalid" : genwqe_types[card_type]);
+}
+static DEVICE_ATTR_RO(type);
+
+static ssize_t driver_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", DRV_VERS_STRING);
+}
+static DEVICE_ATTR_RO(driver);
+
+static ssize_t tempsens_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u64 tempsens;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ tempsens = __genwqe_readq(cd, IO_SLU_TEMPERATURE_SENSOR);
+ return sprintf(buf, "%016llx\n", tempsens);
+}
+static DEVICE_ATTR_RO(tempsens);
+
+static ssize_t freerunning_timer_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 t;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ t = __genwqe_readq(cd, IO_SLC_FREE_RUNNING_TIMER);
+ return sprintf(buf, "%016llx\n", t);
+}
+static DEVICE_ATTR_RO(freerunning_timer);
+
+static ssize_t queue_working_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 t;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ t = __genwqe_readq(cd, IO_SLC_QUEUE_WTIME);
+ return sprintf(buf, "%016llx\n", t);
+}
+static DEVICE_ATTR_RO(queue_working_time);
+
+static ssize_t base_clock_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 base_clock;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ base_clock = genwqe_base_clock_frequency(cd);
+ return sprintf(buf, "%lld\n", base_clock);
+}
+static DEVICE_ATTR_RO(base_clock);
+
+/**
+ * curr_bitstream_show() - Show the current bitstream id
+ *
+ * There is a bug in some old versions of the CPLD which selects the
+ * bitstream, which causes the IO_SLU_BITSTREAM register to report
+ * unreliable data in very rare cases. This makes this sysfs entry
+ * unreliable until a newer CPLD version is used.
+ *
+ * Unfortunately there is no automatic way yet to query the CPLD
+ * version, so you need to ensure manually, via programming tools,
+ * that you have a recent version of the CPLD software.
+ *
+ * The proposed circumvention is to use a special recovery bitstream
+ * on the backup partition (0) to identify problems while loading the
+ * image.
+ */
+static ssize_t curr_bitstream_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int curr_bitstream;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ curr_bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
+ return sprintf(buf, "%d\n", curr_bitstream);
+}
+static DEVICE_ATTR_RO(curr_bitstream);
+
+/**
+ * next_bitstream_show() - Show the next activated bitstream
+ *
+ * IO_SLC_CFGREG_SOFTRESET: This register can only be accessed by the PF.
+ */
+static ssize_t next_bitstream_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int next_bitstream;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ switch ((cd->softreset & 0xc) >> 2) {
+ case 0x2:
+ next_bitstream = 0;
+ break;
+ case 0x3:
+ next_bitstream = 1;
+ break;
+ default:
+ next_bitstream = -1;
+ break; /* error */
+ }
+ return sprintf(buf, "%d\n", next_bitstream);
+}
+
+static ssize_t next_bitstream_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int partition;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ if (kstrtoint(buf, 0, &partition) < 0)
+ return -EINVAL;
+
+ switch (partition) {
+ case 0x0:
+ cd->softreset = 0x78;
+ break;
+ case 0x1:
+ cd->softreset = 0x7c;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset);
+ return count;
+}
+static DEVICE_ATTR_RW(next_bitstream);
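
For illustration, a small user-space sketch of driving the attribute declared above; the sysfs path is an assumption (it depends on the class and device names chosen at registration), and only partitions 0 and 1 are accepted by next_bitstream_store().

#include <stdio.h>

/* Select the bitstream partition to be activated on the next soft reset. */
static int select_next_bitstream(int partition)
{
	/* assumed path; adjust to the actual card node on your system */
	FILE *f = fopen("/sys/class/genwqe/genwqe0_card/next_bitstream", "w");

	if (f == NULL)
		return -1;
	fprintf(f, "%d\n", partition);	/* must be 0 or 1 */
	return fclose(f);
}
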
+
+/*
+ * Create device_attribute structures / params: name, mode, show, store,
+ * plus an additional flag telling whether they are valid in a VF.
+ */
+static struct attribute *genwqe_attributes[] = {
+ &dev_attr_tempsens.attr,
+ &dev_attr_next_bitstream.attr,
+ &dev_attr_curr_bitstream.attr,
+ &dev_attr_base_clock.attr,
+ &dev_attr_driver.attr,
+ &dev_attr_type.attr,
+ &dev_attr_version.attr,
+ &dev_attr_appid.attr,
+ &dev_attr_status.attr,
+ &dev_attr_freerunning_timer.attr,
+ &dev_attr_queue_working_time.attr,
+ NULL,
+};
+
+static struct attribute *genwqe_normal_attributes[] = {
+ &dev_attr_driver.attr,
+ &dev_attr_type.attr,
+ &dev_attr_version.attr,
+ &dev_attr_appid.attr,
+ &dev_attr_status.attr,
+ &dev_attr_freerunning_timer.attr,
+ &dev_attr_queue_working_time.attr,
+ NULL,
+};
+
+/**
+ * genwqe_is_visible() - Determine if sysfs attribute should be visible or not
+ *
+ * VFs have restricted mmio capabilities, so not all sysfs entries
+ * are allowed in VFs.
+ */
+static umode_t genwqe_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ unsigned int j;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+ umode_t mode = attr->mode;
+
+ if (genwqe_is_privileged(cd))
+ return mode;
+
+ for (j = 0; genwqe_normal_attributes[j] != NULL; j++)
+ if (genwqe_normal_attributes[j] == attr)
+ return mode;
+
+ return 0;
+}
+
+static struct attribute_group genwqe_attribute_group = {
+ .is_visible = genwqe_is_visible,
+ .attrs = genwqe_attributes,
+};
+
+const struct attribute_group *genwqe_attribute_groups[] = {
+ &genwqe_attribute_group,
+ NULL,
+};
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
new file mode 100644
index 0000000..6b1a6ef
--- /dev/null
+++ b/drivers/misc/genwqe/card_utils.c
@@ -0,0 +1,944 @@
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Miscellaneous functionality used by the other GenWQE driver parts.
+ */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/page-flags.h>
+#include <linux/scatterlist.h>
+#include <linux/hugetlb.h>
+#include <linux/iommu.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <asm/pgtable.h>
+
+#include "genwqe_driver.h"
+#include "card_base.h"
+#include "card_ddcb.h"
+
+/**
+ * __genwqe_writeq() - Write 64-bit register
+ * @cd: genwqe device descriptor
+ * @byte_offs: byte offset within BAR
+ * @val: 64-bit value
+ *
+ * Return: 0 if success; < 0 if error
+ */
+int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
+{
+ if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
+ return -EIO;
+
+ if (cd->mmio == NULL)
+ return -EIO;
+
+ __raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
+ return 0;
+}
+
+/**
+ * __genwqe_readq() - Read 64-bit register
+ * @cd: genwqe device descriptor
+ * @byte_offs: offset within BAR
+ *
+ * Return: value from register
+ */
+u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
+{
+ if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
+ return 0xffffffffffffffffull;
+
+ if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) &&
+ (byte_offs == IO_SLC_CFGREG_GFIR))
+ return 0x000000000000ffffull;
+
+ if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) &&
+ (byte_offs == IO_SLC_CFGREG_GFIR))
+ return 0x00000000ffff0000ull;
+
+ if (cd->mmio == NULL)
+ return 0xffffffffffffffffull;
+
+ return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs));
+}
+
+/**
+ * __genwqe_writel() - Write 32-bit register
+ * @cd: genwqe device descriptor
+ * @byte_offs: byte offset within BAR
+ * @val: 32-bit value
+ *
+ * Return: 0 if success; < 0 if error
+ */
+int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
+{
+ if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
+ return -EIO;
+
+ if (cd->mmio == NULL)
+ return -EIO;
+
+ __raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
+ return 0;
+}
+
+/**
+ * __genwqe_readl() - Read 32-bit register
+ * @cd: genwqe device descriptor
+ * @byte_offs: offset within BAR
+ *
+ * Return: Value from register
+ */
+u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
+{
+ if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
+ return 0xffffffff;
+
+ if (cd->mmio == NULL)
+ return 0xffffffff;
+
+ return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs));
+}
+
+/**
+ * genwqe_read_app_id() - Extract app_id
+ *
+ * app_unitcfg needs to be filled with valid data first
+ */
+int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
+{
+ int i, j;
+ u32 app_id = (u32)cd->app_unitcfg;
+
+ memset(app_name, 0, len);
+ for (i = 0, j = 0; j < min(len, 4); j++) {
+ char ch = (char)((app_id >> (24 - j*8)) & 0xff);
+ if (ch == ' ')
+ continue;
+ app_name[i++] = isprint(ch) ? ch : 'X';
+ }
+ return i;
+}
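
A user-space replica of the decode above, using a made-up app id of 0x47454E20 ('G','E','N',' '), which yields "GEN":

#include <ctype.h>
#include <stdio.h>

int main(void)
{
	unsigned int app_id = 0x47454E20;	/* made-up example value */
	char name[5] = { 0 };
	int i = 0, j;

	for (j = 0; j < 4; j++) {
		char ch = (app_id >> (24 - j * 8)) & 0xff;

		if (ch == ' ')			/* spaces are skipped */
			continue;
		name[i++] = isprint((unsigned char)ch) ? ch : 'X';
	}
	printf("%s\n", name);			/* prints "GEN" */
	return 0;
}
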
+
+/**
+ * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations
+ *
+ * Existing kernel functions seem to use a different polynomial,
+ * therefore we could not use them here.
+ *
+ * Genwqe's Polynomial = 0x20044009
+ */
+#define CRC32_POLYNOMIAL 0x20044009
+static u32 crc32_tab[256]; /* crc32 lookup table */
+
+void genwqe_init_crc32(void)
+{
+ int i, j;
+ u32 crc;
+
+ for (i = 0; i < 256; i++) {
+ crc = i << 24;
+ for (j = 0; j < 8; j++) {
+ if (crc & 0x80000000)
+ crc = (crc << 1) ^ CRC32_POLYNOMIAL;
+ else
+ crc = (crc << 1);
+ }
+ crc32_tab[i] = crc;
+ }
+}
+
+/**
+ * genwqe_crc32() - Generate 32-bit crc as required for DDCBs
+ * @buff: pointer to data buffer
+ * @len: length of data for calculation
+ * @init: initial crc (0xffffffff at start)
+ *
+ * polynomial = x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
+ *
+ * Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
+ * result in a crc32 of 0xf33cb7d3.
+ *
+ * The existing kernel crc functions did not cover this polynomial yet.
+ *
+ * Return: crc32 checksum.
+ */
+u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
+{
+ int i;
+ u32 crc;
+
+ crc = init;
+ while (len--) {
+ i = ((crc >> 24) ^ *buff++) & 0xFF;
+ crc = (crc << 8) ^ crc32_tab[i];
+ }
+ return crc;
+}
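
A stand-alone user-space sketch that mirrors the two routines above and checks the documented test vector (0x01 0x02 0x03 0x04 with init=0xffffffff should yield 0xf33cb7d3):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define CRC32_POLYNOMIAL 0x20044009u

static uint32_t crc32_tab[256];

static void init_tab(void)
{
	int i, j;

	for (i = 0; i < 256; i++) {
		uint32_t crc = (uint32_t)i << 24;

		for (j = 0; j < 8; j++)
			crc = (crc & 0x80000000u) ?
				(crc << 1) ^ CRC32_POLYNOMIAL : (crc << 1);
		crc32_tab[i] = crc;
	}
}

static uint32_t crc32_genwqe(const uint8_t *buf, size_t len, uint32_t init)
{
	uint32_t crc = init;

	while (len--)
		crc = (crc << 8) ^ crc32_tab[((crc >> 24) ^ *buf++) & 0xff];
	return crc;
}

int main(void)
{
	const uint8_t data[4] = { 0x01, 0x02, 0x03, 0x04 };

	init_tab();
	printf("crc=0x%08x (expected 0xf33cb7d3)\n",
	       crc32_genwqe(data, sizeof(data), 0xffffffffu));
	return 0;
}
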
+
+void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
+ dma_addr_t *dma_handle)
+{
+ if (get_order(size) > MAX_ORDER)
+ return NULL;
+
+ return pci_alloc_consistent(cd->pci_dev, size, dma_handle);
+}
+
+void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ if (vaddr == NULL)
+ return;
+
+ pci_free_consistent(cd->pci_dev, size, vaddr, dma_handle);
+}
+
+static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
+ int num_pages)
+{
+ int i;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
+ pci_unmap_page(pci_dev, dma_list[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_list[i] = 0x0;
+ }
+}
+
+static int genwqe_map_pages(struct genwqe_dev *cd,
+ struct page **page_list, int num_pages,
+ dma_addr_t *dma_list)
+{
+ int i;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ /* establish DMA mapping for requested pages */
+ for (i = 0; i < num_pages; i++) {
+ dma_addr_t daddr;
+
+ dma_list[i] = 0x0;
+ daddr = pci_map_page(pci_dev, page_list[i],
+ 0, /* map_offs */
+ PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL); /* FIXME rd/rw */
+
+ if (pci_dma_mapping_error(pci_dev, daddr)) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: no dma addr daddr=%016llx!\n",
+ __func__, (long long)daddr);
+ goto err;
+ }
+
+ dma_list[i] = daddr;
+ }
+ return 0;
+
+ err:
+ genwqe_unmap_pages(cd, dma_list, num_pages);
+ return -EIO;
+}
+
+static int genwqe_sgl_size(int num_pages)
+{
+ int len, num_tlb = num_pages / 7;
+
+	len = sizeof(struct sg_entry) * (num_pages + num_tlb + 1);
+ return roundup(len, PAGE_SIZE);
+}
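
To make the sizing rule concrete, a small user-space sketch; the 16-byte entry size and the 4 KiB page size are assumptions (struct sg_entry as used further below carries a 64-bit target_addr plus 32-bit len and flags fields):

#include <stdio.h>

#define ASSUMED_SG_ENTRY_SIZE	16	/* u64 target_addr + u32 len + u32 flags */
#define ASSUMED_PAGE_SIZE	4096

static long sgl_bytes(int num_pages)
{
	int num_tlb = num_pages / 7;	/* one chaining entry per block of 7 data entries */
	long len = ASSUMED_SG_ENTRY_SIZE * (num_pages + num_tlb + 1);

	/* round up to full pages, as genwqe_sgl_size() does */
	return (len + ASSUMED_PAGE_SIZE - 1) / ASSUMED_PAGE_SIZE * ASSUMED_PAGE_SIZE;
}

int main(void)
{
	printf("%ld %ld %ld\n", sgl_bytes(1), sgl_bytes(256), sgl_bytes(4096));
	return 0;
}
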
+
+struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages,
+ dma_addr_t *dma_addr, size_t *sgl_size)
+{
+ struct pci_dev *pci_dev = cd->pci_dev;
+ struct sg_entry *sgl;
+
+ *sgl_size = genwqe_sgl_size(num_pages);
+ if (get_order(*sgl_size) > MAX_ORDER) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: too much memory requested!\n", __func__);
+ return NULL;
+ }
+
+ sgl = __genwqe_alloc_consistent(cd, *sgl_size, dma_addr);
+ if (sgl == NULL) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: no memory available!\n", __func__);
+ return NULL;
+ }
+
+ return sgl;
+}
+
+int genwqe_setup_sgl(struct genwqe_dev *cd,
+ unsigned long offs,
+ unsigned long size,
+ struct sg_entry *sgl,
+ dma_addr_t dma_addr, size_t sgl_size,
+ dma_addr_t *dma_list, int page_offs, int num_pages)
+{
+ int i = 0, j = 0, p;
+ unsigned long dma_offs, map_offs;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ dma_addr_t prev_daddr = 0;
+ struct sg_entry *s, *last_s = NULL;
+
+ /* sanity checks */
+ if (offs > PAGE_SIZE) {
+ dev_err(&pci_dev->dev,
+ "[%s] too large start offs %08lx\n", __func__, offs);
+ return -EFAULT;
+ }
+ if (sgl_size < genwqe_sgl_size(num_pages)) {
+ dev_err(&pci_dev->dev,
+ "[%s] sgl_size too small %08lx for %d pages\n",
+ __func__, sgl_size, num_pages);
+ return -EFAULT;
+ }
+
+ dma_offs = 128; /* next block if needed/dma_offset */
+ map_offs = offs; /* offset in first page */
+
+ s = &sgl[0]; /* first set of 8 entries */
+ p = 0; /* page */
+ while (p < num_pages) {
+ dma_addr_t daddr;
+ unsigned int size_to_map;
+
+ /* always write the chaining entry, cleanup is done later */
+ j = 0;
+ s[j].target_addr = cpu_to_be64(dma_addr + dma_offs);
+ s[j].len = cpu_to_be32(128);
+ s[j].flags = cpu_to_be32(SG_CHAINED);
+ j++;
+
+ while (j < 8) {
+ /* DMA mapping for requested page, offs, size */
+ size_to_map = min(size, PAGE_SIZE - map_offs);
+ daddr = dma_list[page_offs + p] + map_offs;
+ size -= size_to_map;
+ map_offs = 0;
+
+ if (prev_daddr == daddr) {
+ u32 prev_len = be32_to_cpu(last_s->len);
+
+ /* pr_info("daddr combining: "
+ "%016llx/%08x -> %016llx\n",
+ prev_daddr, prev_len, daddr); */
+
+ last_s->len = cpu_to_be32(prev_len +
+ size_to_map);
+
+ p++; /* process next page */
+ if (p == num_pages)
+ goto fixup; /* nothing to do */
+
+ prev_daddr = daddr + size_to_map;
+ continue;
+ }
+
+ /* start new entry */
+ s[j].target_addr = cpu_to_be64(daddr);
+ s[j].len = cpu_to_be32(size_to_map);
+ s[j].flags = cpu_to_be32(SG_DATA);
+ prev_daddr = daddr + size_to_map;
+ last_s = &s[j];
+ j++;
+
+ p++; /* process next page */
+ if (p == num_pages)
+ goto fixup; /* nothing to do */
+ }
+ dma_offs += 128;
+ s += 8; /* continue 8 elements further */
+ }
+ fixup:
+	if (j == 1) {		/* combining happened on last entry! */
+ s -= 8; /* full shift needed on previous sgl block */
+ j = 7; /* shift all elements */
+ }
+
+ for (i = 0; i < j; i++) /* move elements 1 up */
+ s[i] = s[i + 1];
+
+ s[i].target_addr = cpu_to_be64(0);
+ s[i].len = cpu_to_be32(0);
+ s[i].flags = cpu_to_be32(SG_END_LIST);
+ return 0;
+}
+
+void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
+ dma_addr_t dma_addr, size_t size)
+{
+ __genwqe_free_consistent(cd, size, sg_list, dma_addr);
+}
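
Purely illustrative, assuming the driver headers (card_base.h) are available: how the three SGL helpers above are meant to be combined once a user buffer has been pinned and DMA-mapped, e.g. by genwqe_user_vmap() further down. Error handling is trimmed.

static int example_build_sgl(struct genwqe_dev *cd, struct dma_mapping *m,
			     unsigned long offs, unsigned long size)
{
	dma_addr_t sgl_dma;
	size_t sgl_size;
	struct sg_entry *sgl;
	int rc;

	sgl = genwqe_alloc_sgl(cd, m->nr_pages, &sgl_dma, &sgl_size);
	if (sgl == NULL)
		return -ENOMEM;

	rc = genwqe_setup_sgl(cd, offs, size, sgl, sgl_dma, sgl_size,
			      m->dma_list, 0, m->nr_pages);

	/* ... point the DDCB at sgl_dma and run the request ... */

	genwqe_free_sgl(cd, sgl, sgl_dma, sgl_size);
	return rc;
}
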
+
+/**
+ * free_user_pages() - Give pinned pages back
+ *
+ * Documentation of get_user_pages is in mm/memory.c:
+ *
+ * If the page is written to, set_page_dirty (or set_page_dirty_lock,
+ * as appropriate) must be called after the page is finished with, and
+ * before put_page is called.
+ *
+ * FIXME Could be of use to others and might belong in the generic
+ * code, if others agree. E.g.
+ * ll_free_user_pages in drivers/staging/lustre/lustre/llite/rw26.c
+ * ceph_put_page_vector in net/ceph/pagevec.c
+ * maybe more?
+ */
+static int free_user_pages(struct page **page_list, unsigned int nr_pages,
+ int dirty)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (page_list[i] != NULL) {
+ if (dirty)
+ set_page_dirty_lock(page_list[i]);
+ put_page(page_list[i]);
+ }
+ }
+ return 0;
+}
+
+/**
+ * genwqe_user_vmap() - Map user-space memory to virtual kernel memory
+ * @cd: pointer to genwqe device
+ * @m: mapping params
+ * @uaddr: user virtual address
+ * @size: size of memory to be mapped
+ *
+ * We need to think about how we could speed this up. Of course it is
+ * not a good idea to do this over and over again, as we currently
+ * do. Nevertheless, I am curious where on the path the time is
+ * spent. Most probably within the memory allocation functions, but
+ * maybe also in the DMA mapping code.
+ *
+ * Restrictions: The maximum size of the possible mapping currently depends
+ * on the amount of memory we can get using kzalloc() for the
+ * page_list and pci_alloc_consistent for the sg_list.
+ * The sg_list is currently itself not scattered, which could
+ * be fixed with some effort. The page_list must be split into
+ *              PAGE_SIZE chunks too. All that would make the already
+ *              complicated code even more complicated.
+ *
+ * Return: 0 if success
+ */
+int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
+ unsigned long size, struct ddcb_requ *req)
+{
+ int rc = -EINVAL;
+ unsigned long data, offs;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if ((uaddr == NULL) || (size == 0)) {
+ m->size = 0; /* mark unused and not added */
+ return -EINVAL;
+ }
+ m->u_vaddr = uaddr;
+ m->size = size;
+
+ /* determine space needed for page_list. */
+ data = (unsigned long)uaddr;
+ offs = offset_in_page(data);
+ m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);
+
+ m->page_list = kcalloc(m->nr_pages,
+ sizeof(struct page *) + sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!m->page_list) {
+ dev_err(&pci_dev->dev, "err: alloc page_list failed\n");
+ m->nr_pages = 0;
+ m->u_vaddr = NULL;
+ m->size = 0; /* mark unused and not added */
+ return -ENOMEM;
+ }
+ m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);
+
+ /* pin user pages in memory */
+ rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
+ m->nr_pages,
+ 1, /* write by caller */
+ m->page_list); /* ptrs to pages */
+
+ /* assumption: get_user_pages can be killed by signals. */
+ if (rc < m->nr_pages) {
+ free_user_pages(m->page_list, rc, 0);
+ rc = -EFAULT;
+ goto fail_get_user_pages;
+ }
+
+ rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
+ if (rc != 0)
+ goto fail_free_user_pages;
+
+ return 0;
+
+ fail_free_user_pages:
+ free_user_pages(m->page_list, m->nr_pages, 0);
+
+ fail_get_user_pages:
+ kfree(m->page_list);
+ m->page_list = NULL;
+ m->dma_list = NULL;
+ m->nr_pages = 0;
+ m->u_vaddr = NULL;
+ m->size = 0; /* mark unused and not added */
+ return rc;
+}
+
+/**
+ * genwqe_user_vunmap() - Undo mapping of user-space mem to virtual kernel
+ * memory
+ * @cd: pointer to genwqe device
+ * @m: mapping params
+ */
+int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
+ struct ddcb_requ *req)
+{
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (!dma_mapping_used(m)) {
+ dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n",
+ __func__, m);
+ return -EINVAL;
+ }
+
+ if (m->dma_list)
+ genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
+
+ if (m->page_list) {
+ free_user_pages(m->page_list, m->nr_pages, 1);
+
+ kfree(m->page_list);
+ m->page_list = NULL;
+ m->dma_list = NULL;
+ m->nr_pages = 0;
+ }
+
+ m->u_vaddr = NULL;
+ m->size = 0; /* mark as unused and not added */
+ return 0;
+}
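
Again illustrative only, assuming the driver headers: the intended vmap/vunmap life cycle around a hardware request, with error handling trimmed.

static int example_with_user_buffer(struct genwqe_dev *cd,
				    struct dma_mapping *m,
				    void *uaddr, unsigned long size,
				    struct ddcb_requ *req)
{
	int rc;

	rc = genwqe_user_vmap(cd, m, uaddr, size, req);	/* pin + DMA-map */
	if (rc)
		return rc;	/* nothing was pinned or mapped */

	/* ... build the SGL from m->dma_list and hand it to the DDCB ... */

	return genwqe_user_vunmap(cd, m, req);		/* unmap + unpin */
}
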
+
+/**
+ * genwqe_card_type() - Get chip type SLU Configuration Register
+ * @cd: pointer to the genwqe device descriptor
+ * Return: 0: Altera Stratix-IV 230
+ * 1: Altera Stratix-IV 530
+ * 2: Altera Stratix-V A4
+ * 3: Altera Stratix-V A7
+ */
+u8 genwqe_card_type(struct genwqe_dev *cd)
+{
+ u64 card_type = cd->slu_unitcfg;
+ return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20);
+}
+
+/**
+ * genwqe_card_reset() - Reset the card
+ * @cd: pointer to the genwqe device descriptor
+ */
+int genwqe_card_reset(struct genwqe_dev *cd)
+{
+ u64 softrst;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (!genwqe_is_privileged(cd))
+ return -ENODEV;
+
+ /* new SL */
+ __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
+ msleep(1000);
+ __genwqe_readq(cd, IO_HSU_FIR_CLR);
+ __genwqe_readq(cd, IO_APP_FIR_CLR);
+ __genwqe_readq(cd, IO_SLU_FIR_CLR);
+
+ /*
+ * Read-modify-write to preserve the stealth bits
+ *
+ * For SL >= 039, Stealth WE bit allows removing
+	 * the read-modify-write.
+ * r-m-w may require a mask 0x3C to avoid hitting hard
+ * reset again for error reset (should be 0, chicken).
+ */
+ softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull;
+ __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull);
+
+ /* give ERRORRESET some time to finish */
+ msleep(50);
+
+ if (genwqe_need_err_masking(cd)) {
+ dev_info(&pci_dev->dev,
+ "[%s] masking errors for old bitstreams\n", __func__);
+ __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
+ }
+ return 0;
+}
+
+int genwqe_read_softreset(struct genwqe_dev *cd)
+{
+ u64 bitstream;
+
+ if (!genwqe_is_privileged(cd))
+ return -ENODEV;
+
+ bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
+ cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull;
+ return 0;
+}
+
+/**
+ * genwqe_set_interrupt_capability() - Configure MSI capability structure
+ * @cd: pointer to the device
+ * Return: 0 if no error
+ */
+int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
+{
+ int rc;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ rc = pci_enable_msi_block(pci_dev, count);
+ if (rc == 0)
+ cd->flags |= GENWQE_FLAG_MSI_ENABLED;
+ return rc;
+}
+
+/**
+ * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability()
+ * @cd: pointer to the device
+ */
+void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
+{
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (cd->flags & GENWQE_FLAG_MSI_ENABLED) {
+ pci_disable_msi(pci_dev);
+ cd->flags &= ~GENWQE_FLAG_MSI_ENABLED;
+ }
+}
+
+/**
+ * set_reg_idx() - Fill array with data. Ignore illegal offsets.
+ * @cd: card device
+ * @r: debug register array
+ * @i: index to desired entry
+ * @m: maximum possible entries
+ * @addr: addr which is read
+ * @idx: index in debug array
+ * @val: read value
+ */
+static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
+ unsigned int *i, unsigned int m, u32 addr, u32 idx,
+ u64 val)
+{
+ if (WARN_ON_ONCE(*i >= m))
+ return -EFAULT;
+
+ r[*i].addr = addr;
+ r[*i].idx = idx;
+ r[*i].val = val;
+ ++*i;
+ return 0;
+}
+
+static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r,
+ unsigned int *i, unsigned int m, u32 addr, u64 val)
+{
+ return set_reg_idx(cd, r, i, m, addr, 0, val);
+}
+
+int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
+ unsigned int max_regs, int all)
+{
+ unsigned int i, j, idx = 0;
+ u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr;
+ u64 gfir, sluid, appid, ufir, ufec, sfir, sfec;
+
+ /* Global FIR */
+ gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir);
+
+ /* UnitCfg for SLU */
+ sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */
+ set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid);
+
+ /* UnitCfg for APP */
+ appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */
+ set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid);
+
+ /* Check all chip Units */
+ for (i = 0; i < GENWQE_MAX_UNITS; i++) {
+
+ /* Unit FIR */
+ ufir_addr = (i << 24) | 0x008;
+ ufir = __genwqe_readq(cd, ufir_addr);
+ set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir);
+
+ /* Unit FEC */
+ ufec_addr = (i << 24) | 0x018;
+ ufec = __genwqe_readq(cd, ufec_addr);
+ set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec);
+
+ for (j = 0; j < 64; j++) {
+ /* wherever there is a primary 1, read the 2ndary */
+ if (!all && (!(ufir & (1ull << j))))
+ continue;
+
+ sfir_addr = (i << 24) | (0x100 + 8 * j);
+ sfir = __genwqe_readq(cd, sfir_addr);
+ set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir);
+
+ sfec_addr = (i << 24) | (0x300 + 8 * j);
+ sfec = __genwqe_readq(cd, sfec_addr);
+ set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
+ }
+ }
+
+ /* fill with invalid data until end */
+ for (i = idx; i < max_regs; i++) {
+ regs[i].addr = 0xffffffff;
+ regs[i].val = 0xffffffffffffffffull;
+ }
+ return idx;
+}
+
+/**
+ * genwqe_ffdc_buff_size() - Calculates the number of dump registers
+ */
+int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
+{
+ int entries = 0, ring, traps, traces, trace_entries;
+ u32 eevptr_addr, l_addr, d_len, d_type;
+ u64 eevptr, val, addr;
+
+ eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
+ eevptr = __genwqe_readq(cd, eevptr_addr);
+
+ if ((eevptr != 0x0) && (eevptr != -1ull)) {
+ l_addr = GENWQE_UID_OFFS(uid) | eevptr;
+
+ while (1) {
+ val = __genwqe_readq(cd, l_addr);
+
+ if ((val == 0x0) || (val == -1ull))
+ break;
+
+ /* 38:24 */
+ d_len = (val & 0x0000007fff000000ull) >> 24;
+
+ /* 39 */
+ d_type = (val & 0x0000008000000000ull) >> 36;
+
+ if (d_type) { /* repeat */
+ entries += d_len;
+ } else { /* size in bytes! */
+ entries += d_len >> 3;
+ }
+
+ l_addr += 8;
+ }
+ }
+
+ for (ring = 0; ring < 8; ring++) {
+ addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
+ val = __genwqe_readq(cd, addr);
+
+ if ((val == 0x0ull) || (val == -1ull))
+ continue;
+
+ traps = (val >> 24) & 0xff;
+ traces = (val >> 16) & 0xff;
+ trace_entries = val & 0xffff;
+
+ entries += traps + (traces * trace_entries);
+ }
+ return entries;
+}
+
+/**
+ * genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
+ */
+int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
+ struct genwqe_reg *regs, unsigned int max_regs)
+{
+ int i, traps, traces, trace, trace_entries, trace_entry, ring;
+ unsigned int idx = 0;
+ u32 eevptr_addr, l_addr, d_addr, d_len, d_type;
+ u64 eevptr, e, val, addr;
+
+ eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
+ eevptr = __genwqe_readq(cd, eevptr_addr);
+
+ if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) {
+ l_addr = GENWQE_UID_OFFS(uid) | eevptr;
+ while (1) {
+ e = __genwqe_readq(cd, l_addr);
+ if ((e == 0x0) || (e == 0xffffffffffffffffull))
+ break;
+
+ d_addr = (e & 0x0000000000ffffffull); /* 23:0 */
+ d_len = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */
+ d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */
+ d_addr |= GENWQE_UID_OFFS(uid);
+
+ if (d_type) {
+ for (i = 0; i < (int)d_len; i++) {
+ val = __genwqe_readq(cd, d_addr);
+ set_reg_idx(cd, regs, &idx, max_regs,
+ d_addr, i, val);
+ }
+ } else {
+ d_len >>= 3; /* Size in bytes! */
+ for (i = 0; i < (int)d_len; i++, d_addr += 8) {
+ val = __genwqe_readq(cd, d_addr);
+ set_reg_idx(cd, regs, &idx, max_regs,
+ d_addr, 0, val);
+ }
+ }
+ l_addr += 8;
+ }
+ }
+
+ /*
+	 * To save time, only 6 traces are populated on Uid=2,
+	 * Ring=1, each with iters=512.
+ */
+ for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds,
+ 2...7 are ASI rings */
+ addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
+ val = __genwqe_readq(cd, addr);
+
+ if ((val == 0x0ull) || (val == -1ull))
+ continue;
+
+ traps = (val >> 24) & 0xff; /* Number of Traps */
+ traces = (val >> 16) & 0xff; /* Number of Traces */
+ trace_entries = val & 0xffff; /* Entries per trace */
+
+ /* Note: This is a combined loop that dumps both the traps */
+ /* (for the trace == 0 case) as well as the traces 1 to */
+ /* 'traces'. */
+ for (trace = 0; trace <= traces; trace++) {
+ u32 diag_sel =
+ GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);
+
+ addr = (GENWQE_UID_OFFS(uid) |
+ IO_EXTENDED_DIAG_SELECTOR);
+ __genwqe_writeq(cd, addr, diag_sel);
+
+ for (trace_entry = 0;
+ trace_entry < (trace ? trace_entries : traps);
+ trace_entry++) {
+ addr = (GENWQE_UID_OFFS(uid) |
+ IO_EXTENDED_DIAG_READ_MBX);
+ val = __genwqe_readq(cd, addr);
+ set_reg_idx(cd, regs, &idx, max_regs, addr,
+ (diag_sel<<16) | trace_entry, val);
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * genwqe_write_vreg() - Write register in virtual window
+ *
+ * Note: these registers are only accessible to the PF through the
+ * VF window; they are not intended to be accessed by the VF.
+ */
+int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
+{
+ __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
+ __genwqe_writeq(cd, reg, val);
+ return 0;
+}
+
+/**
+ * genwqe_read_vreg() - Read register in virtual window
+ *
+ * Note: these registers are only accessible to the PF through the
+ * VF window; they are not intended to be accessed by the VF.
+ */
+u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
+{
+ __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
+ return __genwqe_readq(cd, reg);
+}
+
+/**
+ * genwqe_base_clock_frequency() - Determine base clock frequency of the card
+ *
+ * Note: From a design perspective it turned out to be a bad idea to
+ * use codes here to specify the frequency/speed values. An old
+ * driver cannot understand new codes and is therefore always a
+ * problem. It is better to measure the value or to put the
+ * speed/frequency directly into a register, which is always a valid
+ * value for old as well as for new software.
+ *
+ * Return: Card clock in MHz
+ */
+int genwqe_base_clock_frequency(struct genwqe_dev *cd)
+{
+ u16 speed; /* MHz MHz MHz MHz */
+ static const int speed_grade[] = { 250, 200, 166, 175 };
+
+ speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
+ if (speed >= ARRAY_SIZE(speed_grade))
+ return 0; /* illegal value */
+
+ return speed_grade[speed];
+}
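
A tiny worked example of the decode above; the slu_unitcfg value is made up, bits 31:28 select the speed grade, so 0x1 maps to 200 MHz:

#include <stdio.h>

int main(void)
{
	static const int speed_grade[] = { 250, 200, 166, 175 };
	unsigned long long slu_unitcfg = 0x12345678ull;	/* made-up register value */
	unsigned int idx = (slu_unitcfg >> 28) & 0x0f;	/* bits 31:28 */

	printf("%d MHz\n", idx < 4 ? speed_grade[idx] : 0);	/* prints "200 MHz" */
	return 0;
}
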
+
+/**
+ * genwqe_stop_traps() - Stop traps
+ *
+ * Before reading out the analysis data, we need to stop the traps.
+ */
+void genwqe_stop_traps(struct genwqe_dev *cd)
+{
+ __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull);
+}
+
+/**
+ * genwqe_start_traps() - Start traps
+ *
+ * After having read the data, we can/must enable the traps again.
+ */
+void genwqe_start_traps(struct genwqe_dev *cd)
+{
+ __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull);
+
+ if (genwqe_need_err_masking(cd))
+ __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
+}
diff --git a/drivers/misc/genwqe/genwqe_driver.h b/drivers/misc/genwqe/genwqe_driver.h
new file mode 100644
index 0000000..46e916b
--- /dev/null
+++ b/drivers/misc/genwqe/genwqe_driver.h
@@ -0,0 +1,77 @@
+#ifndef __GENWQE_DRIVER_H__
+#define __GENWQE_DRIVER_H__
+
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/scatterlist.h>
+#include <linux/iommu.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+
+#include <asm/byteorder.h>
+#include <linux/genwqe/genwqe_card.h>
+
+#define DRV_VERS_STRING "2.0.0"
+
+/*
+ * Static minor number assignment, until we decide/implement
+ * something dynamic.
+ */
+#define GENWQE_MAX_MINOR 128 /* up to 128 possible genwqe devices */
+
+/**
+ * genwqe_requ_alloc() - Allocate a new DDCB execution request
+ *
+ * This data structure contains the user-visible fields of the DDCB
+ * to be executed.
+ *
+ * Return: ptr to genwqe_ddcb_cmd data structure
+ */
+struct genwqe_ddcb_cmd *ddcb_requ_alloc(void);
+
+/**
+ * ddcb_requ_free() - Free DDCB execution request.
+ * @req: ptr to genwqe_ddcb_cmd data structure.
+ */
+void ddcb_requ_free(struct genwqe_ddcb_cmd *req);
+
+u32 genwqe_crc32(u8 *buff, size_t len, u32 init);
+
+static inline void genwqe_hexdump(struct pci_dev *pci_dev,
+ const void *buff, unsigned int size)
+{
+ char prefix[32];
+
+ scnprintf(prefix, sizeof(prefix), "%s %s: ",
+ GENWQE_DEVNAME, pci_name(pci_dev));
+
+ print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, 16, 1, buff,
+ size, true);
+}
+
+#endif /* __GENWQE_DRIVER_H__ */
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index a2edb2e..49c7a23 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -224,7 +224,7 @@ static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
#ifdef CONFIG_IDE
-int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
+static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
struct block_device *bdev, unsigned int cmd,
unsigned long arg)
{
@@ -334,9 +334,10 @@ static void execute_location(void *dst)
static void execute_user_location(void *dst)
{
+ /* Intentionally crossing kernel/user memory boundary. */
void (*func)(void) = dst;
- if (copy_to_user(dst, do_nothing, EXEC_SIZE))
+ if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
return;
func();
}
@@ -408,6 +409,8 @@ static void lkdtm_do_action(enum ctype which)
case CT_SPINLOCKUP:
/* Must be called twice to trigger. */
spin_lock(&lock_me_up);
+ /* Let sparse know we intended to exit holding the lock. */
+ __release(&lock_me_up);
break;
case CT_HUNG_TASK:
set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index d22c686..2fad844 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -177,7 +177,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
unsigned long timeout;
int i;
- /* Only Posible if we are in timeout */
+ /* Only possible if we are in timeout */
if (!cl || cl != &dev->iamthif_cl) {
dev_dbg(&dev->pdev->dev, "bad file ext.\n");
return -ETIMEDOUT;
@@ -249,7 +249,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
cb->response_buffer.size);
dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
- /* length is being turncated to PAGE_SIZE, however,
+ /* length is being truncated to PAGE_SIZE, however,
* the buf_idx may point beyond */
length = min_t(size_t, length, (cb->buf_idx - *offset));
@@ -316,6 +316,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
mei_hdr.host_addr = dev->iamthif_cl.host_client_id;
mei_hdr.me_addr = dev->iamthif_cl.me_client_id;
mei_hdr.reserved = 0;
+ mei_hdr.internal = 0;
dev->iamthif_msg_buf_index += mei_hdr.length;
ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf);
if (ret)
@@ -477,6 +478,7 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
mei_hdr.host_addr = cl->host_client_id;
mei_hdr.me_addr = cl->me_client_id;
mei_hdr.reserved = 0;
+ mei_hdr.internal = 0;
if (*slots >= msg_slots) {
mei_hdr.length = len;
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 87c96e4..1ee2b94 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -154,7 +154,7 @@ int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
return 0;
}
/**
- * mei_io_cb_alloc_resp_buf - allocate respose buffer
+ * mei_io_cb_alloc_resp_buf - allocate response buffer
*
* @cb: io callback structure
* @length: size of the buffer
@@ -207,7 +207,7 @@ int mei_cl_flush_queues(struct mei_cl *cl)
/**
- * mei_cl_init - initializes intialize cl.
+ * mei_cl_init - initializes cl.
*
* @cl: host client to be initialized
* @dev: mei device
@@ -263,10 +263,10 @@ struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
return NULL;
}
-/** mei_cl_link: allocte host id in the host map
+/** mei_cl_link: allocate host id in the host map
*
* @cl - host client
- * @id - fixed host id or -1 for genereting one
+ * @id - fixed host id or -1 for generic one
*
* returns 0 on success
* -EINVAL on incorrect values
@@ -282,19 +282,19 @@ int mei_cl_link(struct mei_cl *cl, int id)
dev = cl->dev;
- /* If Id is not asigned get one*/
+ /* If Id is not assigned get one*/
if (id == MEI_HOST_CLIENT_ID_ANY)
id = find_first_zero_bit(dev->host_clients_map,
MEI_CLIENTS_MAX);
if (id >= MEI_CLIENTS_MAX) {
- dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ;
+ dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
return -EMFILE;
}
open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
- dev_err(&dev->pdev->dev, "open_handle_count exceded %d",
+ dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
MEI_MAX_OPEN_HANDLE_COUNT);
return -EMFILE;
}
@@ -344,8 +344,6 @@ int mei_cl_unlink(struct mei_cl *cl)
cl->state = MEI_FILE_INITIALIZING;
- list_del_init(&cl->link);
-
return 0;
}
@@ -372,13 +370,14 @@ void mei_host_client_init(struct work_struct *work)
}
dev->dev_state = MEI_DEV_ENABLED;
+ dev->reset_count = 0;
mutex_unlock(&dev->device_lock);
}
/**
- * mei_cl_disconnect - disconnect host clinet form the me one
+ * mei_cl_disconnect - disconnect host client from the me one
*
* @cl: host client
*
@@ -457,7 +456,7 @@ free:
*
* @cl: private data of the file object
*
- * returns ture if other client is connected, 0 - otherwise.
+ * returns true if other client is connected, false - otherwise.
*/
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
@@ -481,7 +480,7 @@ bool mei_cl_is_other_connecting(struct mei_cl *cl)
}
/**
- * mei_cl_connect - connect host clinet to the me one
+ * mei_cl_connect - connect host client to the me one
*
* @cl: host client
*
@@ -729,6 +728,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
mei_hdr.host_addr = cl->host_client_id;
mei_hdr.me_addr = cl->me_client_id;
mei_hdr.reserved = 0;
+ mei_hdr.internal = cb->internal;
if (*slots >= msg_slots) {
mei_hdr.length = len;
@@ -775,7 +775,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
* @cl: host client
* @cl: write callback with filled data
*
- * returns numbe of bytes sent on success, <0 on failure.
+ * returns number of bytes sent on success, <0 on failure.
*/
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
@@ -828,6 +828,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
mei_hdr.host_addr = cl->host_client_id;
mei_hdr.me_addr = cl->me_client_id;
mei_hdr.reserved = 0;
+ mei_hdr.internal = cb->internal;
rets = mei_write_message(dev, &mei_hdr, buf->data);
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index e3870f2..a3ae154 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -43,7 +43,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
mutex_lock(&dev->device_lock);
- /* if the driver is not enabled the list won't b consitent */
+ /* if the driver is not enabled the list won't be consistent */
if (dev->dev_state != MEI_DEV_ENABLED)
goto out;
@@ -101,7 +101,7 @@ static const struct file_operations mei_dbgfs_fops_devstate = {
/**
* mei_dbgfs_deregister - Remove the debugfs files and directories
- * @mei - pointer to mei device private dat
+ * @mei - pointer to mei device private data
*/
void mei_dbgfs_deregister(struct mei_device *dev)
{
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 9b3a0fb..28cd74c 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -28,9 +28,9 @@
*
* @dev: the device structure
*
- * returns none.
+ * returns 0 on success -ENOMEM on allocation failure
*/
-static void mei_hbm_me_cl_allocate(struct mei_device *dev)
+static int mei_hbm_me_cl_allocate(struct mei_device *dev)
{
struct mei_me_client *clients;
int b;
@@ -44,7 +44,7 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
dev->me_clients_num++;
if (dev->me_clients_num == 0)
- return;
+ return 0;
kfree(dev->me_clients);
dev->me_clients = NULL;
@@ -56,12 +56,10 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
sizeof(struct mei_me_client), GFP_KERNEL);
if (!clients) {
dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
- dev->dev_state = MEI_DEV_RESETTING;
- mei_reset(dev, 1);
- return;
+ return -ENOMEM;
}
dev->me_clients = clients;
- return;
+ return 0;
}
/**
@@ -85,12 +83,12 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
}
/**
- * same_disconn_addr - tells if they have the same address
+ * mei_hbm_cl_addr_equal - tells if they have the same address
*
- * @file: private data of the file object.
- * @disconn: disconnection request.
+ * @cl: - client
+ * @buf: buffer with cl header
*
- * returns true if addres are same
+ * returns true if addresses are the same
*/
static inline
bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
@@ -128,6 +126,17 @@ static bool is_treat_specially_client(struct mei_cl *cl,
return false;
}
+/**
+ * mei_hbm_idle - set hbm to idle state
+ *
+ * @dev: the device structure
+ */
+void mei_hbm_idle(struct mei_device *dev)
+{
+ dev->init_clients_timer = 0;
+ dev->hbm_state = MEI_HBM_IDLE;
+}
+
int mei_hbm_start_wait(struct mei_device *dev)
{
int ret;
@@ -137,7 +146,7 @@ int mei_hbm_start_wait(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,
dev->hbm_state == MEI_HBM_IDLE ||
- dev->hbm_state > MEI_HBM_START,
+ dev->hbm_state >= MEI_HBM_STARTED,
mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
mutex_lock(&dev->device_lock);
@@ -153,12 +162,15 @@ int mei_hbm_start_wait(struct mei_device *dev)
* mei_hbm_start_req - sends start request message.
*
* @dev: the device structure
+ *
+ * returns 0 on success and < 0 on failure
*/
int mei_hbm_start_req(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_host_version_request *start_req;
const size_t len = sizeof(struct hbm_host_version_request);
+ int ret;
mei_hbm_hdr(mei_hdr, len);
@@ -170,12 +182,13 @@ int mei_hbm_start_req(struct mei_device *dev)
start_req->host_version.minor_version = HBM_MINOR_VERSION;
dev->hbm_state = MEI_HBM_IDLE;
- if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
- dev_err(&dev->pdev->dev, "version message write failed\n");
- dev->dev_state = MEI_DEV_RESETTING;
- mei_reset(dev, 1);
- return -EIO;
+ ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "version message write failed: ret = %d\n",
+ ret);
+ return ret;
}
+
dev->hbm_state = MEI_HBM_START;
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
return 0;
@@ -186,13 +199,15 @@ int mei_hbm_start_req(struct mei_device *dev)
*
* @dev: the device structure
*
- * returns none.
+ * returns 0 on success and < 0 on failure
*/
-static void mei_hbm_enum_clients_req(struct mei_device *dev)
+static int mei_hbm_enum_clients_req(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_host_enum_request *enum_req;
const size_t len = sizeof(struct hbm_host_enum_request);
+ int ret;
+
/* enumerate clients */
mei_hbm_hdr(mei_hdr, len);
@@ -200,14 +215,15 @@ static void mei_hbm_enum_clients_req(struct mei_device *dev)
memset(enum_req, 0, len);
enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
- if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
- dev->dev_state = MEI_DEV_RESETTING;
- dev_err(&dev->pdev->dev, "enumeration request write failed.\n");
- mei_reset(dev, 1);
+ ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "enumeration request write failed: ret = %d.\n",
+ ret);
+ return ret;
}
dev->hbm_state = MEI_HBM_ENUM_CLIENTS;
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
- return;
+ return 0;
}
/**
@@ -215,7 +231,7 @@ static void mei_hbm_enum_clients_req(struct mei_device *dev)
*
* @dev: the device structure
*
- * returns none.
+ * returns 0 on success and < 0 on failure
*/
static int mei_hbm_prop_req(struct mei_device *dev)
@@ -226,7 +242,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
const size_t len = sizeof(struct hbm_props_request);
unsigned long next_client_index;
unsigned long client_num;
-
+ int ret;
client_num = dev->me_client_presentation_num;
@@ -253,12 +269,11 @@ static int mei_hbm_prop_req(struct mei_device *dev)
prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
prop_req->address = next_client_index;
- if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
- dev->dev_state = MEI_DEV_RESETTING;
- dev_err(&dev->pdev->dev, "properties request write failed\n");
- mei_reset(dev, 1);
-
- return -EIO;
+ ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "properties request write failed: ret = %d\n",
+ ret);
+ return ret;
}
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
@@ -268,7 +283,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
}
/**
- * mei_hbm_stop_req_prepare - perpare stop request message
+ * mei_hbm_stop_req_prepare - prepare stop request message
*
* @dev - mei device
* @mei_hdr - mei message header
@@ -289,7 +304,7 @@ static void mei_hbm_stop_req_prepare(struct mei_device *dev,
}
/**
- * mei_hbm_cl_flow_control_req - sends flow control requst.
+ * mei_hbm_cl_flow_control_req - sends flow control request.
*
* @dev: the device structure
* @cl: client info
@@ -451,7 +466,7 @@ int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
}
/**
- * mei_hbm_cl_connect_res - connect resposne from the ME
+ * mei_hbm_cl_connect_res - connect response from the ME
*
* @dev: the device structure
* @rs: connect response bus message
@@ -505,8 +520,8 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev,
/**
- * mei_hbm_fw_disconnect_req - disconnect request initiated by me
- * host sends disoconnect response
+ * mei_hbm_fw_disconnect_req - disconnect request initiated by ME firmware
+ * host sends disconnect response
*
* @dev: the device structure.
* @disconnect_req: disconnect request bus message from the me
@@ -559,8 +574,10 @@ bool mei_hbm_version_is_supported(struct mei_device *dev)
*
* @dev: the device structure
* @mei_hdr: header of bus message
+ *
+ * returns 0 on success and < 0 on failure
*/
-void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
+int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
struct mei_bus_message *mei_msg;
struct mei_me_client *me_client;
@@ -577,8 +594,20 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
+ /* ignore spurious message and prevent reset nesting
+ * hbm is put to idle during system reset
+ */
+ if (dev->hbm_state == MEI_HBM_IDLE) {
+ dev_dbg(&dev->pdev->dev, "hbm: state is idle ignore spurious messages\n");
+ return 0;
+ }
+
switch (mei_msg->hbm_cmd) {
case HOST_START_RES_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: start: response message received.\n");
+
+ dev->init_clients_timer = 0;
+
version_res = (struct hbm_host_version_response *)mei_msg;
dev_dbg(&dev->pdev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n",
@@ -597,73 +626,89 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
}
if (!mei_hbm_version_is_supported(dev)) {
- dev_warn(&dev->pdev->dev, "hbm version mismatch: stopping the driver.\n");
+ dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n");
- dev->hbm_state = MEI_HBM_STOP;
+ dev->hbm_state = MEI_HBM_STOPPED;
mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr,
dev->wr_msg.data);
- mei_write_message(dev, &dev->wr_msg.hdr,
- dev->wr_msg.data);
+ if (mei_write_message(dev, &dev->wr_msg.hdr,
+ dev->wr_msg.data)) {
+ dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n");
+ return -EIO;
+ }
+ break;
+ }
- return;
+ if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
+ dev->hbm_state != MEI_HBM_START) {
+ dev_err(&dev->pdev->dev, "hbm: start: state mismatch, [%d, %d]\n",
+ dev->dev_state, dev->hbm_state);
+ return -EPROTO;
}
- if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
- dev->hbm_state == MEI_HBM_START) {
- dev->init_clients_timer = 0;
- mei_hbm_enum_clients_req(dev);
- } else {
- dev_err(&dev->pdev->dev, "reset: wrong host start response\n");
- mei_reset(dev, 1);
- return;
+ dev->hbm_state = MEI_HBM_STARTED;
+
+ if (mei_hbm_enum_clients_req(dev)) {
+ dev_err(&dev->pdev->dev, "hbm: start: failed to send enumeration request\n");
+ return -EIO;
}
wake_up_interruptible(&dev->wait_recvd_msg);
- dev_dbg(&dev->pdev->dev, "host start response message received.\n");
break;
case CLIENT_CONNECT_RES_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: client connect response: message received.\n");
+
connect_res = (struct hbm_client_connect_response *) mei_msg;
mei_hbm_cl_connect_res(dev, connect_res);
- dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
wake_up(&dev->wait_recvd_msg);
break;
case CLIENT_DISCONNECT_RES_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: client disconnect response: message received.\n");
+
disconnect_res = (struct hbm_client_connect_response *) mei_msg;
mei_hbm_cl_disconnect_res(dev, disconnect_res);
- dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
wake_up(&dev->wait_recvd_msg);
break;
case MEI_FLOW_CONTROL_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: client flow control response: message received.\n");
+
flow_control = (struct hbm_flow_control *) mei_msg;
mei_hbm_cl_flow_control_res(dev, flow_control);
- dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
break;
case HOST_CLIENT_PROPERTIES_RES_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n");
+
+ dev->init_clients_timer = 0;
+
+ if (dev->me_clients == NULL) {
+ dev_err(&dev->pdev->dev, "hbm: properties response: mei_clients not allocated\n");
+ return -EPROTO;
+ }
+
props_res = (struct hbm_props_response *)mei_msg;
me_client = &dev->me_clients[dev->me_client_presentation_num];
- if (props_res->status || !dev->me_clients) {
- dev_err(&dev->pdev->dev, "reset: properties response hbm wrong status.\n");
- mei_reset(dev, 1);
- return;
+ if (props_res->status) {
+ dev_err(&dev->pdev->dev, "hbm: properties response: wrong status = %d\n",
+ props_res->status);
+ return -EPROTO;
}
if (me_client->client_id != props_res->address) {
- dev_err(&dev->pdev->dev, "reset: host properties response address mismatch\n");
- mei_reset(dev, 1);
- return;
+ dev_err(&dev->pdev->dev, "hbm: properties response: address mismatch %d ?= %d\n",
+ me_client->client_id, props_res->address);
+ return -EPROTO;
}
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) {
- dev_err(&dev->pdev->dev, "reset: unexpected properties response\n");
- mei_reset(dev, 1);
-
- return;
+ dev_err(&dev->pdev->dev, "hbm: properties response: state mismatch, [%d, %d]\n",
+ dev->dev_state, dev->hbm_state);
+ return -EPROTO;
}
me_client->props = props_res->client_properties;
@@ -671,49 +716,70 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->me_client_presentation_num++;
/* request property for the next client */
- mei_hbm_prop_req(dev);
+ if (mei_hbm_prop_req(dev))
+ return -EIO;
break;
case HOST_ENUM_RES_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: enumeration response: message received\n");
+
+ dev->init_clients_timer = 0;
+
enum_res = (struct hbm_host_enum_response *) mei_msg;
BUILD_BUG_ON(sizeof(dev->me_clients_map)
< sizeof(enum_res->valid_addresses));
memcpy(dev->me_clients_map, enum_res->valid_addresses,
sizeof(enum_res->valid_addresses));
- if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
- dev->hbm_state == MEI_HBM_ENUM_CLIENTS) {
- dev->init_clients_timer = 0;
- mei_hbm_me_cl_allocate(dev);
- dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
-
- /* first property reqeust */
- mei_hbm_prop_req(dev);
- } else {
- dev_err(&dev->pdev->dev, "reset: unexpected enumeration response hbm.\n");
- mei_reset(dev, 1);
- return;
+
+ if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
+ dev->hbm_state != MEI_HBM_ENUM_CLIENTS) {
+ dev_err(&dev->pdev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n",
+ dev->dev_state, dev->hbm_state);
+ return -EPROTO;
+ }
+
+ if (mei_hbm_me_cl_allocate(dev)) {
+ dev_err(&dev->pdev->dev, "hbm: enumeration response: cannot allocate clients array\n");
+ return -ENOMEM;
}
+
+ dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
+
+ /* first property request */
+ if (mei_hbm_prop_req(dev))
+ return -EIO;
+
break;
case HOST_STOP_RES_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: stop response: message received\n");
+
+ dev->init_clients_timer = 0;
- if (dev->hbm_state != MEI_HBM_STOP)
- dev_err(&dev->pdev->dev, "unexpected stop response hbm.\n");
- dev->dev_state = MEI_DEV_DISABLED;
- dev_info(&dev->pdev->dev, "reset: FW stop response.\n");
- mei_reset(dev, 1);
+ if (dev->hbm_state != MEI_HBM_STOPPED) {
+ dev_err(&dev->pdev->dev, "hbm: stop response: state mismatch, [%d, %d]\n",
+ dev->dev_state, dev->hbm_state);
+ return -EPROTO;
+ }
+
+ dev->dev_state = MEI_DEV_POWER_DOWN;
+ dev_info(&dev->pdev->dev, "hbm: stop response: resetting.\n");
+ /* force the reset */
+ return -EPROTO;
break;
case CLIENT_DISCONNECT_REQ_CMD:
- /* search for client */
+ dev_dbg(&dev->pdev->dev, "hbm: disconnect request: message received\n");
+
disconnect_req = (struct hbm_client_connect_request *)mei_msg;
mei_hbm_fw_disconnect_req(dev, disconnect_req);
break;
case ME_STOP_REQ_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n");
- dev->hbm_state = MEI_HBM_STOP;
+ dev->hbm_state = MEI_HBM_STOPPED;
mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr,
dev->wr_ext_msg.data);
break;
@@ -722,5 +788,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
break;
}
+ return 0;
}
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 4ae2e56..5f92188 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -32,13 +32,13 @@ struct mei_cl;
enum mei_hbm_state {
MEI_HBM_IDLE = 0,
MEI_HBM_START,
+ MEI_HBM_STARTED,
MEI_HBM_ENUM_CLIENTS,
MEI_HBM_CLIENT_PROPERTIES,
- MEI_HBM_STARTED,
- MEI_HBM_STOP,
+ MEI_HBM_STOPPED,
};
-void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr);
+int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr);
static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
{
@@ -49,6 +49,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
hdr->reserved = 0;
}
+void mei_hbm_idle(struct mei_device *dev);
int mei_hbm_start_req(struct mei_device *dev);
int mei_hbm_start_wait(struct mei_device *dev);
int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 3412adc..6f656c0 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -185,7 +185,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
mei_me_reg_write(hw, H_CSR, hcsr);
- if (dev->dev_state == MEI_DEV_POWER_DOWN)
+ if (intr_enable == false)
mei_me_hw_reset_release(dev);
return 0;
@@ -469,7 +469,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
struct mei_device *dev = (struct mei_device *) dev_id;
struct mei_cl_cb complete_list;
s32 slots;
- int rets;
+ int rets = 0;
dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
/* initialize our complete list */
@@ -482,15 +482,10 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
mei_clear_interrupts(dev);
/* check if ME wants a reset */
- if (!mei_hw_is_ready(dev) &&
- dev->dev_state != MEI_DEV_RESETTING &&
- dev->dev_state != MEI_DEV_INITIALIZING &&
- dev->dev_state != MEI_DEV_POWER_DOWN &&
- dev->dev_state != MEI_DEV_POWER_UP) {
- dev_dbg(&dev->pdev->dev, "FW not ready.\n");
- mei_reset(dev, 1);
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
+ if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
+ dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n");
+ schedule_work(&dev->reset_work);
+ goto end;
}
/* check if we need to start the dev */
@@ -500,15 +495,12 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
dev->recvd_hw_ready = true;
wake_up_interruptible(&dev->wait_hw_ready);
-
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
} else {
+
dev_dbg(&dev->pdev->dev, "Reset Completed.\n");
mei_me_hw_reset_release(dev);
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
}
+ goto end;
}
/* check slots available for reading */
slots = mei_count_full_read_slots(dev);
@@ -516,21 +508,23 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* we have urgent data to send so break the read */
if (dev->wr_ext_msg.hdr.length)
break;
- dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
- dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n");
+ dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots);
rets = mei_irq_read_handler(dev, &complete_list, &slots);
- if (rets)
+ if (rets && dev->dev_state != MEI_DEV_RESETTING) {
+ schedule_work(&dev->reset_work);
goto end;
+ }
}
+
rets = mei_irq_write_handler(dev, &complete_list);
-end:
- dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
- dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
- mutex_unlock(&dev->device_lock);
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
mei_irq_compl_handler(dev, &complete_list);
+end:
+ dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets);
+ mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}
static const struct mei_hw_ops mei_me_hw_ops = {
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index cb2f556..dd44e33 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -111,7 +111,8 @@ struct mei_msg_hdr {
u32 me_addr:8;
u32 host_addr:8;
u32 length:9;
- u32 reserved:6;
+ u32 reserved:5;
+ u32 internal:1;
u32 msg_complete:1;
} __packed;
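Narrowing the reserved field from 6 to 5 bits makes room for the new internal flag while keeping struct mei_msg_hdr packed into a single 32-bit word (8 + 8 + 9 + 5 + 1 + 1 = 32). A minimal sketch of a compile-time check for that invariant, assuming the kernel's BUILD_BUG_ON; the helper name is illustrative and not part of this change:

#include <linux/bug.h>
#include <linux/types.h>

/* hypothetical helper, not in the patch: the bit fields above must still
 * pack into one 32-bit word (8 + 8 + 9 + 5 + 1 + 1 = 32)
 */
static inline void mei_msg_hdr_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct mei_msg_hdr) != sizeof(u32));
}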
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index f7f3abb..cdd31c2 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -43,41 +43,119 @@ const char *mei_dev_state_str(int state)
#undef MEI_DEV_STATE
}
-void mei_device_init(struct mei_device *dev)
-{
- /* setup our list array */
- INIT_LIST_HEAD(&dev->file_list);
- INIT_LIST_HEAD(&dev->device_list);
- mutex_init(&dev->device_lock);
- init_waitqueue_head(&dev->wait_hw_ready);
- init_waitqueue_head(&dev->wait_recvd_msg);
- init_waitqueue_head(&dev->wait_stop_wd);
- dev->dev_state = MEI_DEV_INITIALIZING;
- mei_io_list_init(&dev->read_list);
- mei_io_list_init(&dev->write_list);
- mei_io_list_init(&dev->write_waiting_list);
- mei_io_list_init(&dev->ctrl_wr_list);
- mei_io_list_init(&dev->ctrl_rd_list);
+/**
+ * mei_cancel_work - Cancel mei background jobs
+ *
+ * @dev: the device structure
+ */
+void mei_cancel_work(struct mei_device *dev)
+{
+ cancel_work_sync(&dev->init_work);
+ cancel_work_sync(&dev->reset_work);
- INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
- INIT_WORK(&dev->init_work, mei_host_client_init);
+ cancel_delayed_work(&dev->timer_work);
+}
+EXPORT_SYMBOL_GPL(mei_cancel_work);
- INIT_LIST_HEAD(&dev->wd_cl.link);
- INIT_LIST_HEAD(&dev->iamthif_cl.link);
- mei_io_list_init(&dev->amthif_cmd_list);
- mei_io_list_init(&dev->amthif_rd_complete_list);
+/**
+ * mei_reset - resets host and fw.
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success or < 0 on failure
+ */
+int mei_reset(struct mei_device *dev)
+{
+ enum mei_dev_state state = dev->dev_state;
+ bool interrupts_enabled;
+ int ret;
- bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
- dev->open_handle_count = 0;
+ if (state != MEI_DEV_INITIALIZING &&
+ state != MEI_DEV_DISABLED &&
+ state != MEI_DEV_POWER_DOWN &&
+ state != MEI_DEV_POWER_UP)
+ dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
+ mei_dev_state_str(state));
- /*
- * Reserving the first client ID
- * 0: Reserved for MEI Bus Message communications
+ /* we're already in reset, cancel the init timer
+ * if the reset was called due to an hbm protocol error
+ * we need to call it before hw start
+ * so the hbm watchdog won't kick in
*/
- bitmap_set(dev->host_clients_map, 0, 1);
+ mei_hbm_idle(dev);
+
+ /* enter reset flow */
+ interrupts_enabled = state != MEI_DEV_POWER_DOWN;
+ dev->dev_state = MEI_DEV_RESETTING;
+
+ dev->reset_count++;
+ if (dev->reset_count > MEI_MAX_CONSEC_RESET) {
+ dev_err(&dev->pdev->dev, "reset: reached maximal consecutive resets: disabling the device\n");
+ dev->dev_state = MEI_DEV_DISABLED;
+ return -ENODEV;
+ }
+
+ ret = mei_hw_reset(dev, interrupts_enabled);
+ /* fall through and remove the sw state even if hw reset has failed */
+
+ /* no need to clean up software state in case of power up */
+ if (state != MEI_DEV_INITIALIZING &&
+ state != MEI_DEV_POWER_UP) {
+
+ /* remove all waiting requests */
+ mei_cl_all_write_clear(dev);
+
+ mei_cl_all_disconnect(dev);
+
+ /* wake up all readers and writers so they can be interrupted */
+ mei_cl_all_wakeup(dev);
+
+ /* remove entry if already in list */
+ dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
+ mei_cl_unlink(&dev->wd_cl);
+ mei_cl_unlink(&dev->iamthif_cl);
+ mei_amthif_reset_params(dev);
+ memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
+ }
+
+
+ dev->me_clients_num = 0;
+ dev->rd_msg_hdr = 0;
+ dev->wd_pending = false;
+
+ if (ret) {
+ dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret);
+ dev->dev_state = MEI_DEV_DISABLED;
+ return ret;
+ }
+
+ if (state == MEI_DEV_POWER_DOWN) {
+ dev_dbg(&dev->pdev->dev, "powering down: end of reset\n");
+ dev->dev_state = MEI_DEV_DISABLED;
+ return 0;
+ }
+
+ ret = mei_hw_start(dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret);
+ dev->dev_state = MEI_DEV_DISABLED;
+ return ret;
+ }
+
+ dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
+
+ dev->dev_state = MEI_DEV_INIT_CLIENTS;
+ ret = mei_hbm_start_req(dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret);
+ dev->dev_state = MEI_DEV_DISABLED;
+ return ret;
+ }
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(mei_device_init);
+EXPORT_SYMBOL_GPL(mei_reset);
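mei_reset() now enforces a bound on back-to-back failures: reset_count is incremented on every attempt and the device is disabled with -ENODEV once MEI_MAX_CONSEC_RESET is exceeded, while the entry points (mei_start() and mei_restart()) clear the counter before a fresh bring-up. A minimal sketch of the same bounded-retry pattern, with purely illustrative names:

#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_MAX_CONSEC_RESET 3

struct example_dev {
	unsigned long reset_count;
	bool disabled;
};

/* returns 0 on success, -ENODEV once too many consecutive resets failed */
static int example_reset(struct example_dev *edev)
{
	edev->reset_count++;
	if (edev->reset_count > EXAMPLE_MAX_CONSEC_RESET) {
		edev->disabled = true;
		return -ENODEV;
	}
	/* hardware reset and link bring-up would go here */
	return 0;
}

/* a fresh start re-arms the guard by clearing the counter */
static int example_start(struct example_dev *edev)
{
	edev->reset_count = 0;
	return example_reset(edev);
}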
/**
* mei_start - initializes host and fw to start work.
@@ -90,14 +168,21 @@ int mei_start(struct mei_device *dev)
{
mutex_lock(&dev->device_lock);
- /* acknowledge interrupt and stop interupts */
+ /* acknowledge interrupt and stop interrupts */
mei_clear_interrupts(dev);
mei_hw_config(dev);
dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n");
- mei_reset(dev, 1);
+ dev->dev_state = MEI_DEV_INITIALIZING;
+ dev->reset_count = 0;
+ mei_reset(dev);
+
+ if (dev->dev_state == MEI_DEV_DISABLED) {
+ dev_err(&dev->pdev->dev, "reset failed");
+ goto err;
+ }
if (mei_hbm_start_wait(dev)) {
dev_err(&dev->pdev->dev, "HBM haven't started");
@@ -132,101 +217,64 @@ err:
EXPORT_SYMBOL_GPL(mei_start);
/**
- * mei_reset - resets host and fw.
+ * mei_restart - restart device after suspend
*
* @dev: the device structure
- * @interrupts_enabled: if interrupt should be enabled after reset.
+ *
+ * returns 0 on success or -ENODEV if the restart hasn't succeeded
*/
-void mei_reset(struct mei_device *dev, int interrupts_enabled)
+int mei_restart(struct mei_device *dev)
{
- bool unexpected;
- int ret;
-
- unexpected = (dev->dev_state != MEI_DEV_INITIALIZING &&
- dev->dev_state != MEI_DEV_DISABLED &&
- dev->dev_state != MEI_DEV_POWER_DOWN &&
- dev->dev_state != MEI_DEV_POWER_UP);
+ int err;
- if (unexpected)
- dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
- mei_dev_state_str(dev->dev_state));
-
- ret = mei_hw_reset(dev, interrupts_enabled);
- if (ret) {
- dev_err(&dev->pdev->dev, "hw reset failed disabling the device\n");
- interrupts_enabled = false;
- dev->dev_state = MEI_DEV_DISABLED;
- }
-
- dev->hbm_state = MEI_HBM_IDLE;
+ mutex_lock(&dev->device_lock);
- if (dev->dev_state != MEI_DEV_INITIALIZING &&
- dev->dev_state != MEI_DEV_POWER_UP) {
- if (dev->dev_state != MEI_DEV_DISABLED &&
- dev->dev_state != MEI_DEV_POWER_DOWN)
- dev->dev_state = MEI_DEV_RESETTING;
+ mei_clear_interrupts(dev);
- /* remove all waiting requests */
- mei_cl_all_write_clear(dev);
+ dev->dev_state = MEI_DEV_POWER_UP;
+ dev->reset_count = 0;
- mei_cl_all_disconnect(dev);
+ err = mei_reset(dev);
- /* wake up all readings so they can be interrupted */
- mei_cl_all_wakeup(dev);
-
- /* remove entry if already in list */
- dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
- mei_cl_unlink(&dev->wd_cl);
- mei_cl_unlink(&dev->iamthif_cl);
- mei_amthif_reset_params(dev);
- memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
- }
+ mutex_unlock(&dev->device_lock);
- /* we're already in reset, cancel the init timer */
- dev->init_clients_timer = 0;
+ if (err || dev->dev_state == MEI_DEV_DISABLED)
+ return -ENODEV;
- dev->me_clients_num = 0;
- dev->rd_msg_hdr = 0;
- dev->wd_pending = false;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mei_restart);
- if (!interrupts_enabled) {
- dev_dbg(&dev->pdev->dev, "intr not enabled end of reset\n");
- return;
- }
- ret = mei_hw_start(dev);
- if (ret) {
- dev_err(&dev->pdev->dev, "hw_start failed disabling the device\n");
- dev->dev_state = MEI_DEV_DISABLED;
- return;
- }
+static void mei_reset_work(struct work_struct *work)
+{
+ struct mei_device *dev =
+ container_of(work, struct mei_device, reset_work);
- dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
- /* link is established * start sending messages. */
+ mutex_lock(&dev->device_lock);
- dev->dev_state = MEI_DEV_INIT_CLIENTS;
+ mei_reset(dev);
- mei_hbm_start_req(dev);
+ mutex_unlock(&dev->device_lock);
+ if (dev->dev_state == MEI_DEV_DISABLED)
+ dev_err(&dev->pdev->dev, "reset failed");
}
-EXPORT_SYMBOL_GPL(mei_reset);
void mei_stop(struct mei_device *dev)
{
dev_dbg(&dev->pdev->dev, "stopping the device.\n");
- flush_scheduled_work();
+ mei_cancel_work(dev);
- mutex_lock(&dev->device_lock);
+ mei_nfc_host_exit(dev);
- cancel_delayed_work(&dev->timer_work);
+ mutex_lock(&dev->device_lock);
mei_wd_stop(dev);
- mei_nfc_host_exit();
-
dev->dev_state = MEI_DEV_POWER_DOWN;
- mei_reset(dev, 0);
+ mei_reset(dev);
mutex_unlock(&dev->device_lock);
@@ -236,3 +284,41 @@ EXPORT_SYMBOL_GPL(mei_stop);
+void mei_device_init(struct mei_device *dev)
+{
+ /* setup our list array */
+ INIT_LIST_HEAD(&dev->file_list);
+ INIT_LIST_HEAD(&dev->device_list);
+ mutex_init(&dev->device_lock);
+ init_waitqueue_head(&dev->wait_hw_ready);
+ init_waitqueue_head(&dev->wait_recvd_msg);
+ init_waitqueue_head(&dev->wait_stop_wd);
+ dev->dev_state = MEI_DEV_INITIALIZING;
+ dev->reset_count = 0;
+
+ mei_io_list_init(&dev->read_list);
+ mei_io_list_init(&dev->write_list);
+ mei_io_list_init(&dev->write_waiting_list);
+ mei_io_list_init(&dev->ctrl_wr_list);
+ mei_io_list_init(&dev->ctrl_rd_list);
+
+ INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
+ INIT_WORK(&dev->init_work, mei_host_client_init);
+ INIT_WORK(&dev->reset_work, mei_reset_work);
+
+ INIT_LIST_HEAD(&dev->wd_cl.link);
+ INIT_LIST_HEAD(&dev->iamthif_cl.link);
+ mei_io_list_init(&dev->amthif_cmd_list);
+ mei_io_list_init(&dev->amthif_rd_complete_list);
+
+ bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
+ dev->open_handle_count = 0;
+
+ /*
+ * Reserving the first client ID
+ * 0: Reserved for MEI Bus Message communications
+ */
+ bitmap_set(dev->host_clients_map, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mei_device_init);
+
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 7a95c07..f0fbb51 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -31,7 +31,7 @@
/**
- * mei_irq_compl_handler - dispatch complete handelers
+ * mei_irq_compl_handler - dispatch complete handlers
* for the completed callbacks
*
* @dev - mei device
@@ -301,13 +301,11 @@ int mei_irq_read_handler(struct mei_device *dev,
struct mei_cl_cb *cmpl_list, s32 *slots)
{
struct mei_msg_hdr *mei_hdr;
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
- int ret = 0;
+ struct mei_cl *cl;
+ int ret;
if (!dev->rd_msg_hdr) {
dev->rd_msg_hdr = mei_read_hdr(dev);
- dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
(*slots)--;
dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
}
@@ -315,61 +313,67 @@ int mei_irq_read_handler(struct mei_device *dev,
dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
if (mei_hdr->reserved || !dev->rd_msg_hdr) {
- dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
+ dev_err(&dev->pdev->dev, "corrupted message header 0x%08X\n",
+ dev->rd_msg_hdr);
ret = -EBADMSG;
goto end;
}
- if (mei_hdr->host_addr || mei_hdr->me_addr) {
- list_for_each_entry_safe(cl_pos, cl_next,
- &dev->file_list, link) {
- dev_dbg(&dev->pdev->dev,
- "list_for_each_entry_safe read host"
- " client = %d, ME client = %d\n",
- cl_pos->host_client_id,
- cl_pos->me_client_id);
- if (mei_cl_hbm_equal(cl_pos, mei_hdr))
- break;
- }
-
- if (&cl_pos->link == &dev->file_list) {
- dev_dbg(&dev->pdev->dev, "corrupted message header\n");
- ret = -EBADMSG;
- goto end;
- }
- }
- if (((*slots) * sizeof(u32)) < mei_hdr->length) {
- dev_err(&dev->pdev->dev,
- "we can't read the message slots =%08x.\n",
+ if (mei_slots2data(*slots) < mei_hdr->length) {
+ dev_err(&dev->pdev->dev, "less data available than length=%08x.\n",
*slots);
/* we can't read the message */
ret = -ERANGE;
goto end;
}
- /* decide where to read the message too */
- if (!mei_hdr->host_addr) {
- dev_dbg(&dev->pdev->dev, "call mei_hbm_dispatch.\n");
- mei_hbm_dispatch(dev, mei_hdr);
- dev_dbg(&dev->pdev->dev, "end mei_hbm_dispatch.\n");
- } else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
- (MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
- (dev->iamthif_state == MEI_IAMTHIF_READING)) {
+ /* HBM message */
+ if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) {
+ ret = mei_hbm_dispatch(dev, mei_hdr);
+ if (ret) {
+ dev_dbg(&dev->pdev->dev, "mei_hbm_dispatch failed ret = %d\n",
+ ret);
+ goto end;
+ }
+ goto reset_slots;
+ }
+
+ /* find recipient cl */
+ list_for_each_entry(cl, &dev->file_list, link) {
+ if (mei_cl_hbm_equal(cl, mei_hdr)) {
+ cl_dbg(dev, cl, "got a message\n");
+ break;
+ }
+ }
+
+ /* if no recipient cl was found we assume corrupted header */
+ if (&cl->link == &dev->file_list) {
+ dev_err(&dev->pdev->dev, "no destination client found 0x%08X\n",
+ dev->rd_msg_hdr);
+ ret = -EBADMSG;
+ goto end;
+ }
- dev_dbg(&dev->pdev->dev, "call mei_amthif_irq_read_msg.\n");
- dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
+ if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
+ MEI_FILE_CONNECTED == dev->iamthif_cl.state &&
+ dev->iamthif_state == MEI_IAMTHIF_READING) {
ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
- if (ret)
+ if (ret) {
+ dev_err(&dev->pdev->dev, "mei_amthif_irq_read_msg failed = %d\n",
+ ret);
goto end;
+ }
} else {
- dev_dbg(&dev->pdev->dev, "call mei_cl_irq_read_msg.\n");
- dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list);
- if (ret)
+ if (ret) {
+ dev_err(&dev->pdev->dev, "mei_cl_irq_read_msg failed = %d\n",
+ ret);
goto end;
+ }
}
+reset_slots:
/* reset the number of slots and header */
*slots = mei_count_full_read_slots(dev);
dev->rd_msg_hdr = 0;
@@ -533,7 +537,6 @@ EXPORT_SYMBOL_GPL(mei_irq_write_handler);
*
* @work: pointer to the work_struct structure
*
- * NOTE: This function is called by timer interrupt work
*/
void mei_timer(struct work_struct *work)
{
@@ -548,24 +551,30 @@ void mei_timer(struct work_struct *work)
mutex_lock(&dev->device_lock);
- if (dev->dev_state != MEI_DEV_ENABLED) {
- if (dev->dev_state == MEI_DEV_INIT_CLIENTS) {
- if (dev->init_clients_timer) {
- if (--dev->init_clients_timer == 0) {
- dev_err(&dev->pdev->dev, "reset: init clients timeout hbm_state = %d.\n",
- dev->hbm_state);
- mei_reset(dev, 1);
- }
+
+ /* Catch interrupt stalls during HBM init handshake */
+ if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
+ dev->hbm_state != MEI_HBM_IDLE) {
+
+ if (dev->init_clients_timer) {
+ if (--dev->init_clients_timer == 0) {
+ dev_err(&dev->pdev->dev, "timer: init clients timeout hbm_state = %d.\n",
+ dev->hbm_state);
+ mei_reset(dev);
+ goto out;
}
}
- goto out;
}
+
+ if (dev->dev_state != MEI_DEV_ENABLED)
+ goto out;
+
/*** connect/disconnect timeouts ***/
list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
if (cl_pos->timer_count) {
if (--cl_pos->timer_count == 0) {
- dev_err(&dev->pdev->dev, "reset: connect/disconnect timeout.\n");
- mei_reset(dev, 1);
+ dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n");
+ mei_reset(dev);
goto out;
}
}
@@ -573,8 +582,8 @@ void mei_timer(struct work_struct *work)
if (dev->iamthif_stall_timer) {
if (--dev->iamthif_stall_timer == 0) {
- dev_err(&dev->pdev->dev, "reset: amthif hanged.\n");
- mei_reset(dev, 1);
+ dev_err(&dev->pdev->dev, "timer: amthif hanged.\n");
+ mei_reset(dev);
dev->iamthif_msg_buf_size = 0;
dev->iamthif_msg_buf_index = 0;
dev->iamthif_canceled = false;
@@ -627,7 +636,8 @@ void mei_timer(struct work_struct *work)
}
}
out:
- schedule_delayed_work(&dev->timer_work, 2 * HZ);
+ if (dev->dev_state != MEI_DEV_DISABLED)
+ schedule_delayed_work(&dev->timer_work, 2 * HZ);
mutex_unlock(&dev->device_lock);
}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 9661a81..5424f8f 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -48,7 +48,7 @@
*
* @inode: pointer to inode structure
* @file: pointer to file structure
- e
+ *
* returns 0 on success, <0 on error
*/
static int mei_open(struct inode *inode, struct file *file)
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 406f68e..f7de95b 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -61,11 +61,16 @@ extern const uuid_le mei_wd_guid;
#define MEI_CLIENTS_MAX 256
/*
+ * maximum number of consecutive resets
+ */
+#define MEI_MAX_CONSEC_RESET 3
+
+/*
* Number of File descriptors/handles
* that can be opened to the driver.
*
* Limit to 255: 256 Total Clients
- * minus internal client for MEI Bus Messags
+ * minus internal client for MEI Bus Messages
*/
#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)
@@ -178,9 +183,10 @@ struct mei_cl_cb {
unsigned long buf_idx;
unsigned long read_time;
struct file *file_object;
+ u32 internal:1;
};
-/* MEI client instance carried as file->pirvate_data*/
+/* MEI client instance carried as file->private_data */
struct mei_cl {
struct list_head link;
struct mei_device *dev;
@@ -326,6 +332,7 @@ struct mei_cl_device {
/**
* struct mei_device - MEI private device struct
+ * @reset_count - limits the number of consecutive resets
* @hbm_state - state of host bus message protocol
* @mem_addr - mem mapped base register address
@@ -369,6 +376,7 @@ struct mei_device {
/*
* mei device states
*/
+ unsigned long reset_count;
enum mei_dev_state dev_state;
enum mei_hbm_state hbm_state;
u16 init_clients_timer;
@@ -427,6 +435,7 @@ struct mei_device {
bool iamthif_canceled;
struct work_struct init_work;
+ struct work_struct reset_work;
/* List of bus devices */
struct list_head device_list;
@@ -456,13 +465,25 @@ static inline u32 mei_data2slots(size_t length)
return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
}
+/**
+ * mei_slots2data - get data size in bytes from the number of slots
+ * @slots - number of available slots
+ * returns - number of bytes in slots
+ */
+static inline u32 mei_slots2data(int slots)
+{
+ return slots * 4;
+}
+
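For illustration, a worked example of the two conversions (assuming the 4-byte struct mei_msg_hdr used by this driver):

/*
 * Worked example (illustrative): a 14-byte payload needs
 * mei_data2slots(14) = DIV_ROUND_UP(4 + 14, 4) = 5 slots on the write
 * side; on the read side mei_irq_read_handler() only checks that
 * mei_slots2data(slots) = slots * 4 covers mei_hdr->length, and
 * 5 slots give 20 bytes >= 14.
 */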
/*
* mei init function prototypes
*/
void mei_device_init(struct mei_device *dev);
-void mei_reset(struct mei_device *dev, int interrupts);
+int mei_reset(struct mei_device *dev);
int mei_start(struct mei_device *dev);
+int mei_restart(struct mei_device *dev);
void mei_stop(struct mei_device *dev);
+void mei_cancel_work(struct mei_device *dev);
/*
* MEI interrupt functions prototype
@@ -510,7 +531,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
* NFC functions
*/
int mei_nfc_host_init(struct mei_device *dev);
-void mei_nfc_host_exit(void);
+void mei_nfc_host_exit(struct mei_device *dev);
/*
* NFC Client UUID
@@ -626,9 +647,9 @@ static inline void mei_dbgfs_deregister(struct mei_device *dev) {}
int mei_register(struct mei_device *dev);
void mei_deregister(struct mei_device *dev);
-#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d"
+#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d"
#define MEI_HDR_PRM(hdr) \
(hdr)->host_addr, (hdr)->me_addr, \
- (hdr)->length, (hdr)->msg_complete
+ (hdr)->length, (hdr)->internal, (hdr)->msg_complete
#endif
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index 994ca4a..a58320c 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -92,7 +92,7 @@ struct mei_nfc_hci_hdr {
* @cl: NFC host client
* @cl_info: NFC info host client
* @init_work: perform connection to the info client
- * @fw_ivn: NFC Intervace Version Number
+ * @fw_ivn: NFC Interface Version Number
* @vendor_id: NFC manufacturer ID
* @radio_type: NFC radio type
*/
@@ -163,7 +163,7 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
return 0;
default:
- dev_err(&dev->pdev->dev, "Unknow radio type 0x%x\n",
+ dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n",
ndev->radio_type);
return -EINVAL;
@@ -175,14 +175,14 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
ndev->bus_name = "pn544";
return 0;
default:
- dev_err(&dev->pdev->dev, "Unknow radio type 0x%x\n",
+ dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n",
ndev->radio_type);
return -EINVAL;
}
default:
- dev_err(&dev->pdev->dev, "Unknow vendor ID 0x%x\n",
+ dev_err(&dev->pdev->dev, "Unknown vendor ID 0x%x\n",
ndev->vendor_id);
return -EINVAL;
@@ -428,7 +428,7 @@ static void mei_nfc_init(struct work_struct *work)
mutex_unlock(&dev->device_lock);
if (mei_nfc_if_version(ndev) < 0) {
- dev_err(&dev->pdev->dev, "Could not get the NFC interfave version");
+ dev_err(&dev->pdev->dev, "Could not get the NFC interface version");
goto err;
}
@@ -469,7 +469,9 @@ static void mei_nfc_init(struct work_struct *work)
return;
err:
+ mutex_lock(&dev->device_lock);
mei_nfc_free(ndev);
+ mutex_unlock(&dev->device_lock);
return;
}
@@ -481,7 +483,7 @@ int mei_nfc_host_init(struct mei_device *dev)
struct mei_cl *cl_info, *cl = NULL;
int i, ret;
- /* already initialzed */
+ /* already initialized */
if (ndev->cl_info)
return 0;
@@ -547,12 +549,16 @@ err:
return ret;
}
-void mei_nfc_host_exit(void)
+void mei_nfc_host_exit(struct mei_device *dev)
{
struct mei_nfc_dev *ndev = &nfc_dev;
+ cancel_work_sync(&ndev->init_work);
+
+ mutex_lock(&dev->device_lock);
if (ndev->cl && ndev->cl->device)
mei_cl_remove_device(ndev->cl->device);
mei_nfc_free(ndev);
+ mutex_unlock(&dev->device_lock);
}
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 2cab3c0..ddadd08 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -144,6 +144,21 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "failed to get pci regions.\n");
goto disable_device;
}
+
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ }
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+ goto release_regions;
+ }
+
+
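The probe path now requests a 64-bit DMA mask and falls back to 32 bits if the host cannot honor it. A minimal sketch of the same fallback using the combined dma_set_mask_and_coherent() helper; the function name example_set_dma_masks is illustrative and not part of this driver:

#include <linux/dma-mapping.h>

/* hypothetical sketch of the 64-bit-then-32-bit DMA mask fallback */
static int example_set_dma_masks(struct device *dev)
{
	int err;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (err)
		dev_err(dev, "No usable DMA configuration\n");
	return err;
}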
/* allocates and initializes the mei dev structure */
dev = mei_me_dev_init(pdev);
if (!dev) {
@@ -197,8 +212,8 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
release_irq:
+ mei_cancel_work(dev);
mei_disable_interrupts(dev);
- flush_scheduled_work();
free_irq(pdev->irq, dev);
disable_msi:
pci_disable_msi(pdev);
@@ -306,16 +321,14 @@ static int mei_me_pci_resume(struct device *device)
return err;
}
- mutex_lock(&dev->device_lock);
- dev->dev_state = MEI_DEV_POWER_UP;
- mei_clear_interrupts(dev);
- mei_reset(dev, 1);
- mutex_unlock(&dev->device_lock);
+ err = mei_restart(dev);
+ if (err)
+ return err;
/* Start timer if stopped in suspend */
schedule_delayed_work(&dev->timer_work, HZ);
- return err;
+ return 0;
}
static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume);
#define MEI_ME_PM_OPS (&mei_me_pm_ops)
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 9e35421..f70945e 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -115,6 +115,7 @@ int mei_wd_send(struct mei_device *dev)
hdr.me_addr = dev->wd_cl.me_client_id;
hdr.msg_complete = 1;
hdr.reserved = 0;
+ hdr.internal = 0;
if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))
hdr.length = MEI_WD_START_MSG_SIZE;
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
index 3574cc3..1a6edce 100644
--- a/drivers/misc/mic/host/mic_device.h
+++ b/drivers/misc/mic/host/mic_device.h
@@ -112,7 +112,7 @@ struct mic_device {
struct work_struct shutdown_work;
u8 state;
u8 shutdown_status;
- struct sysfs_dirent *state_sysfs;
+ struct kernfs_node *state_sysfs;
struct completion reset_wait;
void *log_buf_addr;
int *log_buf_len;
@@ -134,6 +134,8 @@ struct mic_device {
* @send_intr: Send an interrupt for a particular doorbell on the card.
* @ack_interrupt: Hardware specific operations to ack the h/w on
* receipt of an interrupt.
+ * @intr_workarounds: Hardware specific workarounds needed after
+ * handling an interrupt.
* @reset: Reset the remote processor.
* @reset_fw_ready: Reset firmware ready field.
* @is_fw_ready: Check if firmware is ready for OS download.
@@ -149,6 +151,7 @@ struct mic_hw_ops {
void (*write_spad)(struct mic_device *mdev, unsigned int idx, u32 val);
void (*send_intr)(struct mic_device *mdev, int doorbell);
u32 (*ack_interrupt)(struct mic_device *mdev);
+ void (*intr_workarounds)(struct mic_device *mdev);
void (*reset)(struct mic_device *mdev);
void (*reset_fw_ready)(struct mic_device *mdev);
bool (*is_fw_ready)(struct mic_device *mdev);
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
index ad838c7..c04a021 100644
--- a/drivers/misc/mic/host/mic_main.c
+++ b/drivers/misc/mic/host/mic_main.c
@@ -115,7 +115,7 @@ static irqreturn_t mic_shutdown_db(int irq, void *data)
struct mic_device *mdev = data;
struct mic_bootparam *bootparam = mdev->dp;
- mdev->ops->ack_interrupt(mdev);
+ mdev->ops->intr_workarounds(mdev);
switch (bootparam->shutdown_status) {
case MIC_HALTED:
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index e04bb4f..752ff87 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -369,7 +369,7 @@ static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
struct mic_vdev *mvdev = data;
struct mic_device *mdev = mvdev->mdev;
- mdev->ops->ack_interrupt(mdev);
+ mdev->ops->intr_workarounds(mdev);
schedule_work(&mvdev->virtio_bh_work);
return IRQ_HANDLED;
}
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index 0dfa8a8..5562fdd 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -174,35 +174,38 @@ static void mic_x100_send_intr(struct mic_device *mdev, int doorbell)
}
/**
- * mic_ack_interrupt - Device specific interrupt handling.
- * @mdev: pointer to mic_device instance
+ * mic_x100_ack_interrupt - Read the interrupt sources register and
+ * clear it. This function will be called in the MSI/INTx case.
+ * @mdev: Pointer to mic_device instance.
*
- * Returns: bitmask of doorbell events triggered.
+ * Returns: bitmask of interrupt sources triggered.
*/
static u32 mic_x100_ack_interrupt(struct mic_device *mdev)
{
- u32 reg = 0;
- struct mic_mw *mw = &mdev->mmio;
u32 sicr0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICR0;
+ u32 reg = mic_mmio_read(&mdev->mmio, sicr0);
+ mic_mmio_write(&mdev->mmio, reg, sicr0);
+ return reg;
+}
+
+/**
+ * mic_x100_intr_workarounds - These hardware specific workarounds are
+ * to be invoked every time an interrupt is handled.
+ * @mdev: Pointer to mic_device instance.
+ *
+ * Returns: none
+ */
+static void mic_x100_intr_workarounds(struct mic_device *mdev)
+{
+ struct mic_mw *mw = &mdev->mmio;
/* Clear pending bit array. */
if (MIC_A0_STEP == mdev->stepping)
mic_mmio_write(mw, 1, MIC_X100_SBOX_BASE_ADDRESS +
MIC_X100_SBOX_MSIXPBACR);
- if (mdev->irq_info.num_vectors <= 1) {
- reg = mic_mmio_read(mw, sicr0);
-
- if (unlikely(!reg))
- goto done;
-
- mic_mmio_write(mw, reg, sicr0);
- }
-
if (mdev->stepping >= MIC_B0_STEP)
mdev->intr_ops->enable_interrupts(mdev);
-done:
- return reg;
}
/**
@@ -553,6 +556,7 @@ struct mic_hw_ops mic_x100_ops = {
.write_spad = mic_x100_write_spad,
.send_intr = mic_x100_send_intr,
.ack_interrupt = mic_x100_ack_interrupt,
+ .intr_workarounds = mic_x100_intr_workarounds,
.reset = mic_x100_hw_reset,
.reset_fw_ready = mic_x100_reset_fw_ready,
.is_fw_ready = mic_x100_is_fw_ready,
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 652593f..128d561 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -828,6 +828,7 @@ enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
enum xp_retval ret;
+ DEFINE_WAIT(wait);
if (ch->flags & XPC_C_DISCONNECTING) {
DBUG_ON(ch->reason == xpInterrupted);
@@ -835,7 +836,9 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
}
atomic_inc(&ch->n_on_msg_allocate_wq);
- ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
+ prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
+ ret = schedule_timeout(1);
+ finish_wait(&ch->msg_allocate_wq, &wait);
atomic_dec(&ch->n_on_msg_allocate_wq);
if (ch->flags & XPC_C_DISCONNECTING) {
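interruptible_sleep_on_timeout() belongs to the racy sleep_on family, so it is replaced above with an open-coded prepare_to_wait()/schedule_timeout()/finish_wait() sequence because there is no condition to test here. Where a condition does exist, wait_event_interruptible_timeout() is the usual replacement; a minimal sketch with illustrative names:

#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

/* illustrative only: wait up to one second for *flag to become true;
 * returns remaining jiffies (> 0) on success, 0 on timeout,
 * -ERESTARTSYS if interrupted by a signal
 */
static long example_wait_for_flag(wait_queue_head_t *wq, bool *flag)
{
	return wait_event_interruptible_timeout(*wq, *flag, HZ);
}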
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 8d64b68..3aed525 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -812,7 +812,7 @@ static void st_tty_flush_buffer(struct tty_struct *tty)
kfree_skb(st_gdata->tx_skb);
st_gdata->tx_skb = NULL;
- tty->ops->flush_buffer(tty);
+ tty_driver_flush_buffer(tty);
return;
}
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 96853a0..9d3dbb2 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -531,7 +531,6 @@ long st_kim_stop(void *kim_data)
/* Flush any pending characters in the driver and discipline. */
tty_ldisc_flush(tty);
tty_driver_flush_buffer(tty);
- tty->ops->flush_buffer(tty);
}
/* send uninstall notification to UIM */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index c98b03b..d35cda0 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -165,7 +165,7 @@ static void vmci_guest_cid_update(u32 sub_id,
* true if required hypercalls (or fallback hypercalls) are
* supported by the host, false otherwise.
*/
-static bool vmci_check_host_caps(struct pci_dev *pdev)
+static int vmci_check_host_caps(struct pci_dev *pdev)
{
bool result;
struct vmci_resource_query_msg *msg;
@@ -176,7 +176,7 @@ static bool vmci_check_host_caps(struct pci_dev *pdev)
check_msg = kmalloc(msg_size, GFP_KERNEL);
if (!check_msg) {
dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
- return false;
+ return -ENOMEM;
}
check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
@@ -196,7 +196,7 @@ static bool vmci_check_host_caps(struct pci_dev *pdev)
__func__, result ? "PASSED" : "FAILED");
/* We need the vector. There are no fallbacks. */
- return result;
+ return result ? 0 : -ENXIO;
}
/*
@@ -564,12 +564,14 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
dev_warn(&pdev->dev,
"VMCI device unable to register notification bitmap with PPN 0x%x\n",
(u32) bitmap_ppn);
+ error = -ENXIO;
goto err_remove_vmci_dev_g;
}
}
/* Check host capabilities. */
- if (!vmci_check_host_caps(pdev))
+ error = vmci_check_host_caps(pdev);
+ if (error)
goto err_remove_bitmap;
/* Enable device. */
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index d210d13..0f55589 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -73,7 +73,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
return -ENOMEM;
}
info->map.cached =
- ioremap_cached(info->map.phys, info->map.size);
+ ioremap_cache(info->map.phys, info->map.size);
if (!info->map.cached)
printk(KERN_WARNING "Failed to ioremap cached %s\n",
info->map.name);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 187b1b7..4ced594 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2201,20 +2201,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
port = &(SLAVE_AD_INFO(slave).port);
- // if slave is null, the whole port is not initialized
+ /* if slave is null, the whole port is not initialized */
if (!port->slave) {
pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
slave->bond->dev->name, slave->dev->name);
return;
}
+ __get_state_machine_lock(port);
+
port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
port->actor_oper_port_key = port->actor_admin_port_key |=
(__get_link_speed(port) << 1);
pr_debug("Port %d changed speed\n", port->actor_port_number);
- // there is no need to reselect a new aggregator, just signal the
- // state machines to reinitialize
+ /* there is no need to reselect a new aggregator, just signal the
+ * state machines to reinitialize
+ */
port->sm_vars |= AD_PORT_BEGIN;
+
+ __release_state_machine_lock(port);
}
/**
@@ -2229,20 +2234,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
port = &(SLAVE_AD_INFO(slave).port);
- // if slave is null, the whole port is not initialized
+ /* if slave is null, the whole port is not initialized */
if (!port->slave) {
pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
slave->bond->dev->name, slave->dev->name);
return;
}
+ __get_state_machine_lock(port);
+
port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
port->actor_oper_port_key = port->actor_admin_port_key |=
__get_duplex(port);
pr_debug("Port %d changed duplex\n", port->actor_port_number);
- // there is no need to reselect a new aggregator, just signal the
- // state machines to reinitialize
+ /* there is no need to reselect a new aggregator, just signal the
+ * state machines to reinitialize
+ */
port->sm_vars |= AD_PORT_BEGIN;
+
+ __release_state_machine_lock(port);
}
/**
@@ -2258,15 +2268,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port = &(SLAVE_AD_INFO(slave).port);
- // if slave is null, the whole port is not initialized
+ /* if slave is null, the whole port is not initialized */
if (!port->slave) {
pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
slave->bond->dev->name, slave->dev->name);
return;
}
- // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed)
- // on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report
+ __get_state_machine_lock(port);
+ /* on link down we are zeroing duplex and speed since
+ * some of the adaptors (ce1000.lan) report full duplex/speed
+ * instead of N/A(duplex) / 0(speed).
+ *
+ * on link up we are forcing a recheck of the duplex and speed since
+ * some of the adaptors (ce1000.lan) report incorrect values.
+ */
if (link == BOND_LINK_UP) {
port->is_enabled = true;
port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
@@ -2282,10 +2298,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port->actor_oper_port_key = (port->actor_admin_port_key &=
~AD_SPEED_KEY_BITS);
}
- //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN")));
- // there is no need to reselect a new aggregator, just signal the
- // state machines to reinitialize
+ pr_debug("Port %d changed link status to %s",
+ port->actor_port_number,
+ (link == BOND_LINK_UP) ? "UP" : "DOWN");
+ /* there is no need to reselect a new aggregator, just signal the
+ * state machines to reinitialize
+ */
port->sm_vars |= AD_PORT_BEGIN;
+
+ __release_state_machine_lock(port);
}
/*
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 398e299..6191b55 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1763,7 +1763,7 @@ static int __bond_release_one(struct net_device *bond_dev,
}
if (all) {
- rcu_assign_pointer(bond->curr_active_slave, NULL);
+ RCU_INIT_POINTER(bond->curr_active_slave, NULL);
} else if (oldcurrent == slave) {
/*
* Note that we hold RTNL over this sequence, so there
@@ -3732,7 +3732,8 @@ static inline int bond_slave_override(struct bonding *bond,
}
-static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
/*
* This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index fb3dd43..f615fde 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -113,7 +113,7 @@ static const struct net_device_ops hydra_netdev_ops = {
static int hydra_init(struct zorro_dev *z)
{
struct net_device *dev;
- unsigned long board = ZTWO_VADDR(z->resource.start);
+ unsigned long board = (unsigned long)ZTWO_VADDR(z->resource.start);
unsigned long ioaddr = board+HYDRA_NIC_BASE;
const char name[] = "NE2000";
int start_page, stop_page;
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 85ec4c2..ae2a12b 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -287,7 +287,7 @@ static const struct net_device_ops zorro8390_netdev_ops = {
};
static int zorro8390_init(struct net_device *dev, unsigned long board,
- const char *name, unsigned long ioaddr)
+ const char *name, void __iomem *ioaddr)
{
int i;
int err;
@@ -354,7 +354,7 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
start_page = NESM_START_PG;
stop_page = NESM_STOP_PG;
- dev->base_addr = ioaddr;
+ dev->base_addr = (unsigned long)ioaddr;
dev->irq = IRQ_AMIGA_PORTS;
/* Install the Interrupt handler */
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 0866e76..5613918 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -57,6 +57,7 @@
#include <linux/zorro.h>
#include <linux/bitops.h>
+#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
@@ -678,6 +679,7 @@ static int a2065_init_one(struct zorro_dev *z,
unsigned long base_addr = board + A2065_LANCE;
unsigned long mem_start = board + A2065_RAM;
struct resource *r1, *r2;
+ u32 serial;
int err;
r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
@@ -702,6 +704,7 @@ static int a2065_init_one(struct zorro_dev *z,
r1->name = dev->name;
r2->name = dev->name;
+ serial = be32_to_cpu(z->rom.er_SerialNumber);
dev->dev_addr[0] = 0x00;
if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */
dev->dev_addr[1] = 0x80;
@@ -710,11 +713,11 @@ static int a2065_init_one(struct zorro_dev *z,
dev->dev_addr[1] = 0x00;
dev->dev_addr[2] = 0x9f;
}
- dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff;
- dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff;
- dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
- dev->base_addr = ZTWO_VADDR(base_addr);
- dev->mem_start = ZTWO_VADDR(mem_start);
+ dev->dev_addr[3] = (serial >> 16) & 0xff;
+ dev->dev_addr[4] = (serial >> 8) & 0xff;
+ dev->dev_addr[5] = serial & 0xff;
+ dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
+ dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
priv->ll = (volatile struct lance_regs *)dev->base_addr;
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index c178eb4..b08101b 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -51,6 +51,7 @@
#include <linux/zorro.h>
#include <linux/bitops.h>
+#include <asm/byteorder.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/irq.h>
@@ -718,6 +719,7 @@ static int ariadne_init_one(struct zorro_dev *z,
struct resource *r1, *r2;
struct net_device *dev;
struct ariadne_private *priv;
+ u32 serial;
int err;
r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960");
@@ -741,14 +743,15 @@ static int ariadne_init_one(struct zorro_dev *z,
r1->name = dev->name;
r2->name = dev->name;
+ serial = be32_to_cpu(z->rom.er_SerialNumber);
dev->dev_addr[0] = 0x00;
dev->dev_addr[1] = 0x60;
dev->dev_addr[2] = 0x30;
- dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff;
- dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff;
- dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
- dev->base_addr = ZTWO_VADDR(base_addr);
- dev->mem_start = ZTWO_VADDR(mem_start);
+ dev->dev_addr[3] = (serial >> 16) & 0xff;
+ dev->dev_addr[4] = (serial >> 8) & 0xff;
+ dev->dev_addr[5] = serial & 0xff;
+ dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
+ dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;
dev->netdev_ops = &ariadne_netdev_ops;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index b2ffad1..248baf6 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
/* Make sure pointer to data buffer is set */
wmb();
+ skb_tx_timestamp(skb);
+
*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
/* Increment index to point to the next BD */
@@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
arc_reg_set(priv, R_STATUS, TXPL_MASK);
- skb_tx_timestamp(skb);
-
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index a36a760..2980175 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
* Mask some pcie error bits
*/
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
- data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
- pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+ if (pos) {
+ pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
+ data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
+ pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+ }
/* clear error status */
pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
PCI_EXP_DEVSTA_NFED |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a1f66e2..ec61190 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -520,10 +520,12 @@ struct bnx2x_fastpath {
#define BNX2X_FP_STATE_IDLE 0
#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
-#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */
+#define BNX2X_FP_STATE_DISABLED (1 << 2)
+#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
+#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
+#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
/* protect state */
spinlock_t lock;
@@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
{
bool rc = true;
- spin_lock(&fp->lock);
+ spin_lock_bh(&fp->lock);
if (fp->state & BNX2X_FP_LOCKED) {
WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
@@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
/* we don't care if someone yielded */
fp->state = BNX2X_FP_STATE_NAPI;
}
- spin_unlock(&fp->lock);
+ spin_unlock_bh(&fp->lock);
return rc;
}
@@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
{
bool rc = false;
- spin_lock(&fp->lock);
+ spin_lock_bh(&fp->lock);
WARN_ON(fp->state &
(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
rc = true;
- fp->state = BNX2X_FP_STATE_IDLE;
- spin_unlock(&fp->lock);
+
+ /* state ==> idle, unless currently disabled */
+ fp->state &= BNX2X_FP_STATE_DISABLED;
+ spin_unlock_bh(&fp->lock);
return rc;
}
@@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
rc = true;
- fp->state = BNX2X_FP_STATE_IDLE;
+
+ /* state ==> idle, unless currently disabled */
+ fp->state &= BNX2X_FP_STATE_DISABLED;
spin_unlock_bh(&fp->lock);
return rc;
}
@@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
/* true if a socket is polling, even if it did not get the lock */
static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
{
- WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
+ WARN_ON(!(fp->state & BNX2X_FP_OWNED));
return fp->state & BNX2X_FP_USER_PEND;
}
+
+/* false if fp is currently owned */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+ int rc = true;
+
+ spin_lock_bh(&fp->lock);
+ if (fp->state & BNX2X_FP_OWNED)
+ rc = false;
+ fp->state |= BNX2X_FP_STATE_DISABLED;
+ spin_unlock_bh(&fp->lock);
+
+ return rc;
+}
#else
static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
{
@@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
{
return false;
}
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+ return true;
+}
#endif /* CONFIG_NET_RX_BUSY_POLL */
/* Use 2500 as a mini-jumbo MTU for FCoE */
@@ -1250,7 +1274,10 @@ struct bnx2x_slowpath {
* Therefore, if they would have been defined in the same union,
* data can get corrupted.
*/
- struct afex_vif_list_ramrod_data func_afex_rdata;
+ union {
+ struct afex_vif_list_ramrod_data viflist_data;
+ struct function_update_data func_update;
+ } func_afex_rdata;
/* used by dmae command executer */
struct dmae_command dmae[MAX_DMAE_C];
@@ -2499,4 +2526,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
#define MCPR_SCRATCH_BASE(bp) \
(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec96130..bf81156 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
struct sk_buff *skb = tx_buf->skb;
u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
int nbd;
+ u16 split_bd_len = 0;
/* prefetch skb end pointer to speedup dev_kfree_skb() */
prefetch(&skb->end);
@@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
txdata->txq_index, idx, tx_buf, skb);
- /* unmap first bd */
tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
- dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
- BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
@@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
--nbd;
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
- /* ...and the TSO split header bd since they have no mapping */
+ /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+ split_bd_len = BD_UNMAP_LEN(tx_data_bd);
--nbd;
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
}
+ /* unmap first bd */
+ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+ BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+ DMA_TO_DEVICE);
+
/* now free frags */
while (nbd > 0) {
@@ -1790,26 +1795,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
{
int i;
- local_bh_disable();
for_each_rx_queue_cnic(bp, i) {
napi_disable(&bnx2x_fp(bp, i, napi));
- while (!bnx2x_fp_lock_napi(&bp->fp[i]))
- mdelay(1);
+ while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+ usleep_range(1000, 2000);
}
- local_bh_enable();
}
static void bnx2x_napi_disable(struct bnx2x *bp)
{
int i;
- local_bh_disable();
for_each_eth_queue(bp, i) {
napi_disable(&bnx2x_fp(bp, i, napi));
- while (!bnx2x_fp_lock_napi(&bp->fp[i]))
- mdelay(1);
+ while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+ usleep_range(1000, 2000);
}
- local_bh_enable();
}
void bnx2x_netif_start(struct bnx2x *bp)
@@ -1832,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
bnx2x_napi_disable_cnic(bp);
}
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index da8fcaa..41f3ca5a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
/* select_queue callback */
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 20dcc02..11fc795 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
} else {
+ /* Enable Auto-Detect to support 1G over CL37 as well */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
+
+ /* Force cl48 sync_status LOW to avoid getting stuck in CL73
+ * parallel-detect loop when CL73 and CL37 are enabled.
+ */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
+ bnx2x_set_aer_mmd(params, phy);
+
bnx2x_disable_kr2(params, vars, phy);
}
@@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
*edc_mode = EDC_MODE_ACTIVE_DAC;
else
check_limiting_mode = 1;
- } else if (copper_module_type &
- SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+ } else {
+ *edc_mode = EDC_MODE_PASSIVE_DAC;
+ /* Even if the PASSIVE_DAC indication is not set,
+ * treat it as a passive DAC cable, since some cables
+ * don't have this indication.
+ */
+ if (copper_module_type &
+ SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
DP(NETIF_MSG_LINK,
"Passive Copper cable detected\n");
- *edc_mode =
- EDC_MODE_PASSIVE_DAC;
- } else {
- DP(NETIF_MSG_LINK,
- "Unknown copper-cable-type 0x%x !!!\n",
- copper_module_type);
- return -EINVAL;
+ } else {
+ DP(NETIF_MSG_LINK,
+ "Unknown copper-cable-type\n");
+ }
}
break;
}
@@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
(1<<11));
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
- (phy->req_line_speed == SPEED_1000)) {
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
an_1000_val |= (1<<8);
autoneg_val |= (1<<9 | 1<<12);
if (phy->req_duplex == DUPLEX_FULL)
@@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
0x09,
&an_1000_val);
- /* Set 100 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
- an_10_100_val |= (1<<7);
- /* Enable autoneg and restart autoneg for legacy speeds */
- autoneg_val |= (1<<9 | 1<<12);
-
- if (phy->req_duplex == DUPLEX_FULL)
- an_10_100_val |= (1<<8);
- DP(NETIF_MSG_LINK, "Advertising 100M\n");
- }
-
- /* Set 10 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
- an_10_100_val |= (1<<5);
- autoneg_val |= (1<<9 | 1<<12);
- if (phy->req_duplex == DUPLEX_FULL)
+ /* Advertise 10/100 link speed */
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
an_10_100_val |= (1<<6);
- DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+ an_10_100_val |= (1<<7);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+ an_10_100_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+ }
}
/* Only 10/100 are allowed to work in FORCE mode */
@@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
old_status, status);
+ /* Do not touch the link in case physical link down */
+ if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+ return 1;
+
/* a. Update shmem->link_status accordingly
* b. Update link_vars->link_up
*/
@@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
*/
not_kr2_device = (((base_page & 0x8000) == 0) ||
(((base_page & 0x8000) &&
- ((next_page & 0xe0) == 0x2))));
+ ((next_page & 0xe0) == 0x20))));
/* In case KR2 is already disabled, check if we need to re-enable it */
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
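The corrected comparison above works because the 0xe0 mask keeps only bits 7:5 of next_page, so the masked value can only ever be a multiple of 0x20; matching it against 0x2 could never succeed. A small standalone sketch of that arithmetic (constants taken from the hunk above, everything else illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* (next_page & 0xe0) only ever yields multiples of 0x20 (bits 7:5),
     * so comparing it with 0x2 is always false; 0x20 matches exactly the
     * "bit 5 set, bits 7:6 clear" case the workaround looks for.
     */
    static int next_page_matches(uint16_t next_page)
    {
            return (next_page & 0xe0) == 0x20;
    }

    int main(void)
    {
            uint16_t np;

            for (np = 0; np < 0x100; np++)
                    assert((np & 0xe0) != 0x2);     /* old test could never hit */

            assert(next_page_matches(0x20));        /* bit 5 only */
            assert(!next_page_matches(0x60));       /* bit 6 also set */
            return 0;
    }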
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 814d0ec..0067b97 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -11447,9 +11447,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
}
}
- /* adjust igu_sb_cnt to MF for E1x */
- if (CHIP_IS_E1x(bp) && IS_MF(bp))
- bp->igu_sb_cnt /= E1HVN_MAX;
+ /* adjust igu_sb_cnt to MF for E1H */
+ if (CHIP_IS_E1H(bp) && IS_MF(bp))
+ bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
/* port info */
bnx2x_get_port_hwinfo(bp);
@@ -12942,25 +12942,26 @@ static void __bnx2x_remove(struct pci_dev *pdev,
pci_set_power_state(pdev, PCI_D3hot);
}
- if (bp->regview)
- iounmap(bp->regview);
+ if (remove_netdev) {
+ if (bp->regview)
+ iounmap(bp->regview);
- /* for vf doorbells are part of the regview and were unmapped along with
- * it. FW is only loaded by PF.
- */
- if (IS_PF(bp)) {
- if (bp->doorbells)
- iounmap(bp->doorbells);
+ /* For vfs, doorbells are part of the regview and were unmapped
+ * along with it. FW is only loaded by PF.
+ */
+ if (IS_PF(bp)) {
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
- bnx2x_release_firmware(bp);
- }
- bnx2x_free_mem_bp(bp);
+ bnx2x_release_firmware(bp);
+ }
+ bnx2x_free_mem_bp(bp);
- if (remove_netdev)
free_netdev(dev);
- if (atomic_read(&pdev->enable_cnt) == 1)
- pci_release_regions(pdev);
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+ }
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 3efbb35..14ffb6e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7179,6 +7179,7 @@ The other bits are reserved and should be zero*/
#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa
#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
#define MDIO_WC_REG_XGXS_STATUS3 0x8129
#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 32c92ab..18438a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2038,6 +2038,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params p;
struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+ unsigned long flags;
int read_lock;
int rc = 0;
@@ -2046,8 +2047,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
spin_lock_bh(&exeq->lock);
list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
- if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
- *vlan_mac_flags) {
+ flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
+ if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
rc = exeq->remove(bp, exeq->owner, exeq_pos);
if (rc) {
BNX2X_ERR("Failed to remove command\n");
@@ -2080,7 +2082,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
return read_lock;
list_for_each_entry(pos, &o->head, link) {
- if (pos->vlan_mac_flags == *vlan_mac_flags) {
+ flags = pos->vlan_mac_flags;
+ if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
rc = bnx2x_config_vlan_mac(bp, &p);
@@ -4382,8 +4386,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
struct bnx2x_raw_obj *r = &o->raw;
/* Do nothing if only driver cleanup was requested */
- if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
+ p->ramrod_flags);
return 0;
+ }
r->set_pending(r);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 658f4e3..6a53c15 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -266,6 +266,13 @@ enum {
BNX2X_DONT_CONSUME_CAM_CREDIT,
BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
};
+/* When looking for matching filters, some flags are not interesting */
+#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
+ 1 << BNX2X_ETH_MAC | \
+ 1 << BNX2X_ISCSI_ETH_MAC | \
+ 1 << BNX2X_NETQ_ETH_MAC)
+#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
+ ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
struct bnx2x_vlan_mac_ramrod_params {
/* Object to run the command from */
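BNX2X_VLAN_MAC_CMP_FLAGS() keeps only the classification bits named in BNX2X_VLAN_MAC_CMP_MASK, so two entries that differ only in control bits such as BNX2X_DONT_CONSUME_CAM_CREDIT still compare equal. A minimal self-contained sketch of that masked comparison, using made-up bit positions rather than the driver's enum:

    #include <assert.h>

    /* Hypothetical bit positions, for illustration only */
    #define FLT_ETH_MAC             (1 << 0)   /* classification bit */
    #define FLT_UC_LIST_MAC         (1 << 1)   /* classification bit */
    #define FLT_DONT_CONSUME_CREDIT (1 << 4)   /* control bit, not compared */

    #define FLT_CMP_MASK            (FLT_ETH_MAC | FLT_UC_LIST_MAC)
    #define FLT_CMP_FLAGS(f)        ((f) & FLT_CMP_MASK)

    int main(void)
    {
            unsigned long stored  = FLT_ETH_MAC;
            unsigned long request = FLT_ETH_MAC | FLT_DONT_CONSUME_CREDIT;

            assert(stored != request);                 /* raw compare misses */
            assert(FLT_CMP_FLAGS(stored) == FLT_CMP_FLAGS(request)); /* masked hit */
            return 0;
    }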
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 2e46c28..e7845e5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1209,6 +1209,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
/* next state */
vfop->state = BNX2X_VFOP_RXMODE_DONE;
+ /* record the accept flags in vfdb so the hypervisor can modify them
+ * if necessary
+ */
+ bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
+ ramrod->rx_accept_flags;
vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
@@ -1224,39 +1229,43 @@ op_pending:
return;
}
+static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+ struct bnx2x_rx_mode_ramrod_params *ramrod,
+ struct bnx2x_virtf *vf,
+ unsigned long accept_flags)
+{
+ struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+
+ memset(ramrod, 0, sizeof(*ramrod));
+ ramrod->cid = vfq->cid;
+ ramrod->cl_id = vfq_cl_id(vf, vfq);
+ ramrod->rx_mode_obj = &bp->rx_mode_obj;
+ ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+ ramrod->rx_accept_flags = accept_flags;
+ ramrod->tx_accept_flags = accept_flags;
+ ramrod->pstate = &vf->filter_state;
+ ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+ set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+ ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+ ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+}
+
int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid, unsigned long accept_flags)
{
- struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_rx_mode_ramrod_params *ramrod =
&vf->op_params.rx_mode;
- memset(ramrod, 0, sizeof(*ramrod));
-
- /* Prepare ramrod parameters */
- ramrod->cid = vfq->cid;
- ramrod->cl_id = vfq_cl_id(vf, vfq);
- ramrod->rx_mode_obj = &bp->rx_mode_obj;
- ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
-
- ramrod->rx_accept_flags = accept_flags;
- ramrod->tx_accept_flags = accept_flags;
- ramrod->pstate = &vf->filter_state;
- ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
-
- set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
- set_bit(RAMROD_RX, &ramrod->ramrod_flags);
- set_bit(RAMROD_TX, &ramrod->ramrod_flags);
-
- ramrod->rdata =
- bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
- ramrod->rdata_mapping =
- bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+ bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
bnx2x_vfop_rxmode, cmd->done);
@@ -3202,13 +3211,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
bnx2x_iov_static_resc(bp, vf);
}
- /* prepare msix vectors in VF configuration space */
+ /* prepare msix vectors in VF configuration space - the value in the
+ * PCI configuration space should be the index of the last entry,
+ * namely one less than the actual size of the table
+ */
for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
- num_vf_queues);
+ num_vf_queues - 1);
DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
- vf_idx, num_vf_queues);
+ vf_idx, num_vf_queues - 1);
}
bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
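As the comment above notes, the PCI configuration space field holds the index of the last MSI-X table entry, i.e. the table size minus one, which is why num_vf_queues - 1 is written. A tiny sketch of that N-1 encoding, assuming nothing beyond the convention stated in the comment:

    #include <assert.h>

    /* MSI-X "table size" fields hold the last-entry index, i.e. size - 1 */
    static unsigned int msix_encode_table_size(unsigned int num_entries)
    {
            return num_entries - 1;
    }

    static unsigned int msix_decode_table_size(unsigned int reg_val)
    {
            return reg_val + 1;
    }

    int main(void)
    {
            unsigned int num_vf_queues = 4;    /* illustrative value */
            unsigned int reg = msix_encode_table_size(num_vf_queues);

            assert(reg == 3);
            assert(msix_decode_table_size(reg) == num_vf_queues);
            return 0;
    }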
@@ -3436,10 +3448,18 @@ out:
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+ struct bnx2x_queue_update_params *update_params;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+ struct bnx2x_rx_mode_ramrod_params rx_ramrod;
struct bnx2x *bp = netdev_priv(dev);
- int rc, q_logical_state;
+ struct bnx2x_vlan_mac_obj *vlan_obj;
+ unsigned long vlan_mac_flags = 0;
+ unsigned long ramrod_flags = 0;
struct bnx2x_virtf *vf = NULL;
- struct pf_vf_bulletin_content *bulletin = NULL;
+ unsigned long accept_flags;
+ int rc;
/* sanity and init */
rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
@@ -3457,104 +3477,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
/* update PF's copy of the VF's bulletin. No point in posting the vlan
* to the VF since it doesn't have anything to do with it. But it is useful
* to store it here in case the VF is not up yet and we can only
- * configure the vlan later when it does.
+ * configure the vlan later when it does. Treat vlan id 0 as a request to
+ * remove the host tag.
*/
- bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ if (vlan > 0)
+ bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ else
+ bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
bulletin->vlan = vlan;
/* is vf initialized and queue set up? */
- q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
- if (vf->state == VF_ENABLED &&
- q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
- /* configure the vlan in device on this vf's queue */
- unsigned long ramrod_flags = 0;
- unsigned long vlan_mac_flags = 0;
- struct bnx2x_vlan_mac_obj *vlan_obj =
- &bnx2x_leading_vfq(vf, vlan_obj);
- struct bnx2x_vlan_mac_ramrod_params ramrod_param;
- struct bnx2x_queue_state_params q_params = {NULL};
- struct bnx2x_queue_update_params *update_params;
+ if (vf->state != VF_ENABLED ||
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ return rc;
- rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
- if (rc)
- return rc;
- memset(&ramrod_param, 0, sizeof(ramrod_param));
+ /* configure the vlan in device on this vf's queue */
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+ if (rc)
+ return rc;
- /* must lock vfpf channel to protect against vf flows */
- bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
- /* remove existing vlans */
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
- &ramrod_flags);
- if (rc) {
- BNX2X_ERR("failed to delete vlans\n");
- rc = -EINVAL;
- goto out;
- }
+ /* remove existing vlans */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("failed to delete vlans\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* need to remove/add the VF's accept_any_vlan bit */
+ accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+ if (vlan)
+ clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+ else
+ set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+ accept_flags);
+ bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+ bnx2x_config_rx_mode(bp, &rx_ramrod);
+
+ /* configure the new vlan to device */
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = vlan_obj;
+ ramrod_param.ramrod_flags = ramrod_flags;
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &ramrod_param.user_req.vlan_mac_flags);
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ rc = -EINVAL;
+ goto out;
+ }
- /* send queue update ramrod to configure default vlan and silent
- * vlan removal
+ /* send queue update ramrod to configure default vlan and silent
+ * vlan removal
+ */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &update_params->update_flags);
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
*/
- __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
- q_params.cmd = BNX2X_Q_CMD_UPDATE;
- q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
- update_params = &q_params.params.update;
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
+ */
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
&update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
&update_params->update_flags);
+ update_params->def_vlan = vlan;
+ update_params->silent_removal_value =
+ vlan & VLAN_VID_MASK;
+ update_params->silent_removal_mask = VLAN_VID_MASK;
+ }
- if (vlan == 0) {
- /* if vlan is 0 then we want to leave the VF traffic
- * untagged, and leave the incoming traffic untouched
- * (i.e. do not remove any vlan tags).
- */
- __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- &update_params->update_flags);
- __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- &update_params->update_flags);
- } else {
- /* configure the new vlan to device */
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- ramrod_param.vlan_mac_obj = vlan_obj;
- ramrod_param.ramrod_flags = ramrod_flags;
- ramrod_param.user_req.u.vlan.vlan = vlan;
- ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
- rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
- if (rc) {
- BNX2X_ERR("failed to configure vlan\n");
- rc = -EINVAL;
- goto out;
- }
-
- /* configure default vlan to vf queue and set silent
- * vlan removal (the vf remains unaware of this vlan).
- */
- update_params = &q_params.params.update;
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- &update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- &update_params->update_flags);
- update_params->def_vlan = vlan;
- }
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN\n");
+ goto out;
+ }
- /* Update the Queue state */
- rc = bnx2x_queue_state_change(bp, &q_params);
- if (rc) {
- BNX2X_ERR("Failed to configure default VLAN\n");
- goto out;
- }
- /* clear the flag indicating that this VF needs its vlan
- * (will only be set if the HV configured the Vlan before vf was
- * up and we were called because the VF came up later
- */
+ /* clear the flag indicating that this VF needs its vlan
+ * (it will only be set if the HV configured the vlan before the VF was
+ * up and we were called because the VF came up later).
+ */
out:
- vf->cfg_flags &= ~VF_CFG_VLAN;
- bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
- }
+ vf->cfg_flags &= ~VF_CFG_VLAN;
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 1ff6a936..8c213fa52 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -74,6 +74,7 @@ struct bnx2x_vf_queue {
/* VLANs object */
struct bnx2x_vlan_mac_obj vlan_obj;
atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
+ unsigned long accept_flags; /* last accept flags configured */
/* Queue Slow-path State object */
struct bnx2x_queue_sp_obj sp_obj;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index efa8a15..0756d7d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
return -EINVAL;
}
- BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
+ DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
@@ -1598,6 +1598,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
unsigned long accept = 0;
+ struct pf_vf_bulletin_content *bulletin =
+ BP_VF_BULLETIN(bp, vf->index);
/* convert VF-PF if mask to bnx2x accept flags */
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
@@ -1617,9 +1619,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
/* A packet arriving at the vf's mac should be accepted
- * with any vlan
+ * with any vlan, unless a vlan has already been
+ * configured.
*/
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+ if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
/* set rx-mode */
rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
@@ -1710,6 +1714,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
goto response;
}
}
+ /* if the hypervisor set a vlan, don't allow the guest to configure one */
+ if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
+ int i;
+
+ /* search for vlan filters */
+ for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+ if (filters->filters[i].flags &
+ VFPF_Q_FILTER_VLAN_TAG_VALID) {
+ BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+ vf->abs_vfid);
+ vf->op_rc = -EPERM;
+ goto response;
+ }
+ }
+ }
/* verify vf_qid */
if (filters->vf_qid > vf_rxq_count(vf))
@@ -1805,6 +1824,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
/* flags handled individually for backward/forward compatibility */
+ vf_op_params->rss_flags = 0;
+ vf_op_params->ramrod_flags = 0;
+
if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index f3dd93b..15a66e4 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7622,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
u32 base = (u32) mapping & 0xffffffff;
- return (base > 0xffffdcc0) && (base + len + 8 < base);
+ return base + len + 8 < base;
}
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
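With the extra guard on base removed, the test relies purely on 32-bit unsigned wrap-around: base + len + 8 comes out smaller than base exactly when the buffer (plus 8 bytes of slack) crosses a 4 GB boundary. A standalone sketch of that check:

    #include <assert.h>
    #include <stdint.h>

    /* True when a buffer of 'len' bytes starting at the low 32 bits of
     * 'mapping' (plus 8 bytes of slack) wraps past a 4 GB boundary: the
     * 32-bit sum wraps, so it ends up smaller than 'base'.
     */
    static int crosses_4g_boundary(uint64_t mapping, uint32_t len)
    {
            uint32_t base = (uint32_t)mapping;

            return (uint32_t)(base + len + 8) < base;
    }

    int main(void)
    {
            assert(crosses_4g_boundary(0xfffffff0ULL, 0x20));  /* wraps past 4 GB */
            assert(!crosses_4g_boundary(0x00001000ULL, 0x20)); /* stays below     */
            return 0;
    }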
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6c93088..56e0415 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -228,6 +228,25 @@ struct tp_params {
uint32_t dack_re; /* DACK timer resolution */
unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
+
+ u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
+ u32 ingress_config; /* cached TP_INGRESS_CONFIG */
+
+ /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
+ * subset of the set of fields which may be present in the Compressed
+ * Filter Tuple portion of filters and TCP TCB connections. The
+ * fields which are present are controlled by the TP_VLAN_PRI_MAP.
+ * Since a variable number of fields may or may not be present, their
+ * shifted positions within the Compressed Filter Tuple vary, and a
+ * field may be absent entirely if it isn't selected in
+ * TP_VLAN_PRI_MAP. Because some of these fields are needed in various
+ * places, we store their offsets here, or -1 if the field isn't
+ * present.
+ */
+ int vlan_shift;
+ int vnic_shift;
+ int port_shift;
+ int protocol_shift;
};
struct vpd_params {
@@ -926,6 +945,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
const u8 *fw_data, unsigned int fw_size,
struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);
+int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d6b12e0..fff02ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2986,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
if (stid >= 0) {
t->stid_tab[stid].data = data;
stid += t->stid_base;
- t->stids_in_use++;
+ /* IPv6 requires a max of 520 bits or 16 cells in TCAM.
+ * This is equivalent to 4 TIDs. With CLIP enabled it
+ * needs 2 TIDs.
+ */
+ if (family == PF_INET)
+ t->stids_in_use++;
+ else
+ t->stids_in_use += 4;
}
spin_unlock_bh(&t->stid_lock);
return stid;
@@ -3012,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
}
if (stid >= 0) {
t->stid_tab[stid].data = data;
- stid += t->stid_base;
+ stid -= t->nstids;
+ stid += t->sftid_base;
t->stids_in_use++;
}
spin_unlock_bh(&t->stid_lock);
@@ -3024,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid);
*/
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
- stid -= t->stid_base;
+ /* Is it a server filter TID? */
+ if (t->nsftids && (stid >= t->sftid_base)) {
+ stid -= t->sftid_base;
+ stid += t->nstids;
+ } else {
+ stid -= t->stid_base;
+ }
+
spin_lock_bh(&t->stid_lock);
if (family == PF_INET)
__clear_bit(stid, t->stid_bmap);
else
bitmap_release_region(t->stid_bmap, stid, 2);
t->stid_tab[stid].data = NULL;
- t->stids_in_use--;
+ if (family == PF_INET)
+ t->stids_in_use--;
+ else
+ t->stids_in_use -= 4;
spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
@@ -3134,6 +3152,7 @@ static int tid_init(struct tid_info *t)
size_t size;
unsigned int stid_bmap_size;
unsigned int natids = t->natids;
+ struct adapter *adap = container_of(t, struct adapter, tids);
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
size = t->ntids * sizeof(*t->tid_tab) +
@@ -3167,6 +3186,11 @@ static int tid_init(struct tid_info *t)
t->afree = t->atid_tab;
}
bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
+ /* Reserve stid 0 for T4/T5 adapters */
+ if (!t->stid_base &&
+ (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
+ __set_bit(0, t->stid_bmap);
+
return 0;
}
@@ -3731,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
(adap->fn * 4));
- lli.filt_mode = adap->filter_mode;
+ lli.filt_mode = adap->params.tp.vlan_pri_map;
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++)
lli.tx_modq[i] = i;
@@ -4179,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
adap = netdev2adap(dev);
/* Adjust stid to correct filter index */
- stid -= adap->tids.nstids;
+ stid -= adap->tids.sftid_base;
stid += adap->tids.nftids;
/* Check to make sure the filter requested is writable ...
@@ -4205,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
f->fs.val.lip[i] = val[i];
f->fs.mask.lip[i] = ~0;
}
- if (adap->filter_mode & F_PORT) {
+ if (adap->params.tp.vlan_pri_map & F_PORT) {
f->fs.val.iport = port;
f->fs.mask.iport = mask;
}
}
+ if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
+ f->fs.val.proto = IPPROTO_TCP;
+ f->fs.mask.proto = ~0;
+ }
+
f->fs.dirsteer = 1;
f->fs.iq = queue;
/* Mark filter as locked */
@@ -4237,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
adap = netdev2adap(dev);
/* Adjust stid to correct filter index */
- stid -= adap->tids.nstids;
+ stid -= adap->tids.sftid_base;
stid += adap->tids.nftids;
f = &adap->tids.ftid_tab[stid];
@@ -5092,7 +5121,7 @@ static int adap_init0(struct adapter *adap)
enum dev_state state;
u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
- int reset = 1, j;
+ int reset = 1;
/*
* Contact FW, advertising Master capability (and potentially forcing
@@ -5434,21 +5463,11 @@ static int adap_init0(struct adapter *adap)
/*
* These are finalized by FW initialization, load their values now.
*/
- v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
- adap->params.tp.tre = TIMERRESOLUTION_GET(v);
- adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
adap->params.b_wnd);
- /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
- for (j = 0; j < NCHAN; j++)
- adap->params.tp.tx_modq[j] = j;
-
- t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
- &adap->filter_mode, 1,
- TP_VLAN_PRI_MAP);
-
+ t4_init_tp_params(adap);
adap->flags |= FW_OK;
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 6f21f24..4dd0a82 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
{
- stid -= t->stid_base;
+ /* Is it a server filter TID? */
+ if (t->nsftids && (stid >= t->sftid_base)) {
+ stid -= t->sftid_base;
+ stid += t->nstids;
+ } else {
+ stid -= t->stid_base;
+ }
+
return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
}
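Server-filter TIDs are numbered from sftid_base but stored after the nstids regular entries of the same table, so both the free and lookup paths rebase them as (stid - sftid_base) + nstids. A self-contained sketch of that index mapping, with made-up sizes and bases:

    #include <assert.h>

    /* Illustrative layout: regular stids occupy table slots [0, nstids),
     * server-filter tids occupy slots [nstids, nstids + nsftids).
     */
    static const unsigned int stid_base  = 128;    /* hypothetical */
    static const unsigned int nstids     = 64;
    static const unsigned int sftid_base = 1024;   /* hypothetical */
    static const unsigned int nsftids    = 16;

    static unsigned int stid_to_index(unsigned int stid)
    {
            if (nsftids && stid >= sftid_base)
                    return (stid - sftid_base) + nstids;   /* server filter TID */
            return stid - stid_base;                       /* regular stid */
    }

    int main(void)
    {
            assert(stid_to_index(stid_base) == 0);
            assert(stid_to_index(stid_base + 5) == 5);
            assert(stid_to_index(sftid_base) == nstids);
            assert(stid_to_index(sftid_base + 3) == nstids + 3);
            return 0;
    }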
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 2987809..81e8402 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -45,6 +45,7 @@
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"
+#include "t4_regs.h"
#define VLAN_NONE 0xfff
@@ -411,6 +412,40 @@ done:
}
EXPORT_SYMBOL(cxgb4_l2t_get);
+u64 cxgb4_select_ntuple(struct net_device *dev,
+ const struct l2t_entry *l2t)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct tp_params *tp = &adap->params.tp;
+ u64 ntuple = 0;
+
+ /* Initialize each of the fields that we care about and that are present
+ * in the Compressed Filter Tuple.
+ */
+ if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
+ ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
+
+ if (tp->port_shift >= 0)
+ ntuple |= (u64)l2t->lport << tp->port_shift;
+
+ if (tp->protocol_shift >= 0)
+ ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
+
+ if (tp->vnic_shift >= 0) {
+ u32 viid = cxgb4_port_viid(dev);
+ u32 vf = FW_VIID_VIN_GET(viid);
+ u32 pf = FW_VIID_PFN_GET(viid);
+ u32 vld = FW_VIID_VIVLD_GET(viid);
+
+ ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
+ V_FT_VNID_ID_PF(pf) |
+ V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
+ }
+
+ return ntuple;
+}
+EXPORT_SYMBOL(cxgb4_select_ntuple);
+
/*
* Called when address resolution fails for an L2T entry to handle packets
* on the arpq head. If a packet specifies a failure handler it is invoked,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 108c0f1..85eb5c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
const struct net_device *physdev,
unsigned int priority);
-
+u64 cxgb4_select_ntuple(struct net_device *dev,
+ const struct l2t_entry *l2t);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index cc380c3..cc3511a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2581,7 +2581,7 @@ static int t4_sge_init_soft(struct adapter *adap)
#undef READ_FL_BUF
if (fl_small_pg != PAGE_SIZE ||
- (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
+ (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
(fl_large_pg & (fl_large_pg-1)) != 0))) {
dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
fl_small_pg, fl_large_pg);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 74a6fce..e1413ea 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3808,6 +3808,109 @@ int t4_prep_adapter(struct adapter *adapter)
return 0;
}
+/**
+ * t4_init_tp_params - initialize adap->params.tp
+ * @adap: the adapter
+ *
+ * Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+ int chan;
+ u32 v;
+
+ v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
+ adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+ adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
+
+ /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+ for (chan = 0; chan < NCHAN; chan++)
+ adap->params.tp.tx_modq[chan] = chan;
+
+ /* Cache the adapter's Compressed Filter Mode and global Ingress
+ * Configuration.
+ */
+ t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP);
+ t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &adap->params.tp.ingress_config, 1,
+ TP_INGRESS_CONFIG);
+
+ /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+ * shift positions of several elements of the Compressed Filter Tuple
+ * for this adapter which we need frequently ...
+ */
+ adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+ adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+ adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+ adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+ F_PROTOCOL);
+
+ /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+ * represents the presence of an Outer VLAN instead of a VNIC ID.
+ */
+ if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+ adap->params.tp.vnic_shift = -1;
+
+ return 0;
+}
+
+/**
+ * t4_filter_field_shift - calculate filter field shift
+ * @adap: the adapter
+ * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ * Return the shift position of a filter field within the Compressed
+ * Filter Tuple. The filter field is specified via its selection bit
+ * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
+ */
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
+{
+ unsigned int filter_mode = adap->params.tp.vlan_pri_map;
+ unsigned int sel;
+ int field_shift;
+
+ if ((filter_mode & filter_sel) == 0)
+ return -1;
+
+ for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+ switch (filter_mode & sel) {
+ case F_FCOE:
+ field_shift += W_FT_FCOE;
+ break;
+ case F_PORT:
+ field_shift += W_FT_PORT;
+ break;
+ case F_VNIC_ID:
+ field_shift += W_FT_VNIC_ID;
+ break;
+ case F_VLAN:
+ field_shift += W_FT_VLAN;
+ break;
+ case F_TOS:
+ field_shift += W_FT_TOS;
+ break;
+ case F_PROTOCOL:
+ field_shift += W_FT_PROTOCOL;
+ break;
+ case F_ETHERTYPE:
+ field_shift += W_FT_ETHERTYPE;
+ break;
+ case F_MACMATCH:
+ field_shift += W_FT_MACMATCH;
+ break;
+ case F_MPSHITTYPE:
+ field_shift += W_FT_MPSHITTYPE;
+ break;
+ case F_FRAGMENTATION:
+ field_shift += W_FT_FRAGMENTATION;
+ break;
+ }
+ }
+ return field_shift;
+}
+
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
u8 addr[6];
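t4_filter_field_shift() computes a field's position as the sum of the widths of every lower-order field that the filter mode also selects; with the widths defined later in this patch, a mode of F_PORT | F_VLAN | F_PROTOCOL yields port_shift = 0, vlan_shift = W_FT_PORT = 3 and protocol_shift = W_FT_PORT + W_FT_VLAN = 20. A standalone sketch of the same walk (selection bits and widths copied from this patch, the rest illustrative):

    #include <assert.h>

    /* Selection bits in TP_VLAN_PRI_MAP and field widths, as in this patch */
    #define F_FCOE      (1 << 0)
    #define F_PORT      (1 << 1)
    #define F_VNIC_ID   (1 << 2)
    #define F_VLAN      (1 << 3)
    #define F_TOS       (1 << 4)
    #define F_PROTOCOL  (1 << 5)

    static int field_width(int sel)
    {
            switch (sel) {
            case F_FCOE:     return 1;     /* W_FT_FCOE */
            case F_PORT:     return 3;     /* W_FT_PORT */
            case F_VNIC_ID:  return 17;    /* W_FT_VNIC_ID */
            case F_VLAN:     return 17;    /* W_FT_VLAN */
            case F_TOS:      return 8;     /* W_FT_TOS */
            case F_PROTOCOL: return 8;     /* W_FT_PROTOCOL */
            }
            return 0;
    }

    /* Same walk as t4_filter_field_shift(): sum the widths of every
     * selected field below filter_sel; -1 if the field is not selected.
     */
    static int filter_field_shift(unsigned int filter_mode, int filter_sel)
    {
            unsigned int sel;
            int shift = 0;

            if (!(filter_mode & filter_sel))
                    return -1;
            for (sel = 1; sel < (unsigned int)filter_sel; sel <<= 1)
                    if (filter_mode & sel)
                            shift += field_width(sel);
            return shift;
    }

    int main(void)
    {
            unsigned int mode = F_PORT | F_VLAN | F_PROTOCOL;

            assert(filter_field_shift(mode, F_PORT) == 0);
            assert(filter_field_shift(mode, F_VLAN) == 3);
            assert(filter_field_shift(mode, F_PROTOCOL) == 3 + 17);
            assert(filter_field_shift(mode, F_TOS) == -1);  /* not selected */
            return 0;
    }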
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 0a8205d..4082522 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1171,10 +1171,50 @@
#define A_TP_TX_SCHED_PCMD 0x25
+#define S_VNIC 11
+#define V_VNIC(x) ((x) << S_VNIC)
+#define F_VNIC V_VNIC(1U)
+
+#define S_FRAGMENTATION 9
+#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
+#define F_FRAGMENTATION V_FRAGMENTATION(1U)
+
+#define S_MPSHITTYPE 8
+#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
+#define F_MPSHITTYPE V_MPSHITTYPE(1U)
+
+#define S_MACMATCH 7
+#define V_MACMATCH(x) ((x) << S_MACMATCH)
+#define F_MACMATCH V_MACMATCH(1U)
+
+#define S_ETHERTYPE 6
+#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
+#define F_ETHERTYPE V_ETHERTYPE(1U)
+
+#define S_PROTOCOL 5
+#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
+#define F_PROTOCOL V_PROTOCOL(1U)
+
+#define S_TOS 4
+#define V_TOS(x) ((x) << S_TOS)
+#define F_TOS V_TOS(1U)
+
+#define S_VLAN 3
+#define V_VLAN(x) ((x) << S_VLAN)
+#define F_VLAN V_VLAN(1U)
+
+#define S_VNIC_ID 2
+#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
+#define F_VNIC_ID V_VNIC_ID(1U)
+
#define S_PORT 1
#define V_PORT(x) ((x) << S_PORT)
#define F_PORT V_PORT(1U)
+#define S_FCOE 0
+#define V_FCOE(x) ((x) << S_FCOE)
+#define F_FCOE V_FCOE(1U)
+
#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
@@ -1213,4 +1253,37 @@
#define V_CHIPID(x) ((x) << S_CHIPID)
#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
+/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present. These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define W_FT_FCOE 1
+#define W_FT_PORT 3
+#define W_FT_VNIC_ID 17
+#define W_FT_VLAN 17
+#define W_FT_TOS 8
+#define W_FT_PROTOCOL 8
+#define W_FT_ETHERTYPE 16
+#define W_FT_MACMATCH 9
+#define W_FT_MPSHITTYPE 3
+#define W_FT_FRAGMENTATION 1
+
+/* Some of the Compressed Filter Tuple fields have internal structure. These
+ * bit shifts/masks describe those structures. All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple
+ */
+#define S_FT_VLAN_VLD 16
+#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD)
+#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U)
+
+#define S_FT_VNID_ID_VF 0
+#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF)
+
+#define S_FT_VNID_ID_PF 7
+#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF)
+
+#define S_FT_VNID_ID_VLD 16
+#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
+
#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 5878df6..4ccaf9a 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE3_MAX_RSS_QS 16
#define BE3_MAX_TX_QS 16
#define BE3_MAX_EVT_QS 16
+#define BE3_SRIOV_MAX_EVT_QS 8
#define MAX_RX_QS 32
#define MAX_EVT_QS 32
@@ -480,7 +481,7 @@ struct be_adapter {
struct list_head entry;
u32 flash_status;
- struct completion flash_compl;
+ struct completion et_cmd_compl;
struct be_resources res; /* resources available for the func */
u16 num_vfs; /* Number of VFs provisioned by PF */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index e0e8bc1..94c35c8 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
subsystem = resp_hdr->subsystem;
}
+ if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
+ subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+ complete(&adapter->et_cmd_compl);
+ return 0;
+ }
+
if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
(opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
(subsystem == CMD_SUBSYSTEM_COMMON)) {
adapter->flash_status = compl_status;
- complete(&adapter->flash_compl);
+ complete(&adapter->et_cmd_compl);
}
if (compl_status == MCC_STATUS_SUCCESS) {
@@ -2017,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
0x3ea83c02, 0x4a110304};
int status;
+ if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
+ return 0;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -2160,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
be_mcc_notify(adapter);
spin_unlock_bh(&adapter->mcc_lock);
- if (!wait_for_completion_timeout(&adapter->flash_compl,
+ if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(60000)))
status = -1;
else
@@ -2255,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
be_mcc_notify(adapter);
spin_unlock_bh(&adapter->mcc_lock);
- if (!wait_for_completion_timeout(&adapter->flash_compl,
- msecs_to_jiffies(40000)))
+ if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+ msecs_to_jiffies(40000)))
status = -1;
else
status = adapter->flash_status;
@@ -2367,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_loopback_test *req;
+ struct be_cmd_resp_loopback_test *resp;
int status;
spin_lock_bh(&adapter->mcc_lock);
@@ -2381,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
- req->hdr.timeout = cpu_to_le32(4);
+ req->hdr.timeout = cpu_to_le32(15);
req->pattern = cpu_to_le64(pattern);
req->src_port = cpu_to_le32(port_num);
req->dest_port = cpu_to_le32(port_num);
@@ -2390,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
req->num_pkts = cpu_to_le32(num_pkts);
req->loopback_type = cpu_to_le32(loopback_type);
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
- status = le32_to_cpu(resp->status);
- }
+ be_mcc_notify(adapter);
+
+ spin_unlock_bh(&adapter->mcc_lock);
+ wait_for_completion(&adapter->et_cmd_compl);
+ resp = embedded_payload(wrb);
+ status = le32_to_cpu(resp->status);
+
+ return status;
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0fde69d..a37039d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1776,6 +1776,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
struct be_queue_info *rxq = &rxo->q;
struct page *pagep = NULL;
+ struct device *dev = &adapter->pdev->dev;
struct be_eth_rx_d *rxd;
u64 page_dmaaddr = 0, frag_dmaaddr;
u32 posted, page_offset = 0;
@@ -1788,9 +1789,15 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
rx_stats(rxo)->rx_post_fail++;
break;
}
- page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
- 0, adapter->big_page_size,
+ page_dmaaddr = dma_map_page(dev, pagep, 0,
+ adapter->big_page_size,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, page_dmaaddr)) {
+ put_page(pagep);
+ pagep = NULL;
+ rx_stats(rxo)->rx_post_fail++;
+ break;
+ }
page_info->page_offset = 0;
} else {
get_page(pagep);
@@ -2744,13 +2751,16 @@ static int be_rx_qs_create(struct be_adapter *adapter)
if (!BEx_chip(adapter))
adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
RSS_ENABLE_UDP_IPV6;
+ } else {
+ /* Disable RSS if only the default RX queue is created */
+ adapter->rss_flags = RSS_ENABLE_NONE;
+ }
- rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
- 128);
- if (rc) {
- adapter->rss_flags = 0;
- return rc;
- }
+ rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
+ 128);
+ if (rc) {
+ adapter->rss_flags = RSS_ENABLE_NONE;
+ return rc;
}
/* First time posting */
@@ -3124,11 +3134,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
{
struct pci_dev *pdev = adapter->pdev;
bool use_sriov = false;
+ int max_vfs;
- if (BE3_chip(adapter) && sriov_want(adapter)) {
- int max_vfs;
+ max_vfs = pci_sriov_get_totalvfs(pdev);
- max_vfs = pci_sriov_get_totalvfs(pdev);
+ if (BE3_chip(adapter) && sriov_want(adapter)) {
res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
use_sriov = res->max_vfs;
}
@@ -3159,7 +3169,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
res->max_rx_qs = res->max_rss_qs + 1;
- res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
+ if (be_physfn(adapter))
+ res->max_evt_qs = (max_vfs > 0) ?
+ BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
+ else
+ res->max_evt_qs = 1;
res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
@@ -4205,7 +4219,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
- init_completion(&adapter->flash_compl);
+ init_completion(&adapter->et_cmd_compl);
pci_save_state(adapter->pdev);
return 0;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e7c8b74..50bb71c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -428,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* If this was the last BD in the ring, start at the beginning again. */
bdp = fec_enet_get_nextdesc(bdp, fep);
+ skb_tx_timestamp(skb);
+
fep->cur_tx = bdp;
if (fep->cur_tx == fep->dirty_tx)
@@ -436,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
- skb_tx_timestamp(skb);
-
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 895450e..ff2d806 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
e1000_release_phy_80003es2lan(hw);
/* Disable IBIST slave mode (far-end loopback) */
- e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- &kum_reg_data);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ if (ret_val)
+ return ret_val;
kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
kum_reg_data);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 8d3945a..6d14eea 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7015,13 +7015,11 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
-#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
e1000_idle)
};
-#endif
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
@@ -7029,11 +7027,9 @@ static struct pci_driver e1000_driver = {
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = e1000_remove,
-#ifdef CONFIG_PM
.driver = {
.pm = &e1000_pm_ops,
},
-#endif
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
};
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index da2be59..20e71f4 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
* it across the board.
*/
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
- if (ret_val)
+ if (ret_val) {
/* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
- udelay(usec_interval);
+ if (usec_interval >= 1000)
+ msleep(usec_interval / 1000);
+ else
+ udelay(usec_interval);
+ }
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
if (phy_status & BMSR_LSTATUS)
break;
if (usec_interval >= 1000)
- mdelay(usec_interval / 1000);
+ msleep(usec_interval / 1000);
else
udelay(usec_interval);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cc06854..5bcc870 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6827,12 +6827,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
return __ixgbe_maybe_stop_tx(tx_ring, size);
}
-#ifdef IXGBE_FCOE
-static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
+ struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
+#ifdef IXGBE_FCOE
struct ixgbe_adapter *adapter;
struct ixgbe_ring_feature *f;
int txq;
+#endif
+
+ if (fwd_adapter)
+ return skb->queue_mapping + fwd_adapter->tx_base_queue;
+
+#ifdef IXGBE_FCOE
/*
* only execute the code below if protocol is FCoE
@@ -6858,9 +6866,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
txq -= f->indices;
return txq + f->offset;
+#else
+ return __netdev_pick_tx(dev, skb);
+#endif
}
-#endif
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
@@ -7629,27 +7639,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
kfree(fwd_adapter);
}
-static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
- struct net_device *dev,
- void *priv)
-{
- struct ixgbe_fwd_adapter *fwd_adapter = priv;
- unsigned int queue;
- struct ixgbe_ring *tx_ring;
-
- queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
- tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
-
- return __ixgbe_xmit_frame(skb, dev, tx_ring);
-}
-
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
-#ifdef IXGBE_FCOE
.ndo_select_queue = ixgbe_select_queue,
-#endif
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
@@ -7689,7 +7683,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
- .ndo_dfwd_start_xmit = ixgbe_fwd_xmit,
};
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d6f0c0d..72084f7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
int err;
+#ifdef CONFIG_PCI_IOV
u32 current_flags = adapter->flags;
+#endif
err = ixgbe_disable_sriov(adapter);
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6a6c1f7..ec94a20 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -619,7 +619,8 @@ ltq_etop_set_multicast_list(struct net_device *dev)
}
static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
+ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
/* we are currently only using the first queue */
return 0;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 7354960..c4eeb69 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
if (time_is_before_jiffies(end))
++timedout;
} else {
+ /* wait_event_timeout does not guarantee a delay of at
+ * least one whole jiffy, so timeout must be no less
+ * than two.
+ */
+ if (timeout < 2)
+ timeout = 2;
wait_event_timeout(dev->smi_busy_wait,
orion_mdio_smi_is_done(dev),
timeout);
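A timeout of T jiffies only guarantees roughly (T - 1) full tick periods, because the current tick may be almost over when the wait starts; with T = 1 the guaranteed wait is zero, hence the clamp to two. A small sketch of that bound, using a hypothetical 100 Hz tick purely for the numbers:

    #include <assert.h>

    #define HZ            100            /* hypothetical tick rate: 10 ms/jiffy */
    #define MS_PER_JIFFY  (1000 / HZ)

    /* Worst case: the current jiffy is almost over when the wait starts,
     * so only (timeout - 1) complete tick periods are guaranteed.
     */
    static unsigned int min_guaranteed_wait_ms(unsigned int timeout_jiffies)
    {
            return (timeout_jiffies - 1) * MS_PER_JIFFY;
    }

    int main(void)
    {
            assert(min_guaranteed_wait_ms(1) == 0);  /* may return immediately */
            assert(min_guaranteed_wait_ms(2) == 10); /* at least one full jiffy */
            return 0;
    }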
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f54ebd5..a7fcd59 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -592,7 +592,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
}
}
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f3758de..d5758ad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -714,7 +714,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 346a4e0..04b3ec1 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -52,7 +52,6 @@
#include <linux/bitrev.h>
#include <linux/slab.h>
-#include <asm/bootinfo.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/hwtest.h>
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 7692dfd..cc68657 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1604,13 +1604,13 @@ netxen_process_lro(struct netxen_adapter *adapter,
u32 seq_number;
u8 vhdr_len = 0;
- if (unlikely(ring > adapter->max_rds_rings))
+ if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = netxen_get_lro_sts_refhandle(sts_data0);
- if (unlikely(index > rds_ring->num_desc))
+ if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
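For a ring of num_desc descriptors the valid indices are 0 through num_desc - 1, so the checks have to use >= rather than >; the old tests let index == num_desc through and read one element past the array. A minimal sketch of the off-by-one:

    #include <assert.h>

    #define NUM_DESC 4

    static int index_ok_old(unsigned int index)    /* buggy: > */
    {
            return !(index > NUM_DESC);
    }

    static int index_ok_new(unsigned int index)    /* fixed: >= */
    {
            return !(index >= NUM_DESC);
    }

    int main(void)
    {
            /* index == NUM_DESC is one past the end of a NUM_DESC-element array */
            assert(index_ok_old(NUM_DESC));      /* old check wrongly accepts */
            assert(!index_ok_new(NUM_DESC));     /* new check rejects it */
            assert(index_ok_new(NUM_DESC - 1));  /* last valid descriptor */
            return 0;
    }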
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 631ea0a..f2a7c71 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -487,6 +487,7 @@ struct qlcnic_hardware_context {
struct qlcnic_mailbox *mailbox;
u8 extend_lb_time;
u8 phys_port_id[ETH_ALEN];
+ u8 lb_mode;
};
struct qlcnic_adapter_stats {
@@ -578,6 +579,8 @@ struct qlcnic_host_tx_ring {
dma_addr_t phys_addr;
dma_addr_t hw_cons_phys_addr;
struct netdev_queue *txq;
+ /* Lock to protect Tx descriptors cleanup */
+ spinlock_t tx_clean_lock;
} ____cacheline_internodealigned_in_smp;
/*
@@ -808,6 +811,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_ILB_MODE 0x1
#define QLCNIC_ELB_MODE 0x2
+#define QLCNIC_LB_MODE_MASK 0x3
#define QLCNIC_LINKEVENT 0x1
#define QLCNIC_LB_RESPONSE 0x2
@@ -1093,7 +1097,6 @@ struct qlcnic_adapter {
struct qlcnic_filter_hash rx_fhash;
struct list_head vf_mc_list;
- spinlock_t tx_clean_lock;
spinlock_t mac_learn_lock;
/* spinlock for catching rcv filters for eswitch traffic */
spinlock_t rx_mac_learn_lock;
@@ -1708,6 +1711,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+void qlcnic_update_stats(struct qlcnic_adapter *);
/* Adapter hardware abstraction */
struct qlcnic_hardware_ops {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 6055d39..f776f99 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -1684,12 +1684,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
}
} while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
- /* Make sure carrier is off and queue is stopped during loopback */
- if (netif_running(netdev)) {
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
- }
-
ret = qlcnic_do_lb_test(adapter, mode);
qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2121,6 +2115,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
ahw->link_autoneg = MSB(MSW(data[3]));
ahw->module_type = MSB(LSW(data[3]));
ahw->has_link_events = 1;
+ ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
qlcnic_advert_link_change(adapter, link_status);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index e3be276..6b08194 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
-static inline int qlcnic_82xx_statistics(void)
+static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
{
- return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
- ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ return ARRAY_SIZE(qlcnic_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+ QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
}
-static inline int qlcnic_83xx_statistics(void)
+static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
{
- return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+ return ARRAY_SIZE(qlcnic_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
- ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+ ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
+ QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
}
static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
{
- if (qlcnic_82xx_check(adapter))
- return qlcnic_82xx_statistics();
- else if (qlcnic_83xx_check(adapter))
- return qlcnic_83xx_statistics();
- else
- return -1;
+ int len = -1;
+
+ if (qlcnic_82xx_check(adapter)) {
+ len = qlcnic_82xx_statistics(adapter);
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
+ } else if (qlcnic_83xx_check(adapter)) {
+ len = qlcnic_83xx_statistics(adapter);
+ }
+
+ return len;
}
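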
#define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412
@@ -920,18 +928,13 @@ static int qlcnic_eeprom_test(struct net_device *dev)
static int qlcnic_get_sset_count(struct net_device *dev, int sset)
{
- int len;
struct qlcnic_adapter *adapter = netdev_priv(dev);
switch (sset) {
case ETH_SS_TEST:
return QLCNIC_TEST_LEN;
case ETH_SS_STATS:
- len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN;
- if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
- qlcnic_83xx_check(adapter))
- return len;
- return qlcnic_82xx_statistics();
+ return qlcnic_dev_statistics_len(adapter);
default:
return -EOPNOTSUPP;
}
@@ -1267,7 +1270,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
return data;
}
-static void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+void qlcnic_update_stats(struct qlcnic_adapter *adapter)
{
struct qlcnic_host_tx_ring *tx_ring;
int ring;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index e9c21e5..c4262c2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_skb_frag *buffrag;
int i, j;
+ spin_lock(&tx_ring->tx_clean_lock);
+
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
}
cmd_buf++;
}
+
+ spin_unlock(&tx_ring->tx_clean_lock);
}
void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index eda6c69..ad1531a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -689,6 +689,10 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
adapter->ahw->linkup = 0;
netif_carrier_off(netdev);
} else if (!adapter->ahw->linkup && linkup) {
+ /* Do not advertise Link up if the port is in loopback mode */
+ if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
+ return;
+
netdev_info(netdev, "NIC Link is up\n");
adapter->ahw->linkup = 1;
netif_carrier_on(netdev);
@@ -778,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
- if (!spin_trylock(&adapter->tx_clean_lock))
+ if (!spin_trylock(&tx_ring->tx_clean_lock))
return 1;
sw_consumer = tx_ring->sw_consumer;
@@ -807,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
break;
}
+ tx_ring->sw_consumer = sw_consumer;
+
if (count && netif_running(netdev)) {
- tx_ring->sw_consumer = sw_consumer;
smp_mb();
if (netif_tx_queue_stopped(tx_ring->txq) &&
netif_carrier_ok(netdev)) {
@@ -834,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
- spin_unlock(&adapter->tx_clean_lock);
+
+ spin_unlock(&tx_ring->tx_clean_lock);
return done;
}
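The qlcnic changes replace the single adapter-wide tx_clean_lock with a lock embedded in each Tx ring, so completion cleanup on one ring no longer serializes against the others, and the trylock keeps the poll path from blocking when another context already owns the ring. A rough userspace analogue using POSIX mutexes (the ring structure and function names are invented for illustration; the kernel code uses spinlocks):

#include <pthread.h>
#include <stdio.h>

struct tx_ring {
	pthread_mutex_t clean_lock;	/* per-ring, like tx_clean_lock */
	int sw_consumer;
};

/* Returns 1 ("try again later") if another context already owns the ring. */
static int process_tx_ring(struct tx_ring *ring)
{
	if (pthread_mutex_trylock(&ring->clean_lock) != 0)
		return 1;

	ring->sw_consumer++;		/* stand-in for descriptor cleanup */

	pthread_mutex_unlock(&ring->clean_lock);
	return 0;
}

int main(void)
{
	struct tx_ring ring = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("busy=%d consumer=%d\n", process_tx_ring(&ring),
	       ring.sw_consumer);
	return 0;
}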
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2c8cac0..550791b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1756,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_sriov_vf_check(adapter))
qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
- spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
@@ -1777,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->drv_tx_rings; ring++)
qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
- spin_unlock(&adapter->tx_clean_lock);
}
/* Usage: During suspend and firmware recovery module */
@@ -2172,6 +2170,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
}
memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
+ spin_lock_init(&tx_ring->tx_clean_lock);
}
if (qlcnic_83xx_check(adapter) ||
@@ -2299,7 +2298,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
- spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
qlcnic_register_dcb(adapter);
@@ -2782,6 +2780,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct net_device_stats *stats = &netdev->stats;
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_update_stats(adapter);
+
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 686f460..024f816 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -75,7 +75,6 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
num_vfs = sriov->num_vfs;
max = num_vfs + 1;
info->bit_offsets = 0xffff;
- info->max_tx_ques = res->num_tx_queues / max;
info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
@@ -86,6 +85,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
info->max_tx_mac_filters = temp;
info->min_tx_bw = 0;
info->max_tx_bw = MAX_BW;
+ info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
} else {
id = qlcnic_sriov_func_to_index(adapter, func);
if (id < 0)
@@ -95,6 +95,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
info->max_tx_bw = vp->max_tx_bw;
info->max_rx_ucast_mac_filters = num_vf_macs;
info->max_tx_mac_filters = num_vf_macs;
+ info->max_tx_ques = QLCNIC_SINGLE_RING;
}
info->max_rx_ip_addr = res->num_destip / max;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 449f506..f705aee 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4765,6 +4765,8 @@ static int qlge_probe(struct pci_dev *pdev,
NETIF_F_RXCSUM;
ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
+ /* vlan gets same features (except vlan filter) */
+ ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
if (test_bit(QL_DMA64, &qdev->flags))
ndev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8a7a23a..797b56a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- if (netif_msg_hw(priv)) {
- if (priv->dma_cap.time_stamp) {
- pr_debug("IEEE 1588-2002 Time Stamp supported\n");
- priv->adv_ts = 0;
- }
- if (priv->dma_cap.atime_stamp && priv->extend_desc) {
- pr_debug
- ("IEEE 1588-2008 Advanced Time Stamp supported\n");
- priv->adv_ts = 1;
- }
- }
+ priv->adv_ts = 0;
+ if (priv->dma_cap.atime_stamp && priv->extend_desc)
+ priv->adv_ts = 1;
+
+ if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
+ pr_debug("IEEE 1588-2002 Time Stamp supported\n");
+
+ if (netif_msg_hw(priv) && priv->adv_ts)
+ pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
priv->hw->ptp = &stmmac_ptp;
priv->hwts_tx_en = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b8b0eee..7680581 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
priv->hw->ptp->config_addend(priv->ioaddr, addend);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
@@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 5120d9c..5330fd2 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
/* set speed_in input in case RMII mode is used in 100Mbps */
if (phy->speed == 100)
mac_control |= BIT(15);
+ else if (phy->speed == 10)
+ mac_control |= BIT(18); /* In Band mode */
*link = true;
} else {
@@ -2106,7 +2108,7 @@ static int cpsw_probe(struct platform_device *pdev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
- dev_name(priv->dev), priv)) {
+ dev_name(&pdev->dev), priv)) {
dev_err(priv->dev, "error attaching irq\n");
goto clean_ale_ret;
}
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 628b736..0e9fb33 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2080,7 +2080,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
}
/* Return subqueue id on this core (one per core). */
-static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
return smp_processor_id();
}
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index cce6c4b..ef312bc 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1618,6 +1618,7 @@ static void rhine_reset_task(struct work_struct *work)
goto out_unlock;
napi_disable(&rp->napi);
+ netif_tx_disable(dev);
spin_lock_bh(&rp->lock);
/* clear all descriptors */
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 3169252..5d78c1d 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case HDLCDRVCTL_CALIBRATE:
if(!capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (bi.data.calibrate > INT_MAX / s->par.bitrate)
+ return -EINVAL;
s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
return 0;
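The hdlcdrv hunk guards the calibrate * bitrate product against signed overflow before performing the multiplication; rejecting values above INT_MAX / bitrate keeps the later arithmetic defined. The same guard in a standalone sketch:

#include <limits.h>
#include <stdio.h>

/* Reject 'calibrate' values whose product with 'bitrate' would overflow int. */
static int set_calibrate(unsigned int calibrate, int bitrate, int *out)
{
	if (bitrate <= 0 || calibrate > (unsigned int)(INT_MAX / bitrate))
		return -1;	/* would overflow, like the driver's -EINVAL */
	*out = (int)(calibrate * bitrate / 16);
	return 0;
}

int main(void)
{
	int val;

	printf("accepted=%d\n", set_calibrate(100, 9600, &val) == 0);
	printf("rejected=%d\n", set_calibrate(4000000, 9600, &val) < 0);
	return 0;
}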
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 1971411..61dd244 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
case SIOCYAMGCFG:
+ memset(&yi, 0, sizeof(yi));
yi.cfg.mask = 0xffffffff;
yi.cfg.iobase = yp->iobase;
yi.cfg.irq = yp->irq;
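The yam SIOCYAMGCFG hunk zeroes the whole ioctl reply before filling individual fields, so padding and any members the driver never assigns are not copied back to user space with stale kernel stack contents. The same pattern in plain C (the struct layout here is illustrative only):

#include <stdio.h>
#include <string.h>

struct yam_cfg {
	unsigned int mask;
	unsigned int iobase;
	unsigned int irq;
	unsigned int reserved[4];	/* fields we never assign explicitly */
};

static void fill_cfg(struct yam_cfg *cfg)
{
	/* Zero first: anything not assigned below stays 0 instead of
	 * exposing whatever happened to be on the stack. */
	memset(cfg, 0, sizeof(*cfg));
	cfg->mask = 0xffffffff;
	cfg->iobase = 0x3f8;
	cfg->irq = 4;
}

int main(void)
{
	struct yam_cfg cfg;

	fill_cfg(&cfg);
	printf("reserved[0]=%u\n", cfg.reserved[0]);	/* always 0 */
	return 0;
}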
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f813572..71baeb3 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -261,9 +261,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
struct sk_buff *skb;
net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
- if (!net) {
- netdev_err(net, "got receive callback but net device"
- " not initialized yet\n");
+ if (!net || net->reg_state != NETREG_REGISTERED) {
packet->status = NVSP_STAT_FAIL;
return 0;
}
@@ -435,19 +433,11 @@ static int netvsc_probe(struct hv_device *dev,
SET_ETHTOOL_OPS(net, &ethtool_ops);
SET_NETDEV_DEV(net, &dev->device);
- ret = register_netdev(net);
- if (ret != 0) {
- pr_err("Unable to register netdev.\n");
- free_netdev(net);
- goto out;
- }
-
/* Notify the netvsc driver of the new device */
device_info.ring_size = ring_size;
ret = rndis_filter_device_add(dev, &device_info);
if (ret != 0) {
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
- unregister_netdev(net);
free_netdev(net);
hv_set_drvdata(dev, NULL);
return ret;
@@ -456,7 +446,13 @@ static int netvsc_probe(struct hv_device *dev,
netif_carrier_on(net);
-out:
+ ret = register_netdev(net);
+ if (ret != 0) {
+ pr_err("Unable to register netdev.\n");
+ rndis_filter_device_remove(dev);
+ free_netdev(net);
+ }
+
return ret;
}
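The netvsc_probe() rework above moves register_netdev() to the end of probe, after the RNDIS backend has been added, so no netdev callbacks can arrive on a half-initialized device and the error path only has to undo what actually succeeded. A compressed sketch of that ordering with invented helper names (backend_add, backend_remove and netdev_register are placeholders, not Hyper-V APIs):

#include <stdio.h>

static int backend_add(void)     { return 0; }	/* placeholder */
static void backend_remove(void) { }		/* placeholder */
static int netdev_register(void) { return 0; }	/* placeholder */

static int probe(void)
{
	int ret;

	ret = backend_add();		/* bring the data path up first */
	if (ret)
		return ret;

	ret = netdev_register();	/* expose the device last */
	if (ret)
		backend_remove();	/* unwind only what succeeded */

	return ret;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}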
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index acf9379..bc8faae 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -299,7 +299,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
if (vlan->fwd_priv) {
skb->dev = vlan->lowerdev;
- ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
+ ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
} else {
ret = macvlan_queue_xmit(skb, dev);
}
@@ -338,6 +338,8 @@ static const struct header_ops macvlan_hard_header_ops = {
.cache_update = eth_header_cache_update,
};
+static struct rtnl_link_ops macvlan_link_ops;
+
static int macvlan_open(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
@@ -353,7 +355,8 @@ static int macvlan_open(struct net_device *dev)
goto hash_add;
}
- if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
+ if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
+ dev->rtnl_link_ops == &macvlan_link_ops) {
vlan->fwd_priv =
lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
@@ -362,10 +365,8 @@ static int macvlan_open(struct net_device *dev)
*/
if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
vlan->fwd_priv = NULL;
- } else {
- dev->features &= ~NETIF_F_LLTX;
+ } else
return 0;
- }
}
err = -EBUSY;
@@ -690,8 +691,18 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ netdev_features_t mask;
+
+ features |= NETIF_F_ALL_FOR_ALL;
+ features &= (vlan->set_features | ~MACVLAN_FEATURES);
+ mask = features;
+
+ features = netdev_increment_features(vlan->lowerdev->features,
+ features,
+ mask);
+ features |= NETIF_F_LLTX;
- return features & (vlan->set_features | ~MACVLAN_FEATURES);
+ return features;
}
static const struct ethtool_ops macvlan_ethtool_ops = {
@@ -1019,9 +1030,8 @@ static int macvlan_device_event(struct notifier_block *unused,
break;
case NETDEV_FEAT_CHANGE:
list_for_each_entry(vlan, &port->vlans, list) {
- vlan->dev->features = dev->features & MACVLAN_FEATURES;
vlan->dev->gso_max_size = dev->gso_max_size;
- netdev_features_change(vlan->dev);
+ netdev_update_features(vlan->dev);
}
break;
case NETDEV_UNREGISTER:
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 36c6994..98434b8 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -565,10 +565,8 @@ int phy_start_interrupts(struct phy_device *phydev)
int err = 0;
atomic_set(&phydev->irq_disable, 0);
- if (request_irq(phydev->irq, phy_interrupt,
- IRQF_SHARED,
- "phy_interrupt",
- phydev) < 0) {
+ if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
+ phydev) < 0) {
pr_warn("%s: Can't get IRQ %d (PHY)\n",
phydev->bus->name, phydev->irq);
phydev->irq = PHY_POLL;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 736050d..b75ae5b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1647,7 +1647,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
/*
* This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7c8343a..ecec802 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -348,7 +348,8 @@ unlock:
* different rxq no. here. If we could not get rxhash, then we would
* hope the rxq no. may help here.
*/
-static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
struct tun_struct *tun = netdev_priv(dev);
struct tun_flow_entry *e;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 85e4a016..47b0f73 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM
module will be called cdc_mbim.
config USB_NET_DM9601
- tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
+ tristate "Davicom DM96xx based USB 10/100 ethernet devices"
depends on USB_USBNET
select CRC32
help
- This option adds support for Davicom DM9601 based USB 1.1
- 10/100 Ethernet adapters.
+ This option adds support for Davicom DM9601/DM9620/DM9621A
+ based USB 10/100 Ethernet adapters.
config USB_NET_SR9700
tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index c6867f9..e802198 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -1,5 +1,5 @@
/*
- * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices
+ * Davicom DM96xx USB 10/100Mbps ethernet devices
*
* Peter Korsgaard <jacmet@sunsite.dk>
*
@@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &dm9601_ethtool_ops;
dev->net->hard_header_len += DM_TX_OVERHEAD;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
- dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
+
+ /* dm9620/21a require room for 4 byte padding, even in dm9601
+ * mode, so we need +1 to be able to receive full size
+ * ethernet frames.
+ */
+ dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
dev->mii.dev = dev->net;
dev->mii.mdio_read = dm9601_mdio_read;
@@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags)
{
- int len;
+ int len, pad;
/* format:
b1: packet length low
@@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
b3..n: packet data
*/
- len = skb->len;
+ len = skb->len + DM_TX_OVERHEAD;
- if (skb_headroom(skb) < DM_TX_OVERHEAD) {
+ /* workaround for dm962x errata with tx fifo getting out of
+ * sync if a USB bulk transfer retry happens right after a
+ * packet with odd / maxpacket length by adding up to 3 bytes
+ * padding.
+ */
+ while ((len & 1) || !(len % dev->maxpacket))
+ len++;
+
+ len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
+ pad = len - skb->len;
+
+ if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
struct sk_buff *skb2;
- skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
+ skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
dev_kfree_skb_any(skb);
skb = skb2;
if (!skb)
@@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
__skb_push(skb, DM_TX_OVERHEAD);
- /* usbnet adds padding if length is a multiple of packet size
- if so, adjust length value in header */
- if ((skb->len % dev->maxpacket) == 0)
- len++;
+ if (pad) {
+ memset(skb->data + skb->len, 0, pad);
+ __skb_put(skb, pad);
+ }
skb->data[0] = len;
skb->data[1] = len >> 8;
@@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev)
}
static const struct driver_info dm9601_info = {
- .description = "Davicom DM9601 USB Ethernet",
+ .description = "Davicom DM96xx USB 10/100 Ethernet",
.flags = FLAG_ETHER | FLAG_LINK_INTR,
.bind = dm9601_bind,
.rx_fixup = dm9601_rx_fixup,
@@ -594,6 +610,22 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
+ {
+ USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
+ {
+ USB_DEVICE(0x0a46, 0x9622), /* DM9622 USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
+ {
+ USB_DEVICE(0x0a46, 0x0269), /* DM962OA USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
+ {
+ USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
{}, // END
};
@@ -612,5 +644,5 @@ static struct usb_driver dm9601_driver = {
module_usb_driver(dm9601_driver);
MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
-MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices");
+MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices");
MODULE_LICENSE("GPL");
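The dm9601 tx_fixup change pads each transfer so the total length (2-byte hardware header plus payload) is even and never an exact multiple of the endpoint's maxpacket size, which is the stated workaround for the dm962x FIFO errata; the pad bytes are zeroed and appended, while the length written into the header excludes the header itself. The same arithmetic in standalone form (DM_TX_OVERHEAD of 2 matches the driver's header size; maxpacket is a sample value):

#include <stdio.h>

#define DM_TX_OVERHEAD 2	/* 2-byte length header */

/* Return how many zero pad bytes to append for a given payload length. */
static unsigned int dm96xx_tx_pad(unsigned int skb_len, unsigned int maxpacket)
{
	unsigned int len = skb_len + DM_TX_OVERHEAD;

	/* keep the total length even and off the maxpacket boundary */
	while ((len & 1) || !(len % maxpacket))
		len++;

	return len - DM_TX_OVERHEAD - skb_len;
}

int main(void)
{
	printf("pad for 60-byte frame, maxpacket 64: %u\n",
	       dm96xx_tx_pad(60, 64));
	printf("pad for 62-byte frame, maxpacket 64: %u\n",
	       dm96xx_tx_pad(62, 64));
	return 0;
}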
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 86292e6..1a48234 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -185,7 +185,6 @@ enum rx_ctrl_state{
#define BM_REQUEST_TYPE (0xa1)
#define B_NOTIFICATION (0x20)
#define W_VALUE (0x0)
-#define W_INDEX (0x2)
#define W_LENGTH (0x2)
#define B_OVERRUN (0x1<<6)
@@ -1487,6 +1486,7 @@ static void tiocmget_intr_callback(struct urb *urb)
struct uart_icount *icount;
struct hso_serial_state_notification *serial_state_notification;
struct usb_device *usb;
+ int if_num;
/* Sanity checks */
if (!serial)
@@ -1495,15 +1495,24 @@ static void tiocmget_intr_callback(struct urb *urb)
handle_usb_error(status, __func__, serial->parent);
return;
}
+
+ /* tiocmget is only supported on HSO_PORT_MODEM */
tiocmget = serial->tiocmget;
if (!tiocmget)
return;
+ BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM);
+
usb = serial->parent->usb;
+ if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
+
+ /* wIndex should be the USB interface number of the port to which the
+ * notification applies, which should always be the Modem port.
+ */
serial_state_notification = &tiocmget->serial_state_notification;
if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
serial_state_notification->bNotification != B_NOTIFICATION ||
le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
- le16_to_cpu(serial_state_notification->wIndex) != W_INDEX ||
+ le16_to_cpu(serial_state_notification->wIndex) != if_num ||
le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) {
dev_warn(&usb->dev,
"hso received invalid serial state notification\n");
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 03832d3..f546378 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -117,7 +117,6 @@ enum {
struct mcs7830_data {
u8 multi_filter[8];
u8 config;
- u8 link_counter;
};
static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@@ -561,26 +560,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
{
u8 *buf = urb->transfer_buffer;
bool link, link_changed;
- struct mcs7830_data *data = mcs7830_get_data(dev);
if (urb->actual_length < 16)
return;
- link = !(buf[1] & 0x20);
+ link = !(buf[1] == 0x20);
link_changed = netif_carrier_ok(dev->net) != link;
if (link_changed) {
- data->link_counter++;
- /*
- track link state 20 times to guard against erroneous
- link state changes reported sometimes by the chip
- */
- if (data->link_counter > 20) {
- data->link_counter = 0;
- usbnet_link_change(dev, link, 0);
- netdev_dbg(dev->net, "Link Status is: %d\n", link);
- }
- } else
- data->link_counter = 0;
+ usbnet_link_change(dev, link, 0);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
+ }
}
static const struct driver_info moschip_info = {
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8494bb5..aba04f5 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1245,7 +1245,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
return -ENOMEM;
urb->num_sgs = num_sgs;
- sg_init_table(urb->sg, urb->num_sgs);
+ sg_init_table(urb->sg, urb->num_sgs + 1);
sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
total_len += skb_headlen(skb);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d208f86..5d77644 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1797,16 +1797,17 @@ static int virtnet_restore(struct virtio_device *vdev)
if (err)
return err;
- if (netif_running(vi->dev))
+ if (netif_running(vi->dev)) {
+ for (i = 0; i < vi->curr_queue_pairs; i++)
+ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
+
for (i = 0; i < vi->max_queue_pairs; i++)
virtnet_napi_enable(&vi->rq[i]);
+ }
netif_device_attach(vi->dev);
- for (i = 0; i < vi->curr_queue_pairs; i++)
- if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
-
mutex_lock(&vi->config_lock);
vi->config_enable = true;
mutex_unlock(&vi->config_lock);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 249e01c..ed384fe 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2440,7 +2440,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
/* update header length based on lower device */
dev->hard_header_len = lowerdev->hard_header_len +
(use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
- }
+ } else if (use_ipv6)
+ vxlan->flags |= VXLAN_F_IPV6;
if (data[IFLA_VXLAN_TOS])
vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 8d78253..a366d6b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
mask2 |= ATH9K_INT_CST;
if (isr2 & AR_ISR_S2_TSFOOR)
mask2 |= ATH9K_INT_TSFOOR;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S2, isr2);
+ isr &= ~AR_ISR_BCNMISC;
+ }
}
- isr = REG_READ(ah, AR_ISR_RAC);
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
+ isr = REG_READ(ah, AR_ISR_RAC);
+
if (isr == 0xffffffff) {
*masked = 0;
return false;
@@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
*masked |= ATH9K_INT_TX;
- s0_s = REG_READ(ah, AR_ISR_S0_S);
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+ s0_s = REG_READ(ah, AR_ISR_S0_S);
+ s1_s = REG_READ(ah, AR_ISR_S1_S);
+ } else {
+ s0_s = REG_READ(ah, AR_ISR_S0);
+ REG_WRITE(ah, AR_ISR_S0, s0_s);
+ s1_s = REG_READ(ah, AR_ISR_S1);
+ REG_WRITE(ah, AR_ISR_S1, s1_s);
+
+ isr &= ~(AR_ISR_TXOK |
+ AR_ISR_TXDESC |
+ AR_ISR_TXERR |
+ AR_ISR_TXEOL);
+ }
+
ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
-
- s1_s = REG_READ(ah, AR_ISR_S1_S);
ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
}
@@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
*masked |= mask2;
}
- if (AR_SREV_9100(ah))
- return true;
-
- if (isr & AR_ISR_GENTMR) {
+ if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
u32 s5_s;
- s5_s = REG_READ(ah, AR_ISR_S5_S);
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+ s5_s = REG_READ(ah, AR_ISR_S5_S);
+ } else {
+ s5_s = REG_READ(ah, AR_ISR_S5);
+ }
+
ah->intr_gen_timer_trigger =
MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
@@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
*masked |= ATH9K_INT_TIM_TIMER;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S5, s5_s);
+ isr &= ~AR_ISR_GENTMR;
+ }
}
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR, isr);
+ REG_READ(ah, AR_ISR);
+ }
+
+ if (AR_SREV_9100(ah))
+ return true;
+
if (sync_cause) {
ath9k_debug_sync_cause(common, sync_cause);
fatal_int =
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a2657f..608d739 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
struct ath9k_vif_iter_data *iter_data = data;
int i;
- for (i = 0; i < ETH_ALEN; i++)
- iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+ if (iter_data->hw_macaddr != NULL) {
+ for (i = 0; i < ETH_ALEN; i++)
+ iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+ } else {
+ iter_data->hw_macaddr = mac;
+ }
}
-static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
+static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_vif_iter_data iter_data;
/*
- * Use the hardware MAC address as reference, the hardware uses it
- * together with the BSSID mask when matching addresses.
+ * Pick the MAC address of the first interface as the new hardware
+ * MAC address. The hardware will use it together with the BSSID mask
+ * when matching addresses.
*/
- iter_data.hw_macaddr = common->macaddr;
+ iter_data.hw_macaddr = NULL;
memset(&iter_data.mask, 0xff, ETH_ALEN);
if (vif)
@@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
ath9k_htc_bssid_iter, &iter_data);
memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+
+ if (iter_data.hw_macaddr)
+ memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
+
ath_hw_setbssidmask(common);
}
@@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
goto out;
}
- ath9k_htc_set_bssid_mask(priv, vif);
+ ath9k_htc_set_mac_bssid_mask(priv, vif);
priv->vif_slot |= (1 << avp->index);
priv->nvifs++;
@@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
ath9k_htc_set_opmode(priv);
- ath9k_htc_set_bssid_mask(priv, vif);
+ ath9k_htc_set_mac_bssid_mask(priv, vif);
/*
* Stop ANI only if there are no associated station interfaces.
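Both ath9k iterators above build the BSSID mask by clearing every bit position where any interface's MAC address differs from the reference address; the htc change additionally takes the first interface's MAC as that reference instead of the hardware default. The mask computation on its own, with example addresses:

#include <stdio.h>

#define ETH_ALEN 6

static void build_bssid_mask(const unsigned char (*macs)[ETH_ALEN], int n,
			     unsigned char *mask)
{
	int i, j;

	for (i = 0; i < ETH_ALEN; i++)
		mask[i] = 0xff;

	/* clear any bit that differs between an interface and macs[0] */
	for (j = 1; j < n; j++)
		for (i = 0; i < ETH_ALEN; i++)
			mask[i] &= ~(macs[0][i] ^ macs[j][i]);
}

int main(void)
{
	const unsigned char macs[2][ETH_ALEN] = {
		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x75 },
	};
	unsigned char mask[ETH_ALEN];
	int i;

	build_bssid_mask(macs, 2, mask);
	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%s", mask[i], i == ETH_ALEN - 1 ? "\n" : ":");
	return 0;
}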
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 74f452c..21aa09e 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -965,8 +965,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
struct ath_common *common = ath9k_hw_common(ah);
/*
- * Use the hardware MAC address as reference, the hardware uses it
- * together with the BSSID mask when matching addresses.
+ * Pick the MAC address of the first interface as the new hardware
+ * MAC address. The hardware will use it together with the BSSID mask
+ * when matching addresses.
*/
memset(iter_data, 0, sizeof(*iter_data));
memset(&iter_data->mask, 0xff, ETH_ALEN);
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 8660502..e627254 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -357,21 +357,27 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c72438b..a1b32ee 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2011,7 +2011,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
(hwsim_flags & HWSIM_TX_STAT_ACK)) {
if (skb->len >= 16) {
hdr = (struct ieee80211_hdr *) skb->data;
- mac80211_hwsim_monitor_ack(txi->rate_driver_data[0],
+ mac80211_hwsim_monitor_ack(data2->channel,
hdr->addr2);
}
txi->flags |= IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 78e8a66..8bb8988 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -746,7 +746,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
}
static u16
-mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb)
+mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
skb->priority = cfg80211_classify8021d(skb);
return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 0f49444..5a53195 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -740,6 +740,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
};
int index = rtlpci->rx_ring[rx_queue_idx].idx;
+ if (rtlpci->driver_is_goingto_unload)
+ return;
/*RX NORMAL PKT */
while (count--) {
/*rx descriptor */
@@ -1636,6 +1638,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
*/
set_hal_stop(rtlhal);
+ rtlpci->driver_is_goingto_unload = true;
rtlpriv->cfg->ops->disable_interrupt(hw);
cancel_work_sync(&rtlpriv->works.lps_change_work);
@@ -1653,7 +1656,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
ppsc->rfchange_inprogress = true;
spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
- rtlpci->driver_is_goingto_unload = true;
rtlpriv->cfg->ops->hw_disable(hw);
/* some things are not needed if firmware not available */
if (!rtlpriv->max_fw_size)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 08ae01b..c47794b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
#define MAX_PENDING_REQS 256
+/* It's possible for an skb to have a maximal number of frags
+ * but still be less than MAX_BUFFER_OFFSET in size. Thus the
+ * worst-case number of copy operations is MAX_SKB_FRAGS per
+ * ring slot.
+ */
+#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
@@ -143,13 +150,13 @@ struct xenvif {
*/
RING_IDX rx_req_cons_peek;
- /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
- * head/fragment page uses 2 copy operations because it
- * straddles two buffers in the frontend.
- */
- struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
- struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
+ /* This array is allocated separately as it is large */
+ struct gnttab_copy *grant_copy_op;
+ /* We create one meta structure per ring request we consume, so
+ * the maximum number is the same as the ring size.
+ */
+ struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
u8 fe_dev_addr[6];
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 870f1fa..fff8cdd 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -34,6 +34,7 @@
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
#include <xen/events.h>
#include <asm/xen/hypercall.h>
@@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
SET_NETDEV_DEV(dev, parent);
vif = netdev_priv(dev);
+
+ vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
+ MAX_GRANT_COPY_OPS);
+ if (vif->grant_copy_op == NULL) {
+ pr_warn("Could not allocate grant copy space for %s\n", name);
+ free_netdev(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+
vif->domid = domid;
vif->handle = handle;
vif->can_sg = 1;
@@ -487,6 +497,7 @@ void xenvif_free(struct xenvif *vif)
unregister_netdev(vif->dev);
+ vfree(vif->grant_copy_op);
free_netdev(vif->dev);
module_put(THIS_MODULE);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 27bbe58..7842555 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -608,7 +608,7 @@ void xenvif_rx_action(struct xenvif *vif)
if (!npo.copy_prod)
return;
- BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
+ BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
@@ -1209,8 +1209,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
goto out;
if (!skb_partial_csum_set(skb, off,
- offsetof(struct tcphdr, check)))
+ offsetof(struct tcphdr, check))) {
+ err = -EPROTO;
goto out;
+ }
if (recalculate_partial_csum)
tcp_hdr(skb)->check =
@@ -1227,8 +1229,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
goto out;
if (!skb_partial_csum_set(skb, off,
- offsetof(struct udphdr, check)))
+ offsetof(struct udphdr, check))) {
+ err = -EPROTO;
goto out;
+ }
if (recalculate_partial_csum)
udp_hdr(skb)->check =
@@ -1350,8 +1354,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
goto out;
if (!skb_partial_csum_set(skb, off,
- offsetof(struct tcphdr, check)))
+ offsetof(struct tcphdr, check))) {
+ err = -EPROTO;
goto out;
+ }
if (recalculate_partial_csum)
tcp_hdr(skb)->check =
@@ -1368,8 +1374,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
goto out;
if (!skb_partial_csum_set(skb, off,
- offsetof(struct udphdr, check)))
+ offsetof(struct udphdr, check))) {
+ err = -EPROTO;
goto out;
+ }
if (recalculate_partial_csum)
udp_hdr(skb)->check =
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index de6f899..c6973f1 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -20,7 +20,7 @@ config OF_SELFTEST
depends on OF_IRQ
help
This option builds in test cases for the device tree infrastructure
- that are executed one at boot time, and the results dumped to the
+ that are executed once at boot time, and the results dumped to the
console.
If unsure, say N here, but this option is safe to enable.
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 4b9317b..d3dd41c 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
(unsigned long long)cp, (unsigned long long)s,
(unsigned long long)da);
- /*
- * If the number of address cells is larger than 2 we assume the
- * mapping doesn't specify a physical address. Rather, the address
- * specifies an identifier that must match exactly.
- */
- if (na > 2 && memcmp(range, addr, na * 4) != 0)
- return OF_BAD_ADDR;
-
if (da < cp || da >= (cp + s))
return OF_BAD_ADDR;
return da - cp;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 2fa024b..758b4f8 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -922,8 +922,16 @@ void __init unflatten_device_tree(void)
*/
void __init unflatten_and_copy_device_tree(void)
{
- int size = __be32_to_cpu(initial_boot_params->totalsize);
- void *dt = early_init_dt_alloc_memory_arch(size,
+ int size;
+ void *dt;
+
+ if (!initial_boot_params) {
+ pr_warn("No valid device tree found, continuing without\n");
+ return;
+ }
+
+ size = __be32_to_cpu(initial_boot_params->totalsize);
+ dt = early_init_dt_alloc_memory_arch(size,
__alignof__(struct boot_param_header));
if (dt) {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 786b0b4..2721240 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
if (of_get_property(ipar, "interrupt-controller", NULL) !=
NULL) {
pr_debug(" -> got it !\n");
- of_node_put(old);
return 0;
}
@@ -250,8 +249,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
* Successfully parsed an interrrupt-map translation; copy new
* interrupt specifier into the out_irq structure
*/
- of_node_put(out_irq->np);
- out_irq->np = of_node_get(newpar);
+ out_irq->np = newpar;
match_array = imap - newaddrsize - newintsize;
for (i = 0; i < newintsize; i++)
@@ -268,7 +266,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
}
fail:
of_node_put(ipar);
- of_node_put(out_irq->np);
of_node_put(newpar);
return -EINVAL;
diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c
index 7578d79..2f650f6 100644
--- a/drivers/parport/parport_mfc3.c
+++ b/drivers/parport/parport_mfc3.c
@@ -300,7 +300,7 @@ static int __init parport_mfc3_init(void)
if (!request_mem_region(piabase, sizeof(struct pia), "PIA"))
continue;
- pp = (struct pia *)ZTWO_VADDR(piabase);
+ pp = ZTWO_VADDR(piabase);
pp->crb = 0;
pp->pddrb = 255; /* all data pins output */
pp->crb = PIA_DDR|32|8;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 9637615..76ee775 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2600,8 +2600,6 @@ enum parport_pc_pci_cards {
syba_2p_epp,
syba_1p_ecp,
titan_010l,
- titan_1284p1,
- titan_1284p2,
avlab_1p,
avlab_2p,
oxsemi_952,
@@ -2660,8 +2658,6 @@ static struct parport_pc_pci {
/* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } },
/* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } },
/* titan_010l */ { 1, { { 3, -1 }, } },
- /* titan_1284p1 */ { 1, { { 0, 1 }, } },
- /* titan_1284p2 */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* avlab_1p */ { 1, { { 0, 1}, } },
/* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} },
/* The Oxford Semi cards are unusual: 954 doesn't support ECP,
@@ -2677,8 +2673,8 @@ static struct parport_pc_pci {
/* netmos_9705 */ { 1, { { 0, -1 }, } },
/* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} },
/* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
- /* netmos_9805 */ { 1, { { 0, -1 }, } },
- /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
+ /* netmos_9805 */ { 1, { { 0, 1 }, } },
+ /* netmos_9815 */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* netmos_9901 */ { 1, { { 0, -1 }, } },
/* netmos_9865 */ { 1, { { 0, -1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
@@ -2722,8 +2718,6 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l },
- { 0x9710, 0x9805, 0x1000, 0x0010, 0, 0, titan_1284p1 },
- { 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 },
/* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
/* AFAVLAB_TK9902 */
{ 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p},
@@ -2827,16 +2821,12 @@ static int parport_pc_pci_probe(struct pci_dev *dev,
if (irq == IRQ_NONE) {
printk(KERN_DEBUG
"PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n",
- parport_pc_pci_tbl[i + last_sio].vendor,
- parport_pc_pci_tbl[i + last_sio].device,
- io_lo, io_hi);
+ id->vendor, id->device, io_lo, io_hi);
irq = PARPORT_IRQ_NONE;
} else {
printk(KERN_DEBUG
"PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n",
- parport_pc_pci_tbl[i + last_sio].vendor,
- parport_pc_pci_tbl[i + last_sio].device,
- io_lo, io_hi, irq);
+ id->vendor, id->device, io_lo, io_hi, irq);
}
data->ports[count] =
parport_pc_probe_port(io_lo, io_hi, irq,
@@ -2866,8 +2856,6 @@ static void parport_pc_pci_remove(struct pci_dev *dev)
struct pci_parport_data *data = pci_get_drvdata(dev);
int i;
- pci_set_drvdata(dev, NULL);
-
if (data) {
for (i = data->num - 1; i >= 0; i--)
parport_pc_unregister_port(data->ports[i]);
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 1b8bdb7..ff53314 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -596,13 +596,11 @@ static int parport_serial_pci_probe(struct pci_dev *dev,
err = pci_enable_device (dev);
if (err) {
- pci_set_drvdata (dev, NULL);
kfree (priv);
return err;
}
if (parport_register (dev, id)) {
- pci_set_drvdata (dev, NULL);
kfree (priv);
return -ENODEV;
}
@@ -611,7 +609,6 @@ static int parport_serial_pci_probe(struct pci_dev *dev,
int i;
for (i = 0; i < priv->num_par; i++)
parport_pc_unregister_port (priv->port[i]);
- pci_set_drvdata (dev, NULL);
kfree (priv);
return -ENODEV;
}
@@ -624,8 +621,6 @@ static void parport_serial_pci_remove(struct pci_dev *dev)
struct parport_serial_private *priv = pci_get_drvdata (dev);
int i;
- pci_set_drvdata(dev, NULL);
-
// Serial ports
if (priv->serial)
pciserial_remove_ports(priv->serial);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 1cf605f..e864392 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -279,7 +279,9 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status)) {
- acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status);
+ if (status != AE_NOT_FOUND)
+ acpi_handle_warn(handle,
+ "can't evaluate _ADR (%#x)\n", status);
return AE_OK;
}
@@ -643,6 +645,24 @@ static void disable_slot(struct acpiphp_slot *slot)
slot->flags &= (~SLOT_ENABLED);
}
+static bool acpiphp_no_hotplug(acpi_handle handle)
+{
+ struct acpi_device *adev = NULL;
+
+ acpi_bus_get_device(handle, &adev);
+ return adev && adev->flags.no_hotplug;
+}
+
+static bool slot_no_hotplug(struct acpiphp_slot *slot)
+{
+ struct acpiphp_func *func;
+
+ list_for_each_entry(func, &slot->funcs, sibling)
+ if (acpiphp_no_hotplug(func_to_handle(func)))
+ return true;
+
+ return false;
+}
/**
* get_slot_status - get ACPI slot status
@@ -701,7 +721,8 @@ static void trim_stale_devices(struct pci_dev *dev)
unsigned long long sta;
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL;
+ alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
+ || acpiphp_no_hotplug(handle);
}
if (!alive) {
u32 v;
@@ -741,8 +762,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
struct pci_dev *dev, *tmp;
mutex_lock(&slot->crit_sect);
- /* wake up all functions */
- if (get_slot_status(slot) == ACPI_STA_ALL) {
+ if (slot_no_hotplug(slot)) {
+ ; /* do nothing */
+ } else if (get_slot_status(slot) == ACPI_STA_ALL) {
/* remove stale devices if any */
list_for_each_entry_safe(dev, tmp, &bus->devices,
bus_list)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 577074e..f7ebdba 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -330,29 +330,32 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
static void pci_acpi_setup(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- acpi_handle handle = ACPI_HANDLE(dev);
- struct acpi_device *adev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
- if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid)
+ if (!adev)
+ return;
+
+ pci_acpi_add_pm_notifier(adev, pci_dev);
+ if (!adev->wakeup.flags.valid)
return;
device_set_wakeup_capable(dev, true);
acpi_pci_sleep_wake(pci_dev, false);
-
- pci_acpi_add_pm_notifier(adev, pci_dev);
if (adev->wakeup.flags.run_wake)
device_set_run_wake(dev, true);
}
static void pci_acpi_cleanup(struct device *dev)
{
- acpi_handle handle = ACPI_HANDLE(dev);
- struct acpi_device *adev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (!adev)
+ return;
- if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) {
+ pci_acpi_remove_pm_notifier(adev);
+ if (adev->wakeup.flags.valid) {
device_set_wakeup_capable(dev, false);
device_set_run_wake(dev, false);
- pci_acpi_remove_pm_notifier(adev);
}
}
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index ed3b522..971991b 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -303,7 +303,7 @@ static int bfin_cf_remove(struct platform_device *pdev)
static struct platform_driver bfin_cf_driver = {
.driver = {
- .name = (char *)driver_name,
+ .name = driver_name,
.owner = THIS_MODULE,
},
.probe = bfin_cf_probe,
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 1b206ea..5ea64d0 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -359,7 +359,7 @@ MODULE_DEVICE_TABLE(of, electra_cf_match);
static struct platform_driver electra_cf_driver = {
.driver = {
- .name = (char *)driver_name,
+ .name = driver_name,
.owner = THIS_MODULE,
.of_match_table = electra_cf_match,
},
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index a49e853..b901c47 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -21,6 +21,12 @@ config PHY_EXYNOS_MIPI_VIDEO
Support for MIPI CSI-2 and MIPI DSI DPHY found on Samsung S5P
and EXYNOS SoCs.
+config PHY_MVEBU_SATA
+ def_bool y
+ depends on ARCH_KIRKWOOD || ARCH_DOVE
+ depends on OF
+ select GENERIC_PHY
+
config OMAP_USB2
tristate "OMAP USB2 PHY Driver"
depends on ARCH_OMAP2PLUS
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index c447f1a..b57c253 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -6,5 +6,6 @@ obj-$(CONFIG_GENERIC_PHY) += phy-core.o
obj-$(CONFIG_BCM_KONA_USB2_PHY) += phy-bcm-kona-usb2.o
obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o
obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o
+obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
obj-$(CONFIG_OMAP_USB2) += phy-omap-usb2.o
obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 58e0e97..645c867 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -94,19 +94,31 @@ static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
int phy_pm_runtime_get(struct phy *phy)
{
+ int ret;
+
if (!pm_runtime_enabled(&phy->dev))
return -ENOTSUPP;
- return pm_runtime_get(&phy->dev);
+ ret = pm_runtime_get(&phy->dev);
+ if (ret < 0 && ret != -EINPROGRESS)
+ pm_runtime_put_noidle(&phy->dev);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get);
int phy_pm_runtime_get_sync(struct phy *phy)
{
+ int ret;
+
if (!pm_runtime_enabled(&phy->dev))
return -ENOTSUPP;
- return pm_runtime_get_sync(&phy->dev);
+ ret = pm_runtime_get_sync(&phy->dev);
+ if (ret < 0)
+ pm_runtime_put_sync(&phy->dev);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);
@@ -155,13 +167,14 @@ int phy_init(struct phy *phy)
return ret;
mutex_lock(&phy->mutex);
- if (phy->init_count++ == 0 && phy->ops->init) {
+ if (phy->init_count == 0 && phy->ops->init) {
ret = phy->ops->init(phy);
if (ret < 0) {
dev_err(&phy->dev, "phy init failed --> %d\n", ret);
goto out;
}
}
+ ++phy->init_count;
out:
mutex_unlock(&phy->mutex);
@@ -179,13 +192,14 @@ int phy_exit(struct phy *phy)
return ret;
mutex_lock(&phy->mutex);
- if (--phy->init_count == 0 && phy->ops->exit) {
+ if (phy->init_count == 1 && phy->ops->exit) {
ret = phy->ops->exit(phy);
if (ret < 0) {
dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
goto out;
}
}
+ --phy->init_count;
out:
mutex_unlock(&phy->mutex);
@@ -196,23 +210,27 @@ EXPORT_SYMBOL_GPL(phy_exit);
int phy_power_on(struct phy *phy)
{
- int ret = -ENOTSUPP;
+ int ret;
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
mutex_lock(&phy->mutex);
- if (phy->power_count++ == 0 && phy->ops->power_on) {
+ if (phy->power_count == 0 && phy->ops->power_on) {
ret = phy->ops->power_on(phy);
if (ret < 0) {
dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
goto out;
}
}
+ ++phy->power_count;
+ mutex_unlock(&phy->mutex);
+ return 0;
out:
mutex_unlock(&phy->mutex);
+ phy_pm_runtime_put_sync(phy);
return ret;
}
@@ -220,22 +238,22 @@ EXPORT_SYMBOL_GPL(phy_power_on);
int phy_power_off(struct phy *phy)
{
- int ret = -ENOTSUPP;
+ int ret;
mutex_lock(&phy->mutex);
- if (--phy->power_count == 0 && phy->ops->power_off) {
+ if (phy->power_count == 1 && phy->ops->power_off) {
ret = phy->ops->power_off(phy);
if (ret < 0) {
dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
- goto out;
+ mutex_unlock(&phy->mutex);
+ return ret;
}
}
-
-out:
+ --phy->power_count;
mutex_unlock(&phy->mutex);
phy_pm_runtime_put(phy);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(phy_power_off);
@@ -360,7 +378,7 @@ EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
struct phy *phy_get(struct device *dev, const char *string)
{
int index = 0;
- struct phy *phy = NULL;
+ struct phy *phy;
if (string == NULL) {
dev_WARN(dev, "missing string\n");
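The phy-core changes restructure the init and power counters so they are only incremented after the corresponding callback has succeeded (and only decremented after a successful exit or power_off), which keeps the counts from drifting when a callback fails. The pattern in isolation, with a fake init callback standing in for phy->ops->init:

#include <stdio.h>

struct fake_phy {
	int init_count;
	int (*init)(void);
};

static int phy_init(struct fake_phy *phy)
{
	int ret;

	if (phy->init_count == 0 && phy->init) {
		ret = phy->init();
		if (ret < 0)
			return ret;	/* count untouched on failure */
	}
	++phy->init_count;		/* bump only once init succeeded */
	return 0;
}

static int failing_init(void) { return -1; }

int main(void)
{
	struct fake_phy phy = { 0, failing_init };
	int ret = phy_init(&phy);

	printf("ret=%d count=%d\n", ret, phy.init_count);
	return 0;
}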
diff --git a/drivers/phy/phy-mvebu-sata.c b/drivers/phy/phy-mvebu-sata.c
new file mode 100644
index 0000000..d43786f
--- /dev/null
+++ b/drivers/phy/phy-mvebu-sata.c
@@ -0,0 +1,137 @@
+/*
+ * phy-mvebu-sata.c: SATA Phy driver for the Marvell mvebu SoCs.
+ *
+ * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/phy/phy.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+struct priv {
+ struct clk *clk;
+ void __iomem *base;
+};
+
+#define SATA_PHY_MODE_2 0x0330
+#define MODE_2_FORCE_PU_TX BIT(0)
+#define MODE_2_FORCE_PU_RX BIT(1)
+#define MODE_2_PU_PLL BIT(2)
+#define MODE_2_PU_IVREF BIT(3)
+#define SATA_IF_CTRL 0x0050
+#define CTRL_PHY_SHUTDOWN BIT(9)
+
+static int phy_mvebu_sata_power_on(struct phy *phy)
+{
+ struct priv *priv = phy_get_drvdata(phy);
+ u32 reg;
+
+ clk_prepare_enable(priv->clk);
+
+ /* Enable PLL and IVREF */
+ reg = readl(priv->base + SATA_PHY_MODE_2);
+ reg |= (MODE_2_FORCE_PU_TX | MODE_2_FORCE_PU_RX |
+ MODE_2_PU_PLL | MODE_2_PU_IVREF);
+ writel(reg , priv->base + SATA_PHY_MODE_2);
+
+ /* Enable PHY */
+ reg = readl(priv->base + SATA_IF_CTRL);
+ reg &= ~CTRL_PHY_SHUTDOWN;
+ writel(reg, priv->base + SATA_IF_CTRL);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int phy_mvebu_sata_power_off(struct phy *phy)
+{
+ struct priv *priv = phy_get_drvdata(phy);
+ u32 reg;
+
+ clk_prepare_enable(priv->clk);
+
+ /* Disable PLL and IVREF */
+ reg = readl(priv->base + SATA_PHY_MODE_2);
+ reg &= ~(MODE_2_FORCE_PU_TX | MODE_2_FORCE_PU_RX |
+ MODE_2_PU_PLL | MODE_2_PU_IVREF);
+ writel(reg, priv->base + SATA_PHY_MODE_2);
+
+ /* Disable PHY */
+ reg = readl(priv->base + SATA_IF_CTRL);
+ reg |= CTRL_PHY_SHUTDOWN;
+ writel(reg, priv->base + SATA_IF_CTRL);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static struct phy_ops phy_mvebu_sata_ops = {
+ .power_on = phy_mvebu_sata_power_on,
+ .power_off = phy_mvebu_sata_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int phy_mvebu_sata_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct priv *priv;
+ struct phy *phy;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(&pdev->dev, "sata");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ phy_provider = devm_of_phy_provider_register(&pdev->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ phy = devm_phy_create(&pdev->dev, &phy_mvebu_sata_ops, NULL);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, priv);
+
+ /* The boot loader may have left it on. Turn it off. */
+ phy_mvebu_sata_power_off(phy);
+
+ return 0;
+}
+
+static const struct of_device_id phy_mvebu_sata_of_match[] = {
+ { .compatible = "marvell,mvebu-sata-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, phy_mvebu_sata_of_match);
+
+static struct platform_driver phy_mvebu_sata_driver = {
+ .probe = phy_mvebu_sata_probe,
+ .driver = {
+ .name = "phy-mvebu-sata",
+ .owner = THIS_MODULE,
+ .of_match_table = phy_mvebu_sata_of_match,
+ }
+};
+module_platform_driver(phy_mvebu_sata_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_DESCRIPTION("Marvell MVEBU SATA PHY driver");
+MODULE_LICENSE("GPL v2");
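
The driver above only implements the provider side of the generic PHY framework. For orientation, a consumer such as a SATA host driver would typically obtain and power the PHY as sketched below; this is an illustrative sketch only, not part of the patch, and the "sata-phy" name and the probe skeleton are assumptions. With the phy_power_off() fix at the top of this diff, the PHY is only really switched off once the last user drops its power reference.

#include <linux/phy/phy.h>
#include <linux/platform_device.h>

/* Hypothetical consumer probe: look up the PHY from the device tree,
 * initialize it and power it on before touching the SATA controller. */
static int example_sata_probe(struct platform_device *pdev)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_get(&pdev->dev, "sata-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* increments power_count */
	if (ret) {
		phy_exit(phy);
		return ret;
	}

	/* ... controller setup; phy_power_off()/phy_exit() on teardown ... */
	return 0;
}
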
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 5e2054a..85ad58c 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -196,6 +196,7 @@ config BATTERY_MAX17040
config BATTERY_MAX17042
tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
depends on I2C
+ select REGMAP_I2C
help
MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
in handheld and portable equipment. The MAX17042 is configured
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 00e6672..557af94 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -511,6 +511,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
dev_set_drvdata(dev, psy);
psy->dev = dev;
+ rc = dev_set_name(dev, "%s", psy->name);
+ if (rc)
+ goto dev_set_name_failed;
+
INIT_WORK(&psy->changed_work, power_supply_changed_work);
rc = power_supply_check_supplies(psy);
@@ -524,10 +528,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
if (rc)
goto wakeup_init_failed;
- rc = kobject_set_name(&dev->kobj, "%s", psy->name);
- if (rc)
- goto kobject_set_name_failed;
-
rc = device_add(dev);
if (rc)
goto device_add_failed;
@@ -553,11 +553,11 @@ create_triggers_failed:
register_cooler_failed:
psy_unregister_thermal(psy);
register_thermal_failed:
-wakeup_init_failed:
device_del(dev);
-kobject_set_name_failed:
device_add_failed:
+wakeup_init_failed:
check_supplies_failed:
+dev_set_name_failed:
put_device(dev);
success:
return rc;
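
With this reordering, psy->name becomes the device name via dev_set_name() before any of the later registration steps can fail. As a rough caller-side illustration (a hedged sketch, not part of the patch; the supply name, property list and stub getter are made up), a minimal registration looks like this:

#include <linux/kernel.h>
#include <linux/power_supply.h>

static enum power_supply_property example_props[] = {
	POWER_SUPPLY_PROP_STATUS,
};

static int example_get_property(struct power_supply *psy,
				enum power_supply_property psp,
				union power_supply_propval *val)
{
	val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
	return 0;
}

static struct power_supply example_battery = {
	.name		= "example-battery",	/* used by dev_set_name() above */
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= example_props,
	.num_properties	= ARRAY_SIZE(example_props),
	.get_property	= example_get_property,
};

/* power_supply_register(parent, &example_battery) now fails early,
 * before device_add(), if the name cannot be set. */
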
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 2a786c5..3c67683 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -833,6 +833,11 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
return 0;
}
+static const struct x86_cpu_id energy_unit_quirk_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
+ {}
+};
+
static int rapl_check_unit(struct rapl_package *rp, int cpu)
{
u64 msr_val;
@@ -853,8 +858,11 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu)
* time unit: 1/time_unit_divisor Seconds
*/
value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
- rp->energy_unit_divisor = 1 << value;
-
+ /* some CPUs have a different way to calculate the energy unit */
+ if (x86_match_cpu(energy_unit_quirk_ids))
+ rp->energy_unit_divisor = 1000000 / (1 << value);
+ else
+ rp->energy_unit_divisor = 1 << value;
value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
rp->power_unit_divisor = 1 << value;
@@ -941,6 +949,7 @@ static void package_power_limit_irq_restore(int package_id)
static const struct x86_cpu_id rapl_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
{ X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
+ { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
{ X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
{ X86_VENDOR_INTEL, 6, 0x45},/* HSW */
/* TODO: Add more CPU IDs after testing */
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index f148762..a2325bc 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -34,11 +34,11 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
-#include <linux/mod_devicetable.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/dmi.h>
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <asm-generic/rtc.h>
@@ -377,6 +377,51 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
+/*
+ * Do not disable RTC alarm on shutdown - workaround for b0rked BIOSes.
+ */
+static bool alarm_disable_quirk;
+
+static int __init set_alarm_disable_quirk(const struct dmi_system_id *id)
+{
+ alarm_disable_quirk = true;
+ pr_info("rtc-cmos: BIOS has alarm-disable quirk. ");
+ pr_info("RTC alarms disabled\n");
+ return 0;
+}
+
+static const struct dmi_system_id rtc_quirks[] __initconst = {
+ /* https://bugzilla.novell.com/show_bug.cgi?id=805740 */
+ {
+ .callback = set_alarm_disable_quirk,
+ .ident = "IBM Truman",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "4852570"),
+ },
+ },
+ /* https://bugzilla.novell.com/show_bug.cgi?id=812592 */
+ {
+ .callback = set_alarm_disable_quirk,
+ .ident = "Gigabyte GA-990XA-UD3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA-990XA-UD3"),
+ },
+ },
+ /* http://permalink.gmane.org/gmane.linux.kernel/1604474 */
+ {
+ .callback = set_alarm_disable_quirk,
+ .ident = "Toshiba Satellite L300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
+ },
+ },
+ {}
+};
+
static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
@@ -385,6 +430,9 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
if (!is_valid_irq(cmos->irq))
return -EINVAL;
+ if (alarm_disable_quirk)
+ return 0;
+
spin_lock_irqsave(&rtc_lock, flags);
if (enabled)
@@ -1157,6 +1205,8 @@ static int __init cmos_init(void)
platform_driver_registered = true;
}
+ dmi_check_system(rtc_quirks);
+
if (retval == 0)
return 0;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index f302efa9..1eef0f5 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3386,7 +3386,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/*
- * safe offline allready running
+ * safe offline already running
* could only be called by normal offline so safe_offline flag
* needs to be removed to run normal offline and kill all I/O
*/
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 6fbe096..fea76ae 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -183,7 +183,6 @@ extern unsigned long sclp_console_full;
extern u8 sclp_fac84;
extern unsigned long long sclp_rzm;
extern unsigned long long sclp_rnmax;
-extern __initdata int sclp_early_read_info_sccb_valid;
/* useful inlines */
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index eaa21d5..cb3c4e0 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -455,8 +455,6 @@ static int __init sclp_detect_standby_memory(void)
if (OLDMEM_BASE) /* No standby memory in kdump mode */
return 0;
- if (!sclp_early_read_info_sccb_valid)
- return 0;
if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
return 0;
rc = -ENOMEM;
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 1465e95..82f2c38 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -35,11 +35,12 @@ struct read_info_sccb {
u8 _reserved5[4096 - 112]; /* 112-4095 */
} __packed __aligned(PAGE_SIZE);
-static __initdata struct read_info_sccb early_read_info_sccb;
-static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE);
+static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
+static unsigned int sclp_con_has_vt220 __initdata;
+static unsigned int sclp_con_has_linemode __initdata;
static unsigned long sclp_hsa_size;
+static struct sclp_ipl_info sclp_ipl_info;
-__initdata int sclp_early_read_info_sccb_valid;
u64 sclp_facilities;
u8 sclp_fac84;
unsigned long long sclp_rzm;
@@ -63,15 +64,12 @@ out:
return rc;
}
-static void __init sclp_read_info_early(void)
+static int __init sclp_read_info_early(struct read_info_sccb *sccb)
{
- int rc;
- int i;
- struct read_info_sccb *sccb;
+ int rc, i;
sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
SCLP_CMDW_READ_SCP_INFO};
- sccb = &early_read_info_sccb;
for (i = 0; i < ARRAY_SIZE(commands); i++) {
do {
memset(sccb, 0, sizeof(*sccb));
@@ -83,24 +81,19 @@ static void __init sclp_read_info_early(void)
if (rc)
break;
- if (sccb->header.response_code == 0x10) {
- sclp_early_read_info_sccb_valid = 1;
- break;
- }
+ if (sccb->header.response_code == 0x10)
+ return 0;
if (sccb->header.response_code != 0x1f0)
break;
}
+ return -EIO;
}
-static void __init sclp_facilities_detect(void)
+static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
{
- struct read_info_sccb *sccb;
-
- sclp_read_info_early();
- if (!sclp_early_read_info_sccb_valid)
+ if (sclp_read_info_early(sccb))
return;
- sccb = &early_read_info_sccb;
sclp_facilities = sccb->facilities;
sclp_fac84 = sccb->fac84;
if (sccb->fac85 & 0x02)
@@ -108,30 +101,22 @@ static void __init sclp_facilities_detect(void)
sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp_rzm <<= 20;
+
+ /* Save IPL information */
+ sclp_ipl_info.is_valid = 1;
+ if (sccb->flags & 0x2)
+ sclp_ipl_info.has_dump = 1;
+ memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
}
bool __init sclp_has_linemode(void)
{
- struct init_sccb *sccb = (void *) &sccb_early;
-
- if (sccb->header.response_code != 0x20)
- return 0;
- if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
- return 0;
- if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
- return 0;
- return 1;
+ return !!sclp_con_has_linemode;
}
bool __init sclp_has_vt220(void)
{
- struct init_sccb *sccb = (void *) &sccb_early;
-
- if (sccb->header.response_code != 0x20)
- return 0;
- if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
- return 1;
- return 0;
+ return !!sclp_con_has_vt220;
}
unsigned long long sclp_get_rnmax(void)
@@ -146,19 +131,12 @@ unsigned long long sclp_get_rzm(void)
/*
* This function will be called after sclp_facilities_detect(), which gets
- * called from early.c code. Therefore the sccb should have valid contents.
+ * called from early.c code. The sclp_facilities_detect() function retrieves
+ * and saves the IPL information.
*/
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
- struct read_info_sccb *sccb;
-
- if (!sclp_early_read_info_sccb_valid)
- return;
- sccb = &early_read_info_sccb;
- info->is_valid = 1;
- if (sccb->flags & 0x2)
- info->has_dump = 1;
- memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
+ *info = sclp_ipl_info;
}
static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
@@ -189,11 +167,10 @@ static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
sccb->evbuf.dbs = 1;
}
-static int __init sclp_set_event_mask(unsigned long receive_mask,
+static int __init sclp_set_event_mask(struct init_sccb *sccb,
+ unsigned long receive_mask,
unsigned long send_mask)
{
- struct init_sccb *sccb = (void *) &sccb_early;
-
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
sccb->mask_length = sizeof(sccb_mask_t);
@@ -202,10 +179,8 @@ static int __init sclp_set_event_mask(unsigned long receive_mask,
return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
}
-static long __init sclp_hsa_size_init(void)
+static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
{
- struct sdias_sccb *sccb = (void *) &sccb_early;
-
sccb_init_eq_size(sccb);
if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
return -EIO;
@@ -214,10 +189,8 @@ static long __init sclp_hsa_size_init(void)
return 0;
}
-static long __init sclp_hsa_copy_wait(void)
+static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
{
- struct sccb_header *sccb = (void *) &sccb_early;
-
memset(sccb, 0, PAGE_SIZE);
sccb->length = PAGE_SIZE;
if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
@@ -230,34 +203,62 @@ unsigned long sclp_get_hsa_size(void)
return sclp_hsa_size;
}
-static void __init sclp_hsa_size_detect(void)
+static void __init sclp_hsa_size_detect(void *sccb)
{
long size;
/* First try synchronous interface (LPAR) */
- if (sclp_set_event_mask(0, 0x40000010))
+ if (sclp_set_event_mask(sccb, 0, 0x40000010))
return;
- size = sclp_hsa_size_init();
+ size = sclp_hsa_size_init(sccb);
if (size < 0)
return;
if (size != 0)
goto out;
/* Then try asynchronous interface (z/VM) */
- if (sclp_set_event_mask(0x00000010, 0x40000010))
+ if (sclp_set_event_mask(sccb, 0x00000010, 0x40000010))
return;
- size = sclp_hsa_size_init();
+ size = sclp_hsa_size_init(sccb);
if (size < 0)
return;
- size = sclp_hsa_copy_wait();
+ size = sclp_hsa_copy_wait(sccb);
if (size < 0)
return;
out:
sclp_hsa_size = size;
}
+static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb)
+{
+ if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
+ return 0;
+ if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
+ return 0;
+ return 1;
+}
+
+static void __init sclp_console_detect(struct init_sccb *sccb)
+{
+ if (sccb->header.response_code != 0x20)
+ return;
+
+ if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
+ sclp_con_has_vt220 = 1;
+
+ if (sclp_con_check_linemode(sccb))
+ sclp_con_has_linemode = 1;
+}
+
void __init sclp_early_detect(void)
{
- sclp_facilities_detect();
- sclp_hsa_size_detect();
- sclp_set_event_mask(0, 0);
+ void *sccb = &sccb_early;
+
+ sclp_facilities_detect(sccb);
+ sclp_hsa_size_detect(sccb);
+
+ /* Turn off SCLP event notifications. Also save remote masks in the
+ * sccb. These are sufficient to detect sclp console capabilities.
+ */
+ sclp_set_event_mask(sccb, 0, 0);
+ sclp_console_detect(sccb);
}
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 3f4ca4e..e91b89d 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -125,10 +125,7 @@ static void tty3270_resize_work(struct work_struct *work);
*/
static void tty3270_set_timer(struct tty3270 *tp, int expires)
{
- if (expires == 0)
- del_timer(&tp->timer);
- else
- mod_timer(&tp->timer, jiffies + expires);
+ mod_timer(&tp->timer, jiffies + expires);
}
/*
@@ -744,7 +741,6 @@ tty3270_free_view(struct tty3270 *tp)
{
int pages;
- del_timer_sync(&tp->timer);
kbd_free(tp->kbd);
raw3270_request_free(tp->kreset);
raw3270_request_free(tp->read);
@@ -877,6 +873,7 @@ tty3270_free(struct raw3270_view *view)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
+ del_timer_sync(&tp->timer);
tty3270_free_screen(tp->screen, tp->view.rows);
tty3270_free_view(tp);
}
@@ -942,7 +939,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
return rc;
}
- tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows);
+ tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
if (IS_ERR(tp->screen)) {
rc = PTR_ERR(tp->screen);
raw3270_put_view(&tp->view);
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index a9fe3de..b3f791b 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -260,16 +260,16 @@ static int blacklist_parse_proc_parameters(char *buf)
parm = strsep(&buf, " ");
- if (strcmp("free", parm) == 0)
+ if (strcmp("free", parm) == 0) {
rc = blacklist_parse_parameters(buf, free, 0);
- else if (strcmp("add", parm) == 0)
+ css_schedule_eval_all_unreg(0);
+ } else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0)
return ccw_purge_blacklisted();
else
return -EINVAL;
- css_schedule_reprobe();
return rc;
}
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 959135a..fd3367a1 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -128,14 +128,14 @@ static ssize_t ccwgroup_online_store(struct device *dev,
const char *buf, size_t count)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
unsigned long value;
int ret;
- if (!dev->driver)
- return -EINVAL;
- if (!try_module_get(gdrv->driver.owner))
- return -EINVAL;
+ device_lock(dev);
+ if (!dev->driver) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = kstrtoul(buf, 0, &value);
if (ret)
@@ -148,7 +148,7 @@ static ssize_t ccwgroup_online_store(struct device *dev,
else
ret = -EINVAL;
out:
- module_put(gdrv->driver.owner);
+ device_unlock(dev);
return (ret == 0) ? count : ret;
}
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 13299f9..f6b9188 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -55,6 +55,7 @@ int chsc_error_from_response(int response)
case 0x0004:
return -EOPNOTSUPP;
case 0x000b:
+ case 0x0107: /* "Channel busy" for the op 0x003d */
return -EBUSY;
case 0x0100:
case 0x0102:
@@ -237,26 +238,6 @@ void chsc_chp_offline(struct chp_id chpid)
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
-static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
-{
- struct schib schib;
- /*
- * We don't know the device yet, but since a path
- * may be available now to the device we'll have
- * to do recognition again.
- * Since we don't have any idea about which chpid
- * that beast may be on we'll have to do a stsch
- * on all devices, grr...
- */
- if (stsch_err(schid, &schib))
- /* We're through */
- return -ENXIO;
-
- /* Put it on the slow path. */
- css_schedule_eval(schid);
- return 0;
-}
-
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
spin_lock_irq(sch->lock);
@@ -287,8 +268,8 @@ static void s390_process_res_acc(struct chp_link *link)
* The more information we have (info), the less scanning
* will we have to do.
*/
- for_each_subchannel_staged(__s390_process_res_acc,
- s390_process_res_acc_new_sch, link);
+ for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
+ css_schedule_reprobe();
}
static int
@@ -663,19 +644,6 @@ static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
return 0;
}
-static int
-__s390_vary_chpid_on(struct subchannel_id schid, void *data)
-{
- struct schib schib;
-
- if (stsch_err(schid, &schib))
- /* We're through */
- return -ENXIO;
- /* Put it on the slow path. */
- css_schedule_eval(schid);
- return 0;
-}
-
/**
* chsc_chp_vary - propagate channel-path vary operation to subchannels
* @chpid: channl-path ID
@@ -694,7 +662,8 @@ int chsc_chp_vary(struct chp_id chpid, int on)
/* Try to update the channel path description. */
chp_update_desc(chp);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
- __s390_vary_chpid_on, &chpid);
+ NULL, &chpid);
+ css_schedule_reprobe();
} else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
NULL, &chpid);
@@ -1234,3 +1203,35 @@ out:
return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);
+
+/**
+ * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
+ * @schid: id of the subchannel on which PNSO is performed
+ * @brinfo_area: request and response block for the operation
+ * @resume_token: resume token for multiblock response
+ * @cnc: Boolean change-notification control
+ *
+ * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ *
+ * Returns 0 on success.
+ */
+int chsc_pnso_brinfo(struct subchannel_id schid,
+ struct chsc_pnso_area *brinfo_area,
+ struct chsc_brinfo_resume_token resume_token,
+ int cnc)
+{
+ memset(brinfo_area, 0, sizeof(*brinfo_area));
+ brinfo_area->request.length = 0x0030;
+ brinfo_area->request.code = 0x003d; /* network-subchannel operation */
+ brinfo_area->m = schid.m;
+ brinfo_area->ssid = schid.ssid;
+ brinfo_area->sch = schid.sch_no;
+ brinfo_area->cssid = schid.cssid;
+ brinfo_area->oc = 0; /* Store-network-bridging-information list */
+ brinfo_area->resume_token = resume_token;
+ brinfo_area->n = (cnc != 0);
+ if (chsc(brinfo_area))
+ return -EIO;
+ return chsc_error_from_response(brinfo_area->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
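
The expected calling convention is a zeroed page and a zeroed resume token on the first call, then resuming with the token returned in the response until it reads as zero; the qdio_pnso_brinfo() helper added further down in this series follows exactly this pattern. A condensed, hypothetical sketch:

/* Illustrative only; mirrors the usage in qdio_pnso_brinfo() below. */
static int example_fetch_brinfo(struct subchannel_id schid)
{
	struct chsc_pnso_area *area;
	struct chsc_brinfo_resume_token token = { 0, 0 };
	int rc;

	area = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
	if (!area)
		return -ENOMEM;
	do {
		rc = chsc_pnso_brinfo(schid, area, token, 0);
		if (rc)
			break;
		/* ... walk area->entries here ... */
		token = area->naihdr.resume_token;
	} while (token.t1 || token.t2);	/* non-zero token: list incomplete */
	free_page((unsigned long)area);
	return rc;
}
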
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 23d072e..7e53a9c 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -61,7 +61,9 @@ struct css_chsc_char {
u32 : 20;
u32 scssc : 1; /* bit 107 */
u32 scsscf : 1; /* bit 108 */
- u32 : 19;
+ u32:7;
+ u32 pnso:1; /* bit 116 */
+ u32:11;
}__attribute__((packed));
extern struct css_chsc_char css_chsc_characteristics;
@@ -188,6 +190,53 @@ struct chsc_scm_info {
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
+struct chsc_brinfo_resume_token {
+ u64 t1;
+ u64 t2;
+} __packed;
+
+struct chsc_brinfo_naihdr {
+ struct chsc_brinfo_resume_token resume_token;
+ u32:32;
+ u32 instance;
+ u32:24;
+ u8 naids;
+ u32 reserved[3];
+} __packed;
+
+struct chsc_pnso_area {
+ struct chsc_header request;
+ u8:2;
+ u8 m:1;
+ u8:5;
+ u8:2;
+ u8 ssid:2;
+ u8 fmt:4;
+ u16 sch;
+ u8:8;
+ u8 cssid;
+ u16:16;
+ u8 oc;
+ u32:24;
+ struct chsc_brinfo_resume_token resume_token;
+ u32 n:1;
+ u32:31;
+ u32 reserved[3];
+ struct chsc_header response;
+ u32:32;
+ struct chsc_brinfo_naihdr naihdr;
+ union {
+ struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0];
+ struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
+ struct qdio_brinfo_entry_l2 l2[0];
+ } entries;
+} __packed;
+
+int chsc_pnso_brinfo(struct subchannel_id schid,
+ struct chsc_pnso_area *brinfo_area,
+ struct chsc_brinfo_resume_token resume_token,
+ int cnc);
+
#ifdef CONFIG_SCM_BUS
int scm_update_information(void);
int scm_process_availability_information(void);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 8c2cb87..0268e5f 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -69,7 +69,8 @@ static int call_fn_known_sch(struct device *dev, void *data)
struct cb_data *cb = data;
int rc = 0;
- idset_sch_del(cb->set, sch->schid);
+ if (cb->set)
+ idset_sch_del(cb->set, sch->schid);
if (cb->fn_known_sch)
rc = cb->fn_known_sch(sch, cb->data);
return rc;
@@ -115,6 +116,13 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
cb.fn_known_sch = fn_known;
cb.fn_unknown_sch = fn_unknown;
+ if (fn_known && !fn_unknown) {
+ /* Skip idset allocation in case of known-only loop. */
+ cb.set = NULL;
+ return bus_for_each_dev(&css_bus_type, NULL, &cb,
+ call_fn_known_sch);
+ }
+
cb.set = idset_sch_new();
if (!cb.set)
/* fall back to brute force scanning in case of oom */
@@ -553,6 +561,9 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
default:
rc = 0;
}
+ /* Allow scheduling here since the containing loop might
+ * take a while. */
+ cond_resched();
}
return rc;
}
@@ -572,7 +583,7 @@ static void css_slow_path_func(struct work_struct *unused)
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
-static DECLARE_WORK(slow_path_work, css_slow_path_func);
+static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;
void css_schedule_eval(struct subchannel_id schid)
@@ -582,7 +593,7 @@ void css_schedule_eval(struct subchannel_id schid)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_sch_add(slow_subchannel_set, schid);
atomic_set(&css_eval_scheduled, 1);
- queue_work(cio_work_q, &slow_path_work);
+ queue_delayed_work(cio_work_q, &slow_path_work, 0);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
@@ -593,7 +604,7 @@ void css_schedule_eval_all(void)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_fill(slow_subchannel_set);
atomic_set(&css_eval_scheduled, 1);
- queue_work(cio_work_q, &slow_path_work);
+ queue_delayed_work(cio_work_q, &slow_path_work, 0);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
@@ -606,7 +617,7 @@ static int __unset_registered(struct device *dev, void *data)
return 0;
}
-static void css_schedule_eval_all_unreg(void)
+void css_schedule_eval_all_unreg(unsigned long delay)
{
unsigned long flags;
struct idset *unreg_set;
@@ -624,7 +635,7 @@ static void css_schedule_eval_all_unreg(void)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_add_set(slow_subchannel_set, unreg_set);
atomic_set(&css_eval_scheduled, 1);
- queue_work(cio_work_q, &slow_path_work);
+ queue_delayed_work(cio_work_q, &slow_path_work, delay);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
idset_free(unreg_set);
}
@@ -637,7 +648,8 @@ void css_wait_for_slow_path(void)
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
- css_schedule_eval_all_unreg();
+ /* Schedule with a delay to allow merging of subsequent calls. */
+ css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 2935132..2c9107e 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -133,6 +133,7 @@ extern struct channel_subsystem *channel_subsystems[];
/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);
+void css_schedule_eval_all_unreg(unsigned long delay);
int css_complete_work(void);
int sch_is_pseudo_sch(struct subchannel *);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e4a7ab2..e9d7835 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -333,9 +333,9 @@ int ccw_device_set_offline(struct ccw_device *cdev)
if (ret != 0)
return ret;
}
- cdev->online = 0;
spin_lock_irq(cdev->ccwlock);
sch = to_subchannel(cdev->dev.parent);
+ cdev->online = 0;
/* Wait until a final state or DISCONNECTED is reached */
while (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
@@ -446,7 +446,10 @@ int ccw_device_set_online(struct ccw_device *cdev)
ret = cdev->drv->set_online(cdev);
if (ret)
goto rollback;
+
+ spin_lock_irq(cdev->ccwlock);
cdev->online = 1;
+ spin_unlock_irq(cdev->ccwlock);
return 0;
rollback:
@@ -546,17 +549,12 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
if (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
ret = -EAGAIN;
- goto out_onoff;
+ goto out;
}
/* Prevent conflict between pending work and on-/offline processing.*/
if (work_pending(&cdev->private->todo_work)) {
ret = -EAGAIN;
- goto out_onoff;
- }
-
- if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) {
- ret = -EINVAL;
- goto out_onoff;
+ goto out;
}
if (!strncmp(buf, "force\n", count)) {
force = 1;
@@ -568,6 +566,8 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
}
if (ret)
goto out;
+
+ device_lock(dev);
switch (i) {
case 0:
ret = online_store_handle_offline(cdev);
@@ -578,10 +578,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
default:
ret = -EINVAL;
}
+ device_unlock(dev);
+
out:
- if (cdev->drv)
- module_put(cdev->drv->driver.owner);
-out_onoff:
atomic_set(&cdev->private->onoff, 0);
return (ret < 0) ? ret : count;
}
@@ -1745,8 +1744,7 @@ ccw_device_probe (struct device *dev)
return 0;
}
-static int
-ccw_device_remove (struct device *dev)
+static int ccw_device_remove(struct device *dev)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_driver *cdrv = cdev->drv;
@@ -1754,9 +1752,10 @@ ccw_device_remove (struct device *dev)
if (cdrv->remove)
cdrv->remove(cdev);
+
+ spin_lock_irq(cdev->ccwlock);
if (cdev->online) {
cdev->online = 0;
- spin_lock_irq(cdev->ccwlock);
ret = ccw_device_offline(cdev);
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
@@ -1769,10 +1768,12 @@ ccw_device_remove (struct device *dev)
cdev->private->dev_id.devno);
/* Give up reference obtained in ccw_device_set_online(). */
put_device(&cdev->dev);
+ spin_lock_irq(cdev->ccwlock);
}
ccw_device_set_timeout(cdev, 0);
cdev->drv = NULL;
cdev->private->int_class = IRQIO_CIO;
+ spin_unlock_irq(cdev->ccwlock);
return 0;
}
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 3e602e8..c883a08 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1752,6 +1752,97 @@ int qdio_stop_irq(struct ccw_device *cdev, int nr)
}
EXPORT_SYMBOL(qdio_stop_irq);
+/**
+ * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
+ * @schid: Subchannel ID.
+ * @cnc: Boolean Change-Notification Control
+ * @response: Response code will be stored at this address
+ * @cb: Callback function executed for each element of the address
+ * list; it is passed @priv, the type of the address entry and the
+ * entry itself
+ * @priv: Pointer passed unchanged from the caller to the callback function
+ *
+ * Performs "Store-network-bridging-information list" operation and calls
+ * the callback function for every entry in the list. If "change-
+ * notification-control" is set, further changes in the address list
+ * will be reported via the IPA command.
+ */
+int qdio_pnso_brinfo(struct subchannel_id schid,
+ int cnc, u16 *response,
+ void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
+ void *entry),
+ void *priv)
+{
+ struct chsc_pnso_area *rr;
+ int rc;
+ u32 prev_instance = 0;
+ int isfirstblock = 1;
+ int i, size, elems;
+
+ rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
+ if (rr == NULL)
+ return -ENOMEM;
+ do {
+ /* on the first iteration, naihdr.resume_token will be zero */
+ rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
+ if (rc != 0 && rc != -EBUSY)
+ goto out;
+ if (rr->response.code != 1) {
+ rc = -EIO;
+ continue;
+ } else
+ rc = 0;
+
+ if (cb == NULL)
+ continue;
+
+ size = rr->naihdr.naids;
+ elems = (rr->response.length -
+ sizeof(struct chsc_header) -
+ sizeof(struct chsc_brinfo_naihdr)) /
+ size;
+
+ if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
+ /* Inform the caller that they need to scrap */
+ /* the data that was already reported via cb */
+ rc = -EAGAIN;
+ break;
+ }
+ isfirstblock = 0;
+ prev_instance = rr->naihdr.instance;
+ for (i = 0; i < elems; i++)
+ switch (size) {
+ case sizeof(struct qdio_brinfo_entry_l3_ipv6):
+ (*cb)(priv, l3_ipv6_addr,
+ &rr->entries.l3_ipv6[i]);
+ break;
+ case sizeof(struct qdio_brinfo_entry_l3_ipv4):
+ (*cb)(priv, l3_ipv4_addr,
+ &rr->entries.l3_ipv4[i]);
+ break;
+ case sizeof(struct qdio_brinfo_entry_l2):
+ (*cb)(priv, l2_addr_lnid,
+ &rr->entries.l2[i]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ rc = -EIO;
+ goto out;
+ }
+ } while (rr->response.code == 0x0107 || /* channel busy */
+ (rr->response.code == 1 && /* list stored */
+ /* resume token is non-zero => list incomplete */
+ (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
+ (*response) = rr->response.code;
+
+out:
+ free_page((unsigned long)rr);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
+
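
A consumer of qdio_pnso_brinfo() supplies a callback that is invoked once per address entry; the entry types and structures are the ones declared in chsc.h above, while the callback itself and the call site below are hypothetical:

/* Illustrative callback, invoked once per reported bridgeport address. */
static void example_brinfo_cb(void *priv, enum qdio_brinfo_entry_type type,
			      void *entry)
{
	switch (type) {
	case l2_addr_lnid:
		/* entry points to a struct qdio_brinfo_entry_l2 */
		break;
	case l3_ipv4_addr:
		/* entry points to a struct qdio_brinfo_entry_l3_ipv4 */
		break;
	case l3_ipv6_addr:
		/* entry points to a struct qdio_brinfo_entry_l3_ipv6 */
		break;
	}
}

/* Possible call site, with cnc=1 requesting change notifications:
 *
 *	u16 response;
 *	rc = qdio_pnso_brinfo(schid, 1, &response, example_brinfo_cb, NULL);
 */
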
static int __init init_QDIO(void)
{
int rc;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 02300dc..ab3baa7 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -591,7 +591,13 @@ static int ap_init_queue(ap_qid_t qid)
if (rc != -ENODEV && rc != -EBUSY)
break;
if (i < AP_MAX_RESET - 1) {
- udelay(5);
+ /* Time we are waiting until we give up (0.7sec * 90).
+ * Since the actual request (in progress) will not be
+ * interrupted immediately by the reset command, we
+ * have to be patient. In the worst case we have to
+ * wait 60sec + the reset time (some msec).
+ */
+ schedule_timeout(AP_RESET_TIMEOUT);
status = ap_test_queue(qid, &dummy, &dummy);
}
}
@@ -992,6 +998,28 @@ static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
+static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
+{
+ if (ap_configuration != NULL) {
+ if (test_facility(76)) { /* format 1 - 256 bit domain field */
+ return snprintf(buf, PAGE_SIZE,
+ "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_configuration->adm[0], ap_configuration->adm[1],
+ ap_configuration->adm[2], ap_configuration->adm[3],
+ ap_configuration->adm[4], ap_configuration->adm[5],
+ ap_configuration->adm[6], ap_configuration->adm[7]);
+ } else { /* format 0 - 16 bit domain field */
+ return snprintf(buf, PAGE_SIZE, "%08x%08x\n",
+ ap_configuration->adm[0], ap_configuration->adm[1]);
+ }
+ } else { /* QCI not supported */
+ return snprintf(buf, PAGE_SIZE, "not supported\n");
+ }
+}
+
+static BUS_ATTR(ap_control_domain_mask, 0444,
+ ap_control_domain_mask_show, NULL);
+
static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
@@ -1077,6 +1105,7 @@ static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
static struct bus_attribute *const ap_bus_attrs[] = {
&bus_attr_ap_domain,
+ &bus_attr_ap_control_domain_mask,
&bus_attr_config_time,
&bus_attr_poll_thread,
&bus_attr_ap_interrupts,
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 685f6cc0..6405ae2 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -33,7 +33,7 @@
#define AP_DEVICES 64 /* Number of AP devices. */
#define AP_DOMAINS 16 /* Number of AP domains. */
#define AP_MAX_RESET 90 /* Maximum number of resets. */
-#define AP_RESET_TIMEOUT (HZ/2) /* Time in ticks for reset timeouts. */
+#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */
#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */
@@ -125,6 +125,8 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_FUNC_CRT4K 2
#define AP_FUNC_COPRO 3
#define AP_FUNC_ACCEL 4
+#define AP_FUNC_EP11 5
+#define AP_FUNC_APXA 6
/*
* AP reset flag states
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 31cfaa5..4b824b1 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -44,6 +44,8 @@
#include "zcrypt_debug.h"
#include "zcrypt_api.h"
+#include "zcrypt_msgtype6.h"
+
/*
* Module description.
*/
@@ -554,9 +556,9 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
spin_lock_bh(&zcrypt_device_lock);
list_for_each_entry(zdev, &zcrypt_device_list, list) {
if (!zdev->online || !zdev->ops->send_cprb ||
- (xcRB->user_defined != AUTOSELECT &&
- AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)
- )
+ (zdev->ops->variant == MSGTYPE06_VARIANT_EP11) ||
+ (xcRB->user_defined != AUTOSELECT &&
+ AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined))
continue;
zcrypt_device_get(zdev);
get_device(&zdev->ap_dev->device);
@@ -581,6 +583,90 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
return -ENODEV;
}
+struct ep11_target_dev_list {
+ unsigned short targets_num;
+ struct ep11_target_dev *targets;
+};
+
+static bool is_desired_ep11dev(unsigned int dev_qid,
+ struct ep11_target_dev_list dev_list)
+{
+ int n;
+
+ for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) {
+ if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) &&
+ (AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
+{
+ struct zcrypt_device *zdev;
+ bool autoselect = false;
+ int rc;
+ struct ep11_target_dev_list ep11_dev_list = {
+ .targets_num = 0x00,
+ .targets = NULL,
+ };
+
+ ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num;
+
+ /* empty list indicates autoselect (all available targets) */
+ if (ep11_dev_list.targets_num == 0)
+ autoselect = true;
+ else {
+ ep11_dev_list.targets = kcalloc((unsigned short)
+ xcrb->targets_num,
+ sizeof(struct ep11_target_dev),
+ GFP_KERNEL);
+ if (!ep11_dev_list.targets)
+ return -ENOMEM;
+
+ if (copy_from_user(ep11_dev_list.targets,
+ (struct ep11_target_dev *)xcrb->targets,
+ xcrb->targets_num *
+ sizeof(struct ep11_target_dev)))
+ return -EFAULT;
+ }
+
+ spin_lock_bh(&zcrypt_device_lock);
+ list_for_each_entry(zdev, &zcrypt_device_list, list) {
+ /* check if device is eligible */
+ if (!zdev->online ||
+ zdev->ops->variant != MSGTYPE06_VARIANT_EP11)
+ continue;
+
+ /* check if device is selected as valid target */
+ if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) &&
+ !autoselect)
+ continue;
+
+ zcrypt_device_get(zdev);
+ get_device(&zdev->ap_dev->device);
+ zdev->request_count++;
+ __zcrypt_decrease_preference(zdev);
+ if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
+ spin_unlock_bh(&zcrypt_device_lock);
+ rc = zdev->ops->send_ep11_cprb(zdev, xcrb);
+ spin_lock_bh(&zcrypt_device_lock);
+ module_put(zdev->ap_dev->drv->driver.owner);
+ } else {
+ rc = -EAGAIN;
+ }
+ zdev->request_count--;
+ __zcrypt_increase_preference(zdev);
+ put_device(&zdev->ap_dev->device);
+ zcrypt_device_put(zdev);
+ spin_unlock_bh(&zcrypt_device_lock);
+ return rc;
+ }
+ spin_unlock_bh(&zcrypt_device_lock);
+ return -ENODEV;
+}
+
static long zcrypt_rng(char *buffer)
{
struct zcrypt_device *zdev;
@@ -784,6 +870,23 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
return rc;
}
+ case ZSENDEP11CPRB: {
+ struct ep11_urb __user *uxcrb = (void __user *)arg;
+ struct ep11_urb xcrb;
+ if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
+ return -EFAULT;
+ do {
+ rc = zcrypt_send_ep11_cprb(&xcrb);
+ } while (rc == -EAGAIN);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = zcrypt_send_ep11_cprb(&xcrb);
+ } while (rc == -EAGAIN);
+ if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
+ return -EFAULT;
+ return rc;
+ }
case Z90STAT_STATUS_MASK: {
char status[AP_DEVICES];
zcrypt_status_mask(status);
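
From user space the new EP11 path is reached through the ZSENDEP11CPRB ioctl with a struct ep11_urb describing the request and response buffers. The sketch below is a hedged illustration only: it uses just the fields visible in this patch, and the /dev/z90crypt node name and the 64-bit pointer casts are assumptions.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/zcrypt.h>		/* ZSENDEP11CPRB, struct ep11_urb */

int example_send_ep11(const void *req, unsigned long req_len,
		      void *resp, unsigned long resp_len)
{
	struct ep11_urb urb;
	int fd, rc;

	memset(&urb, 0, sizeof(urb));
	urb.targets_num = 0;			/* empty list: autoselect a CEX4P */
	urb.targets = 0;
	urb.req = (unsigned long)req;		/* EP11 request CPRB */
	urb.req_len = req_len;
	urb.resp = (unsigned long)resp;		/* buffer for the reply CPRB */
	urb.resp_len = resp_len;

	fd = open("/dev/z90crypt", O_RDWR);	/* assumed zcrypt device node */
	if (fd < 0)
		return -1;
	rc = ioctl(fd, ZSENDEP11CPRB, &urb);
	close(fd);
	return rc;
}
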
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 8963291..b3d496b 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -74,6 +74,7 @@ struct ica_z90_status {
#define ZCRYPT_CEX2A 6
#define ZCRYPT_CEX3C 7
#define ZCRYPT_CEX3A 8
+#define ZCRYPT_CEX4 10
/**
* Large random numbers are pulled in 4096 byte chunks from the crypto cards
@@ -89,6 +90,7 @@ struct zcrypt_ops {
long (*rsa_modexpo_crt)(struct zcrypt_device *,
struct ica_rsa_modexpo_crt *);
long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
+ long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *);
long (*rng)(struct zcrypt_device *, char *);
struct list_head list; /* zcrypt ops list. */
struct module *owner;
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index ce12263..569f8b1 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -30,7 +30,12 @@
#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
-#define CEX4_CLEANUP_TIME (15*HZ)
+/* Waiting time for requests to be processed.
+ * Currently there are some types of request which are not deterministic.
+ * But the maximum time limit managed by the stomper code is set to 60sec.
+ * Hence we have to wait at least that time period.
+ */
+#define CEX4_CLEANUP_TIME (61*HZ)
static struct ap_device_id zcrypt_cex4_ids[] = {
{ AP_DEVICE(AP_DEVICE_TYPE_CEX4) },
@@ -101,6 +106,19 @@ static int zcrypt_cex4_probe(struct ap_device *ap_dev)
zdev->speed_rating = CEX4C_SPEED_RATING;
zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_DEFAULT);
+ } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) {
+ zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
+ if (!zdev)
+ return -ENOMEM;
+ zdev->type_string = "CEX4P";
+ zdev->user_space_type = ZCRYPT_CEX4;
+ zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
+ zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
+ zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
+ zdev->short_crt = 0;
+ zdev->speed_rating = CEX4C_SPEED_RATING;
+ zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_EP11);
}
break;
}
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 0079b66..7b23f43 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -106,15 +106,15 @@ static inline int convert_error(struct zcrypt_device *zdev,
// REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
/*
* To sent a message of the wrong type is a bug in the
- * device driver. Warn about it, disable the device
+ * device driver. Send error msg, disable the device
* and then repeat the request.
*/
- WARN_ON(1);
atomic_set(&zcrypt_rescan_req, 1);
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- zdev->ap_dev->qid,
- zdev->online, ehdr->reply_code);
+ zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
return -EAGAIN;
case REP82_ERROR_TRANSPORT_FAIL:
case REP82_ERROR_MACHINE_FAILURE:
@@ -122,15 +122,17 @@ static inline int convert_error(struct zcrypt_device *zdev,
/* If a card fails disable it and repeat the request. */
atomic_set(&zcrypt_rescan_req, 1);
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- zdev->ap_dev->qid,
- zdev->online, ehdr->reply_code);
+ zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
return -EAGAIN;
default:
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- zdev->ap_dev->qid,
- zdev->online, ehdr->reply_code);
+ zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
return -EAGAIN; /* repeat the request on a different device. */
}
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 7c522f3..334e282 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -25,6 +25,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -332,6 +335,11 @@ static int convert_type80(struct zcrypt_device *zdev,
if (t80h->len < sizeof(*t80h) + outputdatalength) {
/* The result is too short, the CEX2A card may not do that.. */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
+ zdev->ap_dev->qid, zdev->online, t80h->code);
+
return -EAGAIN; /* repeat the request on a different device. */
}
if (zdev->user_space_type == ZCRYPT_CEX2A)
@@ -359,6 +367,10 @@ static int convert_response(struct zcrypt_device *zdev,
outputdata, outputdatalength);
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
+ zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 7d97fa5..dc542e0 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -25,6 +25,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -50,6 +53,7 @@ struct response_type {
};
#define PCIXCC_RESPONSE_TYPE_ICA 0
#define PCIXCC_RESPONSE_TYPE_XCRB 1
+#define PCIXCC_RESPONSE_TYPE_EP11 2
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
@@ -358,6 +362,91 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
return 0;
}
+static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ep11_urb *xcRB)
+{
+ unsigned int lfmt;
+
+ static struct type6_hdr static_type6_ep11_hdr = {
+ .type = 0x06,
+ .rqid = {0x00, 0x01},
+ .function_code = {0x00, 0x00},
+ .agent_id[0] = 0x58, /* {'X'} */
+ .agent_id[1] = 0x43, /* {'C'} */
+ .offset1 = 0x00000058,
+ };
+
+ struct {
+ struct type6_hdr hdr;
+ struct ep11_cprb cprbx;
+ unsigned char pld_tag; /* fixed value 0x30 */
+ unsigned char pld_lenfmt; /* payload length format */
+ } __packed * msg = ap_msg->message;
+
+ struct pld_hdr {
+ unsigned char func_tag; /* fixed value 0x4 */
+ unsigned char func_len; /* fixed value 0x4 */
+ unsigned int func_val; /* function ID */
+ unsigned char dom_tag; /* fixed value 0x4 */
+ unsigned char dom_len; /* fixed value 0x4 */
+ unsigned int dom_val; /* domain id */
+ } __packed * payload_hdr;
+
+ /* length checks */
+ ap_msg->length = sizeof(struct type6_hdr) + xcRB->req_len;
+ if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE -
+ (sizeof(struct type6_hdr)))
+ return -EINVAL;
+
+ if (CEIL4(xcRB->resp_len) > MSGTYPE06_MAX_MSG_SIZE -
+ (sizeof(struct type86_fmt2_msg)))
+ return -EINVAL;
+
+ /* prepare type6 header */
+ msg->hdr = static_type6_ep11_hdr;
+ msg->hdr.ToCardLen1 = xcRB->req_len;
+ msg->hdr.FromCardLen1 = xcRB->resp_len;
+
+ /* Import CPRB data from the ioctl input parameter */
+ if (copy_from_user(&(msg->cprbx.cprb_len),
+ (char *)xcRB->req, xcRB->req_len)) {
+ return -EFAULT;
+ }
+
+ /*
+ * The target domain field within the cprb body/payload block will be
+ * replaced by the usage domain for non-management commands only.
+ * Therefore we check the first bit of the 'flags' parameter for
+ * management command indication.
+ * 0 - non management command
+ * 1 - management command
+ */
+ if (!((msg->cprbx.flags & 0x80) == 0x80)) {
+ msg->cprbx.target_id = (unsigned int)
+ AP_QID_QUEUE(zdev->ap_dev->qid);
+
+ if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
+ switch (msg->pld_lenfmt & 0x03) {
+ case 1:
+ lfmt = 2;
+ break;
+ case 2:
+ lfmt = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ lfmt = 1; /* length format #1 */
+ }
+ payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
+ payload_hdr->dom_val = (unsigned int)
+ AP_QID_QUEUE(zdev->ap_dev->qid);
+ }
+ return 0;
+}
+
/**
* Copy results from a type 86 ICA reply message back to user space.
*
@@ -377,6 +466,12 @@ struct type86x_reply {
char text[0];
} __packed;
+struct type86_ep11_reply {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ struct ep11_cprb cprbx;
+} __packed;
+
static int convert_type86_ica(struct zcrypt_device *zdev,
struct ap_message *reply,
char __user *outputdata,
@@ -440,6 +535,11 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
if (service_rc == 8 && service_rs == 72)
return -EINVAL;
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
+ zdev->ap_dev->qid, zdev->online,
+ msg->hdr.reply_code);
return -EAGAIN; /* repeat the request on a different device. */
}
data = msg->text;
@@ -503,6 +603,33 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev,
return 0;
}
+/**
+ * Copy results from a type 86 EP11 XCRB reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @xcRB: pointer to EP11 user request block
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ struct ep11_urb *xcRB)
+{
+ struct type86_fmt2_msg *msg = reply->message;
+ char *data = reply->message;
+
+ if (xcRB->resp_len < msg->fmt2.count1)
+ return -EINVAL;
+
+ /* Copy response CPRB to user */
+ if (copy_to_user((char *)xcRB->resp,
+ data + msg->fmt2.offset1, msg->fmt2.count1))
+ return -EFAULT;
+ xcRB->resp_len = msg->fmt2.count1;
+ return 0;
+}
+
static int convert_type86_rng(struct zcrypt_device *zdev,
struct ap_message *reply,
char *buffer)
@@ -551,6 +678,10 @@ static int convert_response_ica(struct zcrypt_device *zdev,
* response */
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
+ zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
@@ -579,10 +710,40 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
default: /* Unknown response type, this should NEVER EVER happen */
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
+ zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
+static int convert_response_ep11_xcrb(struct zcrypt_device *zdev,
+ struct ap_message *reply, struct ep11_urb *xcRB)
+{
+ struct type86_ep11_reply *msg = reply->message;
+
+ /* Response type byte is the second byte in the response. */
+ switch (((unsigned char *)reply->message)[1]) {
+ case TYPE82_RSP_CODE:
+ case TYPE87_RSP_CODE:
+ return convert_error(zdev, reply);
+ case TYPE86_RSP_CODE:
+ if (msg->hdr.reply_code)
+ return convert_error(zdev, reply);
+ if (msg->cprbx.cprb_ver_id == 0x04)
+ return convert_type86_ep11_xcrb(zdev, reply, xcRB);
+ /* Fall through, no break, incorrect cprb version is an unknown resp.*/
+ default: /* Unknown response type, this should NEVER EVER happen */
+ zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
+ zdev->ap_dev->qid, zdev->online);
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+}
+
static int convert_response_rng(struct zcrypt_device *zdev,
struct ap_message *reply,
char *data)
@@ -602,6 +763,10 @@ static int convert_response_rng(struct zcrypt_device *zdev,
* response */
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
+ zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
@@ -657,6 +822,51 @@ out:
complete(&(resp_type->work));
}
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @ap_dev: pointer to the AP device
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev,
+ struct ap_message *msg,
+ struct ap_message *reply)
+{
+ static struct error_hdr error_reply = {
+ .type = TYPE82_RSP_CODE,
+ .reply_code = REP82_ERROR_MACHINE_FAILURE,
+ };
+ struct response_type *resp_type =
+ (struct response_type *)msg->private;
+ struct type86_ep11_reply *t86r;
+ int length;
+
+ /* Copy the reply message to the request message buffer. */
+ if (IS_ERR(reply)) {
+ memcpy(msg->message, &error_reply, sizeof(error_reply));
+ goto out;
+ }
+ t86r = reply->message;
+ if (t86r->hdr.type == TYPE86_RSP_CODE &&
+ t86r->cprbx.cprb_ver_id == 0x04) {
+ switch (resp_type->type) {
+ case PCIXCC_RESPONSE_TYPE_EP11:
+ length = t86r->fmt2.offset1 + t86r->fmt2.count1;
+ length = min(MSGTYPE06_MAX_MSG_SIZE, length);
+ memcpy(msg->message, reply->message, length);
+ break;
+ default:
+ memcpy(msg->message, &error_reply, sizeof(error_reply));
+ }
+ } else {
+ memcpy(msg->message, reply->message, sizeof(error_reply));
+ }
+out:
+ complete(&(resp_type->work));
+}
+
static atomic_t zcrypt_step = ATOMIC_INIT(0);
/**
@@ -782,6 +992,46 @@ out_free:
}
/**
+ * The request distributor calls this function if it picked the CEX4P
+ * device to handle a send_ep11_cprb request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * CEX4P device to the request distributor
+ * @xcrb: pointer to the ep11 user request block
+ */
+static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_device *zdev,
+ struct ep11_urb *xcrb)
+{
+ struct ap_message ap_msg;
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_EP11,
+ };
+ int rc;
+
+ ap_init_message(&ap_msg);
+ ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
+ ap_msg.receive = zcrypt_msgtype6_receive_ep11;
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &resp_type;
+ rc = xcrb_msg_to_type6_ep11cprb_msgx(zdev, &ap_msg, xcrb);
+ if (rc)
+ goto out_free;
+ init_completion(&resp_type.work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible(&resp_type.work);
+ if (rc == 0)
+ rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb);
+ else /* Signal pending. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+
+out_free:
+ kzfree(ap_msg.message);
+ return rc;
+}
+
+/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to generate random data.
* @zdev: pointer to zcrypt_device structure that identifies the
@@ -839,10 +1089,19 @@ static struct zcrypt_ops zcrypt_msgtype6_ops = {
.rng = zcrypt_msgtype6_rng,
};
+static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
+ .owner = THIS_MODULE,
+ .variant = MSGTYPE06_VARIANT_EP11,
+ .rsa_modexpo = NULL,
+ .rsa_modexpo_crt = NULL,
+ .send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb,
+};
+
int __init zcrypt_msgtype6_init(void)
{
zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
+ zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops);
return 0;
}
@@ -850,6 +1109,7 @@ void __exit zcrypt_msgtype6_exit(void)
{
zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops);
zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
+ zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops);
}
module_init(zcrypt_msgtype6_init);
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 1e500d3..2072475 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -32,6 +32,7 @@
#define MSGTYPE06_NAME "zcrypt_msgtype6"
#define MSGTYPE06_VARIANT_DEFAULT 0
#define MSGTYPE06_VARIANT_NORNG 1
+#define MSGTYPE06_VARIANT_EP11 2
#define MSGTYPE06_MAX_MSG_SIZE (12*1024)
@@ -99,6 +100,7 @@ struct type86_hdr {
} __packed;
#define TYPE86_RSP_CODE 0x86
+#define TYPE87_RSP_CODE 0x87
#define TYPE86_FMT2 0x02
struct type86_fmt2_ext {
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index f2b71d8..7a743f4 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -24,6 +24,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -199,6 +202,10 @@ static int convert_type84(struct zcrypt_device *zdev,
if (t84h->len < sizeof(*t84h) + outputdatalength) {
/* The result is too short, the PCICA card may not do that.. */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
+ zdev->ap_dev->qid, zdev->online, t84h->code);
return -EAGAIN; /* repeat the request on a different device. */
}
BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE);
@@ -223,6 +230,10 @@ static int convert_response(struct zcrypt_device *zdev,
outputdata, outputdatalength);
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
+ zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index 0d90a43..4d14c04 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -24,6 +24,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
@@ -372,6 +375,11 @@ static int convert_type86(struct zcrypt_device *zdev,
if (service_rc == 8 && service_rs == 72)
return -EINVAL;
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
+ zdev->ap_dev->qid, zdev->online,
+ msg->hdr.reply_code);
return -EAGAIN; /* repeat the request on a different device. */
}
data = msg->text;
@@ -425,6 +433,10 @@ static int convert_response(struct zcrypt_device *zdev,
/* no break, incorrect cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
+ zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 30fa38a..9176bfb 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -201,7 +201,7 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
instance->irq = IRQ_AMIGA_PORTS;
instance->unique_id = z->slotaddr;
- regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start);
+ regs = ZTWO_VADDR(z->resource.start);
regs->DAWR = DAWR_A2091;
wdregs.SASR = &regs->SASR;
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index c0f4f42..dd5b647 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -220,7 +220,7 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
instance->irq = IRQ_AMIGA_PORTS;
- regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start);
+ regs = ZTWO_VADDR(res->start);
regs->DAWR = DAWR_A3000;
wdregs.SASR = &regs->SASR;
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
index 70c521f..f5a2ab4 100644
--- a/drivers/scsi/a4000t.c
+++ b/drivers/scsi/a4000t.c
@@ -56,7 +56,7 @@ static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev)
scsi_addr = res->start + A4000T_SCSI_OFFSET;
/* Fill in the required pieces of hostdata */
- hostdata->base = (void __iomem *)ZTWO_VADDR(scsi_addr);
+ hostdata->base = ZTWO_VADDR(scsi_addr);
hostdata->clock = 50;
hostdata->chip710 = 1;
hostdata->dmode_extra = DMODE_FC2;
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 2203ac2..3b6f83f 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -310,7 +310,7 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
if (!request_mem_region(address, 256, "wd33c93"))
return -EBUSY;
- regs = (struct gvp11_scsiregs *)(ZTWO_VADDR(address));
+ regs = ZTWO_VADDR(address);
error = check_wd33c93(regs);
if (error)
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index cbf3476..aff3199 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -104,7 +104,7 @@ static int zorro7xx_init_one(struct zorro_dev *z,
if (ioaddr > 0x01000000)
hostdata->base = ioremap(ioaddr, zorro_resource_len(z));
else
- hostdata->base = (void __iomem *)ZTWO_VADDR(ioaddr);
+ hostdata->base = ZTWO_VADDR(ioaddr);
hostdata->clock = 50;
hostdata->chip710 = 1;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 3bfdaa8..4bb6b11 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -54,6 +54,8 @@ source "drivers/staging/rtl8188eu/Kconfig"
source "drivers/staging/rts5139/Kconfig"
+source "drivers/staging/rts5208/Kconfig"
+
source "drivers/staging/frontier/Kconfig"
source "drivers/staging/phison/Kconfig"
@@ -138,12 +140,8 @@ source "drivers/staging/netlogic/Kconfig"
source "drivers/staging/mt29f_spinand/Kconfig"
-source "drivers/staging/dwc2/Kconfig"
-
source "drivers/staging/lustre/Kconfig"
-source "drivers/staging/btmtk_usb/Kconfig"
-
source "drivers/staging/xillybus/Kconfig"
source "drivers/staging/dgnc/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index b0d3303..9f07e5e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += rtl8188eu/
obj-$(CONFIG_RTS5139) += rts5139/
+obj-$(CONFIG_RTS5208) += rts5208/
obj-$(CONFIG_TRANZPORT) += frontier/
obj-$(CONFIG_IDE_PHISON) += phison/
obj-$(CONFIG_LINE6_USB) += line6/
@@ -60,9 +61,7 @@ obj-$(CONFIG_DGRP) += dgrp/
obj-$(CONFIG_SB105X) += sb105x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_GOLDFISH) += goldfish/
-obj-$(CONFIG_USB_DWC2) += dwc2/
obj-$(CONFIG_LUSTRE_FS) += lustre/
-obj-$(CONFIG_USB_BTMTK) += btmtk_usb/
obj-$(CONFIG_XILLYBUS) += xillybus/
obj-$(CONFIG_DGNC) += dgnc/
obj-$(CONFIG_DGAP) += dgap/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 1e9ab6d..b91c758 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -100,6 +100,8 @@ config SW_SYNC_USER
*WARNING* improper use of this can result in deadlocking kernel
drivers from userspace.
+source "drivers/staging/android/ion/Kconfig"
+
endif # if ANDROID
endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index c136299..0a01e191 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -1,5 +1,7 @@
ccflags-y += -I$(src) # needed for trace events
+obj-y += ion/
+
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOGGER) += logger.o
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
index 647694f..2fc7cdd 100644
--- a/drivers/staging/android/alarm-dev.c
+++ b/drivers/staging/android/alarm-dev.c
@@ -68,11 +68,10 @@ static struct devalarm alarms[ANDROID_ALARM_TYPE_COUNT];
*/
static int is_wakeup(enum android_alarm_type type)
{
- return (type == ANDROID_ALARM_RTC_WAKEUP ||
- type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP);
+ return type == ANDROID_ALARM_RTC_WAKEUP ||
+ type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP;
}
-
static void devalarm_start(struct devalarm *alrm, ktime_t exp)
{
if (is_wakeup(alrm->type))
@@ -111,7 +110,6 @@ static void alarm_clear(enum android_alarm_type alarm_type)
}
alarm_enabled &= ~alarm_type_mask;
spin_unlock_irqrestore(&alarm_slock, flags);
-
}
static void alarm_set(enum android_alarm_type alarm_type,
@@ -280,6 +278,7 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
+
#ifdef CONFIG_COMPAT
static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
@@ -371,7 +370,6 @@ static void devalarm_triggered(struct devalarm *alarm)
spin_unlock_irqrestore(&alarm_slock, flags);
}
-
static enum hrtimer_restart devalarm_hrthandler(struct hrtimer *hrt)
{
struct devalarm *devalrm = container_of(hrt, struct devalarm, u.hrt);
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
new file mode 100644
index 0000000..0f8fec1
--- /dev/null
+++ b/drivers/staging/android/ion/Kconfig
@@ -0,0 +1,35 @@
+menuconfig ION
+ bool "Ion Memory Manager"
+ depends on HAVE_MEMBLOCK
+ select GENERIC_ALLOCATOR
+ select DMA_SHARED_BUFFER
+ ---help---
+	  Choose this option to enable the ION Memory Manager,
+ used by Android to efficiently allocate buffers
+ from userspace that can be shared between drivers.
+	  If you're not using Android it's probably safe to
+ say N here.
+
+config ION_TEST
+ tristate "Ion Test Device"
+ depends on ION
+ help
+ Choose this option to create a device that can be used to test the
+ kernel and device side ION functions.
+
+config ION_DUMMY
+ bool "Dummy Ion driver"
+ depends on ION
+ help
+ Provides a dummy ION driver that registers the
+ /dev/ion device and some basic heaps. This can
+ be used for testing the ION infrastructure if
+ one doesn't have access to hardware drivers that
+ use ION.
+
+config ION_TEGRA
+ tristate "Ion for Tegra"
+ depends on ARCH_TEGRA && ION
+ help
+ Choose this option if you wish to use ion on an nVidia Tegra.
+
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
new file mode 100644
index 0000000..b56fd2b
--- /dev/null
+++ b/drivers/staging/android/ion/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
+ ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+obj-$(CONFIG_ION_TEST) += ion_test.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_ION) += compat_ion.o
+endif
+
+obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
+obj-$(CONFIG_ION_TEGRA) += tegra/
+
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
new file mode 100644
index 0000000..af6cd37
--- /dev/null
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -0,0 +1,177 @@
+/*
+ * drivers/staging/android/ion/compat_ion.c
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "ion.h"
+#include "compat_ion.h"
+
+/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
+struct compat_ion_allocation_data {
+ compat_size_t len;
+ compat_size_t align;
+ compat_uint_t heap_id_mask;
+ compat_uint_t flags;
+ compat_int_t handle;
+};
+
+struct compat_ion_custom_data {
+ compat_uint_t cmd;
+ compat_ulong_t arg;
+};
+
+#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct compat_ion_allocation_data)
+#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
+ struct compat_ion_custom_data)
+
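+/*
+ * Each 32-bit ioctl argument is converted field by field into a native
+ * struct allocated with compat_alloc_user_space(), and the driver's own
+ * unlocked_ioctl is then invoked on that converted copy.
+ */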
+static int compat_get_ion_allocation_data(
+ struct compat_ion_allocation_data __user *data32,
+ struct ion_allocation_data __user *data)
+{
+ compat_size_t s;
+ compat_uint_t u;
+ compat_int_t i;
+ int err;
+
+ err = get_user(s, &data32->len);
+ err |= put_user(s, &data->len);
+ err |= get_user(s, &data32->align);
+ err |= put_user(s, &data->align);
+ err |= get_user(u, &data32->heap_id_mask);
+ err |= put_user(u, &data->heap_id_mask);
+ err |= get_user(u, &data32->flags);
+ err |= put_user(u, &data->flags);
+ err |= get_user(i, &data32->handle);
+ err |= put_user(i, &data->handle);
+
+ return err;
+}
+
+static int compat_put_ion_allocation_data(
+ struct compat_ion_allocation_data __user *data32,
+ struct ion_allocation_data __user *data)
+{
+ compat_size_t s;
+ compat_uint_t u;
+ compat_int_t i;
+ int err;
+
+ err = get_user(s, &data->len);
+ err |= put_user(s, &data32->len);
+ err |= get_user(s, &data->align);
+ err |= put_user(s, &data32->align);
+ err |= get_user(u, &data->heap_id_mask);
+ err |= put_user(u, &data32->heap_id_mask);
+ err |= get_user(u, &data->flags);
+ err |= put_user(u, &data32->flags);
+ err |= get_user(i, &data->handle);
+ err |= put_user(i, &data32->handle);
+
+ return err;
+}
+
+static int compat_get_ion_custom_data(
+ struct compat_ion_custom_data __user *data32,
+ struct ion_custom_data __user *data)
+{
+ compat_uint_t cmd;
+ compat_ulong_t arg;
+ int err;
+
+ err = get_user(cmd, &data32->cmd);
+ err |= put_user(cmd, &data->cmd);
+ err |= get_user(arg, &data32->arg);
+ err |= put_user(arg, &data->arg);
+
+ return err;
+}
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case COMPAT_ION_IOC_ALLOC:
+ {
+ struct compat_ion_allocation_data __user *data32;
+ struct ion_allocation_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_ion_allocation_data(data32, data);
+ if (err)
+ return err;
+ ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
+ (unsigned long)data);
+ err = compat_put_ion_allocation_data(data32, data);
+ return ret ? ret : err;
+ }
+ case COMPAT_ION_IOC_FREE:
+ {
+ struct compat_ion_allocation_data __user *data32;
+ struct ion_allocation_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_ion_allocation_data(data32, data);
+ if (err)
+ return err;
+
+ return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
+ (unsigned long)data);
+ }
+ case COMPAT_ION_IOC_CUSTOM: {
+ struct compat_ion_custom_data __user *data32;
+ struct ion_custom_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_ion_custom_data(data32, data);
+ if (err)
+ return err;
+
+ return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
+ (unsigned long)data);
+ }
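+	/*
+	 * These commands only carry an fd or a handle id, whose layout is
+	 * the same for 32-bit and 64-bit callers, so forward them unchanged.
+	 */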
+ case ION_IOC_SHARE:
+ case ION_IOC_MAP:
+ case ION_IOC_IMPORT:
+ case ION_IOC_SYNC:
+ return filp->f_op->unlocked_ioctl(filp, cmd,
+ (unsigned long)compat_ptr(arg));
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
new file mode 100644
index 0000000..c2ad589
--- /dev/null
+++ b/drivers/staging/android/ion/compat_ion.h
@@ -0,0 +1,30 @@
+/*
+
+ * drivers/staging/android/ion/compat_ion.h
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_COMPAT_ION_H
+#define _LINUX_COMPAT_ION_H
+
+#if IS_ENABLED(CONFIG_COMPAT)
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#else
+
+#define compat_ion_ioctl NULL
+
+#endif /* CONFIG_COMPAT */
+#endif /* _LINUX_COMPAT_ION_H */
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
new file mode 100644
index 0000000..574066f
--- /dev/null
+++ b/drivers/staging/android/ion/ion.c
@@ -0,0 +1,1549 @@
+/*
+
+ * drivers/staging/android/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/miscdevice.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/idr.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev: the actual misc device
+ * @buffers: an rb tree of all the existing buffers
+ * @buffer_lock: lock protecting the tree of buffers
+ * @lock: rwsem protecting the tree of heaps and clients
+ * @heaps: list of all the heaps in the system
+ * @clients:		an rb tree of all the clients created from userspace
+ */
+struct ion_device {
+ struct miscdevice dev;
+ struct rb_root buffers;
+ struct mutex buffer_lock;
+ struct rw_semaphore lock;
+ struct plist_head heaps;
+ long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
+ unsigned long arg);
+ struct rb_root clients;
+ struct dentry *debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node: node in the tree of all clients
+ * @dev: backpointer to ion device
+ * @handles: an rb tree of all the handles in this client
+ * @idr: an idr space for allocating handle ids
+ * @lock: lock protecting the tree of handles
+ * @name: used for debugging
+ * @task: used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both the tree of handles
+ * and the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+ struct rb_node node;
+ struct ion_device *dev;
+ struct rb_root handles;
+ struct idr idr;
+ struct mutex lock;
+ const char *name;
+ struct task_struct *task;
+ pid_t pid;
+ struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref: reference count
+ * @client: back pointer to the client the buffer resides in
+ * @buffer: pointer to the buffer
+ * @node: node in the client's handle rbtree
+ * @kmap_cnt: count of times this client has mapped to kernel
+ * @id: client-unique id allocated by client->idr
+ *
+ * Modifications to node and kmap_cnt should be protected by the
+ * lock in the client. Other fields are never changed after initialization.
+ */
+struct ion_handle {
+ struct kref ref;
+ struct ion_client *client;
+ struct ion_buffer *buffer;
+ struct rb_node node;
+ unsigned int kmap_cnt;
+ int id;
+};
+
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+ return (buffer->flags & ION_FLAG_CACHED) &&
+ !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
+}
+
+bool ion_buffer_cached(struct ion_buffer *buffer)
+{
+ return !!(buffer->flags & ION_FLAG_CACHED);
+}
+
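+/*
+ * For buffers that fault in their user mappings, the low bit of each
+ * buffer->pages[] entry is used as a per-page dirty flag; the helpers
+ * below mask that bit off, test it, set it and clear it.
+ */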
+static inline struct page *ion_buffer_page(struct page *page)
+{
+ return (struct page *)((unsigned long)page & ~(1UL));
+}
+
+static inline bool ion_buffer_page_is_dirty(struct page *page)
+{
+ return !!((unsigned long)page & 1UL);
+}
+
+static inline void ion_buffer_page_dirty(struct page **page)
+{
+ *page = (struct page *)((unsigned long)(*page) | 1UL);
+}
+
+static inline void ion_buffer_page_clean(struct page **page)
+{
+ *page = (struct page *)((unsigned long)(*page) & ~(1UL));
+}
+
+/* this function should only be called while dev->lock is held */
+static void ion_buffer_add(struct ion_device *dev,
+ struct ion_buffer *buffer)
+{
+ struct rb_node **p = &dev->buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_buffer *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_buffer, node);
+
+ if (buffer < entry) {
+ p = &(*p)->rb_left;
+ } else if (buffer > entry) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: buffer already found.", __func__);
+ BUG();
+ }
+ }
+
+ rb_link_node(&buffer->node, parent, p);
+ rb_insert_color(&buffer->node, &dev->buffers);
+}
+
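+/*
+ * Allocate a buffer from the given heap.  If the first attempt fails and
+ * the heap frees deferred, drain the heap's freelist and retry once
+ * before giving up.
+ */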
+/* this function should only be called while dev->lock is held */
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+ struct ion_device *dev,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
+{
+ struct ion_buffer *buffer;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i, ret;
+
+ buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ buffer->heap = heap;
+ buffer->flags = flags;
+ kref_init(&buffer->ref);
+
+ ret = heap->ops->allocate(heap, buffer, len, align, flags);
+
+ if (ret) {
+ if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+ goto err2;
+
+ ion_heap_freelist_drain(heap, 0);
+ ret = heap->ops->allocate(heap, buffer, len, align,
+ flags);
+ if (ret)
+ goto err2;
+ }
+
+ buffer->dev = dev;
+ buffer->size = len;
+
+ table = heap->ops->map_dma(heap, buffer);
+ if (WARN_ONCE(table == NULL,
+ "heap->ops->map_dma should return ERR_PTR on error"))
+ table = ERR_PTR(-EINVAL);
+ if (IS_ERR(table)) {
+ heap->ops->free(buffer);
+ kfree(buffer);
+ return ERR_PTR(PTR_ERR(table));
+ }
+ buffer->sg_table = table;
+ if (ion_buffer_fault_user_mappings(buffer)) {
+ int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct scatterlist *sg;
+ int i, j, k = 0;
+
+ buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
+ if (!buffer->pages) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+
+ for (j = 0; j < sg->length / PAGE_SIZE; j++)
+ buffer->pages[k++] = page++;
+ }
+
+ if (ret)
+ goto err;
+ }
+
+ buffer->dev = dev;
+ buffer->size = len;
+ INIT_LIST_HEAD(&buffer->vmas);
+ mutex_init(&buffer->lock);
+ /* this will set up dma addresses for the sglist -- it is not
+ technically correct as per the dma api -- a specific
+ device isn't really taking ownership here. However, in practice on
+ our systems the only dma_address space is physical addresses.
+ Additionally, we can't afford the overhead of invalidating every
+ allocation via dma_map_sg. The implicit contract here is that
+   memory coming from the heaps is ready for dma, i.e. if it has a
+ cached mapping that mapping has been invalidated */
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+ sg_dma_address(sg) = sg_phys(sg);
+ mutex_lock(&dev->buffer_lock);
+ ion_buffer_add(dev, buffer);
+ mutex_unlock(&dev->buffer_lock);
+ return buffer;
+
+err:
+ heap->ops->unmap_dma(heap, buffer);
+ heap->ops->free(buffer);
+err1:
+ if (buffer->pages)
+ vfree(buffer->pages);
+err2:
+ kfree(buffer);
+ return ERR_PTR(ret);
+}
+
+void ion_buffer_destroy(struct ion_buffer *buffer)
+{
+ if (WARN_ON(buffer->kmap_cnt > 0))
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+ buffer->heap->ops->free(buffer);
+ if (buffer->pages)
+ vfree(buffer->pages);
+ kfree(buffer);
+}
+
+static void _ion_buffer_destroy(struct kref *kref)
+{
+ struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+ struct ion_heap *heap = buffer->heap;
+ struct ion_device *dev = buffer->dev;
+
+ mutex_lock(&dev->buffer_lock);
+ rb_erase(&buffer->node, &dev->buffers);
+ mutex_unlock(&dev->buffer_lock);
+
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ ion_heap_freelist_add(heap, buffer);
+ else
+ ion_buffer_destroy(buffer);
+}
+
+static void ion_buffer_get(struct ion_buffer *buffer)
+{
+ kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+ return kref_put(&buffer->ref, _ion_buffer_destroy);
+}
+
+static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
+{
+ mutex_lock(&buffer->lock);
+ buffer->handle_count++;
+ mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
+{
+ /*
+ * when a buffer is removed from a handle, if it is not in
+ * any other handles, copy the taskcomm and the pid of the
+ * process it's being removed from into the buffer. At this
+ * point there will be no way to track what processes this buffer is
+ * being used by, it only exists as a dma_buf file descriptor.
+ * The taskcomm and pid can provide a debug hint as to where this fd
+ * is in the system
+ */
+ mutex_lock(&buffer->lock);
+ buffer->handle_count--;
+ BUG_ON(buffer->handle_count < 0);
+ if (!buffer->handle_count) {
+ struct task_struct *task;
+
+ task = current->group_leader;
+ get_task_comm(buffer->task_comm, task);
+ buffer->pid = task_pid_nr(task);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static struct ion_handle *ion_handle_create(struct ion_client *client,
+ struct ion_buffer *buffer)
+{
+ struct ion_handle *handle;
+
+ handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+ kref_init(&handle->ref);
+ RB_CLEAR_NODE(&handle->node);
+ handle->client = client;
+ ion_buffer_get(buffer);
+ ion_buffer_add_to_handle(buffer);
+ handle->buffer = buffer;
+
+ return handle;
+}
+
+static void ion_handle_kmap_put(struct ion_handle *);
+
+static void ion_handle_destroy(struct kref *kref)
+{
+ struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+ struct ion_client *client = handle->client;
+ struct ion_buffer *buffer = handle->buffer;
+
+ mutex_lock(&buffer->lock);
+ while (handle->kmap_cnt)
+ ion_handle_kmap_put(handle);
+ mutex_unlock(&buffer->lock);
+
+ idr_remove(&client->idr, handle->id);
+ if (!RB_EMPTY_NODE(&handle->node))
+ rb_erase(&handle->node, &client->handles);
+
+ ion_buffer_remove_from_handle(buffer);
+ ion_buffer_put(buffer);
+
+ kfree(handle);
+}
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+ return handle->buffer;
+}
+
+static void ion_handle_get(struct ion_handle *handle)
+{
+ kref_get(&handle->ref);
+}
+
+static int ion_handle_put(struct ion_handle *handle)
+{
+ struct ion_client *client = handle->client;
+ int ret;
+
+ mutex_lock(&client->lock);
+ ret = kref_put(&handle->ref, ion_handle_destroy);
+ mutex_unlock(&client->lock);
+
+ return ret;
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+ struct ion_buffer *buffer)
+{
+ struct rb_node *n = client->handles.rb_node;
+
+ while (n) {
+ struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+ if (buffer < entry->buffer)
+ n = n->rb_left;
+ else if (buffer > entry->buffer)
+ n = n->rb_right;
+ else
+ return entry;
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+ int id)
+{
+ struct ion_handle *handle;
+
+ mutex_lock(&client->lock);
+ handle = idr_find(&client->idr, id);
+ if (handle)
+ ion_handle_get(handle);
+ mutex_unlock(&client->lock);
+
+ return handle ? handle : ERR_PTR(-EINVAL);
+}
+
+static bool ion_handle_validate(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ WARN_ON(!mutex_is_locked(&client->lock));
+ return (idr_find(&client->idr, handle->id) == handle);
+}
+
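+/*
+ * A handle is indexed twice per client: by id in client->idr for lookups
+ * from userspace, and by buffer in the client->handles rbtree so that an
+ * import of an already known buffer can reuse the existing handle.
+ */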
+static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+ int id;
+ struct rb_node **p = &client->handles.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_handle *entry;
+
+ id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ handle->id = id;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_handle, node);
+
+ if (handle->buffer < entry->buffer)
+ p = &(*p)->rb_left;
+ else if (handle->buffer > entry->buffer)
+ p = &(*p)->rb_right;
+ else
+ WARN(1, "%s: buffer already found.", __func__);
+ }
+
+ rb_link_node(&handle->node, parent, p);
+ rb_insert_color(&handle->node, &client->handles);
+
+ return 0;
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int heap_id_mask,
+ unsigned int flags)
+{
+ struct ion_handle *handle;
+ struct ion_device *dev = client->dev;
+ struct ion_buffer *buffer = NULL;
+ struct ion_heap *heap;
+ int ret;
+
+ pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
+ len, align, heap_id_mask, flags);
+ /*
+ * traverse the list of heaps available in this system in priority
+ * order. If the heap type is supported by the client, and matches the
+ * request of the caller allocate from it. Repeat until allocate has
+	 * request of the caller, allocate from it. Repeat until allocate has
+ */
+ len = PAGE_ALIGN(len);
+
+ if (!len)
+ return ERR_PTR(-EINVAL);
+
+ down_read(&dev->lock);
+ plist_for_each_entry(heap, &dev->heaps, node) {
+ /* if the caller didn't specify this heap id */
+ if (!((1 << heap->id) & heap_id_mask))
+ continue;
+ buffer = ion_buffer_create(heap, dev, len, align, flags);
+ if (!IS_ERR(buffer))
+ break;
+ }
+ up_read(&dev->lock);
+
+ if (buffer == NULL)
+ return ERR_PTR(-ENODEV);
+
+ if (IS_ERR(buffer))
+ return ERR_PTR(PTR_ERR(buffer));
+
+ handle = ion_handle_create(client, buffer);
+
+ /*
+ * ion_buffer_create will create a buffer with a ref_cnt of 1,
+ * and ion_handle_create will take a second reference, drop one here
+ */
+ ion_buffer_put(buffer);
+
+ if (IS_ERR(handle))
+ return handle;
+
+ mutex_lock(&client->lock);
+ ret = ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ if (ret) {
+ ion_handle_put(handle);
+ handle = ERR_PTR(ret);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL(ion_alloc);
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+ bool valid_handle;
+
+ BUG_ON(client != handle->client);
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to free.\n", __func__);
+ mutex_unlock(&client->lock);
+ return;
+ }
+ mutex_unlock(&client->lock);
+ ion_handle_put(handle);
+}
+EXPORT_SYMBOL(ion_free);
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct ion_buffer *buffer;
+ int ret;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+
+ if (!buffer->heap->ops->phys) {
+ pr_err("%s: ion_phys is not implemented by this heap.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return -ENODEV;
+ }
+ mutex_unlock(&client->lock);
+ ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+ return ret;
+}
+EXPORT_SYMBOL(ion_phys);
+
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+{
+ void *vaddr;
+
+ if (buffer->kmap_cnt) {
+ buffer->kmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+ if (WARN_ONCE(vaddr == NULL,
+ "heap->ops->map_kernel should return ERR_PTR on error"))
+ return ERR_PTR(-EINVAL);
+ if (IS_ERR(vaddr))
+ return vaddr;
+ buffer->vaddr = vaddr;
+ buffer->kmap_cnt++;
+ return vaddr;
+}
+
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+ void *vaddr;
+
+ if (handle->kmap_cnt) {
+ handle->kmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = ion_buffer_kmap_get(buffer);
+ if (IS_ERR(vaddr))
+ return vaddr;
+ handle->kmap_cnt++;
+ return vaddr;
+}
+
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+ buffer->kmap_cnt--;
+ if (!buffer->kmap_cnt) {
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->vaddr = NULL;
+ }
+}
+
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+
+ handle->kmap_cnt--;
+ if (!handle->kmap_cnt)
+ ion_buffer_kmap_put(buffer);
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ void *vaddr;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_kernel.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+
+ buffer = handle->buffer;
+
+ if (!handle->buffer->heap->ops->map_kernel) {
+ pr_err("%s: map_kernel is not implemented by this heap.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&buffer->lock);
+ vaddr = ion_handle_kmap_get(handle);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+ return vaddr;
+}
+EXPORT_SYMBOL(ion_map_kernel);
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+
+ mutex_lock(&client->lock);
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+ ion_handle_kmap_put(handle);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_unmap_kernel);
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+ struct ion_client *client = s->private;
+ struct rb_node *n;
+ size_t sizes[ION_NUM_HEAP_IDS] = {0};
+ const char *names[ION_NUM_HEAP_IDS] = {NULL};
+ int i;
+
+ mutex_lock(&client->lock);
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ unsigned int id = handle->buffer->heap->id;
+
+ if (!names[id])
+ names[id] = handle->buffer->heap->name;
+ sizes[id] += handle->buffer->size;
+ }
+ mutex_unlock(&client->lock);
+
+ seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+ for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
+ if (!names[i])
+ continue;
+ seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
+ }
+ return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+ .open = ion_debug_client_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+struct ion_client *ion_client_create(struct ion_device *dev,
+ const char *name)
+{
+ struct ion_client *client;
+ struct task_struct *task;
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct ion_client *entry;
+ char debug_name[64];
+ pid_t pid;
+
+ get_task_struct(current->group_leader);
+ task_lock(current->group_leader);
+ pid = task_pid_nr(current->group_leader);
+ /* don't bother to store task struct for kernel threads,
+ they can't be killed anyway */
+ if (current->group_leader->flags & PF_KTHREAD) {
+ put_task_struct(current->group_leader);
+ task = NULL;
+ } else {
+ task = current->group_leader;
+ }
+ task_unlock(current->group_leader);
+
+ client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+ if (!client) {
+ if (task)
+ put_task_struct(current->group_leader);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ client->dev = dev;
+ client->handles = RB_ROOT;
+ idr_init(&client->idr);
+ mutex_init(&client->lock);
+ client->name = name;
+ client->task = task;
+ client->pid = pid;
+
+ down_write(&dev->lock);
+ p = &dev->clients.rb_node;
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_client, node);
+
+ if (client < entry)
+ p = &(*p)->rb_left;
+ else if (client > entry)
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&client->node, parent, p);
+ rb_insert_color(&client->node, &dev->clients);
+
+ snprintf(debug_name, 64, "%u", client->pid);
+ client->debug_root = debugfs_create_file(debug_name, 0664,
+ dev->debug_root, client,
+ &debug_client_fops);
+ up_write(&dev->lock);
+
+ return client;
+}
+EXPORT_SYMBOL(ion_client_create);
+
+void ion_client_destroy(struct ion_client *client)
+{
+ struct ion_device *dev = client->dev;
+ struct rb_node *n;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ while ((n = rb_first(&client->handles))) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ ion_handle_destroy(&handle->ref);
+ }
+
+ idr_destroy(&client->idr);
+
+ down_write(&dev->lock);
+ if (client->task)
+ put_task_struct(client->task);
+ rb_erase(&client->node, &dev->clients);
+ debugfs_remove_recursive(client->debug_root);
+ up_write(&dev->lock);
+
+ kfree(client);
+}
+EXPORT_SYMBOL(ion_client_destroy);
+
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct sg_table *table;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_dma.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ table = buffer->sg_table;
+ mutex_unlock(&client->lock);
+ return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+ struct device *dev,
+ enum dma_data_direction direction);
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_buf *dmabuf = attachment->dmabuf;
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+ return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+}
+
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+ size_t size, enum dma_data_direction dir)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, size, 0);
+ /*
+ * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+	 * for the targeted device, but this works on the currently targeted
+ * hardware.
+ */
+ sg_dma_address(&sg) = page_to_phys(page);
+ dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
+struct ion_vma_list {
+ struct list_head list;
+ struct vm_area_struct *vma;
+};
+
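+/*
+ * Sync every page that a faulting user mapping has dirtied, then zap
+ * those mappings so that later CPU accesses fault again and re-mark the
+ * pages dirty.
+ */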
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+ struct device *dev,
+ enum dma_data_direction dir)
+{
+ struct ion_vma_list *vma_list;
+ int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ int i;
+
+ pr_debug("%s: syncing for device %s\n", __func__,
+ dev ? dev_name(dev) : "null");
+
+ if (!ion_buffer_fault_user_mappings(buffer))
+ return;
+
+ mutex_lock(&buffer->lock);
+ for (i = 0; i < pages; i++) {
+ struct page *page = buffer->pages[i];
+
+ if (ion_buffer_page_is_dirty(page))
+ ion_pages_sync_for_device(dev, ion_buffer_page(page),
+ PAGE_SIZE, dir);
+
+ ion_buffer_page_clean(buffer->pages + i);
+ }
+ list_for_each_entry(vma_list, &buffer->vmas, list) {
+ struct vm_area_struct *vma = vma_list->vma;
+
+ zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
+ NULL);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ unsigned long pfn;
+ int ret;
+
+ mutex_lock(&buffer->lock);
+ ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
+ BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
+
+ pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
+ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ mutex_unlock(&buffer->lock);
+ if (ret)
+ return VM_FAULT_ERROR;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_vma_list *vma_list;
+
+ vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+ if (!vma_list)
+ return;
+ vma_list->vma = vma;
+ mutex_lock(&buffer->lock);
+ list_add(&vma_list->list, &buffer->vmas);
+ mutex_unlock(&buffer->lock);
+ pr_debug("%s: adding %p\n", __func__, vma);
+}
+
+static void ion_vm_close(struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_vma_list *vma_list, *tmp;
+
+ pr_debug("%s\n", __func__);
+ mutex_lock(&buffer->lock);
+ list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+ if (vma_list->vma != vma)
+ continue;
+ list_del(&vma_list->list);
+ kfree(vma_list);
+ pr_debug("%s: deleting %p\n", __func__, vma);
+ break;
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static struct vm_operations_struct ion_vma_ops = {
+ .open = ion_vm_open,
+ .close = ion_vm_close,
+ .fault = ion_vm_fault,
+};
+
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ int ret = 0;
+
+ if (!buffer->heap->ops->map_user) {
+ pr_err("%s: this heap does not define a method for mapping "
+ "to userspace\n", __func__);
+ return -EINVAL;
+ }
+
+ if (ion_buffer_fault_user_mappings(buffer)) {
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
+ VM_DONTDUMP;
+ vma->vm_private_data = buffer;
+ vma->vm_ops = &ion_vma_ops;
+ ion_vm_open(vma);
+ return 0;
+ }
+
+ if (!(buffer->flags & ION_FLAG_CACHED))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ mutex_lock(&buffer->lock);
+ /* now map it to userspace */
+ ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+ mutex_unlock(&buffer->lock);
+
+ if (ret)
+ pr_err("%s: failure mapping buffer to userspace\n",
+ __func__);
+
+ return ret;
+}
+
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ ion_buffer_put(buffer);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ return buffer->vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+ void *ptr)
+{
+ return;
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+
+ if (!buffer->heap->ops->map_kernel) {
+ pr_err("%s: map kernel is not implemented by this heap.\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&buffer->lock);
+ vaddr = ion_buffer_kmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+ return 0;
+}
+
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+}
+
+static struct dma_buf_ops dma_buf_ops = {
+ .map_dma_buf = ion_map_dma_buf,
+ .unmap_dma_buf = ion_unmap_dma_buf,
+ .mmap = ion_mmap,
+ .release = ion_dma_buf_release,
+ .begin_cpu_access = ion_dma_buf_begin_cpu_access,
+ .end_cpu_access = ion_dma_buf_end_cpu_access,
+ .kmap_atomic = ion_dma_buf_kmap,
+ .kunmap_atomic = ion_dma_buf_kunmap,
+ .kmap = ion_dma_buf_kmap,
+ .kunmap = ion_dma_buf_kunmap,
+};
+
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct dma_buf *dmabuf;
+ bool valid_handle;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to share.\n", __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ ion_buffer_get(buffer);
+ mutex_unlock(&client->lock);
+
+ dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+ if (IS_ERR(dmabuf)) {
+ ion_buffer_put(buffer);
+ return dmabuf;
+ }
+
+ return dmabuf;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
+
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+{
+ struct dma_buf *dmabuf;
+ int fd;
+
+ dmabuf = ion_share_dma_buf(client, handle);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+ if (fd < 0)
+ dma_buf_put(dmabuf);
+
+ return fd;
+}
+EXPORT_SYMBOL(ion_share_dma_buf_fd);
+
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+ struct ion_handle *handle;
+ int ret;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return ERR_PTR(PTR_ERR(dmabuf));
+ /* if this memory came from ion */
+
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not import dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = dmabuf->priv;
+
+ mutex_lock(&client->lock);
+ /* if a handle exists for this buffer just take a reference to it */
+ handle = ion_handle_lookup(client, buffer);
+ if (!IS_ERR(handle)) {
+ ion_handle_get(handle);
+ mutex_unlock(&client->lock);
+ goto end;
+ }
+ mutex_unlock(&client->lock);
+
+ handle = ion_handle_create(client, buffer);
+ if (IS_ERR(handle))
+ goto end;
+
+ mutex_lock(&client->lock);
+ ret = ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ if (ret) {
+ ion_handle_put(handle);
+ handle = ERR_PTR(ret);
+ }
+
+end:
+ dma_buf_put(dmabuf);
+ return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf);
+
+static int ion_sync_for_device(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ /* if this memory came from ion */
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not sync dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return -EINVAL;
+ }
+ buffer = dmabuf->priv;
+
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+ dma_buf_put(dmabuf);
+ return 0;
+}
+
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+ switch (cmd) {
+ case ION_IOC_SYNC:
+ case ION_IOC_FREE:
+ case ION_IOC_CUSTOM:
+ return _IOC_WRITE;
+ default:
+ return _IOC_DIR(cmd);
+ }
+}
+
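+/*
+ * Every ioctl argument fits in the union below: it is copied in once
+ * according to the (fixed up) direction bits, dispatched, and copied
+ * back out for _IOC_READ commands.  If the final copy-out fails, a
+ * handle allocated by ION_IOC_ALLOC is released again via cleanup_handle.
+ */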
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct ion_client *client = filp->private_data;
+ struct ion_device *dev = client->dev;
+ struct ion_handle *cleanup_handle = NULL;
+ int ret = 0;
+ unsigned int dir;
+
+ union {
+ struct ion_fd_data fd;
+ struct ion_allocation_data allocation;
+ struct ion_handle_data handle;
+ struct ion_custom_data custom;
+ } data;
+
+ dir = ion_ioctl_dir(cmd);
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ if (dir & _IOC_WRITE)
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ION_IOC_ALLOC:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_alloc(client, data.allocation.len,
+ data.allocation.align,
+ data.allocation.heap_id_mask,
+ data.allocation.flags);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ data.allocation.handle = handle->id;
+
+ cleanup_handle = handle;
+ break;
+ }
+ case ION_IOC_FREE:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_handle_get_by_id(client, data.handle.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ ion_free(client, handle);
+ ion_handle_put(handle);
+ break;
+ }
+ case ION_IOC_SHARE:
+ case ION_IOC_MAP:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_handle_get_by_id(client, data.handle.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ data.fd.fd = ion_share_dma_buf_fd(client, handle);
+ ion_handle_put(handle);
+ if (data.fd.fd < 0)
+ ret = data.fd.fd;
+ break;
+ }
+ case ION_IOC_IMPORT:
+ {
+ struct ion_handle *handle;
+ handle = ion_import_dma_buf(client, data.fd.fd);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
+ else
+ data.handle.handle = handle->id;
+ break;
+ }
+ case ION_IOC_SYNC:
+ {
+ ret = ion_sync_for_device(client, data.fd.fd);
+ break;
+ }
+ case ION_IOC_CUSTOM:
+ {
+ if (!dev->custom_ioctl)
+ return -ENOTTY;
+ ret = dev->custom_ioctl(client, data.custom.cmd,
+ data.custom.arg);
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ if (dir & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+ if (cleanup_handle)
+ ion_free(client, cleanup_handle);
+ return -EFAULT;
+ }
+ }
+ return ret;
+}
+
+static int ion_release(struct inode *inode, struct file *file)
+{
+ struct ion_client *client = file->private_data;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ ion_client_destroy(client);
+ return 0;
+}
+
+static int ion_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+ struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
+ struct ion_client *client;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ client = ion_client_create(dev, "user");
+ if (IS_ERR(client))
+ return PTR_ERR(client);
+ file->private_data = client;
+
+ return 0;
+}
+
+static const struct file_operations ion_fops = {
+ .owner = THIS_MODULE,
+ .open = ion_open,
+ .release = ion_release,
+ .unlocked_ioctl = ion_ioctl,
+ .compat_ioctl = compat_ion_ioctl,
+};
+
+static size_t ion_debug_heap_total(struct ion_client *client,
+ unsigned int id)
+{
+ size_t size = 0;
+ struct rb_node *n;
+
+ mutex_lock(&client->lock);
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n,
+ struct ion_handle,
+ node);
+ if (handle->buffer->heap->id == id)
+ size += handle->buffer->size;
+ }
+ mutex_unlock(&client->lock);
+ return size;
+}
+
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+ struct ion_heap *heap = s->private;
+ struct ion_device *dev = heap->dev;
+ struct rb_node *n;
+ size_t total_size = 0;
+ size_t total_orphaned_size = 0;
+
+ seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
+ seq_printf(s, "----------------------------------------------------\n");
+
+ for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+ struct ion_client *client = rb_entry(n, struct ion_client,
+ node);
+ size_t size = ion_debug_heap_total(client, heap->id);
+ if (!size)
+ continue;
+ if (client->task) {
+ char task_comm[TASK_COMM_LEN];
+
+ get_task_comm(task_comm, client->task);
+ seq_printf(s, "%16.s %16u %16zu\n", task_comm,
+ client->pid, size);
+ } else {
+ seq_printf(s, "%16.s %16u %16zu\n", client->name,
+ client->pid, size);
+ }
+ }
+ seq_printf(s, "----------------------------------------------------\n");
+ seq_printf(s, "orphaned allocations (info is from last known client):"
+ "\n");
+ mutex_lock(&dev->buffer_lock);
+ for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+ struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+ node);
+ if (buffer->heap->id != heap->id)
+ continue;
+ total_size += buffer->size;
+ if (!buffer->handle_count) {
+ seq_printf(s, "%16.s %16u %16zu %d %d\n",
+ buffer->task_comm, buffer->pid,
+ buffer->size, buffer->kmap_cnt,
+ atomic_read(&buffer->ref.refcount));
+ total_orphaned_size += buffer->size;
+ }
+ }
+ mutex_unlock(&dev->buffer_lock);
+ seq_printf(s, "----------------------------------------------------\n");
+ seq_printf(s, "%16.s %16zu\n", "total orphaned",
+ total_orphaned_size);
+ seq_printf(s, "%16.s %16zu\n", "total ", total_size);
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ seq_printf(s, "%16.s %16zu\n", "deferred free",
+ heap->free_list_size);
+ seq_printf(s, "----------------------------------------------------\n");
+
+ if (heap->debug_show)
+ heap->debug_show(heap, s, unused);
+
+ return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+ .open = ion_debug_heap_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#ifdef DEBUG_HEAP_SHRINKER
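+/*
+ * debugfs hooks: writing a non-zero value makes the heap shrinker scan
+ * and free everything it currently holds; reading returns the number of
+ * objects the shrinker reports.
+ */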
+static int debug_shrink_set(void *data, u64 val)
+{
+ struct ion_heap *heap = data;
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ if (!val)
+ return 0;
+
+ objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ sc.nr_to_scan = objs;
+
+ heap->shrinker.shrink(&heap->shrinker, &sc);
+ return 0;
+}
+
+static int debug_shrink_get(void *data, u64 *val)
+{
+ struct ion_heap *heap = data;
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ *val = objs;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
+ debug_shrink_set, "%llu\n");
+#endif
+
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+{
+ if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+ !heap->ops->unmap_dma)
+ pr_err("%s: can not add heap with invalid ops struct.\n",
+ __func__);
+
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ ion_heap_init_deferred_free(heap);
+
+ heap->dev = dev;
+ down_write(&dev->lock);
+ /* use negative heap->id to reverse the priority -- when traversing
+ the list later attempt higher id numbers first */
+ plist_node_init(&heap->node, -heap->id);
+ plist_add(&heap->node, &dev->heaps);
+ debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
+ &debug_heap_fops);
+#ifdef DEBUG_HEAP_SHRINKER
+ if (heap->shrinker.shrink) {
+ char debug_name[64];
+
+ snprintf(debug_name, 64, "%s_shrink", heap->name);
+ debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
+ &debug_shrink_fops);
+ }
+#endif
+ up_write(&dev->lock);
+}
+
+struct ion_device *ion_device_create(long (*custom_ioctl)
+ (struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg))
+{
+ struct ion_device *idev;
+ int ret;
+
+ idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+ if (!idev)
+ return ERR_PTR(-ENOMEM);
+
+ idev->dev.minor = MISC_DYNAMIC_MINOR;
+ idev->dev.name = "ion";
+ idev->dev.fops = &ion_fops;
+ idev->dev.parent = NULL;
+ ret = misc_register(&idev->dev);
+ if (ret) {
+ pr_err("ion: failed to register misc device.\n");
+ return ERR_PTR(ret);
+ }
+
+ idev->debug_root = debugfs_create_dir("ion", NULL);
+ if (!idev->debug_root)
+ pr_err("ion: failed to create debug files.\n");
+
+ idev->custom_ioctl = custom_ioctl;
+ idev->buffers = RB_ROOT;
+ mutex_init(&idev->buffer_lock);
+ init_rwsem(&idev->lock);
+ plist_head_init(&idev->heaps);
+ idev->clients = RB_ROOT;
+ return idev;
+}
+
+void ion_device_destroy(struct ion_device *dev)
+{
+ misc_deregister(&dev->dev);
+ /* XXX need to free the heaps and clients ? */
+ kfree(dev);
+}
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+ int i;
+
+ for (i = 0; i < data->nr; i++) {
+ if (data->heaps[i].size == 0)
+ continue;
+
+ if (data->heaps[i].base == 0) {
+ phys_addr_t paddr;
+ paddr = memblock_alloc_base(data->heaps[i].size,
+ data->heaps[i].align,
+ MEMBLOCK_ALLOC_ANYWHERE);
+ if (!paddr) {
+ pr_err("%s: error allocating memblock for "
+ "heap %d\n",
+ __func__, i);
+ continue;
+ }
+ data->heaps[i].base = paddr;
+ } else {
+ int ret = memblock_reserve(data->heaps[i].base,
+ data->heaps[i].size);
+ if (ret)
+ pr_err("memblock reserve of %zx@%lx failed\n",
+ data->heaps[i].size,
+ data->heaps[i].base);
+ }
+ pr_info("%s: %s reserved base %lx size %zu\n", __func__,
+ data->heaps[i].name,
+ data->heaps[i].base,
+ data->heaps[i].size);
+ }
+}
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
new file mode 100644
index 0000000..dcd2a0c
--- /dev/null
+++ b/drivers/staging/android/ion/ion.h
@@ -0,0 +1,204 @@
+/*
+ * drivers/staging/android/ion/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/types.h>
+
+#include "../uapi/ion.h"
+
+struct ion_handle;
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/* This should be removed some day when phys_addr_t's are fully
+ plumbed in the kernel, and all instances of ion_phys_addr_t should
+ be converted to phys_addr_t. For the time being many kernel interfaces
+   do not accept the phys_addr_t's that ion would have to pass. */
+#define ion_phys_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type: type of the heap from ion_heap_type enum
+ * @id:		unique identifier for heap.  When allocating, heaps with
+ *		higher id numbers are tried first.  At allocation these are passed
+ * as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
+ * @name: used for debug purposes
+ * @base: base address of heap in physical memory if applicable
+ * @size: size of the heap in bytes if applicable
+ * @align: required alignment in physical memory if applicable
+ * @priv: private info passed from the board file
+ *
+ * Provided by the board file.
+ */
+struct ion_platform_heap {
+ enum ion_heap_type type;
+ unsigned int id;
+ const char *name;
+ ion_phys_addr_t base;
+ size_t size;
+ ion_phys_addr_t align;
+ void *priv;
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr: number of structures in the array
+ * @heaps:	array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+ int nr;
+ struct ion_platform_heap *heaps;
+};
+
+/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data: platform data specifying starting physical address and
+ * size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
+ * ion_client_create() - allocate a client and return it
+ * @dev:	the global ion device
+ * @name:	used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+ const char *name);
+
+/**
+ * ion_client_destroy() - free a client and all its handles
+ * @client: the client
+ *
+ * Free the provided client and all its resources including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client: the client
+ * @len: size of the allocation
+ * @align: requested allocation alignment, lots of hardware blocks
+ * have alignment requirements of some kind
+ * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set
+ * heaps will be tried in order from highest to lowest
+ * id
+ * @flags: heap flags, the low 16 bits are consumed by ion, the
+ * high 16 bits are passed on to the respective heap and
+ *			can be heap specific
+ *
+ * Allocate memory in one of the heaps provided in heap_id_mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int heap_id_mask,
+ unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client: the client
+ * @handle: the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client: the client
+ * @handle: the handle
+ * @addr: a pointer to put the address in
+ * @len: a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address. Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_sg_table should be used
+ * instead. Returns -EINVAL if the handle is invalid. This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_sg_table - return an sg_table describing a handle
+ * @client: the client
+ * @handle: the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client: the client
+ * @handle: handle to map
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access this address.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client: the client
+ * @handle: handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf() - share buffer as dma-buf
+ * @client: the client
+ * @handle: the handle
+ */
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
+ * @client: the client
+ * @handle: the handle
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_import_dma_buf() - given a dma-buf fd from the ion exporter get handle
+ * @client: the client
+ * @fd: the dma-buf fd
+ *
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf(),
+ * import that fd and return a handle representing it. If a dma-buf from
+ * another exporter is passed in, this function will return ERR_PTR(-EINVAL).
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
+
+#endif /* _LINUX_ION_H */
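For orientation, here is a minimal sketch of how an in-kernel client might drive the API declared above. It assumes an ion_device pointer obtained elsewhere and a heap registered with id 0; the function name, sizes, and error handling are illustrative only and not part of this patch.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include "ion.h"

static int example_ion_client_usage(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	int fd;

	client = ion_client_create(idev, "example-client");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* 1 MiB, page aligned, from the heap with id 0, no heap flags */
	handle = ion_alloc(client, SZ_1M, PAGE_SIZE, 1 << 0, 0);
	if (IS_ERR(handle)) {
		ion_client_destroy(client);
		return PTR_ERR(handle);
	}

	/* export the buffer as a dma-buf fd that can be handed to userspace */
	fd = ion_share_dma_buf_fd(client, handle);

	ion_free(client, handle);
	ion_client_destroy(client);
	return fd;
}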
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
new file mode 100644
index 0000000..3cb05b9
--- /dev/null
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -0,0 +1,194 @@
+/*
+ * drivers/staging/android/ion/ion_carveout_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_carveout_heap {
+ struct ion_heap heap;
+ struct gen_pool *pool;
+ ion_phys_addr_t base;
+};
+
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+ unsigned long size,
+ unsigned long align)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+ unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
+
+ if (!offset)
+ return ION_CARVEOUT_ALLOCATE_FAIL;
+
+ return offset;
+}
+
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+
+ if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+ return;
+ gen_pool_free(carveout_heap->pool, addr, size);
+}
+
+static int ion_carveout_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ *addr = paddr;
+ *len = buffer->size;
+ return 0;
+}
+
+static int ion_carveout_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct sg_table *table;
+ ion_phys_addr_t paddr;
+ int ret;
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto err_free;
+
+ paddr = ion_carveout_allocate(heap, size, align);
+ if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
+ ret = -ENOMEM;
+ goto err_free_table;
+ }
+
+ sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
+ buffer->priv_virt = table;
+
+ return 0;
+
+err_free_table:
+ sg_free_table(table);
+err_free:
+ kfree(table);
+ return ret;
+}
+
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ ion_heap_buffer_zero(buffer);
+
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
+
+ ion_carveout_free(heap, paddr, buffer->size);
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+static struct ion_heap_ops carveout_heap_ops = {
+ .allocate = ion_carveout_heap_allocate,
+ .free = ion_carveout_heap_free,
+ .phys = ion_carveout_heap_phys,
+ .map_dma = ion_carveout_heap_map_dma,
+ .unmap_dma = ion_carveout_heap_unmap_dma,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_carveout_heap *carveout_heap;
+ int ret;
+
+ struct page *page;
+ size_t size;
+
+ page = pfn_to_page(PFN_DOWN(heap_data->base));
+ size = heap_data->size;
+
+ ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+ ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+ if (ret)
+ return ERR_PTR(ret);
+
+ carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
+ if (!carveout_heap)
+ return ERR_PTR(-ENOMEM);
+
+ carveout_heap->pool = gen_pool_create(12, -1);
+ if (!carveout_heap->pool) {
+ kfree(carveout_heap);
+ return ERR_PTR(-ENOMEM);
+ }
+ carveout_heap->base = heap_data->base;
+ gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
+ -1);
+ carveout_heap->heap.ops = &carveout_heap_ops;
+ carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+ carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+
+ return &carveout_heap->heap;
+}
+
+void ion_carveout_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+
+ gen_pool_destroy(carveout_heap->pool);
+ kfree(carveout_heap);
+ carveout_heap = NULL;
+}
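The allocate/free helpers above are exported (see ion_priv.h later in this patch) so that an architecture-specific heap backed by the same carveout can hand out physically contiguous regions directly. A hedged sketch of such a caller, with the heap pointer and sizes purely illustrative:

/* Illustrative sketch only -- not part of the patch. */
#include <linux/mm.h>
#include "ion.h"
#include "ion_priv.h"

static int example_carveout_region(struct ion_heap *carveout)
{
	ion_phys_addr_t paddr;

	/* one page, page aligned, straight from the carveout's gen_pool */
	paddr = ion_carveout_allocate(carveout, PAGE_SIZE, PAGE_SIZE);
	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
		return -ENOMEM;

	/* ... hand paddr to a device that needs contiguous memory ... */

	ion_carveout_free(carveout, paddr, PAGE_SIZE);
	return 0;
}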
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
new file mode 100644
index 0000000..d40f5f8
--- /dev/null
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/staging/android/ion/ion_chunk_heap.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_chunk_heap {
+ struct ion_heap heap;
+ struct gen_pool *pool;
+ ion_phys_addr_t base;
+ unsigned long chunk_size;
+ unsigned long size;
+ unsigned long allocated;
+};
+
+static int ion_chunk_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret, i;
+ unsigned long num_chunks;
+ unsigned long allocated_size;
+
+ if (align > chunk_heap->chunk_size)
+ return -EINVAL;
+
+ allocated_size = ALIGN(size, chunk_heap->chunk_size);
+ num_chunks = allocated_size / chunk_heap->chunk_size;
+
+ if (allocated_size > chunk_heap->size - chunk_heap->allocated)
+ return -ENOMEM;
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+ ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
+ if (ret) {
+ kfree(table);
+ return ret;
+ }
+
+ sg = table->sgl;
+ for (i = 0; i < num_chunks; i++) {
+ unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
+ chunk_heap->chunk_size);
+ if (!paddr)
+ goto err;
+ sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
+ chunk_heap->chunk_size, 0);
+ sg = sg_next(sg);
+ }
+
+ buffer->priv_virt = table;
+ chunk_heap->allocated += allocated_size;
+ return 0;
+err:
+ sg = table->sgl;
+ for (i -= 1; i >= 0; i--) {
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+ sg->length);
+ sg = sg_next(sg);
+ }
+ sg_free_table(table);
+ kfree(table);
+ return -ENOMEM;
+}
+
+static void ion_chunk_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+ struct sg_table *table = buffer->priv_virt;
+ struct scatterlist *sg;
+ int i;
+ unsigned long allocated_size;
+
+ allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
+
+ ion_heap_buffer_zero(buffer);
+
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+ sg->length);
+ }
+ chunk_heap->allocated -= allocated_size;
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+static struct ion_heap_ops chunk_heap_ops = {
+ .allocate = ion_chunk_heap_allocate,
+ .free = ion_chunk_heap_free,
+ .map_dma = ion_chunk_heap_map_dma,
+ .unmap_dma = ion_chunk_heap_unmap_dma,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_chunk_heap *chunk_heap;
+ int ret;
+ struct page *page;
+ size_t size;
+
+ page = pfn_to_page(PFN_DOWN(heap_data->base));
+ size = heap_data->size;
+
+ ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+ ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+ if (ret)
+ return ERR_PTR(ret);
+
+ chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
+ if (!chunk_heap)
+ return ERR_PTR(-ENOMEM);
+
+ chunk_heap->chunk_size = (unsigned long)heap_data->priv;
+ chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
+ PAGE_SHIFT, -1);
+ if (!chunk_heap->pool) {
+ ret = -ENOMEM;
+ goto error_gen_pool_create;
+ }
+ chunk_heap->base = heap_data->base;
+ chunk_heap->size = heap_data->size;
+ chunk_heap->allocated = 0;
+
+ gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
+ chunk_heap->heap.ops = &chunk_heap_ops;
+ chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
+ chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+ pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
+ heap_data->size, heap_data->align);
+
+ return &chunk_heap->heap;
+
+error_gen_pool_create:
+ kfree(chunk_heap);
+ return ERR_PTR(ret);
+}
+
+void ion_chunk_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+
+ gen_pool_destroy(chunk_heap->pool);
+ kfree(chunk_heap);
+ chunk_heap = NULL;
+}
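To make the sizing above concrete: with the 16 KiB chunk size that the dummy driver below passes through heap_data->priv, a 40 KiB request is rounded up to 48 KiB and carved into three chunks, and the gen_pool's minimum allocation order works out to get_order(SZ_16K) + PAGE_SHIFT = 14 on 4 KiB-page systems. A small, purely illustrative sketch of that arithmetic:

/* Illustrative sketch only -- not part of the patch. */
#include <linux/kernel.h>
#include <linux/sizes.h>

static void example_chunk_sizing(void)
{
	unsigned long chunk_size = SZ_16K;
	unsigned long request = 40 * SZ_1K;
	unsigned long allocated = ALIGN(request, chunk_size);	/* 49152 (48 KiB) */
	unsigned long num_chunks = allocated / chunk_size;	/* 3 */

	pr_info("request %lu -> %lu bytes in %lu chunks\n",
		request, allocated, num_chunks);
}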
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
new file mode 100644
index 0000000..f0f9889
--- /dev/null
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -0,0 +1,218 @@
+/*
+ * drivers/staging/android/ion/ion_cma_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+
+#define ION_CMA_ALLOCATE_FAILED -1
+
+struct ion_cma_heap {
+ struct ion_heap heap;
+ struct device *dev;
+};
+
+#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
+
+struct ion_cma_buffer_info {
+ void *cpu_addr;
+ dma_addr_t handle;
+ struct sg_table *table;
+};
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it is available.
+ */
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size)
+{
+ struct page *page = virt_to_page(cpu_addr);
+ int ret;
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (unlikely(ret))
+ return ret;
+
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return 0;
+}
+
+/* ION CMA heap operations functions */
+static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
+ unsigned long len, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info;
+
+ dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ return -EINVAL;
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
+ info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
+ if (!info) {
+ dev_err(dev, "Can't allocate buffer info\n");
+ return ION_CMA_ALLOCATE_FAILED;
+ }
+
+ info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
+ GFP_HIGHUSER | __GFP_ZERO);
+
+ if (!info->cpu_addr) {
+ dev_err(dev, "Fail to allocate buffer\n");
+ goto err;
+ }
+
+ info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!info->table) {
+ dev_err(dev, "Fail to allocate sg table\n");
+ goto free_mem;
+ }
+
+ if (ion_cma_get_sgtable
+ (dev, info->table, info->cpu_addr, info->handle, len))
+ goto free_table;
+ /* keep this for memory release */
+ buffer->priv_virt = info;
+ dev_dbg(dev, "Allocate buffer %p\n", buffer);
+ return 0;
+
+free_table:
+ kfree(info->table);
+free_mem:
+ dma_free_coherent(dev, len, info->cpu_addr, info->handle);
+err:
+ kfree(info);
+ return ION_CMA_ALLOCATE_FAILED;
+}
+
+static void ion_cma_free(struct ion_buffer *buffer)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ dev_dbg(dev, "Release buffer %p\n", buffer);
+ /* release memory */
+ dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+ /* release sg table */
+ sg_free_table(info->table);
+ kfree(info->table);
+ kfree(info);
+}
+
+/* return physical address in addr */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+ &info->handle);
+
+ *addr = info->handle;
+ *len = buffer->size;
+
+ return 0;
+}
+
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ return info->table;
+}
+
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
+ buffer->size);
+}
+
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+ /* kernel memory mapping has been done at allocation time */
+ return info->cpu_addr;
+}
+
+static void ion_cma_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops ion_cma_ops = {
+ .allocate = ion_cma_allocate,
+ .free = ion_cma_free,
+ .map_dma = ion_cma_heap_map_dma,
+ .unmap_dma = ion_cma_heap_unmap_dma,
+ .phys = ion_cma_phys,
+ .map_user = ion_cma_mmap,
+ .map_kernel = ion_cma_map_kernel,
+ .unmap_kernel = ion_cma_unmap_kernel,
+};
+
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+{
+ struct ion_cma_heap *cma_heap;
+
+ cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
+
+ if (!cma_heap)
+ return ERR_PTR(-ENOMEM);
+
+ cma_heap->heap.ops = &ion_cma_ops;
+ /* get device from private heaps data, later it will be
+ * used to make the link with reserved CMA memory */
+ cma_heap->dev = data->priv;
+ cma_heap->heap.type = ION_HEAP_TYPE_DMA;
+ return &cma_heap->heap;
+}
+
+void ion_cma_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+
+ kfree(cma_heap);
+}
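Since ion_cma_heap_create() picks up its struct device from the platform heap's priv field, a board wiring this heap in would describe it roughly as below; the id, name, and device pointer are illustrative, not mandated by this patch.

/* Illustrative sketch only -- not part of the patch. */
#include "ion.h"

static struct ion_platform_heap example_cma_heap_desc = {
	.type = ION_HEAP_TYPE_DMA,
	.id   = ION_HEAP_TYPE_DMA,
	.name = "cma",
	/* .priv must be set at runtime to the struct device that owns the
	 * reserved CMA region, e.g. example_cma_heap_desc.priv = &pdev->dev;
	 */
};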
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
new file mode 100644
index 0000000..55b2002
--- /dev/null
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -0,0 +1,158 @@
+/*
+ * drivers/staging/android/ion/ion_dummy_driver.c
+ *
+ * Copyright (C) 2013 Linaro, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/sizes.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_device *idev;
+struct ion_heap **heaps;
+
+void *carveout_ptr;
+void *chunk_ptr;
+
+struct ion_platform_heap dummy_heaps[] = {
+ {
+ .id = ION_HEAP_TYPE_SYSTEM,
+ .type = ION_HEAP_TYPE_SYSTEM,
+ .name = "system",
+ },
+ {
+ .id = ION_HEAP_TYPE_SYSTEM_CONTIG,
+ .type = ION_HEAP_TYPE_SYSTEM_CONTIG,
+ .name = "system contig",
+ },
+ {
+ .id = ION_HEAP_TYPE_CARVEOUT,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = "carveout",
+ .size = SZ_4M,
+ },
+ {
+ .id = ION_HEAP_TYPE_CHUNK,
+ .type = ION_HEAP_TYPE_CHUNK,
+ .name = "chunk",
+ .size = SZ_4M,
+ .align = SZ_16K,
+ .priv = (void *)(SZ_16K),
+ },
+};
+
+struct ion_platform_data dummy_ion_pdata = {
+ .nr = 4,
+ .heaps = dummy_heaps,
+};
+
+static int __init ion_dummy_init(void)
+{
+ int i, err;
+
+ idev = ion_device_create(NULL);
+ heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
+ GFP_KERNEL);
+ if (!heaps)
+ return -ENOMEM;
+
+
+ /* Allocate a dummy carveout heap */
+ carveout_ptr = alloc_pages_exact(
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
+ GFP_KERNEL);
+ if (carveout_ptr)
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
+ virt_to_phys(carveout_ptr);
+ else
+ pr_err("ion_dummy: Could not allocate carveout\n");
+
+ /* Allocate a dummy chunk heap */
+ chunk_ptr = alloc_pages_exact(
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
+ GFP_KERNEL);
+ if (chunk_ptr)
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
+ else
+ pr_err("ion_dummy: Could not allocate chunk\n");
+
+ for (i = 0; i < dummy_ion_pdata.nr; i++) {
+ struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
+
+ if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
+ !heap_data->base)
+ continue;
+
+ if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
+ continue;
+
+ heaps[i] = ion_heap_create(heap_data);
+ if (IS_ERR_OR_NULL(heaps[i])) {
+ err = PTR_ERR(heaps[i]);
+ goto err;
+ }
+ ion_device_add_heap(idev, heaps[i]);
+ }
+ return 0;
+err:
+ for (i = 0; i < dummy_ion_pdata.nr; i++) {
+ if (heaps[i])
+ ion_heap_destroy(heaps[i]);
+ }
+ kfree(heaps);
+
+ if (carveout_ptr) {
+ free_pages_exact(carveout_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+ carveout_ptr = NULL;
+ }
+ if (chunk_ptr) {
+ free_pages_exact(chunk_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+ chunk_ptr = NULL;
+ }
+ return err;
+}
+
+static void __exit ion_dummy_exit(void)
+{
+ int i;
+
+ ion_device_destroy(idev);
+
+ for (i = 0; i < dummy_ion_pdata.nr; i++)
+ ion_heap_destroy(heaps[i]);
+ kfree(heaps);
+
+ if (carveout_ptr) {
+ free_pages_exact(carveout_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+ carveout_ptr = NULL;
+ }
+ if (chunk_ptr) {
+ free_pages_exact(chunk_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+ chunk_ptr = NULL;
+ }
+
+ return;
+}
+
+module_init(ion_dummy_init);
+module_exit(ion_dummy_exit);
+
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
new file mode 100644
index 0000000..296c74f
--- /dev/null
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -0,0 +1,318 @@
+/*
+ * drivers/staging/android/ion/ion_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+void *ion_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct scatterlist *sg;
+ int i, j;
+ void *vaddr;
+ pgprot_t pgprot;
+ struct sg_table *table = buffer->sg_table;
+ int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct page **pages = vmalloc(sizeof(struct page *) * npages);
+ struct page **tmp = pages;
+
+ if (!pages)
+ return NULL;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
+ struct page *page = sg_page(sg);
+ BUG_ON(i >= npages);
+ for (j = 0; j < npages_this_entry; j++)
+ *(tmp++) = page++;
+ }
+ vaddr = vmap(pages, npages, VM_MAP, pgprot);
+ vfree(pages);
+
+ if (vaddr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+void ion_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ vunmap(buffer->vaddr);
+}
+
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct sg_table *table = buffer->sg_table;
+ unsigned long addr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+ struct scatterlist *sg;
+ int i;
+ int ret;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+ unsigned long remainder = vma->vm_end - addr;
+ unsigned long len = sg->length;
+
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ continue;
+ } else if (offset) {
+ page += offset / PAGE_SIZE;
+ len = sg->length - offset;
+ offset = 0;
+ }
+ len = min(len, remainder);
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ vma->vm_page_prot);
+ if (ret)
+ return ret;
+ addr += len;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ return 0;
+}
+
+static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
+{
+ void *addr = vm_map_ram(pages, num, -1, pgprot);
+ if (!addr)
+ return -ENOMEM;
+ memset(addr, 0, PAGE_SIZE * num);
+ vm_unmap_ram(addr, num);
+
+ return 0;
+}
+
+static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
+ pgprot_t pgprot)
+{
+ int p = 0;
+ int ret = 0;
+ struct sg_page_iter piter;
+ struct page *pages[32];
+
+ for_each_sg_page(sgl, &piter, nents, 0) {
+ pages[p++] = sg_page_iter_page(&piter);
+ if (p == ARRAY_SIZE(pages)) {
+ ret = ion_heap_clear_pages(pages, p, pgprot);
+ if (ret)
+ return ret;
+ p = 0;
+ }
+ }
+ if (p)
+ ret = ion_heap_clear_pages(pages, p, pgprot);
+
+ return ret;
+}
+
+int ion_heap_buffer_zero(struct ion_buffer *buffer)
+{
+ struct sg_table *table = buffer->sg_table;
+ pgprot_t pgprot;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
+}
+
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, size, 0);
+ return ion_heap_sglist_zero(&sg, 1, pgprot);
+}
+
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
+{
+ spin_lock(&heap->free_lock);
+ list_add(&buffer->list, &heap->free_list);
+ heap->free_list_size += buffer->size;
+ spin_unlock(&heap->free_lock);
+ wake_up(&heap->waitqueue);
+}
+
+size_t ion_heap_freelist_size(struct ion_heap *heap)
+{
+ size_t size;
+
+ spin_lock(&heap->free_lock);
+ size = heap->free_list_size;
+ spin_unlock(&heap->free_lock);
+
+ return size;
+}
+
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+ struct ion_buffer *buffer;
+ size_t total_drained = 0;
+
+ if (ion_heap_freelist_size(heap) == 0)
+ return 0;
+
+ spin_lock(&heap->free_lock);
+ if (size == 0)
+ size = heap->free_list_size;
+
+ while (!list_empty(&heap->free_list)) {
+ if (total_drained >= size)
+ break;
+ buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+ list);
+ list_del(&buffer->list);
+ heap->free_list_size -= buffer->size;
+ total_drained += buffer->size;
+ spin_unlock(&heap->free_lock);
+ ion_buffer_destroy(buffer);
+ spin_lock(&heap->free_lock);
+ }
+ spin_unlock(&heap->free_lock);
+
+ return total_drained;
+}
+
+static int ion_heap_deferred_free(void *data)
+{
+ struct ion_heap *heap = data;
+
+ while (true) {
+ struct ion_buffer *buffer;
+
+ wait_event_freezable(heap->waitqueue,
+ ion_heap_freelist_size(heap) > 0);
+
+ spin_lock(&heap->free_lock);
+ if (list_empty(&heap->free_list)) {
+ spin_unlock(&heap->free_lock);
+ continue;
+ }
+ buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+ list);
+ list_del(&buffer->list);
+ heap->free_list_size -= buffer->size;
+ spin_unlock(&heap->free_lock);
+ ion_buffer_destroy(buffer);
+ }
+
+ return 0;
+}
+
+int ion_heap_init_deferred_free(struct ion_heap *heap)
+{
+ struct sched_param param = { .sched_priority = 0 };
+
+ INIT_LIST_HEAD(&heap->free_list);
+ heap->free_list_size = 0;
+ spin_lock_init(&heap->free_lock);
+ init_waitqueue_head(&heap->waitqueue);
+ heap->task = kthread_run(ion_heap_deferred_free, heap,
+ "%s", heap->name);
+ if (IS_ERR(heap->task)) {
+ pr_err("%s: creating thread for deferred free failed\n",
+ __func__);
+ return PTR_RET(heap->task);
+ }
+ sched_setscheduler(heap->task, SCHED_IDLE, &param);
+ return 0;
+}
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_heap *heap = NULL;
+
+ switch (heap_data->type) {
+ case ION_HEAP_TYPE_SYSTEM_CONTIG:
+ heap = ion_system_contig_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_SYSTEM:
+ heap = ion_system_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_CARVEOUT:
+ heap = ion_carveout_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_CHUNK:
+ heap = ion_chunk_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_DMA:
+ heap = ion_cma_heap_create(heap_data);
+ break;
+ default:
+ pr_err("%s: Invalid heap type %d\n", __func__,
+ heap_data->type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (IS_ERR_OR_NULL(heap)) {
+ pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
+ __func__, heap_data->name, heap_data->type,
+ heap_data->base, heap_data->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ heap->name = heap_data->name;
+ heap->id = heap_data->id;
+ return heap;
+}
+
+void ion_heap_destroy(struct ion_heap *heap)
+{
+ if (!heap)
+ return;
+
+ switch (heap->type) {
+ case ION_HEAP_TYPE_SYSTEM_CONTIG:
+ ion_system_contig_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_SYSTEM:
+ ion_system_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_CARVEOUT:
+ ion_carveout_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_CHUNK:
+ ion_chunk_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_DMA:
+ ion_cma_heap_destroy(heap);
+ break;
+ default:
+ pr_err("%s: Invalid heap type %d\n", __func__,
+ heap->type);
+ }
+}
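A note on the deferred-free machinery above: a heap that sets ION_HEAP_FLAG_DEFER_FREE has freed buffers queued on its freelist and destroyed later by the per-heap kthread, while a shrinker (or any other caller) can reclaim that memory synchronously with ion_heap_freelist_drain(). A minimal, illustrative sketch of such a drain, assuming an already-initialized heap:

/* Illustrative sketch only -- not part of the patch. */
#include <linux/sizes.h>
#include "ion_priv.h"

static size_t example_drain_deferred(struct ion_heap *heap)
{
	/* nothing is queued if the heap does not defer frees */
	if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
		return 0;

	/* free up to 1 MiB worth of queued buffers right now */
	return ion_heap_freelist_drain(heap, SZ_1M);
}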
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
new file mode 100644
index 0000000..fa693c2
--- /dev/null
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/staging/android/ion/ion_page_pool.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "ion_priv.h"
+
+struct ion_page_pool_item {
+ struct page *page;
+ struct list_head list;
+};
+
+static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+{
+ struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+
+ if (!page)
+ return NULL;
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+ DMA_BIDIRECTIONAL);
+ return page;
+}
+
+static void ion_page_pool_free_pages(struct ion_page_pool *pool,
+ struct page *page)
+{
+ __free_pages(page, pool->order);
+}
+
+static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
+{
+ struct ion_page_pool_item *item;
+
+ item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ mutex_lock(&pool->mutex);
+ item->page = page;
+ if (PageHighMem(page)) {
+ list_add_tail(&item->list, &pool->high_items);
+ pool->high_count++;
+ } else {
+ list_add_tail(&item->list, &pool->low_items);
+ pool->low_count++;
+ }
+ mutex_unlock(&pool->mutex);
+ return 0;
+}
+
+static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
+{
+ struct ion_page_pool_item *item;
+ struct page *page;
+
+ if (high) {
+ BUG_ON(!pool->high_count);
+ item = list_first_entry(&pool->high_items,
+ struct ion_page_pool_item, list);
+ pool->high_count--;
+ } else {
+ BUG_ON(!pool->low_count);
+ item = list_first_entry(&pool->low_items,
+ struct ion_page_pool_item, list);
+ pool->low_count--;
+ }
+
+ list_del(&item->list);
+ page = item->page;
+ kfree(item);
+ return page;
+}
+
+void *ion_page_pool_alloc(struct ion_page_pool *pool)
+{
+ struct page *page = NULL;
+
+ BUG_ON(!pool);
+
+ mutex_lock(&pool->mutex);
+ if (pool->high_count)
+ page = ion_page_pool_remove(pool, true);
+ else if (pool->low_count)
+ page = ion_page_pool_remove(pool, false);
+ mutex_unlock(&pool->mutex);
+
+ if (!page)
+ page = ion_page_pool_alloc_pages(pool);
+
+ return page;
+}
+
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
+{
+ int ret;
+
+ ret = ion_page_pool_add(pool, page);
+ if (ret)
+ ion_page_pool_free_pages(pool, page);
+}
+
+static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
+{
+ int total = 0;
+
+ total += high ? (pool->high_count + pool->low_count) *
+ (1 << pool->order) :
+ pool->low_count * (1 << pool->order);
+ return total;
+}
+
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan)
+{
+ int nr_freed = 0;
+ int i;
+ bool high;
+
+ high = !!(gfp_mask & __GFP_HIGHMEM);
+
+ if (nr_to_scan == 0)
+ return ion_page_pool_total(pool, high);
+
+ for (i = 0; i < nr_to_scan; i++) {
+ struct page *page;
+
+ mutex_lock(&pool->mutex);
+ if (pool->low_count) {
+ page = ion_page_pool_remove(pool, false);
+ } else if (high && pool->high_count) {
+ page = ion_page_pool_remove(pool, true);
+ } else {
+ mutex_unlock(&pool->mutex);
+ break;
+ }
+ mutex_unlock(&pool->mutex);
+ ion_page_pool_free_pages(pool, page);
+ nr_freed += (1 << pool->order);
+ }
+
+ return nr_freed;
+}
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+{
+ struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
+ GFP_KERNEL);
+ if (!pool)
+ return NULL;
+ pool->high_count = 0;
+ pool->low_count = 0;
+ INIT_LIST_HEAD(&pool->low_items);
+ INIT_LIST_HEAD(&pool->high_items);
+ pool->gfp_mask = gfp_mask;
+ pool->order = order;
+ mutex_init(&pool->mutex);
+ plist_node_init(&pool->list, order);
+
+ return pool;
+}
+
+void ion_page_pool_destroy(struct ion_page_pool *pool)
+{
+ kfree(pool);
+}
+
+static int __init ion_page_pool_init(void)
+{
+ return 0;
+}
+
+static void __exit ion_page_pool_exit(void)
+{
+}
+
+module_init(ion_page_pool_init);
+module_exit(ion_page_pool_exit);
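For completeness, a hedged sketch of a pool's lifecycle as a heap might use it. Note that ion_page_pool_destroy() as written only frees the bookkeeping structure, so this sketch shrinks the pool to zero first; the gfp mask and order are illustrative.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include "ion_priv.h"

static int example_page_pool_cycle(void)
{
	struct ion_page_pool *pool;
	struct page *page;

	/* order-4 (64 KiB on 4 KiB pages) zeroed, highmem-capable pages */
	pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 4);
	if (!pool)
		return -ENOMEM;

	page = ion_page_pool_alloc(pool);	/* falls back to alloc_pages() when empty */
	if (page)
		ion_page_pool_free(pool, page);	/* caches the page for the next alloc */

	/* give everything cached back to the system before tearing down */
	ion_page_pool_shrink(pool, __GFP_HIGHMEM, INT_MAX);
	ion_page_pool_destroy(pool);
	return 0;
}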
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
new file mode 100644
index 0000000..d986739
--- /dev/null
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -0,0 +1,360 @@
+/*
+ * drivers/staging/android/ion/ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_PRIV_H
+#define _ION_PRIV_H
+
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
+
+#include "ion.h"
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref: reference count
+ * @node: node in the ion_device buffers tree
+ * @dev: back pointer to the ion_device
+ * @heap: back pointer to the heap the buffer came from
+ * @flags: buffer specific flags
+ * @size: size of the buffer
+ * @priv_virt: private data to the buffer representable as
+ * a void *
+ * @priv_phys: private data to the buffer representable as
+ * an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock: protects the buffers cnt fields
+ * @kmap_cnt: number of times the buffer is mapped to the kernel
+ * @vaddr: the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt: number of times the buffer is mapped for dma
+ * @sg_table: the sg table for the buffer if dmap_cnt is not zero
+ * @pages: flat array of pages in the buffer -- used by fault
+ * handler and only valid for buffers that are faulted in
+ * @vmas: list of vma's mapping this buffer
+ * @handle_count: count of handles referencing this buffer
+ * @task_comm: taskcomm of last client to reference this buffer in a
+ * handle, used for debugging
+ * @pid: pid of last client to reference this buffer in a
+ * handle, used for debugging
+*/
+struct ion_buffer {
+ struct kref ref;
+ union {
+ struct rb_node node;
+ struct list_head list;
+ };
+ struct ion_device *dev;
+ struct ion_heap *heap;
+ unsigned long flags;
+ size_t size;
+ union {
+ void *priv_virt;
+ ion_phys_addr_t priv_phys;
+ };
+ struct mutex lock;
+ int kmap_cnt;
+ void *vaddr;
+ int dmap_cnt;
+ struct sg_table *sg_table;
+ struct page **pages;
+ struct list_head vmas;
+ /* used to track orphaned buffers */
+ int handle_count;
+ char task_comm[TASK_COMM_LEN];
+ pid_t pid;
+};
+void ion_buffer_destroy(struct ion_buffer *buffer);
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate: allocate memory
+ * @free: free memory
+ * @phys: get physical address of a buffer (only defined on
+ * physically contiguous heaps)
+ * @map_dma: map the memory for dma to a scatterlist
+ * @unmap_dma: unmap the memory for dma
+ * @map_kernel: map memory to the kernel
+ * @unmap_kernel: unmap memory from the kernel
+ * @map_user: map memory to userspace
+ *
+ * allocate, phys, and map_user return 0 on success, -errno on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on error.
+ */
+struct ion_heap_ops {
+ int (*allocate) (struct ion_heap *heap,
+ struct ion_buffer *buffer, unsigned long len,
+ unsigned long align, unsigned long flags);
+ void (*free) (struct ion_buffer *buffer);
+ int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len);
+ struct sg_table *(*map_dma) (struct ion_heap *heap,
+ struct ion_buffer *buffer);
+ void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
+ void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+ void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+ int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma);
+};
+
+/**
+ * heap flags - flags between the heaps and core ion code
+ */
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node: rb node to put the heap on the device's tree of heaps
+ * @dev: back pointer to the ion_device
+ * @type: type of heap
+ * @ops: ops struct as above
+ * @flags: flags
+ * @id: id of heap, also indicates priority of this heap when
+ * allocating. These are specified by platform data and
+ * MUST be unique
+ * @name: used for debugging
+ * @shrinker: a shrinker for the heap, if the heap caches system
+ * memory, it must define a shrinker to return it on low
+ * memory conditions, this includes system memory cached
+ * in the deferred free lists for heaps that support it
+ * @free_list: free list head if deferred free is used
+ * @free_list_size: size of the deferred free list in bytes
+ * @lock: protects the free list
+ * @waitqueue: queue to wait on from deferred free thread
+ * @task: task struct of deferred free thread
+ * @debug_show: called when heap debug file is read to add any
+ * heap specific debug info to output
+ *
+ * Represents a pool of memory from which buffers can be made. In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
+struct ion_heap {
+ struct plist_node node;
+ struct ion_device *dev;
+ enum ion_heap_type type;
+ struct ion_heap_ops *ops;
+ unsigned long flags;
+ unsigned int id;
+ const char *name;
+ struct shrinker shrinker;
+ struct list_head free_list;
+ size_t free_list_size;
+ spinlock_t free_lock;
+ wait_queue_head_t waitqueue;
+ struct task_struct *task;
+ int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+};
+
+/**
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer: buffer
+ *
+ * indicates whether this ion buffer is cached
+ */
+bool ion_buffer_cached(struct ion_buffer *buffer);
+
+/**
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer: buffer
+ *
+ * indicates whether userspace mappings of this buffer will be faulted
+ * in; this can affect how buffers are allocated from the heap.
+ */
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
+
+/**
+ * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl: arch specific ioctl function if applicable
+ *
+ * returns a valid device or an ERR_PTR on failure
+ */
+struct ion_device *ion_device_create(long (*custom_ioctl)
+ (struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg));
+
+/**
+ * ion_device_destroy - frees a device and its resources
+ * @dev: the device
+ */
+void ion_device_destroy(struct ion_device *dev);
+
+/**
+ * ion_device_add_heap - adds a heap to the ion device
+ * @dev: the device
+ * @heap: the heap to add
+ */
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+
+/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
+void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
+int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
+ struct vm_area_struct *);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+
+/**
+ * ion_heap_init_deferred_free -- initialize deferred free functionality
+ * @heap: the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
+ * be called to setup deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later
+ */
+int ion_heap_init_deferred_free(struct ion_heap *heap);
+
+/**
+ * ion_heap_freelist_add - add a buffer to the deferred free list
+ * @heap: the heap
+ * @buffer: the buffer
+ *
+ * Adds an item to the deferred freelist.
+ */
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
+
+/**
+ * ion_heap_freelist_drain - drain the deferred free list
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ */
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
+
+/**
+ * ion_heap_freelist_size - returns the size of the freelist in bytes
+ * @heap: the heap
+ */
+size_t ion_heap_freelist_size(struct ion_heap *heap);
+
+
+/**
+ * functions for creating and destroying the built in ion heaps.
+ * architectures can add their own custom architecture specific
+ * heaps as appropriate.
+ */
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *);
+void ion_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
+void ion_system_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
+void ion_system_contig_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
+void ion_carveout_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
+void ion_chunk_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
+void ion_cma_heap_destroy(struct ion_heap *);
+
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+ unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size);
+/**
+ * The carveout heap returns physical addresses, since 0 may be a valid
+ * physical address, this is used to indicate allocation failed
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre-allocated memory to use from your heap. Keeping
+ * a pool of memory that is ready for dma, i.e. any cached mappings have
+ * been invalidated from the cache, provides a significant performance
+ * benefit on many systems */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count: number of highmem items in the pool
+ * @low_count: number of lowmem items in the pool
+ * @high_items: list of highmem items
+ * @low_items: list of lowmem items
+ * @mutex: lock protecting this struct and especially the count
+ * and the item lists
+ * @gfp_mask: gfp_mask to use when allocating
+ * @order: order of pages in the pool
+ * @list: plist node for list of pools
+ *
+ * Allows you to keep a pool of pre allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, i.e. any cached mappings have
+ * been invalidated from the cache, provides a significant performance benefit
+ * on many systems
+ */
+struct ion_page_pool {
+ int high_count;
+ int low_count;
+ struct list_head high_items;
+ struct list_head low_items;
+ struct mutex mutex;
+ gfp_t gfp_mask;
+ unsigned int order;
+ struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+void ion_page_pool_destroy(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *);
+void ion_page_pool_free(struct ion_page_pool *, struct page *);
+
+/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
+ * @pool: the pool
+ * @gfp_mask: the memory type to reclaim
+ * @nr_to_scan: number of items to shrink in pages
+ *
+ * returns the number of items freed in pages
+ */
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan);
+
+/**
+ * ion_pages_sync_for_device - cache flush pages for use with the specified
+ * device
+ * @dev: the device the pages will be used with
+ * @page: the first page to be flushed
+ * @size: size in bytes of region to be flushed
+ * @dir: direction of dma transfer
+ */
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+ size_t size, enum dma_data_direction dir);
+
+#endif /* _ION_PRIV_H */
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
new file mode 100644
index 0000000..7f07291
--- /dev/null
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -0,0 +1,488 @@
+/*
+ * drivers/staging/android/ion/ion_system_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+ __GFP_NORETRY) & ~__GFP_WAIT;
+static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
+static int order_to_index(unsigned int order)
+{
+ int i;
+ for (i = 0; i < num_orders; i++)
+ if (order == orders[i])
+ return i;
+ BUG();
+ return -1;
+}
+
+static unsigned int order_to_size(int order)
+{
+ return PAGE_SIZE << order;
+}
+
+struct ion_system_heap {
+ struct ion_heap heap;
+ struct ion_page_pool **pools;
+};
+
+struct page_info {
+ struct page *page;
+ unsigned int order;
+ struct list_head list;
+};
+
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long order)
+{
+ bool cached = ion_buffer_cached(buffer);
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ struct page *page;
+
+ if (!cached) {
+ page = ion_page_pool_alloc(pool);
+ } else {
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (order > 4)
+ gfp_flags = high_order_gfp_flags;
+ page = alloc_pages(gfp_flags, order);
+ if (!page)
+ return NULL;
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
+ }
+ if (!page)
+ return NULL;
+
+ return page;
+}
+
+static void free_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer, struct page *page,
+ unsigned int order)
+{
+ bool cached = ion_buffer_cached(buffer);
+
+ if (!cached) {
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ ion_page_pool_free(pool, page);
+ } else {
+ __free_pages(page, order);
+ }
+}
+
+
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size,
+ unsigned int max_order)
+{
+ struct page *page;
+ struct page_info *info;
+ int i;
+
+ info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ for (i = 0; i < num_orders; i++) {
+ if (size < order_to_size(orders[i]))
+ continue;
+ if (max_order < orders[i])
+ continue;
+
+ page = alloc_buffer_page(heap, buffer, orders[i]);
+ if (!page)
+ continue;
+
+ info->page = page;
+ info->order = orders[i];
+ return info;
+ }
+ kfree(info);
+
+ return NULL;
+}
+
+static int ion_system_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret;
+ struct list_head pages;
+ struct page_info *info, *tmp_info;
+ int i = 0;
+ long size_remaining = PAGE_ALIGN(size);
+ unsigned int max_order = orders[0];
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&pages);
+ while (size_remaining > 0) {
+ info = alloc_largest_available(sys_heap, buffer, size_remaining,
+ max_order);
+ if (!info)
+ goto err;
+ list_add_tail(&info->list, &pages);
+ size_remaining -= (1 << info->order) * PAGE_SIZE;
+ max_order = info->order;
+ i++;
+ }
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ goto err;
+
+ ret = sg_alloc_table(table, i, GFP_KERNEL);
+ if (ret)
+ goto err1;
+
+ sg = table->sgl;
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
+ struct page *page = info->page;
+ sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+ sg = sg_next(sg);
+ list_del(&info->list);
+ kfree(info);
+ }
+
+ buffer->priv_virt = table;
+ return 0;
+err1:
+ kfree(table);
+err:
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
+ free_buffer_page(sys_heap, buffer, info->page, info->order);
+ kfree(info);
+ }
+ return -ENOMEM;
+}
+
+static void ion_system_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ struct sg_table *table = buffer->sg_table;
+ bool cached = ion_buffer_cached(buffer);
+ struct scatterlist *sg;
+ LIST_HEAD(pages);
+ int i;
+
+ /* uncached pages come from the page pools, zero them before returning
+ for security purposes (other allocations are zeroed at alloc time) */
+ if (!cached)
+ ion_heap_buffer_zero(buffer);
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ free_buffer_page(sys_heap, buffer, sg_page(sg),
+ get_order(sg->length));
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+static struct ion_heap_ops system_heap_ops = {
+ .allocate = ion_system_heap_allocate,
+ .free = ion_system_heap_free,
+ .map_dma = ion_system_heap_map_dma,
+ .unmap_dma = ion_system_heap_unmap_dma,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
+};
+
+static unsigned long ion_system_heap_shrink_count(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+ shrinker);
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int nr_total = 0;
+ int i;
+
+ /* total number of items is whatever the page pools are holding
+ plus whatever's in the freelist */
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
+ nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
+ }
+ nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
+ return nr_total;
+
+}
+
+static unsigned long ion_system_heap_shrink_scan(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+
+ struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+ shrinker);
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int nr_freed = 0;
+ int i;
+
+ if (sc->nr_to_scan == 0)
+ goto end;
+
+ /* shrink the free list first, no point in zeroing the memory if
+ we're just going to reclaim it */
+ nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
+ PAGE_SIZE;
+
+ if (nr_freed >= sc->nr_to_scan)
+ goto end;
+
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
+
+ nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
+ sc->nr_to_scan);
+ if (nr_freed >= sc->nr_to_scan)
+ break;
+ }
+
+end:
+ return nr_freed;
+
+}
+
+static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
+ void *unused)
+{
+
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
+ seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+ pool->high_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->high_count);
+ seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+ pool->low_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->low_count);
+ }
+ return 0;
+}
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+{
+ struct ion_system_heap *heap;
+ int i;
+
+ heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->heap.ops = &system_heap_ops;
+ heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+ heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+ heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
+ GFP_KERNEL);
+ if (!heap->pools)
+ goto err_alloc_pools;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool;
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (orders[i] > 4)
+ gfp_flags = high_order_gfp_flags;
+ pool = ion_page_pool_create(gfp_flags, orders[i]);
+ if (!pool)
+ goto err_create_pool;
+ heap->pools[i] = pool;
+ }
+
+ heap->heap.shrinker.scan_objects = ion_system_heap_shrink_scan;
+ heap->heap.shrinker.count_objects = ion_system_heap_shrink_count;
+ heap->heap.shrinker.seeks = DEFAULT_SEEKS;
+ heap->heap.shrinker.batch = 0;
+ register_shrinker(&heap->heap.shrinker);
+ heap->heap.debug_show = ion_system_heap_debug_show;
+ return &heap->heap;
+err_create_pool:
+ for (i = 0; i < num_orders; i++)
+ if (heap->pools[i])
+ ion_page_pool_destroy(heap->pools[i]);
+ kfree(heap->pools);
+err_alloc_pools:
+ kfree(heap);
+ return ERR_PTR(-ENOMEM);
+}
+
+void ion_system_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+
+ for (i = 0; i < num_orders; i++)
+ ion_page_pool_destroy(sys_heap->pools[i]);
+ kfree(sys_heap->pools);
+ kfree(sys_heap);
+}
+
+static int ion_system_contig_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
+{
+ int order = get_order(len);
+ struct page *page;
+ struct sg_table *table;
+ unsigned long i;
+ int ret;
+
+ if (align > (PAGE_SIZE << order))
+ return -EINVAL;
+
+ page = alloc_pages(low_order_gfp_flags, order);
+ if (!page)
+ return -ENOMEM;
+
+ split_page(page, order);
+
+ len = PAGE_ALIGN(len);
+ for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
+ __free_page(page + i);
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ sg_set_page(table->sgl, page, len, 0);
+
+ buffer->priv_virt = table;
+
+ ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
+
+ return 0;
+
+out:
+ for (i = 0; i < len >> PAGE_SHIFT; i++)
+ __free_page(page + i);
+ kfree(table);
+ return ret;
+}
+
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+ unsigned long i;
+
+ for (i = 0; i < pages; i++)
+ __free_page(page + i);
+ sg_free_table(table);
+ kfree(table);
+}
+
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ *addr = page_to_phys(page);
+ *len = buffer->size;
+ return 0;
+}
+
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops kmalloc_ops = {
+ .allocate = ion_system_contig_heap_allocate,
+ .free = ion_system_contig_heap_free,
+ .phys = ion_system_contig_heap_phys,
+ .map_dma = ion_system_contig_heap_map_dma,
+ .unmap_dma = ion_system_contig_heap_unmap_dma,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
+};
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+{
+ struct ion_heap *heap;
+
+ heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->ops = &kmalloc_ops;
+ heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+ return heap;
+}
+
+void ion_system_contig_heap_destroy(struct ion_heap *heap)
+{
+ kfree(heap);
+}
+
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
new file mode 100644
index 0000000..654acb5
--- /dev/null
+++ b/drivers/staging/android/ion/ion_test.c
@@ -0,0 +1,282 @@
+/*
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "ion-test: " fmt
+
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "ion.h"
+#include "../uapi/ion_test.h"
+
+#define u64_to_uptr(x) ((void __user *)(unsigned long)(x))
+
+struct ion_test_device {
+ struct miscdevice misc;
+};
+
+struct ion_test_data {
+ struct dma_buf *dma_buf;
+ struct device *dev;
+};
+
+static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
+ void __user *ptr, size_t offset, size_t size, bool write)
+{
+ int ret = 0;
+ struct dma_buf_attachment *attach;
+ struct sg_table *table;
+ pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
+ enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ struct sg_page_iter sg_iter;
+ unsigned long offset_page;
+
+ attach = dma_buf_attach(dma_buf, dev);
+ if (IS_ERR(attach))
+ return PTR_ERR(attach);
+
+ table = dma_buf_map_attachment(attach, dir);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ offset_page = offset >> PAGE_SHIFT;
+ offset %= PAGE_SIZE;
+
+ for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+ void *vaddr = vmap(&page, 1, VM_MAP, pgprot);
+ size_t to_copy = PAGE_SIZE - offset;
+
+ to_copy = min(to_copy, size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (write)
+ ret = copy_from_user(vaddr + offset, ptr, to_copy);
+ else
+ ret = copy_to_user(ptr, vaddr + offset, to_copy);
+
+ vunmap(vaddr);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+ size -= to_copy;
+ if (!size)
+ break;
+ ptr += to_copy;
+ offset = 0;
+ }
+
+err:
+ dma_buf_unmap_attachment(attach, table, dir);
+ dma_buf_detach(dma_buf, attach);
+ return ret;
+}
+
+static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
+ size_t offset, size_t size, bool write)
+{
+ int ret;
+ unsigned long page_offset = offset >> PAGE_SHIFT;
+ size_t copy_offset = offset % PAGE_SIZE;
+ size_t copy_size = size;
+ enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ if (offset > dma_buf->size || size > dma_buf->size - offset)
+ return -EINVAL;
+
+ ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
+ if (ret)
+ return ret;
+
+ while (copy_size > 0) {
+ size_t to_copy;
+ void *vaddr = dma_buf_kmap(dma_buf, page_offset);
+
+		if (!vaddr) {
+			ret = -ENOMEM;	/* report the failed kmap instead of returning 0 */
+			goto err;
+		}
+
+ to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size);
+
+ if (write)
+ ret = copy_from_user(vaddr + copy_offset, ptr, to_copy);
+ else
+ ret = copy_to_user(ptr, vaddr + copy_offset, to_copy);
+
+ dma_buf_kunmap(dma_buf, page_offset, vaddr);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ copy_size -= to_copy;
+ ptr += to_copy;
+ page_offset++;
+ copy_offset = 0;
+ }
+err:
+ dma_buf_end_cpu_access(dma_buf, offset, size, dir);
+ return ret;
+}
+
+static long ion_test_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct ion_test_data *test_data = filp->private_data;
+ int ret = 0;
+
+ union {
+ struct ion_test_rw_data test_rw;
+ } data;
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ION_IOC_TEST_SET_FD:
+ {
+ struct dma_buf *dma_buf = NULL;
+ int fd = arg;
+
+ if (fd >= 0) {
+ dma_buf = dma_buf_get((int)arg);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+ }
+ if (test_data->dma_buf)
+ dma_buf_put(test_data->dma_buf);
+ test_data->dma_buf = dma_buf;
+ break;
+ }
+ case ION_IOC_TEST_DMA_MAPPING:
+ {
+ ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset, data.test_rw.size,
+ data.test_rw.write);
+ break;
+ }
+ case ION_IOC_TEST_KERNEL_MAPPING:
+ {
+ ret = ion_handle_test_kernel(test_data->dma_buf,
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset, data.test_rw.size,
+ data.test_rw.write);
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ if (_IOC_DIR(cmd) & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ return -EFAULT;
+ }
+ return ret;
+}
+
+static int ion_test_open(struct inode *inode, struct file *file)
+{
+ struct ion_test_data *data;
+ struct miscdevice *miscdev = file->private_data;
+
+ data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = miscdev->parent;
+
+ file->private_data = data;
+
+ return 0;
+}
+
+static int ion_test_release(struct inode *inode, struct file *file)
+{
+ struct ion_test_data *data = file->private_data;
+
+ kfree(data);
+
+ return 0;
+}
+
+static const struct file_operations ion_test_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ion_test_ioctl,
+ .compat_ioctl = ion_test_ioctl,
+ .open = ion_test_open,
+ .release = ion_test_release,
+};
+
+static int __init ion_test_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct ion_test_device *testdev;
+
+ testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
+ GFP_KERNEL);
+ if (!testdev)
+ return -ENOMEM;
+
+ testdev->misc.minor = MISC_DYNAMIC_MINOR;
+ testdev->misc.name = "ion-test";
+ testdev->misc.fops = &ion_test_fops;
+ testdev->misc.parent = &pdev->dev;
+ ret = misc_register(&testdev->misc);
+ if (ret) {
+ pr_err("failed to register misc device.\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, testdev);
+
+ return 0;
+}
+
+static struct platform_driver ion_test_platform_driver = {
+ .driver = {
+ .name = "ion-test",
+ },
+};
+
+static int __init ion_test_init(void)
+{
+ platform_device_register_simple("ion-test", -1, NULL, 0);
+ return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
+}
+
+static void __exit ion_test_exit(void)
+{
+ platform_driver_unregister(&ion_test_platform_driver);
+}
+
+module_init(ion_test_init);
+module_exit(ion_test_exit);
diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile
new file mode 100644
index 0000000..11cd003
--- /dev/null
+++ b/drivers/staging/android/ion/tegra/Makefile
@@ -0,0 +1 @@
+obj-y += tegra_ion.o
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
new file mode 100644
index 0000000..3474c65
--- /dev/null
+++ b/drivers/staging/android/ion/tegra/tegra_ion.c
@@ -0,0 +1,84 @@
+/*
+ * drivers/staging/android/ion/tegra/tegra_ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "../ion.h"
+#include "../ion_priv.h"
+
+static struct ion_device *idev;
+static int num_heaps;
+static struct ion_heap **heaps;
+
+static int tegra_ion_probe(struct platform_device *pdev)
+{
+ struct ion_platform_data *pdata = pdev->dev.platform_data;
+ int err;
+ int i;
+
+ num_heaps = pdata->nr;
+
+ heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+
+ idev = ion_device_create(NULL);
+ if (IS_ERR_OR_NULL(idev)) {
+ kfree(heaps);
+ return PTR_ERR(idev);
+ }
+
+ /* create the heaps as specified in the board file */
+ for (i = 0; i < num_heaps; i++) {
+ struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+ heaps[i] = ion_heap_create(heap_data);
+ if (IS_ERR_OR_NULL(heaps[i])) {
+ err = PTR_ERR(heaps[i]);
+ goto err;
+ }
+ ion_device_add_heap(idev, heaps[i]);
+ }
+ platform_set_drvdata(pdev, idev);
+ return 0;
+err:
+ for (i = 0; i < num_heaps; i++) {
+ if (heaps[i])
+ ion_heap_destroy(heaps[i]);
+ }
+ kfree(heaps);
+ return err;
+}
+
+static int tegra_ion_remove(struct platform_device *pdev)
+{
+ struct ion_device *idev = platform_get_drvdata(pdev);
+ int i;
+
+ ion_device_destroy(idev);
+ for (i = 0; i < num_heaps; i++)
+ ion_heap_destroy(heaps[i]);
+ kfree(heaps);
+ return 0;
+}
+
+static struct platform_driver ion_driver = {
+ .probe = tegra_ion_probe,
+ .remove = tegra_ion_remove,
+ .driver = { .name = "ion-tegra" }
+};
+
+module_platform_driver(ion_driver);
+
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index 38ea986..62e2255b 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -28,7 +28,7 @@ struct sync_fence;
/**
* struct sync_timeline_ops - sync object implementation ops
- * @driver_name: name of the implentation
+ * @driver_name: name of the implementation
* @dup: duplicate a sync_pt
* @has_signaled: returns:
* 1 if pt has signaled
@@ -37,12 +37,12 @@ struct sync_fence;
* @compare: returns:
* 1 if b will signal before a
* 0 if a and b will signal at the same time
- * -1 if a will signabl before b
+ * -1 if a will signal before b
* @free_pt: called before sync_pt is freed
* @release_obj: called before sync_timeline is freed
* @print_obj: deprecated
* @print_pt: deprecated
- * @fill_driver_data: write implmentation specific driver data to data.
+ * @fill_driver_data: write implementation specific driver data to data.
* should return an error if there is not enough room
* as specified by size. This information is returned
* to userspace by SYNC_IOC_FENCE_INFO.
@@ -88,9 +88,9 @@ struct sync_timeline_ops {
/**
* struct sync_timeline - sync object
* @kref: reference count on fence.
- * @ops: ops that define the implementaiton of the sync_timeline
+ * @ops: ops that define the implementation of the sync_timeline
* @name: name of the sync_timeline. Useful for debugging
- * @destoryed: set when sync_timeline is destroyed
+ * @destroyed: set when sync_timeline is destroyed
* @child_list_head: list of children sync_pts for this sync_timeline
* @child_list_lock: lock protecting @child_list_head, destroyed, and
* sync_pt.status
@@ -119,12 +119,12 @@ struct sync_timeline {
* @parent: sync_timeline to which this sync_pt belongs
* @child_list: membership in sync_timeline.child_list_head
* @active_list: membership in sync_timeline.active_list_head
- * @signaled_list: membership in temorary signaled_list on stack
+ * @signaled_list: membership in temporary signaled_list on stack
* @fence: sync_fence to which the sync_pt belongs
* @pt_list: membership in sync_fence.pt_list_head
* @status: 1: signaled, 0:active, <0: error
* @timestamp: time which sync_pt status transitioned from active to
- * singaled or error.
+ * signaled or error.
*/
struct sync_pt {
struct sync_timeline *parent;
@@ -145,9 +145,9 @@ struct sync_pt {
/**
* struct sync_fence - sync fence
* @file: file representing this fence
- * @kref: referenace count on fence.
+ * @kref: reference count on fence.
* @name: name of sync_fence. Useful for debugging
- * @pt_list_head: list of sync_pts in ths fence. immutable once fence
+ * @pt_list_head: list of sync_pts in the fence. immutable once fence
* is created
* @waiter_list_head: list of asynchronous waiters on this fence
* @waiter_list_lock: lock protecting @waiter_list_head and @status
@@ -201,23 +201,23 @@ static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
/**
* sync_timeline_create() - creates a sync object
- * @ops: specifies the implemention ops for the object
+ * @ops: specifies the implementation ops for the object
* @size: size to allocate for this obj
* @name: sync_timeline name
*
- * Creates a new sync_timeline which will use the implemetation specified by
- * @ops. @size bytes will be allocated allowing for implemntation specific
- * data to be kept after the generic sync_timeline stuct.
+ * Creates a new sync_timeline which will use the implementation specified by
+ * @ops. @size bytes will be allocated allowing for implementation specific
+ * data to be kept after the generic sync_timeline struct.
*/
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
int size, const char *name);
/**
- * sync_timeline_destory() - destorys a sync object
+ * sync_timeline_destroy() - destroys a sync object
* @obj: sync_timeline to destroy
*
- * A sync implemntation should call this when the @obj is going away
- * (i.e. module unload.) @obj won't actually be freed until all its childern
+ * A sync implementation should call this when the @obj is going away
+ * (i.e. module unload.) @obj won't actually be freed until all its children
* sync_pts are freed.
*/
void sync_timeline_destroy(struct sync_timeline *obj);
@@ -226,7 +226,7 @@ void sync_timeline_destroy(struct sync_timeline *obj);
* sync_timeline_signal() - signal a status change on a sync_timeline
* @obj: sync_timeline to signal
*
- * A sync implemntation should call this any time one of it's sync_pts
+ * A sync implementation should call this any time one of its sync_pts
* has signaled or has an error condition.
*/
void sync_timeline_signal(struct sync_timeline *obj);
@@ -236,8 +236,8 @@ void sync_timeline_signal(struct sync_timeline *obj);
* @parent: sync_pt's parent sync_timeline
* @size: size to allocate for this pt
*
- * Creates a new sync_pt as a chiled of @parent. @size bytes will be
- * allocated allowing for implemntation specific data to be kept after
+ * Creates a new sync_pt as a child of @parent. @size bytes will be
+ * allocated allowing for implementation specific data to be kept after
* the generic sync_timeline struct.
*/
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
@@ -287,7 +287,7 @@ struct sync_fence *sync_fence_merge(const char *name,
struct sync_fence *sync_fence_fdget(int fd);
/**
- * sync_fence_put() - puts a refernnce of a sync fence
+ * sync_fence_put() - puts a reference of a sync fence
* @fence: fence to put
*
* Puts a reference on @fence. If this is the last reference, the fence and
@@ -297,7 +297,7 @@ void sync_fence_put(struct sync_fence *fence);
/**
* sync_fence_install() - installs a fence into a file descriptor
- * @fence: fence to instal
+ * @fence: fence to install
* @fd: file descriptor in which to install the fence
*
* Installs @fence into @fd. @fd's should be acquired through get_unused_fd().
@@ -359,10 +359,10 @@ struct sync_merge_data {
* struct sync_pt_info - detailed sync_pt information
* @len: length of sync_pt_info including any driver_data
* @obj_name: name of parent sync_timeline
- * @driver_name: name of driver implmenting the parent
+ * @driver_name: name of driver implementing the parent
* @status: status of the sync_pt 0:active 1:signaled <0:error
* @timestamp_ns: timestamp of status change in nanoseconds
- * @driver_data: any driver dependant data
+ * @driver_data: any driver dependent data
*/
struct sync_pt_info {
__u32 len;
@@ -377,7 +377,7 @@ struct sync_pt_info {
/**
* struct sync_fence_info_data - data returned from fence info ioctl
* @len: ioctl caller writes the size of the buffer its passing in.
- * ioctl returns length of sync_fence_data reutnred to userspace
+ * ioctl returns length of sync_fence_data returned to userspace
* including pt_info.
* @name: name of fence
* @status: status of fence. 1: signaled 0:active <0:error
@@ -418,7 +418,7 @@ struct sync_fence_info_data {
* pt_info.
*
* pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To itterate over the sync_pt_infos, use the sync_pt_info.len field.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
*/
#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
struct sync_fence_info_data)
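
The SYNC_IOC_FENCE_INFO comment above tells callers to walk the returned sync_pt_info records via their len field. As an illustrative sketch only (not part of this patch, and assuming the struct layouts declared in this header, including the pt_info buffer, are visible to the program), a userspace caller might do:

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include "sync.h"	/* struct/ioctl definitions from the header above */

	/* Query fence info and walk the packed, variable-length pt records. */
	static void dump_fence_info(int fence_fd)
	{
		size_t buf_len = 4096;	/* arbitrary; must fit all sync_pt_infos */
		struct sync_fence_info_data *info = malloc(buf_len);
		struct sync_pt_info *pt;

		if (!info)
			return;
		info->len = buf_len;	/* caller passes in the buffer size */
		if (ioctl(fence_fd, SYNC_IOC_FENCE_INFO, info) < 0)
			goto out;

		printf("fence %s: status %d\n", info->name, info->status);
		/* pt_info holds packed sync_pt_info records; len gives each size */
		pt = (struct sync_pt_info *)info->pt_info;
		while ((char *)pt < (char *)info + info->len) {
			printf("  %s (%s): status %d\n", pt->obj_name,
			       pt->driver_name, pt->status);
			pt = (struct sync_pt_info *)((char *)pt + pt->len);
		}
	out:
		free(info);
	}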
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
new file mode 100644
index 0000000..f09e7c1
--- /dev/null
+++ b/drivers/staging/android/uapi/ion.h
@@ -0,0 +1,196 @@
+/*
+ * drivers/staging/android/uapi/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
+ * carveout heap, allocations are physically
+ * contiguous
+ * @ION_HEAP_TYPE_DMA: memory allocated via DMA API
+ * @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask
+ * is used to identify the heaps, so only 32
+ * total heap types are supported
+ */
+enum ion_heap_type {
+ ION_HEAP_TYPE_SYSTEM,
+ ION_HEAP_TYPE_SYSTEM_CONTIG,
+ ION_HEAP_TYPE_CARVEOUT,
+ ION_HEAP_TYPE_CHUNK,
+ ION_HEAP_TYPE_DMA,
+ ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+ are at the end of this enum */
+ ION_NUM_HEAPS = 16,
+};
+
+#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
+
+#define ION_NUM_HEAP_IDS sizeof(unsigned int) * 8
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
+ cached, ion will do cache
+ maintenance when the buffer is
+ mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will be created
+ at mmap time, if this is set
+ caches must be managed manually */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * Create a client by opening /dev/ion.
+ * Most operations are handled via the following ioctls.
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len: size of the allocation
+ * @align: required alignment of the allocation
+ * @heap_id_mask: mask of heap ids to allocate from
+ * @flags: flags passed to heap
+ * @handle: pointer that will be populated with a cookie to use to
+ * refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+ size_t len;
+ size_t align;
+ unsigned int heap_id_mask;
+ unsigned int flags;
+ ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle: a handle
+ * @fd: a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+ ion_user_handle_t handle;
+ int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle: a handle
+ */
+struct ion_handle_data {
+ ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd: the custom ioctl function to call
+ * @arg: additional data to pass to the custom ioctl, typically a user
+ * pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+ unsigned int cmd;
+ unsigned long arg;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be passed to another process. The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma_buf APIs correctly (syncing
+ * will happen automatically when the buffer is mapped to a device).
+ * If necessary, it should be used after touching a cached buffer from the
+ * CPU; this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _UAPI_LINUX_ION_H */
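
To make the allocation flow the DOC blocks above describe concrete, here is a rough userspace sketch (illustrative only, not part of this patch); error handling is minimal, the 4096-byte size and ION_HEAP_SYSTEM_MASK are just examples, and real heap ids are device-specific:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include "ion.h"	/* the uapi header added above */

	int main(void)
	{
		struct ion_allocation_data alloc_data = {
			.len = 4096,
			.align = 0,
			.heap_id_mask = ION_HEAP_SYSTEM_MASK,	/* example mask */
			.flags = ION_FLAG_CACHED,
		};
		struct ion_fd_data fd_data;
		struct ion_handle_data handle_data;
		void *ptr;
		int ion_fd = open("/dev/ion", O_RDWR);

		if (ion_fd < 0)
			return 1;
		if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data) < 0)
			return 1;

		/* trade the opaque handle for a file descriptor we can mmap */
		fd_data.handle = alloc_data.handle;
		if (ioctl(ion_fd, ION_IOC_MAP, &fd_data) < 0)
			return 1;

		ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
			   fd_data.fd, 0);
		if (ptr != MAP_FAILED) {
			strcpy(ptr, "hello from userspace");
			munmap(ptr, 4096);
		}

		/* release the mapping fd and the allocation itself */
		close(fd_data.fd);
		handle_data.handle = alloc_data.handle;
		ioctl(ion_fd, ION_IOC_FREE, &handle_data);
		close(ion_fd);
		return 0;
	}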
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
new file mode 100644
index 0000000..ffef06f
--- /dev/null
+++ b/drivers/staging/android/uapi/ion_test.h
@@ -0,0 +1,70 @@
+/*
+ * drivers/staging/android/uapi/ion_test.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_TEST_H
+#define _UAPI_LINUX_ION_TEST_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct ion_test_rw_data - metadata passed to the kernel to read or write a handle
+ * @ptr: a pointer to an area at least as large as size
+ * @offset: offset into the ion buffer to start reading
+ * @size: size to read or write
+ * @write: 1 to write, 0 to read
+ */
+struct ion_test_rw_data {
+ __u64 ptr;
+ __u64 offset;
+ __u64 size;
+ int write;
+ int __padding;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_TEST_SET_FD - attach a dma buf to the test driver
+ *
+ * Attaches a dma buf fd to the test driver. Passing a second fd or -1 will
+ * release the first fd.
+ */
+#define ION_IOC_TEST_SET_FD \
+ _IO(ION_IOC_MAGIC, 0xf0)
+
+/**
+ * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
+ *
+ * Reads or writes the memory from a handle using an uncached mapping. Can be
+ * used by unit tests to emulate a DMA engine as closely as possible. Only
+ * expected to be used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_DMA_MAPPING \
+ _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
+
+/**
+ * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
+ *
+ * Reads or writes the memory from a handle using a kernel mapping. Can be
+ * used by unit tests to test heap map_kernel functions. Only expected to be
+ * used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_KERNEL_MAPPING \
+ _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
+
+
+#endif /* _UAPI_LINUX_ION_TEST_H */
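
As a rough usage sketch for the test ioctls above (illustrative only, not part of this patch), a unit test might attach a dma-buf fd, obtained for example via ION_IOC_SHARE, and exercise the kernel-mapping path; the 4096-byte pattern size is arbitrary:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "ion_test.h"	/* the uapi header added above */

	/* buf_fd is assumed to be a dma-buf file descriptor */
	static int exercise_kernel_mapping(int buf_fd)
	{
		struct ion_test_rw_data rw = {0};
		static char pattern[4096];
		int ret, test_fd = open("/dev/ion-test", O_RDWR);

		if (test_fd < 0)
			return -1;
		/* attach the dma-buf to the test driver */
		ret = ioctl(test_fd, ION_IOC_TEST_SET_FD, buf_fd);
		if (ret)
			goto out;

		memset(pattern, 0xaa, sizeof(pattern));
		rw.ptr = (unsigned long)pattern;	/* user pointer as __u64 */
		rw.offset = 0;
		rw.size = sizeof(pattern);
		rw.write = 1;	/* copy the pattern into the buffer */
		ret = ioctl(test_fd, ION_IOC_TEST_KERNEL_MAPPING, &rw);

		/* passing -1 detaches the previously attached dma-buf */
		ioctl(test_fd, ION_IOC_TEST_SET_FD, -1);
	out:
		close(test_fd);
		return ret;
	}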
diff --git a/drivers/staging/bcm/Adapter.h b/drivers/staging/bcm/Adapter.h
index 9cd5987..f0d6f0c 100644
--- a/drivers/staging/bcm/Adapter.h
+++ b/drivers/staging/bcm/Adapter.h
@@ -378,7 +378,7 @@ struct bcm_mini_adapter {
UINT uiFlashLayoutMinorVersion;
bool bAllDSDWriteAllow;
bool bSigCorrupted;
- /* this should be set who so ever want to change the Headers. after Wrtie it should be reset immediately. */
+	/* this should be set by whoever wants to change the Headers. After Write it should be reset immediately. */
bool bHeaderChangeAllowed;
int SelectedChip;
bool bEndPointHalted;
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 87b74ca..f1b6de0 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -160,7 +160,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
struct bcm_ioctl_buffer IoBuffer;
int bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX", cmd, arg);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX",
+ cmd, arg);
if (_IOC_TYPE(cmd) != BCM_IOCTL)
return -EFAULT;
@@ -266,7 +268,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(uiTempVar == EEPROM_REJECT_REG_3) ||
(uiTempVar == EEPROM_REJECT_REG_4))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "EEPROM Access Denied, not in VSG Mode\n");
return -EFAULT;
}
@@ -274,9 +277,11 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(PUINT)sWrmBuffer.Data, sizeof(ULONG));
if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Done\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Done\n");
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Failed\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Failed\n");
Status = -EFAULT;
}
break;
@@ -291,7 +296,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Rdms\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Device in Idle Mode, Blocking Rdms\n");
return -EACCES;
}
@@ -317,7 +323,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if ((((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) ||
((ULONG)sRdmBuffer.Register & 0x3)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Done On invalid Address : %x Access Denied.\n",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "RDM Done On invalid Address : %x Access Denied.\n",
(int)sRdmBuffer.Register);
kfree(temp_buff);
@@ -325,7 +332,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK;
- bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register, (PUINT)temp_buff, IoBuffer.OutputLength);
+ bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register,
+ (PUINT)temp_buff, IoBuffer.OutputLength);
if (bytes > 0) {
Status = STATUS_SUCCESS;
@@ -349,7 +357,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Wrms\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Device in Idle Mode, Blocking Wrms\n");
return -EACCES;
}
@@ -367,7 +376,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if ((((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) ||
((ULONG)sWrmBuffer.Register & 0x3)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Done On invalid Address : %x Access Denied.\n", (int)sWrmBuffer.Register);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "WRM Done On invalid Address : %x Access Denied.\n",
+ (int)sWrmBuffer.Register);
return -EINVAL;
}
@@ -379,17 +390,21 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(uiTempVar == EEPROM_REJECT_REG_4)) &&
(cmd == IOCTL_BCM_REGISTER_WRITE)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "EEPROM Access Denied, not in VSG Mode\n");
return -EFAULT;
}
Status = wrmaltWithLock(Adapter, (UINT)sWrmBuffer.Register,
- (PUINT)sWrmBuffer.Data, sWrmBuffer.Length);
+ (PUINT)sWrmBuffer.Data,
+ sWrmBuffer.Length);
if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "WRM Done\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Done\n");
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Failed\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Failed\n");
Status = -EFAULT;
}
break;
@@ -405,7 +420,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "GPIO Can't be set/clear in Low power Mode");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "GPIO Can't be set/clear in Low power Mode");
return -EACCES;
}
@@ -423,7 +440,10 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
value = (1<<uiBit);
if (IsReqGpioIsLedInNVM(Adapter, value) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!", value);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!",
+ value);
Status = -EINVAL;
break;
}
@@ -431,27 +451,42 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
/* Set - setting 1 */
if (uiOperation) {
/* Set the gpio output register */
- Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, (PUINT)(&value), sizeof(UINT));
+ Status = wrmaltWithLock(Adapter,
+ BCM_GPIO_OUTPUT_SET_REG,
+ (PUINT)(&value), sizeof(UINT));
if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO bit\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Set the GPIO bit\n");
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to set the %dth GPIO\n", uiBit);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Failed to set the %dth GPIO\n",
+ uiBit);
break;
}
} else {
/* Set the gpio output register */
- Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, (PUINT)(&value), sizeof(UINT));
+ Status = wrmaltWithLock(Adapter,
+ BCM_GPIO_OUTPUT_CLR_REG,
+ (PUINT)(&value), sizeof(UINT));
if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO bit\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Set the GPIO bit\n");
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to clear the %dth GPIO\n", uiBit);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Failed to clear the %dth GPIO\n",
+ uiBit);
break;
}
}
- bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER,
+ (PUINT)ucResetValue, sizeof(UINT));
if (bytes < 0) {
Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
@@ -467,9 +502,13 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(PUINT)ucResetValue, sizeof(UINT));
if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO to output Mode\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "Set the GPIO to output Mode\n");
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to put GPIO in Output Mode\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "Failed to put GPIO in Output Mode\n");
break;
}
}
@@ -477,13 +516,16 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
case BCM_LED_THREAD_STATE_CHANGE_REQ: {
struct bcm_user_thread_req threadReq = {0};
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "User made LED thread InActive");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "User made LED thread InActive");
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "GPIO Can't be set/clear in Low power Mode");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "GPIO Can't be set/clear in Low power Mode");
Status = -EACCES;
break;
}
@@ -500,10 +542,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
/* if LED thread is running(Actively or Inactively) set it state to make inactive */
if (Adapter->LEDInfo.led_thread_running) {
if (threadReq.ThreadState == LED_THREAD_ACTIVATION_REQ) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Activating thread req");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Activating thread req");
Adapter->DriverState = LED_THREAD_ACTIVE;
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DeActivating Thread req.....");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "DeActivating Thread req.....");
Adapter->DriverState = LED_THREAD_INACTIVE;
}
@@ -540,7 +586,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (bytes < 0) {
Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Failed\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "RDM Failed\n");
return Status;
} else {
Status = STATUS_SUCCESS;
@@ -570,9 +617,11 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
return -EFAULT;
if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_info[WIMAX_IDX].uiGPIOMask) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
"Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
- pgpio_multi_info[WIMAX_IDX].uiGPIOMask, Adapter->gpioBitMap);
+ pgpio_multi_info[WIMAX_IDX].uiGPIOMask,
+ Adapter->gpioBitMap);
Status = -EINVAL;
break;
}
@@ -590,7 +639,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(PUINT)ucResetValue, sizeof(ULONG));
if (Status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to BCM_GPIO_OUTPUT_SET_REG Failed.");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "WRM to BCM_GPIO_OUTPUT_SET_REG Failed.");
return Status;
}
@@ -603,7 +653,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, (PUINT)ucResetValue, sizeof(ULONG));
if (Status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed.");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed.");
return Status;
}
}
@@ -613,7 +664,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (bytes < 0) {
Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM to GPIO_PIN_STATE_REGISTER Failed.");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "RDM to GPIO_PIN_STATE_REGISTER Failed.");
return Status;
} else {
Status = STATUS_SUCCESS;
@@ -1190,7 +1242,7 @@ cntrlEnd:
break;
case IOCTL_BCM_CAL_INIT: {
- UINT uiSectorSize = 0 ;
+ UINT uiSectorSize = 0;
if (Adapter->eNVMType == NVM_FLASH) {
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
@@ -1403,7 +1455,7 @@ cntrlEnd:
case IOCTL_BCM_FLASH2X_SECTION_READ: {
struct bcm_flash2x_readwrite sFlash2xRead = {0};
- PUCHAR pReadBuff = NULL ;
+ PUCHAR pReadBuff = NULL;
UINT NOB = 0;
UINT BuffSize = 0;
UINT ReadBytes = 0;
@@ -1438,7 +1490,7 @@ cntrlEnd:
else
BuffSize = NOB;
- ReadOffset = sFlash2xRead.offset ;
+ ReadOffset = sFlash2xRead.offset;
OutPutBuff = IoBuffer.OutputBuffer;
pReadBuff = (PCHAR)kzalloc(BuffSize , GFP_KERNEL);
@@ -1483,7 +1535,7 @@ cntrlEnd:
NOB = NOB - ReadBytes;
if (NOB) {
ReadOffset = ReadOffset + ReadBytes;
- OutPutBuff = OutPutBuff + ReadBytes ;
+ OutPutBuff = OutPutBuff + ReadBytes;
}
}
@@ -1538,7 +1590,7 @@ cntrlEnd:
if (NOB > Adapter->uiSectorSize)
BuffSize = Adapter->uiSectorSize;
else
- BuffSize = NOB ;
+ BuffSize = NOB;
pWriteBuff = kmalloc(BuffSize, GFP_KERNEL);
@@ -1718,12 +1770,12 @@ cntrlEnd:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "NOB :%x", sCopySectStrut.numOfBytes);
if (IsSectionExistInFlash(Adapter, sCopySectStrut.SrcSection) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source Section<%x> does not exixt in Flash ", sCopySectStrut.SrcSection);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source Section<%x> does not exist in Flash ", sCopySectStrut.SrcSection);
return -EINVAL;
}
if (IsSectionExistInFlash(Adapter, sCopySectStrut.DstSection) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destinatio Section<%x> does not exixt in Flash ", sCopySectStrut.DstSection);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destination Section<%x> does not exist in Flash ", sCopySectStrut.DstSection);
return -EINVAL;
}
@@ -1828,7 +1880,7 @@ cntrlEnd:
SectOfset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal);
if (SectOfset == INVALID_OFFSET) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Provided Section val <%d> does not exixt in Flash 2.x", eFlash2xSectionVal);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Provided Section val <%d> does not exist in Flash 2.x", eFlash2xSectionVal);
return -EINVAL;
}
@@ -1841,10 +1893,10 @@ cntrlEnd:
case IOCTL_BCM_NVM_RAW_READ: {
struct bcm_nvm_readwrite stNVMRead;
- INT NOB ;
- INT BuffSize ;
+ INT NOB;
+ INT BuffSize;
INT ReadOffset = 0;
- UINT ReadBytes = 0 ;
+ UINT ReadBytes = 0;
PUCHAR pReadBuff;
void __user *OutPutBuff;
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 53fee2f..8dfdd27 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -39,7 +39,8 @@ static INT bcm_close(struct net_device *dev)
return 0;
}
-static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
return ClassifyPacket(netdev_priv(dev), skb);
}
diff --git a/drivers/staging/bcm/DDRInit.c b/drivers/staging/bcm/DDRInit.c
index 9f7e30f..ed285b2 100644
--- a/drivers/staging/bcm/DDRInit.c
+++ b/drivers/staging/bcm/DDRInit.c
@@ -5,882 +5,865 @@
#define DDR_DUMP_INTERNAL_DEVICE_MEMORY 0xBFC02B00
#define MIPS_CLOCK_REG 0x0f000820
- //DDR INIT-133Mhz
-#define T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 12 //index for 0x0F007000
-static struct bcm_ddr_setting asT3_DDRSetting133MHz[]= {// # DPLL Clock Setting
- {0x0F000800,0x00007212},
- {0x0f000820,0x07F13FFF},
- {0x0f000810,0x00000F95},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0f000840,0x0FFF1B00},
- {0x0f000870,0x00000002},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- {0x0F00a04C,0x0000000C},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020001},// POP - 0x00020001 Normal 0x01020001
- {0x0F007020,0x04030107}, //Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x02000007},
- {0x0F007028,0x02020202},
- {0x0F00702c,0x0206060a},//ROB- 0x0205050a,//0x0206060a
- {0x0F007030,0x05000000},
- {0x0F007034,0x00000003},
- {0x0F007038,0x110a0200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200
- {0x0F00703C,0x02101010},//ROB - 0x02101010,//0x02101018},
- {0x0F007040,0x45751200},//ROB - 0x45751200,//0x450f1200},
- {0x0F007044,0x110a0d00},//ROB - 0x110a0d00//0x111f0d00
- {0x0F007048,0x081b0306},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0000001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x0010246c},
- {0x0F007064,0x00000010},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00007000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- //# Enable BW improvement within memory controller
- {0x0F007094,0x00000104},
- //# Enable 2 ports within X-bar
- {0x0F00A000,0x00000016},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
- };
-//80Mhz
-#define T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 10 //index for 0x0F007000
-static struct bcm_ddr_setting asT3_DDRSetting80MHz[]= {// # DPLL Clock Setting
- {0x0f000810,0x00000F95},
- {0x0f000820,0x07f1ffff},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- {0x0F00a000,0x00000016},
- {0x0F00a04C,0x0000000C},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01000000},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020000},
- {0x0F007020,0x04020107},
- {0x0F007024,0x00000007},
- {0x0F007028,0x02020201},
- {0x0F00702c,0x0204040a},
- {0x0F007030,0x04000000},
- {0x0F007034,0x00000002},
- {0x0F007038,0x1F060200},
- {0x0F00703C,0x1C22221F},
- {0x0F007040,0x8A006600},
- {0x0F007044,0x221a0800},
- {0x0F007048,0x02690204},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0000001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x000A15D6},
- {0x0F007064,0x0000000A},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00004000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- {0x0F007094,0x00000104},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
- };
-//100Mhz
-#define T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 13 //index for 0x0F007000
-static struct bcm_ddr_setting asT3_DDRSetting100MHz[]= {// # DPLL Clock Setting
- {0x0F000800,0x00007008},
- {0x0f000810,0x00000F95},
- {0x0f000820,0x07F13E3F},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- // Changed source for X-bar and MIPS clock to APLL
- //0x0f000840,0x0FFF1800,
- {0x0f000840,0x0FFF1B00},
- {0x0f000870,0x00000002},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- {0x0F00a04C,0x0000000C},
- //# Enable 2 ports within X-bar
- {0x0F00A000,0x00000016},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020001}, // POP - 0x00020000 Normal 0x01020000
- {0x0F007020,0x04020107},//Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x00000007},
- {0x0F007028,0x01020201},
- {0x0F00702c,0x0204040A},
- {0x0F007030,0x06000000},
- {0x0F007034,0x00000004},
- {0x0F007038,0x20080200},
- {0x0F00703C,0x02030320},
- {0x0F007040,0x6E7F1200},
- {0x0F007044,0x01190A00},
- {0x0F007048,0x06120305},//0x02690204 // 0x06120305
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0000001C},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x00082ED6},
- {0x0F007064,0x0000000A},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00005000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- //# Enable BW improvement within memory controller
- {0x0F007094,0x00000104},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
- };
+/* DDR INIT-133Mhz */
+#define T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 12 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3_DDRSetting133MHz[] = { /* DPLL Clock Setting */
+ {0x0F000800, 0x00007212},
+ {0x0f000820, 0x07F13FFF},
+ {0x0f000810, 0x00000F95},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000840, 0x0FFF1B00},
+ {0x0f000870, 0x00000002},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ {0x0F00a04C, 0x0000000C},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020001},
+ {0x0F007020, 0x04030107},
+ {0x0F007024, 0x02000007},
+ {0x0F007028, 0x02020202},
+ {0x0F00702c, 0x0206060a},
+ {0x0F007030, 0x05000000},
+ {0x0F007034, 0x00000003},
+ {0x0F007038, 0x110a0200},
+ {0x0F00703C, 0x02101010},
+ {0x0F007040, 0x45751200},
+ {0x0F007044, 0x110a0d00},
+ {0x0F007048, 0x081b0306},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0000001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x0010246c},
+ {0x0F007064, 0x00000010},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00007000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ /* Enable BW improvement within memory controller */
+ {0x0F007094, 0x00000104},
+ /* Enable 2 ports within X-bar */
+ {0x0F00A000, 0x00000016},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
+};
+/* 80Mhz */
+#define T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 10 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3_DDRSetting80MHz[] = { /* DPLL Clock Setting */
+ {0x0f000810, 0x00000F95},
+ {0x0f000820, 0x07f1ffff},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ {0x0F00a000, 0x00000016},
+ {0x0F00a04C, 0x0000000C},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01000000},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020000},
+ {0x0F007020, 0x04020107},
+ {0x0F007024, 0x00000007},
+ {0x0F007028, 0x02020201},
+ {0x0F00702c, 0x0204040a},
+ {0x0F007030, 0x04000000},
+ {0x0F007034, 0x00000002},
+ {0x0F007038, 0x1F060200},
+ {0x0F00703C, 0x1C22221F},
+ {0x0F007040, 0x8A006600},
+ {0x0F007044, 0x221a0800},
+ {0x0F007048, 0x02690204},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0000001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x000A15D6},
+ {0x0F007064, 0x0000000A},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00004000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ {0x0F007094, 0x00000104},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
+};
+/* 100Mhz */
+#define T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 13 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3_DDRSetting100MHz[] = { /* DPLL Clock Setting */
+ {0x0F000800, 0x00007008},
+ {0x0f000810, 0x00000F95},
+ {0x0f000820, 0x07F13E3F},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000840, 0x0FFF1B00},
+ {0x0f000870, 0x00000002},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ {0x0F00a04C, 0x0000000C},
+ /* Enable 2 ports within X-bar */
+ {0x0F00A000, 0x00000016},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020001},
+ {0x0F007020, 0x04020107},
+ {0x0F007024, 0x00000007},
+ {0x0F007028, 0x01020201},
+ {0x0F00702c, 0x0204040A},
+ {0x0F007030, 0x06000000},
+ {0x0F007034, 0x00000004},
+ {0x0F007038, 0x20080200},
+ {0x0F00703C, 0x02030320},
+ {0x0F007040, 0x6E7F1200},
+ {0x0F007044, 0x01190A00},
+ {0x0F007048, 0x06120305},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0000001C},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x00082ED6},
+ {0x0F007064, 0x0000000A},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00005000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ /* Enable BW improvement within memory controller */
+ {0x0F007094, 0x00000104},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
+};
-//Net T3B DDR Settings
-//DDR INIT-133Mhz
+/* Net T3B DDR Settings
+ * DDR INIT-133Mhz
+ */
static struct bcm_ddr_setting asDPLL_266MHZ[] = {
- {0x0F000800,0x00007212},
- {0x0f000820,0x07F13FFF},
- {0x0f000810,0x00000F95},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0f000840,0x0FFF1B00},
- {0x0f000870,0x00000002}
- };
+ {0x0F000800, 0x00007212},
+ {0x0f000820, 0x07F13FFF},
+ {0x0f000810, 0x00000F95},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000840, 0x0FFF1B00},
+ {0x0f000870, 0x00000002}
+};
-#define T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 11 //index for 0x0F007000
-static struct bcm_ddr_setting asT3B_DDRSetting133MHz[] = {// # DPLL Clock Setting
- {0x0f000810,0x00000F95},
- {0x0f000810,0x00000F95},
- {0x0f000810,0x00000F95},
- {0x0f000820,0x07F13652},
- {0x0f000840,0x0FFF0800},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0f000880,0x000003DD},
- {0x0f000860,0x00000000},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- //# Enable 2 ports within X-bar
- {0x0F00A000,0x00000016},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020001},// POP - 0x00020001 Normal 0x01020001
- {0x0F007020,0x04030107}, //Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x02000007},
- {0x0F007028,0x02020202},
- {0x0F00702c,0x0206060a},//ROB- 0x0205050a,//0x0206060a
- {0x0F007030,0x05000000},
- {0x0F007034,0x00000003},
- {0x0F007038,0x130a0200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200
- {0x0F00703C,0x02101012},//ROB - 0x02101010,//0x02101018},
- {0x0F007040,0x457D1200},//ROB - 0x45751200,//0x450f1200},
- {0x0F007044,0x11130d00},//ROB - 0x110a0d00//0x111f0d00
- {0x0F007048,0x040D0306},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0000001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x0010246c},
- {0x0F007064,0x00000012},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00007000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- //# Enable BW improvement within memory controller
- {0x0F007094,0x00000104},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000},
- };
+#define T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 11 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3B_DDRSetting133MHz[] = { /* DPLL Clock Setting */
+ {0x0f000810, 0x00000F95},
+ {0x0f000810, 0x00000F95},
+ {0x0f000810, 0x00000F95},
+ {0x0f000820, 0x07F13652},
+ {0x0f000840, 0x0FFF0800},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000880, 0x000003DD},
+ {0x0f000860, 0x00000000},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ /* Enable 2 ports within X-bar */
+ {0x0F00A000, 0x00000016},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020001},
+ {0x0F007020, 0x04030107},
+ {0x0F007024, 0x02000007},
+ {0x0F007028, 0x02020202},
+ {0x0F00702c, 0x0206060a},
+ {0x0F007030, 0x05000000},
+ {0x0F007034, 0x00000003},
+ {0x0F007038, 0x130a0200},
+ {0x0F00703C, 0x02101012},
+ {0x0F007040, 0x457D1200},
+ {0x0F007044, 0x11130d00},
+ {0x0F007048, 0x040D0306},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0000001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x0010246c},
+ {0x0F007064, 0x00000012},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00007000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ /* Enable BW improvement within memory controller */
+ {0x0F007094, 0x00000104},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000},
+};
-#define T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 //index for 0x0F007000
-static struct bcm_ddr_setting asT3B_DDRSetting80MHz[] = {// # DPLL Clock Setting
- {0x0f000810,0x00000F95},
- {0x0f000820,0x07F13FFF},
- {0x0f000840,0x0FFF1F00},
- {0x0f000880,0x000003DD},
- {0x0f000860,0x00000000},
+#define T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3B_DDRSetting80MHz[] = { /* DPLL Clock Setting */
+ {0x0f000810, 0x00000F95},
+ {0x0f000820, 0x07F13FFF},
+ {0x0f000840, 0x0FFF1F00},
+ {0x0f000880, 0x000003DD},
+ {0x0f000860, 0x00000000},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- {0x0F00a000,0x00000016},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01000000},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020000},
- {0x0F007020,0x04020107},
- {0x0F007024,0x00000007},
- {0x0F007028,0x02020201},
- {0x0F00702c,0x0204040a},
- {0x0F007030,0x04000000},
- {0x0F007034,0x02000002},
- {0x0F007038,0x1F060202},
- {0x0F00703C,0x1C22221F},
- {0x0F007040,0x8A006600},
- {0x0F007044,0x221a0800},
- {0x0F007048,0x02690204},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0100001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x000A15D6},
- {0x0F007064,0x0000000A},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00004000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- {0x0F007094,0x00000104},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
- };
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ {0x0F00a000, 0x00000016},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01000000},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020000},
+ {0x0F007020, 0x04020107},
+ {0x0F007024, 0x00000007},
+ {0x0F007028, 0x02020201},
+ {0x0F00702c, 0x0204040a},
+ {0x0F007030, 0x04000000},
+ {0x0F007034, 0x02000002},
+ {0x0F007038, 0x1F060202},
+ {0x0F00703C, 0x1C22221F},
+ {0x0F007040, 0x8A006600},
+ {0x0F007044, 0x221a0800},
+ {0x0F007048, 0x02690204},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0100001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x000A15D6},
+ {0x0F007064, 0x0000000A},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00004000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ {0x0F007094, 0x00000104},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
+};
-//100Mhz
-#define T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 9 //index for 0x0F007000
-static struct bcm_ddr_setting asT3B_DDRSetting100MHz[] = {// # DPLL Clock Setting
- {0x0f000810,0x00000F95},
- {0x0f000820,0x07F1369B},
- {0x0f000840,0x0FFF0800},
- {0x0f000880,0x000003DD},
- {0x0f000860,0x00000000},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- //# Enable 2 ports within X-bar
- {0x0F00A000,0x00000016},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020000}, // POP - 0x00020000 Normal 0x01020000
- {0x0F007020,0x04020107},//Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x00000007},
- {0x0F007028,0x01020201},
- {0x0F00702c,0x0204040A},
- {0x0F007030,0x06000000},
- {0x0F007034,0x02000004},
- {0x0F007038,0x20080200},
- {0x0F00703C,0x02030320},
- {0x0F007040,0x6E7F1200},
- {0x0F007044,0x01190A00},
- {0x0F007048,0x06120305},//0x02690204 // 0x06120305
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0100001C},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x00082ED6},
- {0x0F007064,0x0000000A},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00005000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- //# Enable BW improvement within memory controller
- {0x0F007094,0x00000104},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
- };
+/* 100 MHz */
+#define T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 9 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3B_DDRSetting100MHz[] = { /* DPLL Clock Setting */
+ {0x0f000810, 0x00000F95},
+ {0x0f000820, 0x07F1369B},
+ {0x0f000840, 0x0FFF0800},
+ {0x0f000880, 0x000003DD},
+ {0x0f000860, 0x00000000},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ /* Enable 2 ports within X-bar */
+ {0x0F00A000, 0x00000016},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020000},
+ {0x0F007020, 0x04020107},
+ {0x0F007024, 0x00000007},
+ {0x0F007028, 0x01020201},
+ {0x0F00702c, 0x0204040A},
+ {0x0F007030, 0x06000000},
+ {0x0F007034, 0x02000004},
+ {0x0F007038, 0x20080200},
+ {0x0F00703C, 0x02030320},
+ {0x0F007040, 0x6E7F1200},
+ {0x0F007044, 0x01190A00},
+ {0x0F007048, 0x06120305},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0100001C},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x00082ED6},
+ {0x0F007064, 0x0000000A},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00005000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ /* Enable BW improvement within memory controller */
+ {0x0F007094, 0x00000104},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
+};
-#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 9 //index for 0x0F007000
-static struct bcm_ddr_setting asT3LP_DDRSetting133MHz[]= {// # DPLL Clock Setting
- {0x0f000820,0x03F1365B},
- {0x0f000810,0x00002F95},
- {0x0f000880,0x000003DD},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0f000840,0x0FFF0000},
- {0x0f000860,0x00000000},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- {0x0F00A000,0x00000016},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020001},// POP - 0x00020001 Normal 0x01020001
- {0x0F007020,0x04030107}, //Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x02000007},
- {0x0F007028,0x02020200},
- {0x0F00702c,0x0206060a},//ROB- 0x0205050a,//0x0206060a
- {0x0F007030,0x05000000},
- {0x0F007034,0x00000003},
- {0x0F007038,0x200a0200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200
- {0x0F00703C,0x02101020},//ROB - 0x02101010,//0x02101018,
- {0x0F007040,0x45711200},//ROB - 0x45751200,//0x450f1200,
- {0x0F007044,0x110D0D00},//ROB - 0x110a0d00//0x111f0d00
- {0x0F007048,0x04080306},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0100001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x0010245F},
- {0x0F007064,0x00000010},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00007000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- {0x0F007088,0x01000001},
- {0x0F00708c,0x00000101},
- {0x0F007090,0x00000000},
- //# Enable BW improvement within memory controller
- {0x0F007094,0x00040000},
- {0x0F007098,0x00000000},
- {0x0F0070c8,0x00000104},
- //# Enable 2 ports within X-bar
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
+#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 9 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3LP_DDRSetting133MHz[] = { /* DPLL Clock Setting */
+ {0x0f000820, 0x03F1365B},
+ {0x0f000810, 0x00002F95},
+ {0x0f000880, 0x000003DD},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000840, 0x0FFF0000},
+ {0x0f000860, 0x00000000},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ {0x0F00A000, 0x00000016},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020001},
+ {0x0F007020, 0x04030107},
+ {0x0F007024, 0x02000007},
+ {0x0F007028, 0x02020200},
+ {0x0F00702c, 0x0206060a},
+ {0x0F007030, 0x05000000},
+ {0x0F007034, 0x00000003},
+ {0x0F007038, 0x200a0200},
+ {0x0F00703C, 0x02101020},
+ {0x0F007040, 0x45711200},
+ {0x0F007044, 0x110D0D00},
+ {0x0F007048, 0x04080306},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0100001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x0010245F},
+ {0x0F007064, 0x00000010},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00007000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ {0x0F007088, 0x01000001},
+ {0x0F00708c, 0x00000101},
+ {0x0F007090, 0x00000000},
+ /* Enable BW improvement within memory controller */
+ {0x0F007094, 0x00040000},
+ {0x0F007098, 0x00000000},
+ {0x0F0070c8, 0x00000104},
+ /* Enable 2 ports within X-bar */
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
};
-#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 11 //index for 0x0F007000
-static struct bcm_ddr_setting asT3LP_DDRSetting100MHz[]= {// # DPLL Clock Setting
- {0x0f000810,0x00002F95},
- {0x0f000820,0x03F1369B},
- {0x0f000840,0x0fff0000},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0f000840,0x0FFF0000},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020000},// POP - 0x00020001 Normal 0x01020001
- {0x0F007020,0x04020107}, //Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x00000007},
- {0x0F007028,0x01020200},
- {0x0F00702c,0x0204040a},//ROB- 0x0205050a,//0x0206060a
- {0x0F007030,0x06000000},
- {0x0F007034,0x00000004},
- {0x0F007038,0x1F080200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200
- {0x0F00703C,0x0203031F},//ROB - 0x02101010,//0x02101018,
- {0x0F007040,0x6e001200},//ROB - 0x45751200,//0x450f1200,
- {0x0F007044,0x011a0a00},//ROB - 0x110a0d00//0x111f0d00
- {0x0F007048,0x03000305},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0100001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x00082ED6},
- {0x0F007064,0x0000000A},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00005000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- {0x0F007088,0x01000001},
- {0x0F00708c,0x00000101},
- {0x0F007090,0x00000000},
- {0x0F007094,0x00010000},
- {0x0F007098,0x00000000},
- {0x0F0070C8,0x00000104},
- //# Enable 2 ports within X-bar
- {0x0F00A000,0x00000016},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
+#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 11 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3LP_DDRSetting100MHz[] = { /* DPLL Clock Setting */
+ {0x0f000810, 0x00002F95},
+ {0x0f000820, 0x03F1369B},
+ {0x0f000840, 0x0fff0000},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000840, 0x0FFF0000},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020000},
+ {0x0F007020, 0x04020107},
+ {0x0F007024, 0x00000007},
+ {0x0F007028, 0x01020200},
+ {0x0F00702c, 0x0204040a},
+ {0x0F007030, 0x06000000},
+ {0x0F007034, 0x00000004},
+ {0x0F007038, 0x1F080200},
+ {0x0F00703C, 0x0203031F},
+ {0x0F007040, 0x6e001200},
+ {0x0F007044, 0x011a0a00},
+ {0x0F007048, 0x03000305},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0100001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x00082ED6},
+ {0x0F007064, 0x0000000A},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00005000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ {0x0F007088, 0x01000001},
+ {0x0F00708c, 0x00000101},
+ {0x0F007090, 0x00000000},
+ {0x0F007094, 0x00010000},
+ {0x0F007098, 0x00000000},
+ {0x0F0070C8, 0x00000104},
+ /* Enable 2 ports within X-bar */
+ {0x0F00A000, 0x00000016},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
};
-#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 //index for 0x0F007000
-static struct bcm_ddr_setting asT3LP_DDRSetting80MHz[]= {// # DPLL Clock Setting
- {0x0f000820,0x07F13FFF},
- {0x0f000810,0x00002F95},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- {0x0f000840,0x0FFF1F00},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- {0x0F00A000,0x00000016},
- {0x0f007000,0x00010001},
- {0x0f007004,0x01000000},
- {0x0f007008,0x01000001},
- {0x0f00700c,0x00000000},
- {0x0f007010,0x01000000},
- {0x0f007014,0x01000100},
- {0x0f007018,0x01000000},
- {0x0f00701c,0x01020000},
- {0x0f007020,0x04020107},
- {0x0f007024,0x00000007},
- {0x0f007028,0x02020200},
- {0x0f00702c,0x0204040a},
- {0x0f007030,0x04000000},
- {0x0f007034,0x00000002},
- {0x0f007038,0x1d060200},
- {0x0f00703c,0x1c22221d},
- {0x0f007040,0x8A116600},
- {0x0f007044,0x222d0800},
- {0x0f007048,0x02690204},
- {0x0f00704c,0x00000000},
- {0x0f007050,0x0100001c},
- {0x0f007054,0x00000000},
- {0x0f007058,0x00000000},
- {0x0f00705c,0x00000000},
- {0x0f007060,0x000A15D6},
- {0x0f007064,0x0000000A},
- {0x0f007068,0x00000000},
- {0x0f00706c,0x00000001},
- {0x0f007070,0x00004000},
- {0x0f007074,0x00000000},
- {0x0f007078,0x00000000},
- {0x0f00707c,0x00000000},
- {0x0f007080,0x00000000},
- {0x0f007084,0x00000000},
- {0x0f007088,0x01000001},
- {0x0f00708c,0x00000101},
- {0x0f007090,0x00000000},
- {0x0f007094,0x00010000},
- {0x0f007098,0x00000000},
- {0x0F0070C8,0x00000104},
- {0x0F007018,0x01010000}
+#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3LP_DDRSetting80MHz[] = { /* DPLL Clock Setting */
+ {0x0f000820, 0x07F13FFF},
+ {0x0f000810, 0x00002F95},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ {0x0f000840, 0x0FFF1F00},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ {0x0F00A000, 0x00000016},
+ {0x0f007000, 0x00010001},
+ {0x0f007004, 0x01000000},
+ {0x0f007008, 0x01000001},
+ {0x0f00700c, 0x00000000},
+ {0x0f007010, 0x01000000},
+ {0x0f007014, 0x01000100},
+ {0x0f007018, 0x01000000},
+ {0x0f00701c, 0x01020000},
+ {0x0f007020, 0x04020107},
+ {0x0f007024, 0x00000007},
+ {0x0f007028, 0x02020200},
+ {0x0f00702c, 0x0204040a},
+ {0x0f007030, 0x04000000},
+ {0x0f007034, 0x00000002},
+ {0x0f007038, 0x1d060200},
+ {0x0f00703c, 0x1c22221d},
+ {0x0f007040, 0x8A116600},
+ {0x0f007044, 0x222d0800},
+ {0x0f007048, 0x02690204},
+ {0x0f00704c, 0x00000000},
+ {0x0f007050, 0x0100001c},
+ {0x0f007054, 0x00000000},
+ {0x0f007058, 0x00000000},
+ {0x0f00705c, 0x00000000},
+ {0x0f007060, 0x000A15D6},
+ {0x0f007064, 0x0000000A},
+ {0x0f007068, 0x00000000},
+ {0x0f00706c, 0x00000001},
+ {0x0f007070, 0x00004000},
+ {0x0f007074, 0x00000000},
+ {0x0f007078, 0x00000000},
+ {0x0f00707c, 0x00000000},
+ {0x0f007080, 0x00000000},
+ {0x0f007084, 0x00000000},
+ {0x0f007088, 0x01000001},
+ {0x0f00708c, 0x00000101},
+ {0x0f007090, 0x00000000},
+ {0x0f007094, 0x00010000},
+ {0x0f007098, 0x00000000},
+ {0x0F0070C8, 0x00000104},
+ {0x0F007018, 0x01010000}
};
-///T3 LP-B (UMA-B)
+/* T3 LP-B (UMA-B) */
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ 7 //index for 0x0F007000
-static struct bcm_ddr_setting asT3LPB_DDRSetting160MHz[]= {// # DPLL Clock Setting
-
- {0x0f000820,0x03F137DB},
- {0x0f000810,0x01842795},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- {0x0f000840,0x0FFF0400},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0f003050,0x00000021},//this is flash/eeprom clock divisor which set the flash clock to 20 MHz
- {0x0F00a084,0x1Cffffff},//Now dump from her in internal memory
- {0x0F00a080,0x1C000000},
- {0x0F00A000,0x00000016},
- {0x0f007000,0x00010001},
- {0x0f007004,0x01000001},
- {0x0f007008,0x01000101},
- {0x0f00700c,0x00000000},
- {0x0f007010,0x01000100},
- {0x0f007014,0x01000100},
- {0x0f007018,0x01000000},
- {0x0f00701c,0x01020000},
- {0x0f007020,0x04030107},
- {0x0f007024,0x02000007},
- {0x0f007028,0x02020200},
- {0x0f00702c,0x0206060a},
- {0x0f007030,0x050d0d00},
- {0x0f007034,0x00000003},
- {0x0f007038,0x170a0200},
- {0x0f00703c,0x02101012},
- {0x0f007040,0x45161200},
- {0x0f007044,0x11250c00},
- {0x0f007048,0x04da0307},
- {0x0f00704c,0x00000000},
- {0x0f007050,0x0000001c},
- {0x0f007054,0x00000000},
- {0x0f007058,0x00000000},
- {0x0f00705c,0x00000000},
- {0x0f007060,0x00142bb6},
- {0x0f007064,0x20430014},
- {0x0f007068,0x00000000},
- {0x0f00706c,0x00000001},
- {0x0f007070,0x00009000},
- {0x0f007074,0x00000000},
- {0x0f007078,0x00000000},
- {0x0f00707c,0x00000000},
- {0x0f007080,0x00000000},
- {0x0f007084,0x00000000},
- {0x0f007088,0x01000001},
- {0x0f00708c,0x00000101},
- {0x0f007090,0x00000000},
- {0x0f007094,0x00040000},
- {0x0f007098,0x00000000},
- {0x0F0070C8,0x00000104},
- {0x0F007018,0x01010000}
+#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ 7 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3LPB_DDRSetting160MHz[] = { /* DPLL Clock Setting */
+ {0x0f000820, 0x03F137DB},
+ {0x0f000810, 0x01842795},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ {0x0f000840, 0x0FFF0400},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+	{0x0f003050, 0x00000021}, /* flash/eeprom clock divisor, which sets the flash clock to 20 MHz */
+	{0x0F00a084, 0x1Cffffff}, /* Now dump from here into internal memory */
+ {0x0F00a080, 0x1C000000},
+ {0x0F00A000, 0x00000016},
+ {0x0f007000, 0x00010001},
+ {0x0f007004, 0x01000001},
+ {0x0f007008, 0x01000101},
+ {0x0f00700c, 0x00000000},
+ {0x0f007010, 0x01000100},
+ {0x0f007014, 0x01000100},
+ {0x0f007018, 0x01000000},
+ {0x0f00701c, 0x01020000},
+ {0x0f007020, 0x04030107},
+ {0x0f007024, 0x02000007},
+ {0x0f007028, 0x02020200},
+ {0x0f00702c, 0x0206060a},
+ {0x0f007030, 0x050d0d00},
+ {0x0f007034, 0x00000003},
+ {0x0f007038, 0x170a0200},
+ {0x0f00703c, 0x02101012},
+ {0x0f007040, 0x45161200},
+ {0x0f007044, 0x11250c00},
+ {0x0f007048, 0x04da0307},
+ {0x0f00704c, 0x00000000},
+ {0x0f007050, 0x0000001c},
+ {0x0f007054, 0x00000000},
+ {0x0f007058, 0x00000000},
+ {0x0f00705c, 0x00000000},
+ {0x0f007060, 0x00142bb6},
+ {0x0f007064, 0x20430014},
+ {0x0f007068, 0x00000000},
+ {0x0f00706c, 0x00000001},
+ {0x0f007070, 0x00009000},
+ {0x0f007074, 0x00000000},
+ {0x0f007078, 0x00000000},
+ {0x0f00707c, 0x00000000},
+ {0x0f007080, 0x00000000},
+ {0x0f007084, 0x00000000},
+ {0x0f007088, 0x01000001},
+ {0x0f00708c, 0x00000101},
+ {0x0f007090, 0x00000000},
+ {0x0f007094, 0x00040000},
+ {0x0f007098, 0x00000000},
+ {0x0F0070C8, 0x00000104},
+ {0x0F007018, 0x01010000}
};
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 7 //index for 0x0F007000
-static struct bcm_ddr_setting asT3LPB_DDRSetting133MHz[]= {// # DPLL Clock Setting
- {0x0f000820,0x03F1365B},
- {0x0f000810,0x00002F95},
- {0x0f000880,0x000003DD},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0f000840,0x0FFF0000},
- {0x0f000860,0x00000000},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0f003050,0x00000021},//flash/eeprom clock divisor which set the flash clock to 20 MHz
- {0x0F00a084,0x1Cffffff},//dump from here in internal memory
- {0x0F00a080,0x1C000000},
- {0x0F00A000,0x00000016},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020001},// POP - 0x00020001 Normal 0x01020001
- {0x0F007020,0x04030107}, //Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x02000007},
- {0x0F007028,0x02020200},
- {0x0F00702c,0x0206060a},//ROB- 0x0205050a,//0x0206060a
- {0x0F007030,0x05000000},
- {0x0F007034,0x00000003},
- {0x0F007038,0x190a0200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200
- {0x0F00703C,0x02101017},//ROB - 0x02101010,//0x02101018,
- {0x0F007040,0x45171200},//ROB - 0x45751200,//0x450f1200,
- {0x0F007044,0x11290D00},//ROB - 0x110a0d00//0x111f0d00
- {0x0F007048,0x04080306},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0100001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x0010245F},
- {0x0F007064,0x00000010},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00007000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- {0x0F007088,0x01000001},
- {0x0F00708c,0x00000101},
- {0x0F007090,0x00000000},
- //# Enable BW improvement within memory controller
- {0x0F007094,0x00040000},
- {0x0F007098,0x00000000},
- {0x0F0070c8,0x00000104},
- //# Enable 2 ports within X-bar
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
+#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 7 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3LPB_DDRSetting133MHz[] = { /* DPLL Clock Setting */
+ {0x0f000820, 0x03F1365B},
+ {0x0f000810, 0x00002F95},
+ {0x0f000880, 0x000003DD},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000840, 0x0FFF0000},
+ {0x0f000860, 0x00000000},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+	{0x0f003050, 0x00000021}, /* flash/eeprom clock divisor, which sets the flash clock to 20 MHz */
+	{0x0F00a084, 0x1Cffffff}, /* dump from here into internal memory */
+ {0x0F00a080, 0x1C000000},
+ {0x0F00A000, 0x00000016},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020001},
+ {0x0F007020, 0x04030107},
+ {0x0F007024, 0x02000007},
+ {0x0F007028, 0x02020200},
+ {0x0F00702c, 0x0206060a},
+ {0x0F007030, 0x05000000},
+ {0x0F007034, 0x00000003},
+ {0x0F007038, 0x190a0200},
+ {0x0F00703C, 0x02101017},
+ {0x0F007040, 0x45171200},
+ {0x0F007044, 0x11290D00},
+ {0x0F007048, 0x04080306},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0100001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x0010245F},
+ {0x0F007064, 0x00000010},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00007000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ {0x0F007088, 0x01000001},
+ {0x0F00708c, 0x00000101},
+ {0x0F007090, 0x00000000},
+ /* Enable BW improvement within memory controller */
+ {0x0F007094, 0x00040000},
+ {0x0F007098, 0x00000000},
+ {0x0F0070c8, 0x00000104},
+ /* Enable 2 ports within X-bar */
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
};
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 8 //index for 0x0F007000
-static struct bcm_ddr_setting asT3LPB_DDRSetting100MHz[]= {// # DPLL Clock Setting
- {0x0f000810,0x00002F95},
- {0x0f000820,0x03F1369B},
- {0x0f000840,0x0fff0000},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0f000840,0x0FFF0000},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0f003050,0x00000021},//flash/eeprom clock divisor which set the flash clock to 20 MHz
- {0x0F00a084,0x1Cffffff}, //dump from here in internal memory
- {0x0F00a080,0x1C000000},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020000},// POP - 0x00020001 Normal 0x01020001
- {0x0F007020,0x04020107}, //Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x00000007},
- {0x0F007028,0x01020200},
- {0x0F00702c,0x0204040a},//ROB- 0x0205050a,//0x0206060a
- {0x0F007030,0x06000000},
- {0x0F007034,0x00000004},
- {0x0F007038,0x1F080200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200
- {0x0F00703C,0x0203031F},//ROB - 0x02101010,//0x02101018,
- {0x0F007040,0x6e001200},//ROB - 0x45751200,//0x450f1200,
- {0x0F007044,0x011a0a00},//ROB - 0x110a0d00//0x111f0d00
- {0x0F007048,0x03000305},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0100001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x00082ED6},
- {0x0F007064,0x0000000A},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00005000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- {0x0F007088,0x01000001},
- {0x0F00708c,0x00000101},
- {0x0F007090,0x00000000},
- {0x0F007094,0x00010000},
- {0x0F007098,0x00000000},
- {0x0F0070C8,0x00000104},
- //# Enable 2 ports within X-bar
- {0x0F00A000,0x00000016},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
+#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 8 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3LPB_DDRSetting100MHz[] = { /* DPLL Clock Setting */
+ {0x0f000810, 0x00002F95},
+ {0x0f000820, 0x03F1369B},
+ {0x0f000840, 0x0fff0000},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000840, 0x0FFF0000},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+	{0x0f003050, 0x00000021}, /* flash/eeprom clock divisor, which sets the flash clock to 20 MHz */
+	{0x0F00a084, 0x1Cffffff}, /* dump from here into internal memory */
+ {0x0F00a080, 0x1C000000},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020000},
+ {0x0F007020, 0x04020107},
+ {0x0F007024, 0x00000007},
+ {0x0F007028, 0x01020200},
+ {0x0F00702c, 0x0204040a},
+ {0x0F007030, 0x06000000},
+ {0x0F007034, 0x00000004},
+ {0x0F007038, 0x1F080200},
+ {0x0F00703C, 0x0203031F},
+ {0x0F007040, 0x6e001200},
+ {0x0F007044, 0x011a0a00},
+ {0x0F007048, 0x03000305},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0100001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x00082ED6},
+ {0x0F007064, 0x0000000A},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00005000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ {0x0F007088, 0x01000001},
+ {0x0F00708c, 0x00000101},
+ {0x0F007090, 0x00000000},
+ {0x0F007094, 0x00010000},
+ {0x0F007098, 0x00000000},
+ {0x0F0070C8, 0x00000104},
+ /* Enable 2 ports within X-bar */
+ {0x0F00A000, 0x00000016},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
};
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 7 //index for 0x0F007000
-static struct bcm_ddr_setting asT3LPB_DDRSetting80MHz[]= {// # DPLL Clock Setting
- {0x0f000820,0x07F13FFF},
- {0x0f000810,0x00002F95},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- {0x0f000840,0x0FFF1F00},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0f003050,0x00000021},//flash/eeprom clock divisor which set the flash clock to 20 MHz
- {0x0F00a084,0x1Cffffff},// dump from here in internal memory
- {0x0F00a080,0x1C000000},
- {0x0F00A000,0x00000016},
- {0x0f007000,0x00010001},
- {0x0f007004,0x01000000},
- {0x0f007008,0x01000001},
- {0x0f00700c,0x00000000},
- {0x0f007010,0x01000000},
- {0x0f007014,0x01000100},
- {0x0f007018,0x01000000},
- {0x0f00701c,0x01020000},
- {0x0f007020,0x04020107},
- {0x0f007024,0x00000007},
- {0x0f007028,0x02020200},
- {0x0f00702c,0x0204040a},
- {0x0f007030,0x04000000},
- {0x0f007034,0x00000002},
- {0x0f007038,0x1d060200},
- {0x0f00703c,0x1c22221d},
- {0x0f007040,0x8A116600},
- {0x0f007044,0x222d0800},
- {0x0f007048,0x02690204},
- {0x0f00704c,0x00000000},
- {0x0f007050,0x0100001c},
- {0x0f007054,0x00000000},
- {0x0f007058,0x00000000},
- {0x0f00705c,0x00000000},
- {0x0f007060,0x000A15D6},
- {0x0f007064,0x0000000A},
- {0x0f007068,0x00000000},
- {0x0f00706c,0x00000001},
- {0x0f007070,0x00004000},
- {0x0f007074,0x00000000},
- {0x0f007078,0x00000000},
- {0x0f00707c,0x00000000},
- {0x0f007080,0x00000000},
- {0x0f007084,0x00000000},
- {0x0f007088,0x01000001},
- {0x0f00708c,0x00000101},
- {0x0f007090,0x00000000},
- {0x0f007094,0x00010000},
- {0x0f007098,0x00000000},
- {0x0F0070C8,0x00000104},
- {0x0F007018,0x01010000}
+#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 7 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3LPB_DDRSetting80MHz[] = { /* DPLL Clock Setting */
+ {0x0f000820, 0x07F13FFF},
+ {0x0f000810, 0x00002F95},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ {0x0f000840, 0x0FFF1F00},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+	{0x0f003050, 0x00000021}, /* flash/eeprom clock divisor, which sets the flash clock to 20 MHz */
+	{0x0F00a084, 0x1Cffffff}, /* dump from here into internal memory */
+ {0x0F00a080, 0x1C000000},
+ {0x0F00A000, 0x00000016},
+ {0x0f007000, 0x00010001},
+ {0x0f007004, 0x01000000},
+ {0x0f007008, 0x01000001},
+ {0x0f00700c, 0x00000000},
+ {0x0f007010, 0x01000000},
+ {0x0f007014, 0x01000100},
+ {0x0f007018, 0x01000000},
+ {0x0f00701c, 0x01020000},
+ {0x0f007020, 0x04020107},
+ {0x0f007024, 0x00000007},
+ {0x0f007028, 0x02020200},
+ {0x0f00702c, 0x0204040a},
+ {0x0f007030, 0x04000000},
+ {0x0f007034, 0x00000002},
+ {0x0f007038, 0x1d060200},
+ {0x0f00703c, 0x1c22221d},
+ {0x0f007040, 0x8A116600},
+ {0x0f007044, 0x222d0800},
+ {0x0f007048, 0x02690204},
+ {0x0f00704c, 0x00000000},
+ {0x0f007050, 0x0100001c},
+ {0x0f007054, 0x00000000},
+ {0x0f007058, 0x00000000},
+ {0x0f00705c, 0x00000000},
+ {0x0f007060, 0x000A15D6},
+ {0x0f007064, 0x0000000A},
+ {0x0f007068, 0x00000000},
+ {0x0f00706c, 0x00000001},
+ {0x0f007070, 0x00004000},
+ {0x0f007074, 0x00000000},
+ {0x0f007078, 0x00000000},
+ {0x0f00707c, 0x00000000},
+ {0x0f007080, 0x00000000},
+ {0x0f007084, 0x00000000},
+ {0x0f007088, 0x01000001},
+ {0x0f00708c, 0x00000101},
+ {0x0f007090, 0x00000000},
+ {0x0f007094, 0x00010000},
+ {0x0f007098, 0x00000000},
+ {0x0F0070C8, 0x00000104},
+ {0x0F007018, 0x01010000}
};
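+
+/*
+ * ddr_init() below writes every {address, value} pair of the selected table
+ * via wrmalt(), substituting uiClockSetting for the MIPS_CLOCK_REG entry.
+ * download_ddr_settings() skips the leading *_SKIP_CLOCK_PROGRAM_DUMP_*
+ * entries (the DPLL/clock programming) and dumps only the memory-controller
+ * settings, prefixed by a register count and the 0x1d1e0dd0 signature, into
+ * internal device memory.
+ */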
int ddr_init(struct bcm_mini_adapter *Adapter)
{
- struct bcm_ddr_setting *psDDRSetting=NULL;
- ULONG RegCount=0;
+ struct bcm_ddr_setting *psDDRSetting = NULL;
+ ULONG RegCount = 0;
UINT value = 0;
UINT uiResetValue = 0;
UINT uiClockSetting = 0;
int retval = STATUS_SUCCESS;
- switch (Adapter->chip_id)
- {
+ switch (Adapter->chip_id) {
case 0xbece3200:
- switch (Adapter->DDRSetting)
- {
- case DDR_80_MHZ:
- psDDRSetting=asT3LP_DDRSetting80MHz;
- RegCount=(sizeof(asT3LP_DDRSetting80MHz)/
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_100_MHZ:
- psDDRSetting=asT3LP_DDRSetting100MHz;
- RegCount=(sizeof(asT3LP_DDRSetting100MHz)/
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_133_MHZ:
- psDDRSetting=asT3LP_DDRSetting133MHz;
- RegCount=(sizeof(asT3LP_DDRSetting133MHz)/
- sizeof(struct bcm_ddr_setting));
- if(Adapter->bMipsConfig == MIPS_200_MHZ)
- {
- uiClockSetting = 0x03F13652;
- }
- else
- {
- uiClockSetting = 0x03F1365B;
- }
- break;
- default:
- return -EINVAL;
- }
+ switch (Adapter->DDRSetting) {
+ case DDR_80_MHZ:
+ psDDRSetting = asT3LP_DDRSetting80MHz;
+ RegCount = (sizeof(asT3LP_DDRSetting80MHz)/
+ sizeof(struct bcm_ddr_setting));
+ break;
+ case DDR_100_MHZ:
+ psDDRSetting = asT3LP_DDRSetting100MHz;
+ RegCount = (sizeof(asT3LP_DDRSetting100MHz)/
+ sizeof(struct bcm_ddr_setting));
+ break;
+ case DDR_133_MHZ:
+ psDDRSetting = asT3LP_DDRSetting133MHz;
+ RegCount = (sizeof(asT3LP_DDRSetting133MHz)/
+ sizeof(struct bcm_ddr_setting));
+ if (Adapter->bMipsConfig == MIPS_200_MHZ)
+ uiClockSetting = 0x03F13652;
+ else
+ uiClockSetting = 0x03F1365B;
+ break;
+ default:
+ return -EINVAL;
+ }
break;
case T3LPB:
case BCS220_2:
case BCS220_2BC:
case BCS250_BC:
- case BCS220_3 :
+ case BCS220_3:
/* Set bit 2 and bit 6 to 1 for BBIC 2mA drive
* (please check current value and additionally set these bits)
*/
- if( (Adapter->chip_id != BCS220_2) &&
- (Adapter->chip_id != BCS220_2BC) &&
- (Adapter->chip_id != BCS220_3) )
- {
- retval= rdmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
- return retval;
- }
- uiResetValue |= 0x44;
- retval = wrmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
- return retval;
- }
+ if ((Adapter->chip_id != BCS220_2) &&
+ (Adapter->chip_id != BCS220_2BC) &&
+ (Adapter->chip_id != BCS220_3)) {
+			retval = rdmalt(Adapter, (UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
+ if (retval < 0) {
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
+ return retval;
}
- switch(Adapter->DDRSetting)
- {
+ uiResetValue |= 0x44;
+			retval = wrmalt(Adapter, (UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
+ if (retval < 0) {
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
+ return retval;
+ }
+ }
+ switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3LPB_DDRSetting80MHz;
- RegCount=(sizeof(asT3B_DDRSetting80MHz)/
- sizeof(struct bcm_ddr_setting));
+ case DDR_80_MHZ:
+ psDDRSetting = asT3LPB_DDRSetting80MHz;
+		RegCount = (sizeof(asT3LPB_DDRSetting80MHz)/
+ sizeof(struct bcm_ddr_setting));
break;
- case DDR_100_MHZ:
- psDDRSetting=asT3LPB_DDRSetting100MHz;
- RegCount=(sizeof(asT3B_DDRSetting100MHz)/
- sizeof(struct bcm_ddr_setting));
+ case DDR_100_MHZ:
+ psDDRSetting = asT3LPB_DDRSetting100MHz;
+		RegCount = (sizeof(asT3LPB_DDRSetting100MHz)/
+ sizeof(struct bcm_ddr_setting));
break;
- case DDR_133_MHZ:
- psDDRSetting = asT3LPB_DDRSetting133MHz;
- RegCount=(sizeof(asT3B_DDRSetting133MHz)/
- sizeof(struct bcm_ddr_setting));
+ case DDR_133_MHZ:
+ psDDRSetting = asT3LPB_DDRSetting133MHz;
+		RegCount = (sizeof(asT3LPB_DDRSetting133MHz)/
+ sizeof(struct bcm_ddr_setting));
- if(Adapter->bMipsConfig == MIPS_200_MHZ)
- {
- uiClockSetting = 0x03F13652;
- }
- else
- {
- uiClockSetting = 0x03F1365B;
- }
+ if (Adapter->bMipsConfig == MIPS_200_MHZ)
+ uiClockSetting = 0x03F13652;
+ else
+ uiClockSetting = 0x03F1365B;
break;
- case DDR_160_MHZ:
- psDDRSetting = asT3LPB_DDRSetting160MHz;
- RegCount = sizeof(asT3LPB_DDRSetting160MHz)/sizeof(struct bcm_ddr_setting);
+ case DDR_160_MHZ:
+ psDDRSetting = asT3LPB_DDRSetting160MHz;
+ RegCount = sizeof(asT3LPB_DDRSetting160MHz)/sizeof(struct bcm_ddr_setting);
- if(Adapter->bMipsConfig == MIPS_200_MHZ)
- {
- uiClockSetting = 0x03F137D2;
- }
- else
- {
- uiClockSetting = 0x03F137DB;
- }
- }
+ if (Adapter->bMipsConfig == MIPS_200_MHZ)
+ uiClockSetting = 0x03F137D2;
+ else
+ uiClockSetting = 0x03F137DB;
+ }
break;
case 0xbece0110:
@@ -888,68 +871,59 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
case 0xbece0121:
case 0xbece0130:
case 0xbece0300:
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "DDR Setting: %x\n", Adapter->DDRSetting);
- switch (Adapter->DDRSetting)
- {
- case DDR_80_MHZ:
- psDDRSetting = asT3_DDRSetting80MHz;
- RegCount = (sizeof(asT3_DDRSetting80MHz)/
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3_DDRSetting100MHz;
- RegCount = (sizeof(asT3_DDRSetting100MHz)/
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_133_MHZ:
- psDDRSetting = asT3_DDRSetting133MHz;
- RegCount = (sizeof(asT3_DDRSetting133MHz)/
- sizeof(struct bcm_ddr_setting));
- break;
- default:
- return -EINVAL;
- }
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "DDR Setting: %x\n", Adapter->DDRSetting);
+ switch (Adapter->DDRSetting) {
+ case DDR_80_MHZ:
+ psDDRSetting = asT3_DDRSetting80MHz;
+ RegCount = (sizeof(asT3_DDRSetting80MHz)/
+ sizeof(struct bcm_ddr_setting));
+ break;
+ case DDR_100_MHZ:
+ psDDRSetting = asT3_DDRSetting100MHz;
+ RegCount = (sizeof(asT3_DDRSetting100MHz)/
+ sizeof(struct bcm_ddr_setting));
+ break;
+ case DDR_133_MHZ:
+ psDDRSetting = asT3_DDRSetting133MHz;
+ RegCount = (sizeof(asT3_DDRSetting133MHz)/
+ sizeof(struct bcm_ddr_setting));
+ break;
+ default:
+ return -EINVAL;
+ }
case 0xbece0310:
{
- switch (Adapter->DDRSetting)
- {
- case DDR_80_MHZ:
- psDDRSetting = asT3B_DDRSetting80MHz;
- RegCount=(sizeof(asT3B_DDRSetting80MHz)/
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_100_MHZ:
- psDDRSetting=asT3B_DDRSetting100MHz;
- RegCount=(sizeof(asT3B_DDRSetting100MHz)/
- sizeof(struct bcm_ddr_setting));
+ switch (Adapter->DDRSetting) {
+ case DDR_80_MHZ:
+ psDDRSetting = asT3B_DDRSetting80MHz;
+ RegCount = (sizeof(asT3B_DDRSetting80MHz)/
+ sizeof(struct bcm_ddr_setting));
+ break;
+ case DDR_100_MHZ:
+ psDDRSetting = asT3B_DDRSetting100MHz;
+ RegCount = (sizeof(asT3B_DDRSetting100MHz)/
+ sizeof(struct bcm_ddr_setting));
break;
- case DDR_133_MHZ:
+ case DDR_133_MHZ:
- if(Adapter->bDPLLConfig == PLL_266_MHZ)//266Mhz PLL selected.
- {
- memcpy(asT3B_DDRSetting133MHz, asDPLL_266MHZ,
- sizeof(asDPLL_266MHZ));
- psDDRSetting = asT3B_DDRSetting133MHz;
- RegCount=(sizeof(asT3B_DDRSetting133MHz)/
- sizeof(struct bcm_ddr_setting));
- }
+ if (Adapter->bDPLLConfig == PLL_266_MHZ) { /* 266Mhz PLL selected. */
+ memcpy(asT3B_DDRSetting133MHz, asDPLL_266MHZ,
+ sizeof(asDPLL_266MHZ));
+ psDDRSetting = asT3B_DDRSetting133MHz;
+ RegCount = (sizeof(asT3B_DDRSetting133MHz)/
+ sizeof(struct bcm_ddr_setting));
+ } else {
+ psDDRSetting = asT3B_DDRSetting133MHz;
+ RegCount = (sizeof(asT3B_DDRSetting133MHz)/
+ sizeof(struct bcm_ddr_setting));
+ if (Adapter->bMipsConfig == MIPS_200_MHZ)
+ uiClockSetting = 0x07F13652;
else
- {
- psDDRSetting = asT3B_DDRSetting133MHz;
- RegCount=(sizeof(asT3B_DDRSetting133MHz)/
- sizeof(struct bcm_ddr_setting));
- if(Adapter->bMipsConfig == MIPS_200_MHZ)
- {
- uiClockSetting = 0x07F13652;
- }
- else
- {
- uiClockSetting = 0x07F1365B;
- }
- }
- break;
- default:
- return -EINVAL;
+ uiClockSetting = 0x07F1365B;
+ }
+ break;
+ default:
+ return -EINVAL;
}
break;
@@ -958,20 +932,15 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
return -EINVAL;
}
- value=0;
+ value = 0;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Register Count is =%lu\n", RegCount);
- while(RegCount && !retval)
- {
- if(uiClockSetting && psDDRSetting->ulRegAddress == MIPS_CLOCK_REG)
- {
+ while (RegCount && !retval) {
+ if (uiClockSetting && psDDRSetting->ulRegAddress == MIPS_CLOCK_REG)
value = uiClockSetting;
- }
else
- {
value = psDDRSetting->ulRegValue;
- }
retval = wrmalt(Adapter, psDDRSetting->ulRegAddress, &value, sizeof(value));
- if(STATUS_SUCCESS != retval) {
+ if (STATUS_SUCCESS != retval) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
break;
}
@@ -980,36 +949,34 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
psDDRSetting++;
}
- if(Adapter->chip_id >= 0xbece3300 )
- {
+ if (Adapter->chip_id >= 0xbece3300) {
mdelay(3);
- if( (Adapter->chip_id != BCS220_2)&&
- (Adapter->chip_id != BCS220_2BC)&&
- (Adapter->chip_id != BCS220_3))
- {
+ if ((Adapter->chip_id != BCS220_2) &&
+ (Adapter->chip_id != BCS220_2BC) &&
+ (Adapter->chip_id != BCS220_3)) {
/* drive MDDR to half in case of UMA-B: */
uiResetValue = 0x01010001;
retval = wrmalt(Adapter, (UINT)0x0F007018, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x00040020;
retval = wrmalt(Adapter, (UINT)0x0F007094, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x01020101;
retval = wrmalt(Adapter, (UINT)0x0F00701c, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x01010000;
retval = wrmalt(Adapter, (UINT)0x0F007018, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
@@ -1020,75 +987,72 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
* This is to be done only for Hybrid PMU mode.
* with the current h/w there is no way to detect this.
* and since we dont have internal PMU lets do it under UMA-B chip id.
- * we will change this when we will have internal PMU.
- */
- if(Adapter->PmuMode == HYBRID_MODE_7C)
- {
+	 * we will change this when we have an internal PMU.
+ */
+ if (Adapter->PmuMode == HYBRID_MODE_7C) {
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x1322a8;
retval = wrmalt(Adapter, (UINT)0x0f000d1c, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x132296;
retval = wrmalt(Adapter, (UINT)0x0f000d14, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
- }
- else if(Adapter->PmuMode == HYBRID_MODE_6 )
- {
+ } else if (Adapter->PmuMode == HYBRID_MODE_6) {
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x6003229a;
retval = wrmalt(Adapter, (UINT)0x0f000d14, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x1322a8;
retval = wrmalt(Adapter, (UINT)0x0f000d1c, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
@@ -1101,179 +1065,167 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
int download_ddr_settings(struct bcm_mini_adapter *Adapter)
{
- struct bcm_ddr_setting *psDDRSetting=NULL;
- ULONG RegCount=0;
+ struct bcm_ddr_setting *psDDRSetting = NULL;
+ ULONG RegCount = 0;
unsigned long ul_ddr_setting_load_addr = DDR_DUMP_INTERNAL_DEVICE_MEMORY;
UINT value = 0;
int retval = STATUS_SUCCESS;
bool bOverrideSelfRefresh = false;
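+	/* when set (133/160 MHz), bit 8 is ORed into the 0x0F007018 value in the dump below */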
- switch (Adapter->chip_id)
- {
+ switch (Adapter->chip_id) {
case 0xbece3200:
- switch (Adapter->DDRSetting)
- {
- case DDR_80_MHZ:
- psDDRSetting = asT3LP_DDRSetting80MHz;
- RegCount = ARRAY_SIZE(asT3LP_DDRSetting80MHz);
- RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ ;
- psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ switch (Adapter->DDRSetting) {
+ case DDR_80_MHZ:
+ psDDRSetting = asT3LP_DDRSetting80MHz;
+ RegCount = ARRAY_SIZE(asT3LP_DDRSetting80MHz);
+ RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ break;
+ case DDR_100_MHZ:
+ psDDRSetting = asT3LP_DDRSetting100MHz;
+ RegCount = ARRAY_SIZE(asT3LP_DDRSetting100MHz);
+ RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ break;
+ case DDR_133_MHZ:
+ bOverrideSelfRefresh = TRUE;
+ psDDRSetting = asT3LP_DDRSetting133MHz;
+ RegCount = ARRAY_SIZE(asT3LP_DDRSetting133MHz);
+ RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
+ psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
break;
- case DDR_100_MHZ:
- psDDRSetting = asT3LP_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3LP_DDRSetting100MHz);
- RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ ;
- psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3LP_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3LP_DDRSetting133MHz);
- RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ;
- psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- break;
default:
return -EINVAL;
- }
+ }
break;
case T3LPB:
case BCS220_2:
case BCS220_2BC:
case BCS250_BC:
- case BCS220_3 :
- switch (Adapter->DDRSetting)
- {
- case DDR_80_MHZ:
- psDDRSetting = asT3LPB_DDRSetting80MHz;
- RegCount=ARRAY_SIZE(asT3LPB_DDRSetting80MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ case BCS220_3:
+ switch (Adapter->DDRSetting) {
+ case DDR_80_MHZ:
+ psDDRSetting = asT3LPB_DDRSetting80MHz;
+ RegCount = ARRAY_SIZE(asT3LPB_DDRSetting80MHz);
+ RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ break;
+ case DDR_100_MHZ:
+ psDDRSetting = asT3LPB_DDRSetting100MHz;
+ RegCount = ARRAY_SIZE(asT3LPB_DDRSetting100MHz);
+ RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ break;
+ case DDR_133_MHZ:
+ bOverrideSelfRefresh = TRUE;
+ psDDRSetting = asT3LPB_DDRSetting133MHz;
+ RegCount = ARRAY_SIZE(asT3LPB_DDRSetting133MHz);
+ RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
+ psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
break;
- case DDR_100_MHZ:
- psDDRSetting = asT3LPB_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3LPB_DDRSetting100MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3LPB_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3LPB_DDRSetting133MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- break;
- case DDR_160_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3LPB_DDRSetting160MHz;
- RegCount = ARRAY_SIZE(asT3LPB_DDRSetting160MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ;
+ case DDR_160_MHZ:
+ bOverrideSelfRefresh = TRUE;
+ psDDRSetting = asT3LPB_DDRSetting160MHz;
+ RegCount = ARRAY_SIZE(asT3LPB_DDRSetting160MHz);
+ RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ;
+ psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ;
- break;
- default:
- return -EINVAL;
- }
+ break;
+ default:
+ return -EINVAL;
+ }
break;
case 0xbece0300:
- switch (Adapter->DDRSetting)
- {
- case DDR_80_MHZ:
- psDDRSetting = asT3_DDRSetting80MHz;
- RegCount = ARRAY_SIZE(asT3_DDRSetting80MHz);
- RegCount-=T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ ;
- psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ switch (Adapter->DDRSetting) {
+ case DDR_80_MHZ:
+ psDDRSetting = asT3_DDRSetting80MHz;
+ RegCount = ARRAY_SIZE(asT3_DDRSetting80MHz);
+ RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
break;
- case DDR_100_MHZ:
- psDDRSetting = asT3_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3_DDRSetting100MHz);
- RegCount-=T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ ;
- psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- psDDRSetting = asT3_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3_DDRSetting133MHz);
- RegCount-=T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ;
- psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ;
- break;
- default:
- return -EINVAL;
- }
+ case DDR_100_MHZ:
+ psDDRSetting = asT3_DDRSetting100MHz;
+ RegCount = ARRAY_SIZE(asT3_DDRSetting100MHz);
+ RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ break;
+ case DDR_133_MHZ:
+ psDDRSetting = asT3_DDRSetting133MHz;
+ RegCount = ARRAY_SIZE(asT3_DDRSetting133MHz);
+ RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
+ psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
break;
case 0xbece0310:
{
- switch (Adapter->DDRSetting)
- {
- case DDR_80_MHZ:
- psDDRSetting = asT3B_DDRSetting80MHz;
- RegCount = ARRAY_SIZE(asT3B_DDRSetting80MHz);
- RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ ;
- psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3B_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3B_DDRSetting100MHz);
- RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ ;
- psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3B_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3B_DDRSetting133MHz);
- RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ;
- psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- break;
- }
- break;
+ switch (Adapter->DDRSetting) {
+ case DDR_80_MHZ:
+ psDDRSetting = asT3B_DDRSetting80MHz;
+ RegCount = ARRAY_SIZE(asT3B_DDRSetting80MHz);
+ RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ break;
+ case DDR_100_MHZ:
+ psDDRSetting = asT3B_DDRSetting100MHz;
+ RegCount = ARRAY_SIZE(asT3B_DDRSetting100MHz);
+ RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ break;
+ case DDR_133_MHZ:
+ bOverrideSelfRefresh = TRUE;
+ psDDRSetting = asT3B_DDRSetting133MHz;
+ RegCount = ARRAY_SIZE(asT3B_DDRSetting133MHz);
+ RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
+ psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
+ break;
+ }
+ break;
}
default:
return -EINVAL;
}
- //total number of Register that has to be dumped
- value =RegCount ;
+	/* total number of registers that have to be dumped */
+ value = RegCount;
retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, sizeof(value));
- if(retval)
- {
+ if (retval) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
return retval;
}
ul_ddr_setting_load_addr += sizeof(ULONG);
- /*signature */
- value =(0x1d1e0dd0);
+ /* signature */
+ value = (0x1d1e0dd0);
retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, sizeof(value));
- if(retval)
- {
+ if (retval) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
return retval;
}
ul_ddr_setting_load_addr += sizeof(ULONG);
- RegCount*=(sizeof(struct bcm_ddr_setting)/sizeof(ULONG));
+ RegCount *= (sizeof(struct bcm_ddr_setting)/sizeof(ULONG));
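+	/* RegCount is now in ULONG units: one address and one value per entry */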
- while(RegCount && !retval)
- {
- value = psDDRSetting->ulRegAddress ;
- retval = wrmalt( Adapter, ul_ddr_setting_load_addr, &value, sizeof(value));
+ while (RegCount && !retval) {
+ value = psDDRSetting->ulRegAddress;
+ retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, sizeof(value));
ul_ddr_setting_load_addr += sizeof(ULONG);
- if(!retval)
- {
- if(bOverrideSelfRefresh && (psDDRSetting->ulRegAddress == 0x0F007018))
- {
+ if (!retval) {
+ if (bOverrideSelfRefresh && (psDDRSetting->ulRegAddress == 0x0F007018)) {
value = (psDDRSetting->ulRegValue |(1<<8));
- if(STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr,
- &value, sizeof(value))){
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
- break;
- }
+ if (STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr,
+ &value, sizeof(value))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
+ break;
}
- else
- {
+ } else {
value = psDDRSetting->ulRegValue;
- if(STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr ,
- &value, sizeof(value))){
+			if (STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr,
+ &value, sizeof(value))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
break;
}
@@ -1283,7 +1235,5 @@ int download_ddr_settings(struct bcm_mini_adapter *Adapter)
RegCount--;
psDDRSetting++;
}
- return retval;
+ return retval;
}
-
-
diff --git a/drivers/staging/bcm/InterfaceDld.c b/drivers/staging/bcm/InterfaceDld.c
index 463bdee..005e460 100644
--- a/drivers/staging/bcm/InterfaceDld.c
+++ b/drivers/staging/bcm/InterfaceDld.c
@@ -20,18 +20,10 @@ int InterfaceFileDownload(PVOID arg, struct file *flp, unsigned int on_chip_loc)
MAX_TRANSFER_CTRL_BYTE_USB, &pos);
set_fs(oldfs);
if (len <= 0) {
- if (len < 0) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_INITEXIT, MP_INIT,
- DBG_LVL_ALL, "len < 0");
+ if (len < 0)
errno = len;
- } else {
+ else
errno = 0;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_INITEXIT, MP_INIT,
- DBG_LVL_ALL,
- "Got end of file!");
- }
break;
}
/* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_INITEXIT, MP_INIT,
@@ -39,12 +31,8 @@ int InterfaceFileDownload(PVOID arg, struct file *flp, unsigned int on_chip_loc)
* MAX_TRANSFER_CTRL_BYTE_USB);
*/
errno = InterfaceWRM(psIntfAdapter, on_chip_loc, buff, len);
- if (errno) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_PRINTK, 0, 0,
- "WRM Failed! status: %d", errno);
+ if (errno)
break;
- }
on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB;
}
@@ -52,7 +40,8 @@ int InterfaceFileDownload(PVOID arg, struct file *flp, unsigned int on_chip_loc)
return errno;
}
-int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_chip_loc)
+int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp,
+ unsigned int on_chip_loc)
{
char *buff, *buff_readback;
unsigned int reg = 0;
@@ -80,32 +69,28 @@ int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_c
while (1) {
oldfs = get_fs();
set_fs(get_ds());
- len = vfs_read(flp, (void __force __user *)buff, MAX_TRANSFER_CTRL_BYTE_USB, &pos);
+ len = vfs_read(flp, (void __force __user *)buff,
+ MAX_TRANSFER_CTRL_BYTE_USB, &pos);
set_fs(oldfs);
fw_down++;
if (len <= 0) {
- if (len < 0) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len < 0");
+ if (len < 0)
errno = len;
- } else {
+ else
errno = 0;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Got end of file!");
- }
break;
}
- bytes = InterfaceRDM(psIntfAdapter, on_chip_loc, buff_readback, len);
+ bytes = InterfaceRDM(psIntfAdapter, on_chip_loc,
+ buff_readback, len);
if (bytes < 0) {
Status = bytes;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "RDM of len %d Failed! %d", len, reg);
goto exit;
}
reg++;
if ((len-sizeof(unsigned int)) < 4) {
if (memcmp(buff_readback, buff, len)) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper %d", fw_down);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Length is: %d", len);
Status = -EIO;
goto exit;
}
@@ -113,10 +98,8 @@ int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_c
len -= 4;
while (len) {
- if (*(unsigned int *)&buff_readback[len] != *(unsigned int *)&buff[len]) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper %d", fw_down);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Val from Binary %x, Val From Read Back %x ", *(unsigned int *)&buff[len], *(unsigned int*)&buff_readback[len]);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len =%x!!!", len);
+ if (*(unsigned int *)&buff_readback[len] !=
+ *(unsigned int *)&buff[len]) {
Status = -EIO;
goto exit;
}
@@ -132,13 +115,15 @@ exit:
return Status;
}
-static int bcm_download_config_file(struct bcm_mini_adapter *Adapter, struct bcm_firmware_info *psFwInfo)
+static int bcm_download_config_file(struct bcm_mini_adapter *Adapter,
+ struct bcm_firmware_info *psFwInfo)
{
int retval = STATUS_SUCCESS;
B_UINT32 value = 0;
if (Adapter->pstargetparams == NULL) {
- Adapter->pstargetparams = kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL);
+ Adapter->pstargetparams =
+ kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL);
if (Adapter->pstargetparams == NULL)
return -ENOMEM;
}
@@ -146,7 +131,9 @@ static int bcm_download_config_file(struct bcm_mini_adapter *Adapter, struct bcm
if (psFwInfo->u32FirmwareLength != sizeof(struct bcm_target_params))
return -EIO;
- retval = copy_from_user(Adapter->pstargetparams, psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength);
+ retval = copy_from_user(Adapter->pstargetparams,
+ psFwInfo->pvMappedFirmwareAddress,
+ psFwInfo->u32FirmwareLength);
if (retval) {
kfree(Adapter->pstargetparams);
Adapter->pstargetparams = NULL;
@@ -160,52 +147,54 @@ static int bcm_download_config_file(struct bcm_mini_adapter *Adapter, struct bcm
BcmInitNVM(Adapter);
retval = InitLedSettings(Adapter);
- if (retval) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "INIT LED Failed\n");
+ if (retval)
return retval;
- }
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ if (Adapter->LEDInfo.led_thread_running &
+ BCM_LED_THREAD_RUNNING_ACTIVELY) {
Adapter->LEDInfo.bLedInitDone = false;
Adapter->DriverState = DRIVER_INIT;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ if (Adapter->LEDInfo.led_thread_running &
+ BCM_LED_THREAD_RUNNING_ACTIVELY) {
Adapter->DriverState = FW_DOWNLOAD;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
/* Initialize the DDR Controller */
retval = ddr_init(Adapter);
- if (retval) {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "DDR Init Failed\n");
+ if (retval)
return retval;
- }
value = 0;
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value));
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value));
+ wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4,
+ &value, sizeof(value));
+ wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8,
+ &value, sizeof(value));
if (Adapter->eNVMType == NVM_FLASH) {
retval = PropagateCalParamsFromFlashToMemory(Adapter);
- if (retval) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "propagaion of cal param failed with status :%d", retval);
+ if (retval)
return retval;
- }
}
- retval = buffDnldVerify(Adapter, (PUCHAR)Adapter->pstargetparams, sizeof(struct bcm_target_params), CONFIG_BEGIN_ADDR);
+ retval = buffDnldVerify(Adapter, (PUCHAR)Adapter->pstargetparams,
+ sizeof(struct bcm_target_params), CONFIG_BEGIN_ADDR);
if (retval)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "configuration file not downloaded properly");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT,
+ MP_INIT, DBG_LVL_ALL,
+ "configuration file not downloaded properly");
else
Adapter->bCfgDownloaded = TRUE;
return retval;
}
-int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_info *psFwInfo)
+int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter,
+ struct bcm_firmware_info *psFwInfo)
{
int retval = STATUS_SUCCESS;
PUCHAR buff = NULL;
@@ -215,9 +204,9 @@ int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_
* Application
*/
atomic_set(&Adapter->uiMBupdate, false);
- if (!Adapter->bCfgDownloaded && psFwInfo->u32StartingAddress != CONFIG_BEGIN_ADDR) {
+ if (!Adapter->bCfgDownloaded &&
+ psFwInfo->u32StartingAddress != CONFIG_BEGIN_ADDR) {
/* Can't Download Firmware. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Download the config File first\n");
return -EINVAL;
}
@@ -226,14 +215,13 @@ int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_
retval = bcm_download_config_file(Adapter, psFwInfo);
} else {
buff = kzalloc(psFwInfo->u32FirmwareLength, GFP_KERNEL);
- if (buff == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed in allocation memory");
+ if (buff == NULL)
return -ENOMEM;
- }
- retval = copy_from_user(buff, psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength);
+ retval = copy_from_user(buff,
+ psFwInfo->pvMappedFirmwareAddress,
+ psFwInfo->u32FirmwareLength);
if (retval != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "copying buffer from user space failed");
retval = -EFAULT;
goto error;
}
@@ -243,10 +231,8 @@ int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_
psFwInfo->u32FirmwareLength,
psFwInfo->u32StartingAddress);
- if (retval != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "f/w download failed status :%d", retval);
+ if (retval != STATUS_SUCCESS)
goto error;
- }
}
error:
@@ -254,7 +240,9 @@ error:
return retval;
}
-static INT buffDnld(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer, UINT u32FirmwareLength, ULONG u32StartingAddress)
+static INT buffDnld(struct bcm_mini_adapter *Adapter,
+ PUCHAR mappedbuffer, UINT u32FirmwareLength,
+ ULONG u32StartingAddress)
{
unsigned int len = 0;
int retval = STATUS_SUCCESS;
@@ -264,10 +252,8 @@ static INT buffDnld(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer, UINT
len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
retval = wrm(Adapter, u32StartingAddress, mappedbuffer, len);
- if (retval) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "wrm failed with status :%d", retval);
+ if (retval)
break;
- }
u32StartingAddress += len;
u32FirmwareLength -= len;
mappedbuffer += len;
@@ -275,17 +261,17 @@ static INT buffDnld(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer, UINT
return retval;
}
-static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer, UINT u32FirmwareLength, ULONG u32StartingAddress)
+static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter,
+ PUCHAR mappedbuffer, UINT u32FirmwareLength,
+ ULONG u32StartingAddress)
{
UINT len = u32FirmwareLength;
INT retval = STATUS_SUCCESS;
PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
int bytes;
- if (NULL == readbackbuff) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "MEMORY ALLOCATION FAILED");
+ if (NULL == readbackbuff)
return -ENOMEM;
- }
while (u32FirmwareLength && !retval) {
len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
@@ -293,7 +279,6 @@ static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer,
if (bytes < 0) {
retval = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "rdm failed with status %d", retval);
break;
}
@@ -312,21 +297,22 @@ static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer,
return retval;
}
-INT buffDnldVerify(struct bcm_mini_adapter *Adapter, unsigned char *mappedbuffer, unsigned int u32FirmwareLength, unsigned long u32StartingAddress)
+INT buffDnldVerify(struct bcm_mini_adapter *Adapter,
+ unsigned char *mappedbuffer,
+ unsigned int u32FirmwareLength,
+ unsigned long u32StartingAddress)
{
INT status = STATUS_SUCCESS;
- status = buffDnld(Adapter, mappedbuffer, u32FirmwareLength, u32StartingAddress);
- if (status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Buffer download failed");
+ status = buffDnld(Adapter, mappedbuffer,
+ u32FirmwareLength, u32StartingAddress);
+ if (status != STATUS_SUCCESS)
goto error;
- }
- status = buffRdbkVerify(Adapter, mappedbuffer, u32FirmwareLength, u32StartingAddress);
- if (status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Buffer readback verifier failed");
+ status = buffRdbkVerify(Adapter, mappedbuffer,
+ u32FirmwareLength, u32StartingAddress);
+ if (status != STATUS_SUCCESS)
goto error;
- }
error:
return status;
}
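
buffDnld() and buffRdbkVerify() above implement a plain download-then-readback
check over MAX_TRANSFER_CTRL_BYTE_USB-sized chunks. A chunk-interleaved sketch
of the same idea, not driver code; download_and_verify() and the
write_chunk()/read_chunk() callbacks are hypothetical names used only for
illustration:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Push 'len' bytes to 'addr' in fixed-size chunks, reading each chunk back
 * and comparing before moving on, mirroring buffDnld() + buffRdbkVerify(). */
static int download_and_verify(const u8 *src, size_t len, unsigned long addr,
			       int (*write_chunk)(unsigned long, const u8 *, size_t),
			       int (*read_chunk)(unsigned long, u8 *, size_t),
			       u8 *scratch, size_t chunk)
{
	while (len) {
		size_t n = min(len, chunk);
		int ret = write_chunk(addr, src, n);

		if (ret)
			return ret;
		ret = read_chunk(addr, scratch, n);
		if (ret)
			return ret;
		if (memcmp(scratch, src, n))
			return -EIO;	/* readback does not match what was written */
		addr += n;
		src += n;
		len -= n;
	}
	return 0;
}
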
diff --git a/drivers/staging/bcm/InterfaceIdleMode.c b/drivers/staging/bcm/InterfaceIdleMode.c
index 5959fbd..fecf81f 100644
--- a/drivers/staging/bcm/InterfaceIdleMode.c
+++ b/drivers/staging/bcm/InterfaceIdleMode.c
@@ -1,32 +1,37 @@
#include "headers.h"
/*
-Function: InterfaceIdleModeWakeup
+Function: InterfaceIdleModeWakeup
-Description: This is the hardware specific Function for waking up HW device from Idle mode.
- A software abort pattern is written to the device to wake it and necessary power state
- transitions from host are performed here.
+Description:	This is the hardware-specific function for
+		waking up the HW device from Idle mode.
+		A software abort pattern is written to the
+		device to wake it, and the necessary power state
+		transitions from the host are performed here.
-Input parameters: IN struct bcm_mini_adapter *Adapter - Miniport Adapter Context
+Input parameters: IN struct bcm_mini_adapter *Adapter
+ - Miniport Adapter Context
-
-Return: BCM_STATUS_SUCCESS - If Wakeup of the HW Interface was successful.
- Other - If an error occurred.
+Return: BCM_STATUS_SUCCESS - If Wakeup of the HW Interface
+ was successful.
+ Other - If an error occurred.
*/
-
/*
-Function: InterfaceIdleModeRespond
+Function: InterfaceIdleModeRespond
-Description: This is the hardware specific Function for responding to Idle mode request from target.
- Necessary power state transitions from host for idle mode or other device specific
- initializations are performed here.
+Description:	This is the hardware-specific function for
+		responding to an Idle mode request from the target.
+		The necessary power state transitions from the host
+		for idle mode, or other device-specific
+		initializations, are performed here.
-Input parameters: IN struct bcm_mini_adapter * Adapter - Miniport Adapter Context
+Input parameters: IN struct bcm_mini_adapter * Adapter
+ - Miniport Adapter Context
-
-Return: BCM_STATUS_SUCCESS - If Idle mode response related HW configuration was successful.
- Other - If an error occurred.
+Return: BCM_STATUS_SUCCESS - If Idle mode response related
+ HW configuration was successful.
+ Other - If an error occurred.
*/
/*
@@ -36,59 +41,59 @@ this value will be at address bfc02fa4.just before value d0ea1dle.
Set time value by writing at bfc02f98 7d0
checking the Ack timer expire on kannon by running command
-d qcslog .. if it shows e means host has not send response to f/w with in 200 ms. Response should be
+d qcslog .. if it shows e, it means the host has not sent the response
+to f/w within 200 ms. The response should be
 sent to f/w within 200 ms after the Idle/Shutdown req is issued
*/
-int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter, unsigned int *puiBuffer)
+int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter,
+ unsigned int *puiBuffer)
{
int status = STATUS_SUCCESS;
unsigned int uiRegRead = 0;
int bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "SubType of Message :0x%X", ntohl(*puiBuffer));
-
if (ntohl(*puiBuffer) == GO_TO_IDLE_MODE_PAYLOAD) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, " Got GO_TO_IDLE_MODE_PAYLOAD(210) Msg Subtype");
- if (ntohl(*(puiBuffer+1)) == 0 ) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Got IDLE MODE WAKE UP Response From F/W");
+ if (ntohl(*(puiBuffer+1)) == 0) {
- status = wrmalt (Adapter, SW_ABORT_IDLEMODE_LOC, &uiRegRead, sizeof(uiRegRead));
- if (status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg");
+ status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC,
+ &uiRegRead, sizeof(uiRegRead));
+ if (status)
return status;
- }
- if (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
- uiRegRead = 0x00000000 ;
- status = wrmalt (Adapter, DEBUG_INTERRUPT_GENERATOR_REGISTOR, &uiRegRead, sizeof(uiRegRead));
- if (status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg");
+ if (Adapter->ulPowerSaveMode ==
+ DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
+ uiRegRead = 0x00000000;
+ status = wrmalt(Adapter,
+ DEBUG_INTERRUPT_GENERATOR_REGISTOR,
+ &uiRegRead, sizeof(uiRegRead));
+ if (status)
return status;
- }
}
- /* Below Register should not br read in case of Manual and Protocol Idle mode */
- else if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
+			/* Below register should not be read in case of
+ * Manual and Protocol Idle mode */
+ else if (Adapter->ulPowerSaveMode !=
+ DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
/* clear on read Register */
- bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegRead, sizeof(uiRegRead));
+ bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0,
+ &uiRegRead, sizeof(uiRegRead));
if (bytes < 0) {
status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg0");
return status;
}
/* clear on read Register */
- bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1, &uiRegRead, sizeof(uiRegRead));
+ bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1,
+ &uiRegRead, sizeof(uiRegRead));
if (bytes < 0) {
status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg1");
return status;
}
}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Device Up from Idle Mode");
- /* Set Idle Mode Flag to False and Clear IdleMode reg. */
+ /* Set Idle Mode Flag to False and
+ * Clear IdleMode reg. */
Adapter->IdleMode = false;
Adapter->bTriedToWakeUpFromlowPowerMode = false;
@@ -96,124 +101,123 @@ int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter, unsigned int *pui
} else {
if (TRUE == Adapter->IdleMode)
- {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Device is already in Idle mode....");
- return status ;
- }
+ return status;
uiRegRead = 0;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Got Req from F/W to go in IDLE mode \n");
if (Adapter->chip_id == BCS220_2 ||
Adapter->chip_id == BCS220_2BC ||
Adapter->chip_id == BCS250_BC ||
Adapter->chip_id == BCS220_3) {
- bytes = rdmalt(Adapter, HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead));
+ bytes = rdmalt(Adapter, HPM_CONFIG_MSW,
+ &uiRegRead, sizeof(uiRegRead));
if (bytes < 0) {
status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "rdm failed while Reading HPM_CONFIG_LDO145 Reg 0\n");
return status;
}
uiRegRead |= (1<<17);
- status = wrmalt (Adapter, HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead));
- if (status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg\n");
+ status = wrmalt(Adapter, HPM_CONFIG_MSW,
+ &uiRegRead, sizeof(uiRegRead));
+ if (status)
return status;
- }
-
}
SendIdleModeResponse(Adapter);
}
} else if (ntohl(*puiBuffer) == IDLE_MODE_SF_UPDATE_MSG) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "OverRiding Service Flow Params");
OverrideServiceFlowParams(Adapter, puiBuffer);
}
return status;
}
-static int InterfaceAbortIdlemode(struct bcm_mini_adapter *Adapter, unsigned int Pattern)
+static int InterfaceAbortIdlemode(struct bcm_mini_adapter *Adapter,
+ unsigned int Pattern)
{
- int status = STATUS_SUCCESS;
+ int status = STATUS_SUCCESS;
unsigned int value;
- unsigned int chip_id ;
+ unsigned int chip_id;
unsigned long timeout = 0, itr = 0;
- int lenwritten = 0;
- unsigned char aucAbortPattern[8] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
- struct bcm_interface_adapter *psInterfaceAdapter = Adapter->pvInterfaceAdapter;
+ int lenwritten = 0;
+ unsigned char aucAbortPattern[8] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF};
+ struct bcm_interface_adapter *psInterfaceAdapter =
+ Adapter->pvInterfaceAdapter;
/* Abort Bus suspend if its already suspended */
- if ((TRUE == psInterfaceAdapter->bSuspended) && (TRUE == Adapter->bDoSuspend)) {
- status = usb_autopm_get_interface(psInterfaceAdapter->interface);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Bus got wakeup..Aborting Idle mode... status:%d \n", status);
-
- }
-
- if ((Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING)
- ||
- (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)) {
+ if ((TRUE == psInterfaceAdapter->bSuspended) &&
+ (TRUE == Adapter->bDoSuspend))
+ status = usb_autopm_get_interface(
+ psInterfaceAdapter->interface);
+
+ if ((Adapter->ulPowerSaveMode ==
+ DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) ||
+ (Adapter->ulPowerSaveMode ==
+ DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)) {
/* write the SW abort pattern. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Writing pattern<%d> to SW_ABORT_IDLEMODE_LOC\n", Pattern);
- status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, &Pattern, sizeof(Pattern));
- if (status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "WRM to Register SW_ABORT_IDLEMODE_LOC failed..");
- return status;
- }
+ status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC,
+ &Pattern, sizeof(Pattern));
+ if (status)
+ return status;
}
- if (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
+ if (Adapter->ulPowerSaveMode ==
+ DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
value = 0x80000000;
- status = wrmalt(Adapter, DEBUG_INTERRUPT_GENERATOR_REGISTOR, &value, sizeof(value));
+ status = wrmalt(Adapter,
+ DEBUG_INTERRUPT_GENERATOR_REGISTOR,
+ &value, sizeof(value));
if (status)
- {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "WRM to DEBUG_INTERRUPT_GENERATOR_REGISTOR Register failed");
return status;
- }
- } else if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
+ } else if (Adapter->ulPowerSaveMode !=
+ DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
/*
* Get a Interrupt Out URB and send 8 Bytes Down
* To be Done in Thread Context.
* Not using Asynchronous Mechanism.
*/
- status = usb_interrupt_msg (psInterfaceAdapter->udev,
+ status = usb_interrupt_msg(psInterfaceAdapter->udev,
usb_sndintpipe(psInterfaceAdapter->udev,
psInterfaceAdapter->sIntrOut.int_out_endpointAddr),
aucAbortPattern,
8,
&lenwritten,
5000);
- if (status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Sending Abort pattern down fails with status:%d..\n", status);
+ if (status)
return status;
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "NOB Sent down :%d", lenwritten);
- }
+ else
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ IDLE_MODE, DBG_LVL_ALL,
+ "NOB Sent down :%d", lenwritten);
/* mdelay(25); */
- timeout = jiffies + msecs_to_jiffies(50) ;
- while ( timeout > jiffies ) {
- itr++ ;
+ timeout = jiffies + msecs_to_jiffies(50);
+ while (time_after(timeout, jiffies)) {
+ itr++;
rdmalt(Adapter, CHIP_ID_REG, &chip_id, sizeof(UINT));
if (0xbece3200 == (chip_id&~(0xF0)))
chip_id = chip_id&~(0xF0);
if (chip_id == Adapter->chip_id)
break;
}
- if (timeout < jiffies )
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Not able to read chip-id even after 25 msec");
+ if (time_before(timeout, jiffies))
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ IDLE_MODE, DBG_LVL_ALL,
+				"Not able to read chip-id even after 50 msec");
else
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Number of completed iteration to read chip-id :%lu", itr);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ IDLE_MODE, DBG_LVL_ALL,
+				"Number of completed iterations to "
+				"read chip-id: %lu", itr);
- status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, &Pattern, sizeof(status));
- if (status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to Register SW_ABORT_IDLEMODE_LOC failed..");
+ status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC,
+ &Pattern, sizeof(status));
+ if (status)
return status;
- }
}
return status;
}
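
The hunk above replaces the raw `timeout > jiffies` / `timeout < jiffies`
tests with time_after()/time_before(). A minimal sketch of that pattern in
isolation, assuming process context; wait_for_chip() and the usleep_range()
back-off are illustrative only, while rdmalt(), CHIP_ID_REG and
struct bcm_mini_adapter come from this driver:

#include "headers.h"
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll until the chip answers with its ID or the 50 ms deadline passes.
 * time_before() compares jiffies values via signed subtraction, so the
 * loop stays correct even if the jiffies counter wraps mid-wait, which a
 * plain "deadline > jiffies" comparison does not guarantee. */
static int wait_for_chip(struct bcm_mini_adapter *Adapter)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(50);
	unsigned int chip_id = 0;

	while (time_before(jiffies, deadline)) {
		rdmalt(Adapter, CHIP_ID_REG, &chip_id, sizeof(chip_id));
		if (chip_id == Adapter->chip_id)
			return 0;		/* device responded */
		usleep_range(1000, 2000);	/* don't spin flat out */
	}
	return -ETIMEDOUT;
}
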
@@ -221,9 +225,10 @@ int InterfaceIdleModeWakeup(struct bcm_mini_adapter *Adapter)
{
ULONG Status = 0;
if (Adapter->bTriedToWakeUpFromlowPowerMode) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Wake up already attempted.. ignoring\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ IDLE_MODE, DBG_LVL_ALL,
+ "Wake up already attempted.. ignoring\n");
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Writing Low Power Mode Abort pattern to the Device\n");
Adapter->bTriedToWakeUpFromlowPowerMode = TRUE;
InterfaceAbortIdlemode(Adapter, Adapter->usIdleModePattern);
@@ -237,30 +242,33 @@ void InterfaceHandleShutdownModeWakeup(struct bcm_mini_adapter *Adapter)
INT Status = 0;
int bytes;
- if (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
+ if (Adapter->ulPowerSaveMode ==
+ DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
/* clear idlemode interrupt. */
uiRegVal = 0;
- Status = wrmalt(Adapter, DEBUG_INTERRUPT_GENERATOR_REGISTOR, &uiRegVal, sizeof(uiRegVal));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,"WRM to DEBUG_INTERRUPT_GENERATOR_REGISTOR Failed with err :%d", Status);
+ Status = wrmalt(Adapter,
+ DEBUG_INTERRUPT_GENERATOR_REGISTOR,
+ &uiRegVal, sizeof(uiRegVal));
+ if (Status)
return;
- }
}
- else {
+ else {
- /* clear Interrupt EP registers. */
- bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegVal, sizeof(uiRegVal));
+		/* clear Interrupt EP registers. */
+ bytes = rdmalt(Adapter,
+ DEVICE_INT_OUT_EP_REG0,
+ &uiRegVal, sizeof(uiRegVal));
if (bytes < 0) {
Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM of DEVICE_INT_OUT_EP_REG0 failed with Err :%d", Status);
return;
}
- bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1, &uiRegVal, sizeof(uiRegVal));
+ bytes = rdmalt(Adapter,
+ DEVICE_INT_OUT_EP_REG1,
+ &uiRegVal, sizeof(uiRegVal));
if (bytes < 0) {
Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM of DEVICE_INT_OUT_EP_REG1 failed with Err :%d", Status);
return;
}
}
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
index 3acdb58..94f3272 100644
--- a/drivers/staging/bcm/InterfaceInit.c
+++ b/drivers/staging/bcm/InterfaceInit.c
@@ -69,7 +69,7 @@ static void InterfaceAdapterFree(struct bcm_interface_adapter *psIntfAdapter)
static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter)
{
- unsigned long ulReg = 0;
+ u32 ulReg;
int bytes;
/* Program EP2 MAX_PKT_SIZE */
@@ -96,7 +96,7 @@ static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x140, 4, TRUE);
/* Program TX EP as interrupt(Alternate Setting) */
- bytes = rdmalt(Adapter, 0x0F0110F8, (u32 *)&ulReg, sizeof(u32));
+ bytes = rdmalt(Adapter, 0x0F0110F8, &ulReg, sizeof(u32));
if (bytes < 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
"reading of Tx EP failed\n");
@@ -119,18 +119,18 @@ static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter
* Update EEPROM Version.
* Read 4 bytes from 508 and modify 511 and 510.
*/
- ReadBeceemEEPROM(Adapter, 0x1FC, (PUINT)&ulReg);
+ ReadBeceemEEPROM(Adapter, 0x1FC, &ulReg);
ulReg &= 0x0101FFFF;
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1FC, 4, TRUE);
/* Update length field if required. Also make the string NULL terminated. */
- ReadBeceemEEPROM(Adapter, 0xA8, (PUINT)&ulReg);
+ ReadBeceemEEPROM(Adapter, 0xA8, &ulReg);
if ((ulReg&0x00FF0000)>>16 > 0x30) {
ulReg = (ulReg&0xFF00FFFF)|(0x30<<16);
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0xA8, 4, TRUE);
}
- ReadBeceemEEPROM(Adapter, 0x148, (PUINT)&ulReg);
+ ReadBeceemEEPROM(Adapter, 0x148, &ulReg);
if ((ulReg&0x00FF0000)>>16 > 0x30) {
ulReg = (ulReg&0xFF00FFFF)|(0x30<<16);
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x148, 4, TRUE);
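
The ConfigureEndPointTypesThroughEEPROM() change above narrows ulReg from
unsigned long to u32, so rdmalt() and ReadBeceemEEPROM() fill the whole
variable and the (u32 *)/(PUINT) casts go away. A small userspace
illustration of the size mismatch on a little-endian LP64 host, assuming
fake_rdm() as a stand-in for a 4-byte register read:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for a 32-bit register read: copies exactly 4 bytes. */
static void fake_rdm(void *dst)
{
	uint32_t reg = 0x0F0110F8;

	memcpy(dst, &reg, sizeof(reg));
}

int main(void)
{
	unsigned long wide = ~0UL;	/* 8 bytes on an LP64 host */
	uint32_t exact = 0;

	fake_rdm(&wide);		/* only the low 4 bytes change */
	fake_rdm(&exact);		/* the whole variable changes */

	printf("unsigned long: %#lx  (upper half is stale)\n", wide);
	printf("u32:           %#x\n", (unsigned int)exact);
	return 0;
}
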
diff --git a/drivers/staging/bcm/InterfaceRx.c b/drivers/staging/bcm/InterfaceRx.c
index f2973f5..1100817 100644
--- a/drivers/staging/bcm/InterfaceRx.c
+++ b/drivers/staging/bcm/InterfaceRx.c
@@ -1,11 +1,11 @@
#include "headers.h"
-static int SearchVcid(struct bcm_mini_adapter *Adapter,unsigned short usVcid)
+static int SearchVcid(struct bcm_mini_adapter *Adapter, unsigned short usVcid)
{
- int iIndex=0;
+ int iIndex = 0;
- for(iIndex=(NO_OF_QUEUES-1);iIndex>=0;iIndex--)
- if(Adapter->PackInfo[iIndex].usVCID_Value == usVcid)
+ for (iIndex = (NO_OF_QUEUES-1); iIndex >= 0; iIndex--)
+ if (Adapter->PackInfo[iIndex].usVCID_Value == usVcid)
return iIndex;
return NO_OF_QUEUES+1;
@@ -18,15 +18,14 @@ GetBulkInRcb(struct bcm_interface_adapter *psIntfAdapter)
struct bcm_usb_rcb *pRcb = NULL;
UINT index = 0;
- if((atomic_read(&psIntfAdapter->uNumRcbUsed) < MAXIMUM_USB_RCB) &&
- (psIntfAdapter->psAdapter->StopAllXaction == false))
- {
+ if ((atomic_read(&psIntfAdapter->uNumRcbUsed) < MAXIMUM_USB_RCB) &&
+ (psIntfAdapter->psAdapter->StopAllXaction == false)) {
index = atomic_read(&psIntfAdapter->uCurrRcb);
pRcb = &psIntfAdapter->asUsbRcb[index];
pRcb->bUsed = TRUE;
- pRcb->psIntfAdapter= psIntfAdapter;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Got Rx desc %d used %d",
- index, atomic_read(&psIntfAdapter->uNumRcbUsed));
+ pRcb->psIntfAdapter = psIntfAdapter;
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Got Rx desc %d used %d",
+ index, atomic_read(&psIntfAdapter->uNumRcbUsed));
index = (index + 1) % MAXIMUM_USB_RCB;
atomic_set(&psIntfAdapter->uCurrRcb, index);
atomic_inc(&psIntfAdapter->uNumRcbUsed);
@@ -40,9 +39,8 @@ static void read_bulk_callback(struct urb *urb)
struct sk_buff *skb = NULL;
bool bHeaderSupressionEnabled = false;
int QueueIndex = NO_OF_QUEUES + 1;
- UINT uiIndex=0;
+ UINT uiIndex = 0;
int process_done = 1;
- //int idleflag = 0 ;
struct bcm_usb_rcb *pRcb = (struct bcm_usb_rcb *)urb->context;
struct bcm_interface_adapter *psIntfAdapter = pRcb->psIntfAdapter;
struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter;
@@ -52,49 +50,40 @@ static void read_bulk_callback(struct urb *urb)
pr_info(PFX "%s: rx urb status %d length %d\n",
Adapter->dev->name, urb->status, urb->actual_length);
- if((Adapter->device_removed == TRUE) ||
- (TRUE == Adapter->bEndPointHalted) ||
- (0 == urb->actual_length)
- )
- {
- pRcb->bUsed = false;
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
+ if ((Adapter->device_removed == TRUE) ||
+ (TRUE == Adapter->bEndPointHalted) ||
+ (0 == urb->actual_length)) {
+ pRcb->bUsed = false;
+ atomic_dec(&psIntfAdapter->uNumRcbUsed);
return;
}
- if(urb->status != STATUS_SUCCESS)
- {
- if(urb->status == -EPIPE)
- {
- Adapter->bEndPointHalted = TRUE ;
+ if (urb->status != STATUS_SUCCESS) {
+ if (urb->status == -EPIPE) {
+ Adapter->bEndPointHalted = TRUE;
wake_up(&Adapter->tx_packet_wait_queue);
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"Rx URB has got cancelled. status :%d", urb->status);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Rx URB has got cancelled. status :%d", urb->status);
}
pRcb->bUsed = false;
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
- urb->status = STATUS_SUCCESS ;
- return ;
+ atomic_dec(&psIntfAdapter->uNumRcbUsed);
+ urb->status = STATUS_SUCCESS;
+ return;
}
- if(Adapter->bDoSuspend && (Adapter->bPreparingForLowPowerMode))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"device is going in low power mode while PMU option selected..hence rx packet should not be process");
- return ;
+ if (Adapter->bDoSuspend && (Adapter->bPreparingForLowPowerMode)) {
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "device is going into low power mode while PMU option is selected, hence rx packet should not be processed");
+ return;
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Read back done len %d\n", pLeader->PLength);
- if(!pLeader->PLength)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Length 0");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Read back done len %d\n", pLeader->PLength);
+ if (!pLeader->PLength) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Length 0");
atomic_dec(&psIntfAdapter->uNumRcbUsed);
return;
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Status:0x%hX, Length:0x%hX, VCID:0x%hX", pLeader->Status,pLeader->PLength,pLeader->Vcid);
- if(MAX_CNTL_PKT_SIZE < pLeader->PLength)
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Status:0x%hX, Length:0x%hX, VCID:0x%hX", pLeader->Status, pLeader->PLength, pLeader->Vcid);
+ if (MAX_CNTL_PKT_SIZE < pLeader->PLength) {
if (netif_msg_rx_err(Adapter))
pr_info(PFX "%s: corrupted leader length...%d\n",
Adapter->dev->name, pLeader->PLength);
@@ -103,65 +92,58 @@ static void read_bulk_callback(struct urb *urb)
return;
}
- QueueIndex = SearchVcid( Adapter,pLeader->Vcid);
- if(QueueIndex < NO_OF_QUEUES)
- {
+ QueueIndex = SearchVcid(Adapter, pLeader->Vcid);
+ if (QueueIndex < NO_OF_QUEUES) {
bHeaderSupressionEnabled =
Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled;
bHeaderSupressionEnabled =
bHeaderSupressionEnabled & Adapter->bPHSEnabled;
}
- skb = dev_alloc_skb (pLeader->PLength + SKB_RESERVE_PHS_BYTES + SKB_RESERVE_ETHERNET_HEADER);//2 //2 for allignment
- if(!skb)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "NO SKBUFF!!! Dropping the Packet");
+ skb = dev_alloc_skb(pLeader->PLength + SKB_RESERVE_PHS_BYTES + SKB_RESERVE_ETHERNET_HEADER);
+ if (!skb) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "NO SKBUFF!!! Dropping the Packet");
atomic_dec(&psIntfAdapter->uNumRcbUsed);
return;
}
- /* If it is a control Packet, then call handle_bcm_packet ()*/
- if((ntohs(pLeader->Vcid) == VCID_CONTROL_PACKET) ||
- (!(pLeader->Status >= 0x20 && pLeader->Status <= 0x3F)))
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_CTRL, DBG_LVL_ALL, "Received control pkt...");
+	/* If it is a control packet, then call handle_bcm_packet() */
+ if ((ntohs(pLeader->Vcid) == VCID_CONTROL_PACKET) ||
+ (!(pLeader->Status >= 0x20 && pLeader->Status <= 0x3F))) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_CTRL, DBG_LVL_ALL, "Received control pkt...");
*(PUSHORT)skb->data = pLeader->Status;
- memcpy(skb->data+sizeof(USHORT), urb->transfer_buffer +
- (sizeof(struct bcm_leader)), pLeader->PLength);
+ memcpy(skb->data+sizeof(USHORT), urb->transfer_buffer +
+ (sizeof(struct bcm_leader)), pLeader->PLength);
skb->len = pLeader->PLength + sizeof(USHORT);
spin_lock(&Adapter->control_queue_lock);
- ENQUEUEPACKET(Adapter->RxControlHead,Adapter->RxControlTail,skb);
+ ENQUEUEPACKET(Adapter->RxControlHead, Adapter->RxControlTail, skb);
spin_unlock(&Adapter->control_queue_lock);
atomic_inc(&Adapter->cntrlpktCnt);
wake_up(&Adapter->process_rx_cntrlpkt);
- }
- else
- {
+ } else {
/*
- * Data Packet, Format a proper Ethernet Header
- * and give it to the stack
- */
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt...");
+ * Data Packet, Format a proper Ethernet Header
+ * and give it to the stack
+ */
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt...");
skb_reserve(skb, 2 + SKB_RESERVE_PHS_BYTES);
memcpy(skb->data+ETH_HLEN, (PUCHAR)urb->transfer_buffer + sizeof(struct bcm_leader), pLeader->PLength);
skb->dev = Adapter->dev;
/* currently skb->len has extra ETH_HLEN bytes in the beginning */
- skb_put (skb, pLeader->PLength + ETH_HLEN);
- Adapter->PackInfo[QueueIndex].uiTotalRxBytes+=pLeader->PLength;
- Adapter->PackInfo[QueueIndex].uiThisPeriodRxBytes+= pLeader->PLength;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt of len :0x%X", pLeader->PLength);
+ skb_put(skb, pLeader->PLength + ETH_HLEN);
+ Adapter->PackInfo[QueueIndex].uiTotalRxBytes += pLeader->PLength;
+ Adapter->PackInfo[QueueIndex].uiThisPeriodRxBytes += pLeader->PLength;
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt of len :0x%X", pLeader->PLength);
- if(netif_running(Adapter->dev))
- {
+ if (netif_running(Adapter->dev)) {
/* Moving ahead by ETH_HLEN to the data ptr as received from FW */
skb_pull(skb, ETH_HLEN);
PHSReceive(Adapter, pLeader->Vcid, skb, &skb->len,
- NULL,bHeaderSupressionEnabled);
+ NULL, bHeaderSupressionEnabled);
- if(!Adapter->PackInfo[QueueIndex].bEthCSSupport)
- {
+ if (!Adapter->PackInfo[QueueIndex].bEthCSSupport) {
skb_push(skb, ETH_HLEN);
memcpy(skb->data, skb->dev->dev_addr, 6);
@@ -169,29 +151,26 @@ static void read_bulk_callback(struct urb *urb)
(*(skb->data+11))++;
*(skb->data+12) = 0x08;
*(skb->data+13) = 0x00;
- pLeader->PLength+=ETH_HLEN;
+ pLeader->PLength += ETH_HLEN;
}
skb->protocol = eth_type_trans(skb, Adapter->dev);
process_done = netif_rx(skb);
- }
- else
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "i/f not up hance freeing SKB...");
+ } else {
+			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "i/f not up hence freeing SKB...");
dev_kfree_skb(skb);
}
++Adapter->dev->stats.rx_packets;
Adapter->dev->stats.rx_bytes += pLeader->PLength;
- for(uiIndex = 0 ; uiIndex < MIBS_MAX_HIST_ENTRIES ; uiIndex++)
- {
- if((pLeader->PLength <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1))
- && (pLeader->PLength > MIBS_PKTSIZEHIST_RANGE*(uiIndex)))
+ for (uiIndex = 0; uiIndex < MIBS_MAX_HIST_ENTRIES; uiIndex++) {
+ if ((pLeader->PLength <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1)) &&
+ (pLeader->PLength > MIBS_PKTSIZEHIST_RANGE*(uiIndex)))
Adapter->aRxPktSizeHist[uiIndex]++;
}
}
- Adapter->PrevNumRecvDescs++;
+ Adapter->PrevNumRecvDescs++;
pRcb->bUsed = false;
atomic_dec(&psIntfAdapter->uNumRcbUsed);
}
@@ -201,23 +180,18 @@ static int ReceiveRcb(struct bcm_interface_adapter *psIntfAdapter, struct bcm_us
struct urb *urb = pRcb->urb;
int retval = 0;
- usb_fill_bulk_urb(urb, psIntfAdapter->udev, usb_rcvbulkpipe(
- psIntfAdapter->udev, psIntfAdapter->sBulkIn.bulk_in_endpointAddr),
- urb->transfer_buffer, BCM_USB_MAX_READ_LENGTH, read_bulk_callback,
- pRcb);
- if(false == psIntfAdapter->psAdapter->device_removed &&
- false == psIntfAdapter->psAdapter->bEndPointHalted &&
- false == psIntfAdapter->bSuspended &&
- false == psIntfAdapter->bPreparingForBusSuspend)
- {
+ usb_fill_bulk_urb(urb, psIntfAdapter->udev, usb_rcvbulkpipe(psIntfAdapter->udev, psIntfAdapter->sBulkIn.bulk_in_endpointAddr),
+ urb->transfer_buffer, BCM_USB_MAX_READ_LENGTH, read_bulk_callback, pRcb);
+ if (false == psIntfAdapter->psAdapter->device_removed &&
+ false == psIntfAdapter->psAdapter->bEndPointHalted &&
+ false == psIntfAdapter->bSuspended &&
+ false == psIntfAdapter->bPreparingForBusSuspend) {
retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "failed submitting read urb, error %d", retval);
- //if this return value is because of pipe halt. need to clear this.
- if(retval == -EPIPE)
- {
- psIntfAdapter->psAdapter->bEndPointHalted = TRUE ;
+ if (retval) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "failed submitting read urb, error %d", retval);
+			/* if this return value is because of a pipe halt, we need to clear it */
+ if (retval == -EPIPE) {
+ psIntfAdapter->psAdapter->bEndPointHalted = TRUE;
wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
}
@@ -240,25 +214,20 @@ Return: TRUE - If Rx was successful.
Other - If an error occurred.
*/
-bool InterfaceRx (struct bcm_interface_adapter *psIntfAdapter)
+bool InterfaceRx(struct bcm_interface_adapter *psIntfAdapter)
{
USHORT RxDescCount = NUM_RX_DESC - atomic_read(&psIntfAdapter->uNumRcbUsed);
struct bcm_usb_rcb *pRcb = NULL;
-// RxDescCount = psIntfAdapter->psAdapter->CurrNumRecvDescs -
-// psIntfAdapter->psAdapter->PrevNumRecvDescs;
- while(RxDescCount)
- {
+ while (RxDescCount) {
pRcb = GetBulkInRcb(psIntfAdapter);
- if(pRcb == NULL)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Unable to get Rcb pointer");
+ if (pRcb == NULL) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "Unable to get Rcb pointer");
return false;
}
- //atomic_inc(&psIntfAdapter->uNumRcbUsed);
ReceiveRcb(psIntfAdapter, pRcb);
RxDescCount--;
- }
+ }
return TRUE;
}
diff --git a/drivers/staging/bcm/InterfaceTx.c b/drivers/staging/bcm/InterfaceTx.c
index b9c2784..ea7707b 100644
--- a/drivers/staging/bcm/InterfaceTx.c
+++ b/drivers/staging/bcm/InterfaceTx.c
@@ -3,26 +3,22 @@
/*this is transmit call-back(BULK OUT)*/
static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
{
- struct bcm_usb_tcb *pTcb= (struct bcm_usb_tcb *)urb->context;
+ struct bcm_usb_tcb *pTcb = (struct bcm_usb_tcb *)urb->context;
struct bcm_interface_adapter *psIntfAdapter = pTcb->psIntfAdapter;
struct bcm_link_request *pControlMsg = (struct bcm_link_request *)urb->transfer_buffer;
- struct bcm_mini_adapter *psAdapter = psIntfAdapter->psAdapter ;
- bool bpowerDownMsg = false ;
+ struct bcm_mini_adapter *psAdapter = psIntfAdapter->psAdapter;
+ bool bpowerDownMsg = false;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if (unlikely(netif_msg_tx_done(Adapter)))
- pr_info(PFX "%s: transmit status %d\n", Adapter->dev->name, urb->status);
+ if (unlikely(netif_msg_tx_done(Adapter)))
+ pr_info(PFX "%s: transmit status %d\n", Adapter->dev->name, urb->status);
- if(urb->status != STATUS_SUCCESS)
- {
- if(urb->status == -EPIPE)
- {
- psIntfAdapter->psAdapter->bEndPointHalted = TRUE ;
+ if (urb->status != STATUS_SUCCESS) {
+ if (urb->status == -EPIPE) {
+ psIntfAdapter->psAdapter->bEndPointHalted = TRUE;
wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Tx URB has got cancelled. status :%d", urb->status);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Tx URB has got cancelled. status :%d", urb->status);
}
}
@@ -31,69 +27,59 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
- if(TRUE == psAdapter->bPreparingForLowPowerMode)
- {
-
- if(((pControlMsg->szData[0] == GO_TO_IDLE_MODE_PAYLOAD) &&
- (pControlMsg->szData[1] == TARGET_CAN_GO_TO_IDLE_MODE)))
+ if (TRUE == psAdapter->bPreparingForLowPowerMode) {
- {
- bpowerDownMsg = TRUE ;
- //This covers the bus err while Idle Request msg sent down.
- if(urb->status != STATUS_SUCCESS)
- {
- psAdapter->bPreparingForLowPowerMode = false ;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Idle Mode Request msg failed to reach to Modem");
- //Signalling the cntrl pkt path in Ioctl
+ if (((pControlMsg->szData[0] == GO_TO_IDLE_MODE_PAYLOAD) &&
+ (pControlMsg->szData[1] == TARGET_CAN_GO_TO_IDLE_MODE))) {
+ bpowerDownMsg = TRUE;
+ /* This covers the bus err while Idle Request msg sent down. */
+ if (urb->status != STATUS_SUCCESS) {
+ psAdapter->bPreparingForLowPowerMode = false;
+					BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Idle Mode Request msg failed to reach the modem");
+ /* Signalling the cntrl pkt path in Ioctl */
wake_up(&psAdapter->lowpower_mode_wait_queue);
StartInterruptUrb(psIntfAdapter);
goto err_exit;
}
- if(psAdapter->bDoSuspend == false)
- {
+ if (psAdapter->bDoSuspend == false) {
psAdapter->IdleMode = TRUE;
- //since going in Idle mode completed hence making this var false;
- psAdapter->bPreparingForLowPowerMode = false ;
+ /* since going in Idle mode completed hence making this var false */
+ psAdapter->bPreparingForLowPowerMode = false;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Host Entered in Idle Mode State...");
- //Signalling the cntrl pkt path in Ioctl
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Host Entered in Idle Mode State...");
+ /* Signalling the cntrl pkt path in Ioctl*/
wake_up(&psAdapter->lowpower_mode_wait_queue);
}
- }
- else if((pControlMsg->Leader.Status == LINK_UP_CONTROL_REQ) &&
+ } else if ((pControlMsg->Leader.Status == LINK_UP_CONTROL_REQ) &&
(pControlMsg->szData[0] == LINK_UP_ACK) &&
(pControlMsg->szData[1] == LINK_SHUTDOWN_REQ_FROM_FIRMWARE) &&
- (pControlMsg->szData[2] == SHUTDOWN_ACK_FROM_DRIVER))
- {
- //This covers the bus err while shutdown Request msg sent down.
- if(urb->status != STATUS_SUCCESS)
- {
- psAdapter->bPreparingForLowPowerMode = false ;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Shutdown Request Msg failed to reach to Modem");
- //Signalling the cntrl pkt path in Ioctl
+ (pControlMsg->szData[2] == SHUTDOWN_ACK_FROM_DRIVER)) {
+ /* This covers the bus err while shutdown Request msg sent down. */
+ if (urb->status != STATUS_SUCCESS) {
+ psAdapter->bPreparingForLowPowerMode = false;
+				BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Shutdown Request Msg failed to reach the modem");
+ /* Signalling the cntrl pkt path in Ioctl */
wake_up(&psAdapter->lowpower_mode_wait_queue);
StartInterruptUrb(psIntfAdapter);
goto err_exit;
}
- bpowerDownMsg = TRUE ;
- if(psAdapter->bDoSuspend == false)
- {
+ bpowerDownMsg = TRUE;
+ if (psAdapter->bDoSuspend == false) {
psAdapter->bShutStatus = TRUE;
- //since going in shutdown mode completed hence making this var false;
- psAdapter->bPreparingForLowPowerMode = false ;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Host Entered in shutdown Mode State...");
- //Signalling the cntrl pkt path in Ioctl
+ /* since going in shutdown mode completed hence making this var false */
+ psAdapter->bPreparingForLowPowerMode = false;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Host Entered in shutdown Mode State...");
+ /* Signalling the cntrl pkt path in Ioctl */
wake_up(&psAdapter->lowpower_mode_wait_queue);
}
}
- if(psAdapter->bDoSuspend && bpowerDownMsg)
- {
- //issuing bus suspend request
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Issuing the Bus suspend request to USB stack");
+ if (psAdapter->bDoSuspend && bpowerDownMsg) {
+ /* issuing bus suspend request */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Issuing the Bus suspend request to USB stack");
psIntfAdapter->bPreparingForBusSuspend = TRUE;
schedule_work(&psIntfAdapter->usbSuspendWork);
@@ -101,9 +87,9 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
}
-err_exit :
+err_exit:
usb_free_coherent(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ urb->transfer_buffer, urb->transfer_dma);
}
@@ -112,14 +98,13 @@ static struct bcm_usb_tcb *GetBulkOutTcb(struct bcm_interface_adapter *psIntfAda
struct bcm_usb_tcb *pTcb = NULL;
UINT index = 0;
- if((atomic_read(&psIntfAdapter->uNumTcbUsed) < MAXIMUM_USB_TCB) &&
- (psIntfAdapter->psAdapter->StopAllXaction ==false))
- {
+ if ((atomic_read(&psIntfAdapter->uNumTcbUsed) < MAXIMUM_USB_TCB) &&
+ (psIntfAdapter->psAdapter->StopAllXaction == false)) {
index = atomic_read(&psIntfAdapter->uCurrTcb);
pTcb = &psIntfAdapter->asUsbTcb[index];
pTcb->bUsed = TRUE;
- pTcb->psIntfAdapter= psIntfAdapter;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Got Tx desc %d used %d",
+ pTcb->psIntfAdapter = psIntfAdapter;
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Got Tx desc %d used %d",
index, atomic_read(&psIntfAdapter->uNumTcbUsed));
index = (index + 1) % MAXIMUM_USB_TCB;
atomic_set(&psIntfAdapter->uCurrTcb, index);
@@ -135,44 +120,37 @@ static int TransmitTcb(struct bcm_interface_adapter *psIntfAdapter, struct bcm_u
int retval = 0;
urb->transfer_buffer = usb_alloc_coherent(psIntfAdapter->udev, len,
- GFP_ATOMIC, &urb->transfer_dma);
- if (!urb->transfer_buffer)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Error allocating memory\n");
+ GFP_ATOMIC, &urb->transfer_dma);
+ if (!urb->transfer_buffer) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "Error allocating memory\n");
return -ENOMEM;
}
memcpy(urb->transfer_buffer, data, len);
urb->transfer_buffer_length = len;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Sending Bulk out packet\n");
- //For T3B,INT OUT end point will be used as bulk out end point
- if((psIntfAdapter->psAdapter->chip_id == T3B) && (psIntfAdapter->bHighSpeedDevice == TRUE))
- {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Sending Bulk out packet\n");
+ /* For T3B,INT OUT end point will be used as bulk out end point */
+ if ((psIntfAdapter->psAdapter->chip_id == T3B) && (psIntfAdapter->bHighSpeedDevice == TRUE)) {
usb_fill_int_urb(urb, psIntfAdapter->udev,
- psIntfAdapter->sBulkOut.bulk_out_pipe,
+ psIntfAdapter->sBulkOut.bulk_out_pipe,
urb->transfer_buffer, len, write_bulk_callback, pTcb,
psIntfAdapter->sBulkOut.int_out_interval);
- }
- else
- {
+ } else {
usb_fill_bulk_urb(urb, psIntfAdapter->udev,
psIntfAdapter->sBulkOut.bulk_out_pipe,
urb->transfer_buffer, len, write_bulk_callback, pTcb);
}
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* For DMA transfer */
- if(false == psIntfAdapter->psAdapter->device_removed &&
+ if (false == psIntfAdapter->psAdapter->device_removed &&
false == psIntfAdapter->psAdapter->bEndPointHalted &&
false == psIntfAdapter->bSuspended &&
- false == psIntfAdapter->bPreparingForBusSuspend)
- {
+ false == psIntfAdapter->bPreparingForBusSuspend) {
retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "failed submitting write urb, error %d", retval);
- if(retval == -EPIPE)
- {
- psIntfAdapter->psAdapter->bEndPointHalted = TRUE ;
+ if (retval) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "failed submitting write urb, error %d", retval);
+ if (retval == -EPIPE) {
+ psIntfAdapter->psAdapter->bEndPointHalted = TRUE;
wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
}
}
@@ -182,13 +160,12 @@ static int TransmitTcb(struct bcm_interface_adapter *psIntfAdapter, struct bcm_u
int InterfaceTransmitPacket(PVOID arg, PVOID data, UINT len)
{
- struct bcm_usb_tcb *pTcb= NULL;
+ struct bcm_usb_tcb *pTcb = NULL;
struct bcm_interface_adapter *psIntfAdapter = arg;
- pTcb= GetBulkOutTcb(psIntfAdapter);
- if(pTcb == NULL)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "No URB to transmit packet, dropping packet");
+ pTcb = GetBulkOutTcb(psIntfAdapter);
+ if (pTcb == NULL) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "No URB to transmit packet, dropping packet");
return -EFAULT;
}
return TransmitTcb(psIntfAdapter, pTcb, data, len);
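
TransmitTcb() above pairs usb_alloc_coherent() with URB_NO_TRANSFER_DMA_MAP
and frees the buffer again in write_bulk_callback(). A one-shot sketch of
that buffer lifecycle in the style of usb-skeleton (the driver itself reuses
preallocated TCB URBs); demo_send() and demo_complete() are illustrative
names, not part of the driver:

#include <linux/usb.h>
#include <linux/string.h>
#include <linux/errno.h>

static void demo_complete(struct urb *urb)
{
	/* The buffer came from usb_alloc_coherent(), so release it the
	 * same way once the transfer is done. */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);
}

static int demo_send(struct usb_device *udev, unsigned int pipe,
		     const void *data, int len)
{
	struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
	void *buf;
	int ret;

	if (!urb)
		return -ENOMEM;

	buf = usb_alloc_coherent(udev, len, GFP_ATOMIC, &urb->transfer_dma);
	if (!buf) {
		usb_free_urb(urb);
		return -ENOMEM;
	}
	memcpy(buf, data, len);

	usb_fill_bulk_urb(urb, udev, pipe, buf, len, demo_complete, NULL);
	/* transfer_dma is already valid, so the core must not map again */
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret)
		usb_free_coherent(udev, len, buf, urb->transfer_dma);
	usb_free_urb(urb);	/* drop our reference; the core holds its own */
	return ret;
}
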
diff --git a/drivers/staging/bcm/PHSModule.c b/drivers/staging/bcm/PHSModule.c
index 892ebc6..afc7bcc 100644
--- a/drivers/staging/bcm/PHSModule.c
+++ b/drivers/staging/bcm/PHSModule.c
@@ -1280,11 +1280,11 @@ static int phs_decompress(unsigned char *in_buf,
if (bit == SUPPRESS) {
*out_buf = *phsf;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nDECOMP:In phss %d phsf %d ouput %d",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nDECOMP:In phss %d phsf %d output %d",
phss, *phsf, *out_buf);
} else {
*out_buf = *in_buf;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nDECOMP:In phss %d input %d ouput %d",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nDECOMP:In phss %d input %d output %d",
phss, *in_buf, *out_buf);
in_buf++;
size++;
diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c
index 1609a2b..0727599 100644
--- a/drivers/staging/bcm/Qos.c
+++ b/drivers/staging/bcm/Qos.c
@@ -24,7 +24,7 @@ static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex);
*
* Returns - TRUE(If address matches) else FAIL .
*********************************************************************/
-bool MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulSrcIP)
+static bool MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulSrcIP)
{
UCHAR ucLoopIndex = 0;
@@ -58,7 +58,7 @@ bool MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulSr
*
* Returns - TRUE(If address matches) else FAIL .
*********************************************************************/
-bool MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulDestIP)
+static bool MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulDestIP)
{
UCHAR ucLoopIndex = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -91,7 +91,7 @@ bool MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulD
*
* Returns - TRUE(If address matches) else FAIL.
**************************************************************************/
-bool MatchTos(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucTypeOfService)
+static bool MatchTos(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucTypeOfService)
{
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
index 9e5f955..fca164f 100644
--- a/drivers/staging/bcm/nvm.c
+++ b/drivers/staging/bcm/nvm.c
@@ -1355,67 +1355,6 @@ BeceemFlashBulkWriteStatus_EXIT:
}
/*
- * Procedure: PropagateCalParamsFromEEPROMToMemory
- *
- * Description: Dumps the calibration section of EEPROM to DDR.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-int PropagateCalParamsFromEEPROMToMemory(struct bcm_mini_adapter *Adapter)
-{
- PCHAR pBuff = kmalloc(BUFFER_4K, GFP_KERNEL);
- unsigned int uiEepromSize = 0;
- unsigned int uiIndex = 0;
- unsigned int uiBytesToCopy = 0;
- unsigned int uiCalStartAddr = EEPROM_CALPARAM_START;
- unsigned int uiMemoryLoc = EEPROM_CAL_DATA_INTERNAL_LOC;
- unsigned int value;
- int Status = 0;
-
- if (!pBuff)
- return -ENOMEM;
-
- if (0 != BeceemEEPROMBulkRead(Adapter, &uiEepromSize, EEPROM_SIZE_OFFSET, 4)) {
- kfree(pBuff);
- return -1;
- }
-
- uiEepromSize >>= 16;
- if (uiEepromSize > 1024 * 1024) {
- kfree(pBuff);
- return -1;
- }
-
- uiBytesToCopy = MIN(BUFFER_4K, uiEepromSize);
-
- while (uiBytesToCopy) {
- if (0 != BeceemEEPROMBulkRead(Adapter, (PUINT)pBuff, uiCalStartAddr, uiBytesToCopy)) {
- Status = -1;
- break;
- }
- wrm(Adapter, uiMemoryLoc, (PCHAR)(((PULONG)pBuff) + uiIndex), uiBytesToCopy);
- uiMemoryLoc += uiBytesToCopy;
- uiEepromSize -= uiBytesToCopy;
- uiCalStartAddr += uiBytesToCopy;
- uiIndex += uiBytesToCopy / 4;
- uiBytesToCopy = MIN(BUFFER_4K, uiEepromSize);
-
- }
- value = 0xbeadbead;
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value));
- value = 0xbeadbead;
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value));
- kfree(pBuff);
-
- return Status;
-}
-
-/*
* Procedure: PropagateCalParamsFromFlashToMemory
*
* Description: Dumps the calibration section of EEPROM to DDR.
@@ -2873,7 +2812,7 @@ int BcmFlash2xBulkRead(struct bcm_mini_adapter *Adapter,
SectionStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal);
if (SectionStartOffset == STATUS_FAILURE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exixt in Flash 2.x Map ", eFlash2xSectionVal);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exist in Flash 2.x Map ", eFlash2xSectionVal);
return -EINVAL;
}
@@ -2936,7 +2875,7 @@ int BcmFlash2xBulkWrite(struct bcm_mini_adapter *Adapter,
FlashSectValStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectVal);
if (FlashSectValStartOffset == STATUS_FAILURE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exixt in Flash Map 2.x", eFlash2xSectVal);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exist in Flash Map 2.x", eFlash2xSectVal);
return -EINVAL;
}
@@ -3911,7 +3850,7 @@ int validateFlash2xReadWrite(struct bcm_mini_adapter *Adapter, struct bcm_flash2
uiNumOfBytes = psFlash2xReadWrite->numOfBytes;
if (IsSectionExistInFlash(Adapter, psFlash2xReadWrite->Section) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%x> does not exixt in Flash", psFlash2xReadWrite->Section);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%x> does not exist in Flash", psFlash2xReadWrite->Section);
return false;
}
uiSectStartOffset = BcmGetSectionValStartOffset(Adapter, psFlash2xReadWrite->Section);
@@ -3944,6 +3883,15 @@ int validateFlash2xReadWrite(struct bcm_mini_adapter *Adapter, struct bcm_flash2
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "End offset :%x\n", uiSectEndOffset);
+ /* psFlash2xReadWrite->offset and uiNumOfBytes are user controlled and can lead to integer overflows */
+ if (psFlash2xReadWrite->offset > uiSectEndOffset) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request....");
+ return false;
+ }
+ if (uiNumOfBytes > uiSectEndOffset) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request....");
+ return false;
+ }
/* Checking the boundary condition */
if ((uiSectStartOffset + psFlash2xReadWrite->offset + uiNumOfBytes) <= uiSectEndOffset)
return TRUE;
@@ -4530,13 +4478,13 @@ int IsSectionWritable(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section
int Status = false;
if (IsSectionExistInFlash(Adapter, Section) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section <%d> does not exixt", Section);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section <%d> does not exist", Section);
return false;
}
offset = BcmGetSectionValStartOffset(Adapter, Section);
if (offset == INVALID_OFFSET) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%d> does not exixt", Section);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%d> does not exist", Section);
return false;
}
diff --git a/drivers/staging/btmtk_usb/Kconfig b/drivers/staging/btmtk_usb/Kconfig
deleted file mode 100644
index a425ebd..0000000
--- a/drivers/staging/btmtk_usb/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-config USB_BTMTK
- tristate "Mediatek Bluetooth support"
- depends on USB && BT && m
- ---help---
- Say Y here if you wish to control a MTK USB Bluetooth.
-
- This option depends on 'USB' support being enabled
-
- To compile this driver as a module, choose M here: the
- module will be called btmtk_usb.
-
diff --git a/drivers/staging/btmtk_usb/Makefile b/drivers/staging/btmtk_usb/Makefile
deleted file mode 100644
index 4d6c9d7..0000000
--- a/drivers/staging/btmtk_usb/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_USB_BTMTK) += btmtk_usb.o
diff --git a/drivers/staging/btmtk_usb/README b/drivers/staging/btmtk_usb/README
deleted file mode 100644
index c046c8e..0000000
--- a/drivers/staging/btmtk_usb/README
+++ /dev/null
@@ -1,14 +0,0 @@
--build driver modules
- make
-
--install driver modules
- make install
-
--remove driver modules
- make clean
-
--dynamic debug message
- turn on CONFIG_DYNAMIC_DEBUG compiler flag for current kernel
- mount -t debugfs none /sys/kernel/debug/
- echo "module module_name +p" > /sys/kernel/debug/dynamic_debug/control(turn on debug messages, module name such as btmtk_usb)
- echo "module module_name -p" > /sys/kernel/debug/dynamic_debug/control(turn off debug messages, module name such as btmtk_usb)
diff --git a/drivers/staging/btmtk_usb/TODO b/drivers/staging/btmtk_usb/TODO
deleted file mode 100644
index a71d129..0000000
--- a/drivers/staging/btmtk_usb/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-TODO:
- - checkpatch.pl clean
- - determine if the driver should not be using a duplicate
- version of the usb-bluetooth interface code, but should
- be merged into the drivers/bluetooth/ directory and
- infrastructure instead.
- - review by the bluetooth developer community
-
-Please send any patches for this driver to Yu-Chen, Cho <acho@suse.com> and
-jay.hung@mediatek.com
diff --git a/drivers/staging/btmtk_usb/btmtk_usb.c b/drivers/staging/btmtk_usb/btmtk_usb.c
deleted file mode 100644
index 9a5ebd6..0000000
--- a/drivers/staging/btmtk_usb/btmtk_usb.c
+++ /dev/null
@@ -1,1810 +0,0 @@
-/*
- * MediaTek Bluetooth USB Driver
- *
- * Copyright (C) 2013, MediaTek co.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- * or on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-#include <linux/completion.h>
-#include <linux/firmware.h>
-#include <linux/usb.h>
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-
-#include "btmtk_usb.h"
-
-#define VERSION "1.0.4"
-#define MT7650_FIRMWARE "mt7650.bin"
-#define MT7662_FIRMWARE "mt7662.bin"
-
-static struct usb_driver btmtk_usb_driver;
-
-
-static int btmtk_usb_load_rom_patch(struct btmtk_usb_data *);
-static int btmtk_usb_load_fw(struct btmtk_usb_data *);
-
-static void hex_dump(char *str, u8 *src_buf, u32 src_buf_len)
-{
- unsigned char *pt;
- int x;
-
- pt = src_buf;
-
- BT_DBG("%s: %p, len = %d\n", str, src_buf, src_buf_len);
-
- for (x = 0; x < src_buf_len; x++) {
- if (x % 16 == 0)
- BT_DBG("0x%04x : ", x);
- BT_DBG("%02x ", ((unsigned char)pt[x]));
- if (x % 16 == 15)
- BT_DBG("\n");
- }
-
- BT_DBG("\n");
-}
-
-static int btmtk_usb_reset(struct usb_device *udev)
-{
- int ret;
-
- BT_DBG("%s\n", __func__);
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01,
- DEVICE_VENDOR_REQUEST_OUT, 0x01, 0x00,
- NULL, 0x00, CONTROL_TIMEOUT_JIFFIES);
-
- if (ret < 0) {
- BT_ERR("%s error(%d)\n", __func__, ret);
- return ret;
- }
-
- if (ret > 0)
- ret = 0;
-
- return ret;
-}
-
-static int btmtk_usb_io_read32(struct btmtk_usb_data *data, u32 reg, u32 *val)
-{
- u8 request = data->r_request;
- struct usb_device *udev = data->udev;
- int ret;
- __le32 val_le;
-
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
- DEVICE_VENDOR_REQUEST_IN, 0x0, reg, data->io_buf,
- 4, CONTROL_TIMEOUT_JIFFIES);
-
- if (ret < 0) {
- *val = 0xffffffff;
- BT_ERR("%s error(%d), reg=%x, value=%x\n",
- __func__, ret, reg, *val);
- return ret;
- }
-
- memmove(&val_le, data->io_buf, 4);
-
- *val = le32_to_cpu(val_le);
-
- if (ret > 0)
- ret = 0;
-
- return ret;
-}
-
-static int btmtk_usb_io_write32(struct btmtk_usb_data *data, u32 reg, u32 val)
-{
- u16 value, index;
- u8 request = data->w_request;
- struct usb_device *udev = data->udev;
- int ret;
-
- index = (u16)reg;
- value = val & 0x0000ffff;
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
- DEVICE_VENDOR_REQUEST_OUT, value, index,
- NULL, 0, CONTROL_TIMEOUT_JIFFIES);
-
- if (ret < 0) {
- BT_ERR("%s error(%d), reg=%x, value=%x\n",
- __func__, ret, reg, val);
- return ret;
- }
-
- index = (u16)(reg + 2);
- value = (val & 0xffff0000) >> 16;
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- request, DEVICE_VENDOR_REQUEST_OUT,
- value, index, NULL, 0, CONTROL_TIMEOUT_JIFFIES);
-
- if (ret < 0) {
- BT_ERR("%s error(%d), reg=%x, value=%x\n",
- __func__, ret, reg, val);
- return ret;
- }
-
- if (ret > 0)
- ret = 0;
-
- return ret;
-}
-
-static int btmtk_usb_switch_iobase(struct btmtk_usb_data *data, int base)
-{
- int ret = 0;
-
- switch (base) {
- case SYSCTL:
- data->w_request = 0x42;
- data->r_request = 0x47;
- break;
- case WLAN:
- data->w_request = 0x02;
- data->r_request = 0x07;
- break;
-
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static void btmtk_usb_cap_init(struct btmtk_usb_data *data)
-{
- const struct firmware *firmware;
- struct usb_device *udev = data->udev;
- int ret;
-
- btmtk_usb_io_read32(data, 0x00, &data->chip_id);
-
- BT_DBG("chip id = %x\n", data->chip_id);
-
- if (is_mt7630(data) || is_mt7650(data)) {
- data->need_load_fw = 1;
- data->need_load_rom_patch = 0;
- ret = request_firmware(&firmware, MT7650_FIRMWARE, &udev->dev);
- if (ret < 0) {
- if (ret == -ENOENT) {
- BT_ERR("Firmware file \"%s\" not found\n",
- MT7650_FIRMWARE);
- } else {
- BT_ERR("Firmware file \"%s\" request failed (err=%d)\n",
- MT7650_FIRMWARE, ret);
- }
- } else {
- BT_DBG("Firmware file \"%s\" Found\n",
- MT7650_FIRMWARE);
- /* load firmware here */
- data->firmware = firmware;
- btmtk_usb_load_fw(data);
- }
- release_firmware(firmware);
- } else if (is_mt7632(data) || is_mt7662(data)) {
- data->need_load_fw = 0;
- data->need_load_rom_patch = 1;
- data->rom_patch_offset = 0x90000;
- ret = request_firmware(&firmware, MT7662_FIRMWARE, &udev->dev);
- if (ret < 0) {
- if (ret == -ENOENT) {
- BT_ERR("Firmware file \"%s\" not found\n",
- MT7662_FIRMWARE);
- } else {
- BT_ERR("Firmware file \"%s\" request failed (err=%d)\n",
- MT7662_FIRMWARE, ret);
- }
- } else {
- BT_DBG("Firmware file \"%s\" Found\n", MT7662_FIRMWARE);
- /* load rom patch here */
- data->firmware = firmware;
- data->rom_patch_len = firmware->size;
- btmtk_usb_load_rom_patch(data);
- }
- release_firmware(firmware);
- } else {
- BT_ERR("unknow chip(%x)\n", data->chip_id);
- }
-}
-
-static u16 checksume16(u8 *pData, int len)
-{
- int sum = 0;
-
- while (len > 1) {
- sum += *((u16 *)pData);
-
- pData = pData + 2;
-
- if (sum & 0x80000000)
- sum = (sum & 0xFFFF) + (sum >> 16);
-
- len -= 2;
- }
-
- if (len)
- sum += *((u8 *)pData);
-
- while (sum >> 16)
- sum = (sum & 0xFFFF) + (sum >> 16);
-
- return ~sum;
-}
-
-static int btmtk_usb_chk_crc(struct btmtk_usb_data *data, u32 checksum_len)
-{
- int ret = 0;
- struct usb_device *udev = data->udev;
-
- BT_DBG("%s\n", __func__);
-
- memmove(data->io_buf, &data->rom_patch_offset, 4);
- memmove(&data->io_buf[4], &checksum_len, 4);
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x1,
- DEVICE_VENDOR_REQUEST_IN, 0x20, 0x00, data->io_buf,
- 8, CONTROL_TIMEOUT_JIFFIES);
-
- if (ret < 0)
- BT_ERR("%s error(%d)\n", __func__, ret);
-
- return ret;
-}
-
-static u16 btmtk_usb_get_crc(struct btmtk_usb_data *data)
-{
- int ret = 0;
- struct usb_device *udev = data->udev;
- u16 crc, count = 0;
- __le16 crc_le;
-
- BT_DBG("%s\n", __func__);
-
- while (1) {
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- 0x01, DEVICE_VENDOR_REQUEST_IN,
- 0x21, 0x00, data->io_buf, 2,
- CONTROL_TIMEOUT_JIFFIES);
-
- if (ret < 0) {
- crc = 0xFFFF;
- BT_ERR("%s error(%d)\n", __func__, ret);
- }
-
- memmove(&crc_le, data->io_buf, 2);
-
- crc = le16_to_cpu(crc_le);
-
- if (crc != 0xFFFF)
- break;
-
- mdelay(100);
-
- if (count++ > 100) {
- BT_ERR("Query CRC over %d times\n", count);
- break;
- }
- }
-
- return crc;
-}
-
-static int btmtk_usb_reset_wmt(struct btmtk_usb_data *data)
-{
- int ret = 0;
-
- /* reset command */
- u8 cmd[8] = {0x6F, 0xFC, 0x05, 0x01, 0x07, 0x01, 0x00, 0x04};
-
- memmove(data->io_buf, cmd, 8);
-
- BT_DBG("%s\n", __func__);
-
- ret = usb_control_msg(data->udev, usb_sndctrlpipe(data->udev, 0), 0x01,
- DEVICE_CLASS_REQUEST_OUT, 0x12, 0x00,
- data->io_buf, 8, CONTROL_TIMEOUT_JIFFIES);
-
- if (ret)
- BT_ERR("%s:(%d)\n", __func__, ret);
-
- return ret;
-}
-
-static void load_rom_patch_complete(struct urb *urb)
-{
-
- struct completion *sent_to_mcu_done = (struct completion *)urb->context;
-
- complete(sent_to_mcu_done);
-}
-
-static int btmtk_usb_load_rom_patch(struct btmtk_usb_data *data)
-{
- u32 loop = 0;
- u32 value;
- s32 sent_len;
- int ret = 0, total_checksum = 0;
- struct urb *urb;
- u32 patch_len = 0;
- u32 cur_len = 0;
- dma_addr_t data_dma;
- struct completion sent_to_mcu_done;
- int first_block = 1;
- unsigned char phase;
- void *buf;
- char *pos;
- unsigned int pipe;
- pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress);
-
- if (!data->firmware) {
- BT_ERR("%s:please assign a rom patch\n", __func__);
- return -1;
- }
-
-load_patch_protect:
- btmtk_usb_switch_iobase(data, WLAN);
- btmtk_usb_io_read32(data, SEMAPHORE_03, &value);
- loop++;
-
- if (((value & 0x01) == 0x00) && (loop < 600)) {
- mdelay(1);
- goto load_patch_protect;
- }
-
- btmtk_usb_io_write32(data, 0x1004, 0x2c);
-
- btmtk_usb_switch_iobase(data, SYSCTL);
-
- btmtk_usb_io_write32(data, 0x1c, 0x30);
-
- /* Enable USB_DMA_CFG */
- btmtk_usb_io_write32(data, 0x9018, 0x00c00020);
-
- btmtk_usb_switch_iobase(data, WLAN);
-
- /* check ROM patch if upgrade */
- btmtk_usb_io_read32(data, COM_REG0, &value);
-
- if ((value & 0x02) == 0x02)
- goto error0;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
-
- if (!urb) {
- ret = -ENOMEM;
- goto error0;
- }
-
- buf = usb_alloc_coherent(data->udev, UPLOAD_PATCH_UNIT,
- GFP_ATOMIC, &data_dma);
-
- if (!buf) {
- ret = -ENOMEM;
- goto error1;
- }
-
- pos = buf;
- BT_DBG("loading rom patch");
-
- init_completion(&sent_to_mcu_done);
-
- cur_len = 0x00;
- patch_len = data->rom_patch_len - PATCH_INFO_SIZE;
-
- /* loading rom patch */
- while (1) {
- s32 sent_len_max = UPLOAD_PATCH_UNIT - PATCH_HEADER_SIZE;
- sent_len = min_t(s32, (patch_len - cur_len), sent_len_max);
-
- BT_DBG("patch_len = %d\n", patch_len);
- BT_DBG("cur_len = %d\n", cur_len);
- BT_DBG("sent_len = %d\n", sent_len);
-
- if (sent_len <= 0)
- break;
-
- if (first_block == 1) {
- if (sent_len < sent_len_max)
- phase = PATCH_PHASE3;
- else
- phase = PATCH_PHASE1;
- first_block = 0;
- } else if (sent_len == sent_len_max) {
- phase = PATCH_PHASE2;
- } else {
- phase = PATCH_PHASE3;
- }
-
- /* prepare HCI header */
- pos[0] = 0x6F;
- pos[1] = 0xFC;
- pos[2] = (sent_len + 5) & 0xFF;
- pos[3] = ((sent_len + 5) >> 8) & 0xFF;
-
- /* prepare WMT header */
- pos[4] = 0x01;
- pos[5] = 0x01;
- pos[6] = (sent_len + 1) & 0xFF;
- pos[7] = ((sent_len + 1) >> 8) & 0xFF;
-
- pos[8] = phase;
-
- memcpy(&pos[9],
- data->firmware->data + PATCH_INFO_SIZE + cur_len,
- sent_len);
-
- BT_DBG("sent_len + PATCH_HEADER_SIZE = %d, phase = %d\n",
- sent_len + PATCH_HEADER_SIZE, phase);
-
- usb_fill_bulk_urb(urb,
- data->udev,
- pipe,
- buf,
- sent_len + PATCH_HEADER_SIZE,
- load_rom_patch_complete,
- &sent_to_mcu_done);
-
- urb->transfer_dma = data_dma;
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
- ret = usb_submit_urb(urb, GFP_ATOMIC);
-
- if (ret)
- goto error2;
-
- if (!wait_for_completion_timeout(&sent_to_mcu_done,
- msecs_to_jiffies(1000))) {
- usb_kill_urb(urb);
- BT_ERR("upload rom_patch timeout\n");
- goto error2;
- }
-
- BT_DBG(".");
-
- mdelay(200);
-
- cur_len += sent_len;
-
- }
-
- total_checksum = checksume16(
- (u8 *)data->firmware->data + PATCH_INFO_SIZE,
- patch_len);
-
- BT_DBG("Send checksum req..\n");
-
- btmtk_usb_chk_crc(data, patch_len);
-
- mdelay(20);
-
- if (total_checksum != btmtk_usb_get_crc(data)) {
- BT_ERR("checksum fail!, local(0x%x) <> fw(0x%x)\n",
- total_checksum, btmtk_usb_get_crc(data));
- ret = -1;
- goto error2;
- }
-
- mdelay(20);
-
- ret = btmtk_usb_reset_wmt(data);
-
- mdelay(20);
-
-error2:
- usb_free_coherent(data->udev, UPLOAD_PATCH_UNIT, buf, data_dma);
-error1:
- usb_free_urb(urb);
-error0:
- btmtk_usb_io_write32(data, SEMAPHORE_03, 0x1);
- return ret;
-}
-
-
-static int load_fw_iv(struct btmtk_usb_data *data)
-{
- int ret;
- struct usb_device *udev = data->udev;
- char *buf = kmalloc(64, GFP_ATOMIC);
-
- memmove(buf, data->firmware->data + 32, 64);
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01,
- DEVICE_VENDOR_REQUEST_OUT, 0x12, 0x0, buf, 64,
- CONTROL_TIMEOUT_JIFFIES);
-
- if (ret < 0) {
- BT_ERR("%s error(%d) step4\n", __func__, ret);
- kfree(buf);
- return ret;
- }
-
- if (ret > 0)
- ret = 0;
-
- kfree(buf);
-
- return ret;
-}
-
-static void load_fw_complete(struct urb *urb)
-{
-
- struct completion *sent_to_mcu_done = (struct completion *)urb->context;
-
- complete(sent_to_mcu_done);
-}
-
-static int btmtk_usb_load_fw(struct btmtk_usb_data *data)
-{
- struct usb_device *udev = data->udev;
- struct urb *urb;
- void *buf;
- u32 cur_len = 0;
- u32 packet_header = 0;
- __le32 packet_header_le;
- u32 value;
- u32 ilm_len = 0, dlm_len = 0;
- u16 fw_ver, build_ver;
- u32 loop = 0;
- dma_addr_t data_dma;
- int ret = 0, sent_len;
- struct completion sent_to_mcu_done;
- unsigned int pipe;
- pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress);
-
- if (!data->firmware) {
- BT_ERR("%s:please assign a fw\n", __func__);
- return -1;
- }
-
- BT_DBG("bulk_tx_ep = %x\n", data->bulk_tx_ep->bEndpointAddress);
-
-loadfw_protect:
- btmtk_usb_switch_iobase(data, WLAN);
- btmtk_usb_io_read32(data, SEMAPHORE_00, &value);
- loop++;
-
- if (((value & 0x1) == 0) && (loop < 10000))
- goto loadfw_protect;
-
- /* check MCU if ready */
- btmtk_usb_io_read32(data, COM_REG0, &value);
-
- if ((value & 0x01) == 0x01)
- goto error0;
-
- /* Enable MPDMA TX and EP2 load FW mode */
- btmtk_usb_io_write32(data, 0x238, 0x1c000000);
-
- btmtk_usb_reset(udev);
- mdelay(100);
-
- ilm_len = (*(data->firmware->data + 3) << 24)
- | (*(data->firmware->data + 2) << 16)
- | (*(data->firmware->data + 1) << 8)
- | (*data->firmware->data);
-
- dlm_len = (*(data->firmware->data + 7) << 24)
- | (*(data->firmware->data + 6) << 16)
- | (*(data->firmware->data + 5) << 8)
- | (*(data->firmware->data + 4));
-
- fw_ver = (*(data->firmware->data + 11) << 8) |
- (*(data->firmware->data + 10));
-
- build_ver = (*(data->firmware->data + 9) << 8) |
- (*(data->firmware->data + 8));
-
- BT_DBG("fw version:%d.%d.%02d ",
- (fw_ver & 0xf000) >> 8,
- (fw_ver & 0x0f00) >> 8,
- (fw_ver & 0x00ff));
-
- BT_DBG("build:%x\n", build_ver);
-
- BT_DBG("build Time =");
-
- for (loop = 0; loop < 16; loop++)
- BT_DBG("%c", *(data->firmware->data + 16 + loop));
-
- BT_DBG("\n");
-
- BT_DBG("ILM length = %d(bytes)\n", ilm_len);
- BT_DBG("DLM length = %d(bytes)\n", dlm_len);
-
- btmtk_usb_switch_iobase(data, SYSCTL);
-
- /* U2M_PDMA rx_ring_base_ptr */
- btmtk_usb_io_write32(data, 0x790, 0x400230);
-
- /* U2M_PDMA rx_ring_max_cnt */
- btmtk_usb_io_write32(data, 0x794, 0x1);
-
- /* U2M_PDMA cpu_idx */
- btmtk_usb_io_write32(data, 0x798, 0x1);
-
- /* U2M_PDMA enable */
- btmtk_usb_io_write32(data, 0x704, 0x44);
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
-
- if (!urb) {
- ret = -ENOMEM;
- goto error1;
- }
-
- buf = usb_alloc_coherent(udev, 14592, GFP_ATOMIC, &data_dma);
-
- if (!buf) {
- ret = -ENOMEM;
- goto error2;
- }
-
- BT_DBG("loading fw");
-
- init_completion(&sent_to_mcu_done);
-
- btmtk_usb_switch_iobase(data, SYSCTL);
-
- cur_len = 0x40;
-
- /* Loading ILM */
- while (1) {
- sent_len = min_t(s32, (ilm_len - cur_len), 14336);
-
- if (sent_len > 0) {
- packet_header &= ~(0xffffffff);
- packet_header |= (sent_len << 16);
- packet_header_le = cpu_to_le32(packet_header);
-
- memmove(buf, &packet_header_le, 4);
- memmove(buf + 4, data->firmware->data + 32 + cur_len,
- sent_len);
-
- /* U2M_PDMA descriptor */
- btmtk_usb_io_write32(data, 0x230, cur_len);
-
- while ((sent_len % 4) != 0)
- sent_len++;
-
- /* U2M_PDMA length */
- btmtk_usb_io_write32(data, 0x234, sent_len << 16);
-
- usb_fill_bulk_urb(urb,
- udev,
- pipe,
- buf,
- sent_len + 4,
- load_fw_complete,
- &sent_to_mcu_done);
-
- urb->transfer_dma = data_dma;
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
- ret = usb_submit_urb(urb, GFP_ATOMIC);
-
- if (ret)
- goto error3;
-
- if (!wait_for_completion_timeout(&sent_to_mcu_done,
- msecs_to_jiffies(1000))) {
- usb_kill_urb(urb);
- BT_ERR("upload ilm fw timeout\n");
- goto error3;
- }
-
- BT_DBG(".");
-
- mdelay(200);
-
- cur_len += sent_len;
- } else {
- break;
- }
- }
-
- init_completion(&sent_to_mcu_done);
- cur_len = 0x00;
-
- /* Loading DLM */
- while (1) {
- sent_len = min_t(s32, (dlm_len - cur_len), 14336);
-
- if (sent_len <= 0)
- break;
-
- packet_header &= ~(0xffffffff);
- packet_header |= (sent_len << 16);
- packet_header_le = cpu_to_le32(packet_header);
-
- memmove(buf, &packet_header_le, 4);
- memmove(buf + 4,
- data->firmware->data + 32 + ilm_len + cur_len,
- sent_len);
-
- /* U2M_PDMA descriptor */
- btmtk_usb_io_write32(data, 0x230, 0x80000 + cur_len);
-
- while ((sent_len % 4) != 0) {
- BT_DBG("sent_len is not divided by 4\n");
- sent_len++;
- }
-
- /* U2M_PDMA length */
- btmtk_usb_io_write32(data, 0x234, sent_len << 16);
-
- usb_fill_bulk_urb(urb,
- udev,
- pipe,
- buf,
- sent_len + 4,
- load_fw_complete,
- &sent_to_mcu_done);
-
- urb->transfer_dma = data_dma;
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
- ret = usb_submit_urb(urb, GFP_ATOMIC);
-
- if (ret)
- goto error3;
-
- if (!wait_for_completion_timeout(&sent_to_mcu_done,
- msecs_to_jiffies(1000))) {
- usb_kill_urb(urb);
- BT_ERR("upload dlm fw timeout\n");
- goto error3;
- }
-
- BT_DBG(".");
-
- mdelay(500);
-
- cur_len += sent_len;
-
- }
-
- /* upload 64bytes interrupt vector */
- ret = load_fw_iv(data);
- mdelay(100);
-
- btmtk_usb_switch_iobase(data, WLAN);
-
- /* check MCU if ready */
- loop = 0;
-
- do {
- btmtk_usb_io_read32(data, COM_REG0, &value);
-
- if (value == 0x01)
- break;
-
- mdelay(10);
- loop++;
- } while (loop <= 100);
-
- if (loop > 1000) {
- BT_ERR("wait for 100 times\n");
- ret = -ENODEV;
- }
-
-error3:
- usb_free_coherent(udev, 14592, buf, data_dma);
-error2:
- usb_free_urb(urb);
-error1:
- /* Disbale load fw mode */
- btmtk_usb_io_read32(data, 0x238, &value);
- value = value & ~(0x10000000);
- btmtk_usb_io_write32(data, 0x238, value);
-error0:
- btmtk_usb_io_write32(data, SEMAPHORE_00, 0x1);
- return ret;
-}
-
-static int inc_tx(struct btmtk_usb_data *data)
-{
- unsigned long flags;
- int rv;
-
- spin_lock_irqsave(&data->txlock, flags);
- rv = test_bit(BTUSB_SUSPENDING, &data->flags);
- if (!rv)
- data->tx_in_flight++;
- spin_unlock_irqrestore(&data->txlock, flags);
-
- return rv;
-}
-
-static void btmtk_usb_intr_complete(struct urb *urb)
-{
- struct hci_dev *hdev = urb->context;
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- int err;
-
- BT_DBG("%s: %s urb %p status %d count %d\n", __func__, hdev->name,
- urb, urb->status, urb->actual_length);
-
- if (!test_bit(HCI_RUNNING, &hdev->flags))
- return;
-
- if (urb->status == 0) {
- hdev->stat.byte_rx += urb->actual_length;
-
- hex_dump("hci event", urb->transfer_buffer, urb->actual_length);
-
- if (hci_recv_fragment(hdev, HCI_EVENT_PKT,
- urb->transfer_buffer,
- urb->actual_length) < 0) {
- BT_ERR("%s corrupted event packet", hdev->name);
- hdev->stat.err_rx++;
- }
- }
-
- if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
- return;
-
- usb_mark_last_busy(data->udev);
- usb_anchor_urb(urb, &data->intr_anchor);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
-
- if (err < 0) {
- /* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
- if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p failed to resubmit (%d)",
- hdev->name, urb, -err);
- usb_unanchor_urb(urb);
- }
-}
-
-static int btmtk_usb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- struct urb *urb;
- unsigned char *buf;
- unsigned int pipe;
- int err, size;
-
- BT_DBG("%s\n", __func__);
-
- if (!data->intr_ep)
- return -ENODEV;
-
- urb = usb_alloc_urb(0, mem_flags);
- if (!urb)
- return -ENOMEM;
-
- size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
-
- buf = kmalloc(size, mem_flags);
- if (!buf) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- pipe = usb_rcvintpipe(data->udev, data->intr_ep->bEndpointAddress);
-
- usb_fill_int_urb(urb, data->udev, pipe, buf, size,
- btmtk_usb_intr_complete, hdev,
- data->intr_ep->bInterval);
-
- urb->transfer_flags |= URB_FREE_BUFFER;
-
- usb_anchor_urb(urb, &data->intr_anchor);
-
- err = usb_submit_urb(urb, mem_flags);
- if (err < 0) {
- if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
- usb_unanchor_urb(urb);
- }
-
- usb_free_urb(urb);
-
- return err;
-
-}
-
-static void btmtk_usb_bulk_in_complete(struct urb *urb)
-{
- struct hci_dev *hdev = urb->context;
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- int err;
-
- BT_DBG("%s:%s urb %p status %d count %d", __func__, hdev->name,
- urb, urb->status, urb->actual_length);
-
- if (!test_bit(HCI_RUNNING, &hdev->flags))
- return;
-
- if (urb->status == 0) {
- hdev->stat.byte_rx += urb->actual_length;
-
- if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT,
- urb->transfer_buffer,
- urb->actual_length) < 0) {
- BT_ERR("%s corrupted ACL packet", hdev->name);
- hdev->stat.err_rx++;
- }
- }
-
- if (!test_bit(BTUSB_BULK_RUNNING, &data->flags))
- return;
-
- usb_anchor_urb(urb, &data->bulk_anchor);
- usb_mark_last_busy(data->udev);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0) {
- /* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
- if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p failed to resubmit (%d)",
- hdev->name, urb, -err);
- usb_unanchor_urb(urb);
- }
-}
-
-static int btmtk_usb_submit_bulk_in_urb(struct hci_dev *hdev, gfp_t mem_flags)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- struct urb *urb;
- unsigned char *buf;
- unsigned int pipe;
- int err, size = HCI_MAX_FRAME_SIZE;
-
- BT_DBG("%s:%s\n", __func__, hdev->name);
-
- if (!data->bulk_rx_ep)
- return -ENODEV;
-
- urb = usb_alloc_urb(0, mem_flags);
- if (!urb)
- return -ENOMEM;
-
- buf = kmalloc(size, mem_flags);
- if (!buf) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- pipe = usb_rcvbulkpipe(data->udev, data->bulk_rx_ep->bEndpointAddress);
-
- usb_fill_bulk_urb(urb, data->udev, pipe, buf, size,
- btmtk_usb_bulk_in_complete, hdev);
-
- urb->transfer_flags |= URB_FREE_BUFFER;
-
- usb_mark_last_busy(data->udev);
- usb_anchor_urb(urb, &data->bulk_anchor);
-
- err = usb_submit_urb(urb, mem_flags);
- if (err < 0) {
- if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
- usb_unanchor_urb(urb);
- }
-
- usb_free_urb(urb);
-
- return err;
-}
-
-static void btmtk_usb_isoc_in_complete(struct urb *urb)
-
-{
- struct hci_dev *hdev = urb->context;
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- int i, err;
-
- BT_DBG("%s: %s urb %p status %d count %d", __func__, hdev->name,
- urb, urb->status, urb->actual_length);
-
- if (!test_bit(HCI_RUNNING, &hdev->flags))
- return;
-
- if (urb->status == 0) {
- for (i = 0; i < urb->number_of_packets; i++) {
- unsigned int offset = urb->iso_frame_desc[i].offset;
- unsigned int length;
- length = urb->iso_frame_desc[i].actual_length;
-
- if (urb->iso_frame_desc[i].status)
- continue;
-
- hdev->stat.byte_rx += length;
-
- if (hci_recv_fragment(hdev, HCI_SCODATA_PKT,
- urb->transfer_buffer + offset,
- length) < 0) {
- BT_ERR("%s corrupted SCO packet", hdev->name);
- hdev->stat.err_rx++;
- }
- }
- }
-
- if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags))
- return;
-
- usb_anchor_urb(urb, &data->isoc_anchor);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0) {
- /* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
- if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p failed to resubmit (%d)",
- hdev->name, urb, -err);
- usb_unanchor_urb(urb);
- }
-}
-
-static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
-{
- int i, offset = 0;
-
- BT_DBG("len %d mtu %d", len, mtu);
-
- for (i = 0; i < BTUSB_MAX_ISOC_FRAMES && len >= mtu;
- i++, offset += mtu, len -= mtu) {
- urb->iso_frame_desc[i].offset = offset;
- urb->iso_frame_desc[i].length = mtu;
- }
-
- if (len && i < BTUSB_MAX_ISOC_FRAMES) {
- urb->iso_frame_desc[i].offset = offset;
- urb->iso_frame_desc[i].length = len;
- i++;
- }
-
- urb->number_of_packets = i;
-}
-
-static int btmtk_usb_submit_isoc_in_urb(struct hci_dev *hdev, gfp_t mem_flags)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- struct urb *urb;
- unsigned char *buf;
- unsigned int pipe;
- int err, size;
-
- BT_DBG("%s\n", __func__);
-
- if (!data->isoc_rx_ep)
- return -ENODEV;
-
- urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, mem_flags);
- if (!urb)
- return -ENOMEM;
-
- size = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize) *
- BTUSB_MAX_ISOC_FRAMES;
-
- buf = kmalloc(size, mem_flags);
- if (!buf) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress);
-
- usb_fill_int_urb(urb, data->udev, pipe, buf, size,
- btmtk_usb_isoc_in_complete, hdev,
- data->isoc_rx_ep->bInterval);
-
- urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP;
-
- __fill_isoc_descriptor(urb, size,
- le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize));
-
- usb_anchor_urb(urb, &data->isoc_anchor);
-
- err = usb_submit_urb(urb, mem_flags);
- if (err < 0) {
- if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
- usb_unanchor_urb(urb);
- }
-
- usb_free_urb(urb);
-
- return err;
-}
-
-static int btmtk_usb_open(struct hci_dev *hdev)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- int err;
-
- BT_DBG("%s\n", __func__);
-
- err = usb_autopm_get_interface(data->intf);
- if (err < 0)
- return err;
-
- data->intf->needs_remote_wakeup = 1;
-
- if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
- goto done;
-
- if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
- goto done;
-
- err = btmtk_usb_submit_intr_urb(hdev, GFP_KERNEL);
- if (err < 0)
- goto failed;
-
- err = btmtk_usb_submit_bulk_in_urb(hdev, GFP_KERNEL);
- if (err < 0) {
- usb_kill_anchored_urbs(&data->intr_anchor);
- goto failed;
- }
-
- set_bit(BTUSB_BULK_RUNNING, &data->flags);
- btmtk_usb_submit_bulk_in_urb(hdev, GFP_KERNEL);
-
-done:
- usb_autopm_put_interface(data->intf);
- return 0;
-
-failed:
- clear_bit(BTUSB_INTR_RUNNING, &data->flags);
- clear_bit(HCI_RUNNING, &hdev->flags);
- usb_autopm_put_interface(data->intf);
- return err;
-}
-
-static void btmtk_usb_stop_traffic(struct btmtk_usb_data *data)
-{
- BT_DBG("%s\n", __func__);
-
- usb_kill_anchored_urbs(&data->intr_anchor);
- usb_kill_anchored_urbs(&data->bulk_anchor);
- usb_kill_anchored_urbs(&data->isoc_anchor);
-}
-
-static int btmtk_usb_close(struct hci_dev *hdev)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- int err;
-
- BT_DBG("%s\n", __func__);
-
- if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
- return 0;
-
- cancel_work_sync(&data->work);
- cancel_work_sync(&data->waker);
-
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- clear_bit(BTUSB_BULK_RUNNING, &data->flags);
- clear_bit(BTUSB_INTR_RUNNING, &data->flags);
-
- btmtk_usb_stop_traffic(data);
-
- err = usb_autopm_get_interface(data->intf);
- if (err < 0)
- goto failed;
-
- data->intf->needs_remote_wakeup = 0;
- usb_autopm_put_interface(data->intf);
-
-failed:
- usb_scuttle_anchored_urbs(&data->deferred);
- return 0;
-}
-
-static int btmtk_usb_flush(struct hci_dev *hdev)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
-
- BT_DBG("%s\n", __func__);
-
- usb_kill_anchored_urbs(&data->tx_anchor);
-
- return 0;
-}
-
-static void btmtk_usb_tx_complete(struct urb *urb)
-{
- struct sk_buff *skb = urb->context;
- struct hci_dev *hdev = (struct hci_dev *)skb->dev;
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
-
- BT_DBG("%s: %s urb %p status %d count %d\n", __func__, hdev->name,
- urb, urb->status, urb->actual_length);
-
- if (!test_bit(HCI_RUNNING, &hdev->flags))
- goto done;
-
- if (!urb->status)
- hdev->stat.byte_tx += urb->transfer_buffer_length;
- else
- hdev->stat.err_tx++;
-
-done:
- spin_lock(&data->txlock);
- data->tx_in_flight--;
- spin_unlock(&data->txlock);
-
- kfree(urb->setup_packet);
-
- kfree_skb(skb);
-}
-
-static void btmtk_usb_isoc_tx_complete(struct urb *urb)
-{
- struct sk_buff *skb = urb->context;
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
-
- BT_DBG("%s: %s urb %p status %d count %d", __func__, hdev->name,
- urb, urb->status, urb->actual_length);
-
- if (!test_bit(HCI_RUNNING, &hdev->flags))
- goto done;
-
- if (!urb->status)
- hdev->stat.byte_tx += urb->transfer_buffer_length;
- else
- hdev->stat.err_tx++;
-
-done:
- kfree(urb->setup_packet);
-
- kfree_skb(skb);
-}
-
-static int btmtk_usb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- struct usb_ctrlrequest *dr;
- struct urb *urb;
- unsigned int pipe;
- int err;
-
- BT_DBG("%s\n", __func__);
-
- if (!test_bit(HCI_RUNNING, &hdev->flags))
- return -EBUSY;
-
- switch (bt_cb(skb)->pkt_type) {
- case HCI_COMMAND_PKT:
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- return -ENOMEM;
-
- dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
- if (!dr) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- dr->bRequestType = data->cmdreq_type;
- dr->bRequest = 0;
- dr->wIndex = 0;
- dr->wValue = 0;
- dr->wLength = __cpu_to_le16(skb->len);
-
- pipe = usb_sndctrlpipe(data->udev, 0x00);
-
- if (test_bit(HCI_RUNNING, &hdev->flags)) {
- u16 op_code;
- memcpy(&op_code, skb->data, 2);
- BT_DBG("ogf = %x\n", (op_code & 0xfc00) >> 10);
- BT_DBG("ocf = %x\n", op_code & 0x03ff);
- hex_dump("hci command", skb->data, skb->len);
-
- }
-
- usb_fill_control_urb(urb, data->udev, pipe, (void *) dr,
- skb->data, skb->len,
- btmtk_usb_tx_complete, skb);
-
- hdev->stat.cmd_tx++;
- break;
-
- case HCI_ACLDATA_PKT:
- if (!data->bulk_tx_ep)
- return -ENODEV;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- return -ENOMEM;
-
- pipe = usb_sndbulkpipe(data->udev,
- data->bulk_tx_ep->bEndpointAddress);
-
- usb_fill_bulk_urb(urb, data->udev, pipe, skb->data,
- skb->len, btmtk_usb_tx_complete, skb);
-
- hdev->stat.acl_tx++;
- BT_DBG("HCI_ACLDATA_PKT:\n");
- break;
-
- case HCI_SCODATA_PKT:
- if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1)
- return -ENODEV;
-
- urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC);
- if (!urb)
- return -ENOMEM;
-
- pipe = usb_sndisocpipe(data->udev,
- data->isoc_tx_ep->bEndpointAddress);
-
- usb_fill_int_urb(urb, data->udev, pipe,
- skb->data, skb->len, btmtk_usb_isoc_tx_complete,
- skb, data->isoc_tx_ep->bInterval);
-
- urb->transfer_flags = URB_ISO_ASAP;
-
- __fill_isoc_descriptor(urb, skb->len,
- le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
-
- hdev->stat.sco_tx++;
- BT_DBG("HCI_SCODATA_PKT:\n");
- goto skip_waking;
-
- default:
- return -EILSEQ;
- }
-
- err = inc_tx(data);
-
- if (err) {
- usb_anchor_urb(urb, &data->deferred);
- schedule_work(&data->waker);
- err = 0;
- goto done;
- }
-
-skip_waking:
- usb_anchor_urb(urb, &data->tx_anchor);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0) {
- if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
- kfree(urb->setup_packet);
- usb_unanchor_urb(urb);
- } else {
- usb_mark_last_busy(data->udev);
- }
-
-done:
- usb_free_urb(urb);
- return err;
-}
-
-static void btmtk_usb_notify(struct hci_dev *hdev, unsigned int evt)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
-
- BT_DBG("%s evt %d", hdev->name, evt);
-
- if (hdev->conn_hash.sco_num != data->sco_num) {
- data->sco_num = hdev->conn_hash.sco_num;
- schedule_work(&data->work);
- }
-}
-
-static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- struct usb_interface *intf = data->isoc;
- struct usb_endpoint_descriptor *ep_desc;
- int i, err;
-
- if (!data->isoc)
- return -ENODEV;
-
- err = usb_set_interface(data->udev, 1, altsetting);
- if (err < 0) {
- BT_ERR("%s setting interface failed (%d)", hdev->name, -err);
- return err;
- }
-
- data->isoc_altsetting = altsetting;
-
- data->isoc_tx_ep = NULL;
- data->isoc_rx_ep = NULL;
-
- for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
- ep_desc = &intf->cur_altsetting->endpoint[i].desc;
-
- if (!data->isoc_tx_ep && usb_endpoint_is_isoc_out(ep_desc)) {
- data->isoc_tx_ep = ep_desc;
- continue;
- }
-
- if (!data->isoc_rx_ep && usb_endpoint_is_isoc_in(ep_desc)) {
- data->isoc_rx_ep = ep_desc;
- continue;
- }
- }
-
- if (!data->isoc_tx_ep || !data->isoc_rx_ep) {
- BT_ERR("%s invalid SCO descriptors", hdev->name);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void btmtk_usb_work(struct work_struct *work)
-{
- struct btmtk_usb_data *data = container_of(work, struct btmtk_usb_data,
- work);
- struct hci_dev *hdev = data->hdev;
- int new_alts;
- int err;
-
- BT_DBG("%s\n", __func__);
-
- if (hdev->conn_hash.sco_num > 0) {
- if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
- err = usb_autopm_get_interface(data->isoc ?
- data->isoc : data->intf);
- if (err < 0) {
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- usb_kill_anchored_urbs(&data->isoc_anchor);
- return;
- }
-
- set_bit(BTUSB_DID_ISO_RESUME, &data->flags);
- }
-
- if (hdev->voice_setting & 0x0020) {
- static const int alts[3] = { 2, 4, 5 };
- new_alts = alts[hdev->conn_hash.sco_num - 1];
- } else {
- new_alts = hdev->conn_hash.sco_num;
- }
-
- if (data->isoc_altsetting != new_alts) {
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- usb_kill_anchored_urbs(&data->isoc_anchor);
-
- if (__set_isoc_interface(hdev, new_alts) < 0)
- return;
- }
-
- if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
- if (btmtk_usb_submit_isoc_in_urb(hdev, GFP_KERNEL) < 0)
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- else
- btmtk_usb_submit_isoc_in_urb(hdev, GFP_KERNEL);
- }
- } else {
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- usb_kill_anchored_urbs(&data->isoc_anchor);
-
- __set_isoc_interface(hdev, 0);
-
- if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
- usb_autopm_put_interface(data->isoc ?
- data->isoc : data->intf);
- }
-}
-
-static void btmtk_usb_waker(struct work_struct *work)
-{
- struct btmtk_usb_data *data = container_of(work, struct btmtk_usb_data,
- waker);
- int err;
-
- err = usb_autopm_get_interface(data->intf);
-
- if (err < 0)
- return;
-
- usb_autopm_put_interface(data->intf);
-}
-
-static int btmtk_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct btmtk_usb_data *data;
- struct usb_endpoint_descriptor *ep_desc;
- int i, err;
- struct hci_dev *hdev;
-
- /* interface numbers are hardcoded in the spec */
- if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
- return -ENODEV;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
-
- if (!data)
- return -ENOMEM;
-
- for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
- ep_desc = &intf->cur_altsetting->endpoint[i].desc;
-
- if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) {
- data->intr_ep = ep_desc;
- continue;
- }
-
- if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) {
- data->bulk_tx_ep = ep_desc;
- continue;
- }
-
- if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) {
- data->bulk_rx_ep = ep_desc;
- continue;
- }
- }
-
- if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) {
- kfree(data);
- return -ENODEV;
- }
-
- data->cmdreq_type = USB_TYPE_CLASS;
-
- data->udev = interface_to_usbdev(intf);
- data->intf = intf;
-
- spin_lock_init(&data->lock);
- INIT_WORK(&data->work, btmtk_usb_work);
- INIT_WORK(&data->waker, btmtk_usb_waker);
- spin_lock_init(&data->txlock);
-
- init_usb_anchor(&data->tx_anchor);
- init_usb_anchor(&data->intr_anchor);
- init_usb_anchor(&data->bulk_anchor);
- init_usb_anchor(&data->isoc_anchor);
- init_usb_anchor(&data->deferred);
-
- hdev = hci_alloc_dev();
- if (!hdev) {
- kfree(data);
- return -ENOMEM;
- }
-
- hdev->bus = HCI_USB;
-
- hci_set_drvdata(hdev, data);
-
- data->hdev = hdev;
-
- SET_HCIDEV_DEV(hdev, &intf->dev);
-
- hdev->open = btmtk_usb_open;
- hdev->close = btmtk_usb_close;
- hdev->flush = btmtk_usb_flush;
- hdev->send = btmtk_usb_send_frame;
- hdev->notify = btmtk_usb_notify;
-
- /* Interface numbers are hardcoded in the specification */
- data->isoc = usb_ifnum_to_if(data->udev, 1);
-
- if (data->isoc) {
- err = usb_driver_claim_interface(&btmtk_usb_driver,
- data->isoc, data);
- if (err < 0) {
- hci_free_dev(hdev);
- kfree(data);
- return err;
- }
- }
-
- data->io_buf = kmalloc(256, GFP_KERNEL);
- if (!data->io_buf) {
- hci_free_dev(hdev);
- kfree(data);
- return -ENOMEM;
- }
-
- btmtk_usb_switch_iobase(data, WLAN);
-
- btmtk_usb_cap_init(data);
-
- err = hci_register_dev(hdev);
- if (err < 0) {
- hci_free_dev(hdev);
- kfree(data);
- return err;
- }
-
- usb_set_intfdata(intf, data);
-
- return 0;
-}
-
-static void btmtk_usb_disconnect(struct usb_interface *intf)
-{
- struct btmtk_usb_data *data = usb_get_intfdata(intf);
- struct hci_dev *hdev;
-
- BT_DBG("%s\n", __func__);
-
- if (!data)
- return;
-
- hdev = data->hdev;
- usb_set_intfdata(data->intf, NULL);
-
- if (data->isoc)
- usb_set_intfdata(data->isoc, NULL);
-
- hci_unregister_dev(hdev);
-
- if (intf == data->isoc)
- usb_driver_release_interface(&btmtk_usb_driver, data->intf);
- else if (data->isoc)
- usb_driver_release_interface(&btmtk_usb_driver, data->isoc);
-
- hci_free_dev(hdev);
-
- kfree(data->io_buf);
-
- kfree(data);
-}
-
-#ifdef CONFIG_PM
-static int btmtk_usb_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct btmtk_usb_data *data = usb_get_intfdata(intf);
-
- BT_DBG("%s\n", __func__);
-
- if (data->suspend_count++)
- return 0;
-
- spin_lock_irq(&data->txlock);
- if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) {
- set_bit(BTUSB_SUSPENDING, &data->flags);
- spin_unlock_irq(&data->txlock);
- } else {
- spin_unlock_irq(&data->txlock);
- data->suspend_count--;
- return -EBUSY;
- }
-
- cancel_work_sync(&data->work);
-
- btmtk_usb_stop_traffic(data);
- usb_kill_anchored_urbs(&data->tx_anchor);
-
- return 0;
-}
-
-static void play_deferred(struct btmtk_usb_data *data)
-{
- struct urb *urb;
- int err;
-
- while ((urb = usb_get_from_anchor(&data->deferred))) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0)
- break;
-
- data->tx_in_flight++;
- }
-
- usb_scuttle_anchored_urbs(&data->deferred);
-}
-
-static int btmtk_usb_resume(struct usb_interface *intf)
-{
- struct btmtk_usb_data *data = usb_get_intfdata(intf);
- struct hci_dev *hdev = data->hdev;
- int err = 0;
-
- BT_DBG("%s\n", __func__);
-
- if (--data->suspend_count)
- return 0;
-
- if (!test_bit(HCI_RUNNING, &hdev->flags))
- goto done;
-
- if (test_bit(BTUSB_INTR_RUNNING, &data->flags)) {
- err = btmtk_usb_submit_intr_urb(hdev, GFP_NOIO);
- if (err < 0) {
- clear_bit(BTUSB_INTR_RUNNING, &data->flags);
- goto failed;
- }
- }
-
- if (test_bit(BTUSB_BULK_RUNNING, &data->flags)) {
- err = btmtk_usb_submit_bulk_in_urb(hdev, GFP_NOIO);
- if (err < 0) {
- clear_bit(BTUSB_BULK_RUNNING, &data->flags);
- goto failed;
- }
-
- btmtk_usb_submit_bulk_in_urb(hdev, GFP_NOIO);
- }
-
- if (test_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
- if (btmtk_usb_submit_isoc_in_urb(hdev, GFP_NOIO) < 0)
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- else
- btmtk_usb_submit_isoc_in_urb(hdev, GFP_NOIO);
- }
-
- spin_lock_irq(&data->txlock);
- play_deferred(data);
- clear_bit(BTUSB_SUSPENDING, &data->flags);
- spin_unlock_irq(&data->txlock);
- schedule_work(&data->work);
-
- return 0;
-
-failed:
- usb_scuttle_anchored_urbs(&data->deferred);
-done:
- spin_lock_irq(&data->txlock);
- clear_bit(BTUSB_SUSPENDING, &data->flags);
- spin_unlock_irq(&data->txlock);
-
- return err;
-}
-#endif
-
-static struct usb_device_id btmtk_usb_table[] = {
- /* Mediatek MT7650 */
- { USB_DEVICE(0x0e8d, 0x7650) },
- { USB_DEVICE(0x0e8d, 0x7630) },
- { USB_DEVICE(0x0e8d, 0x763e) },
- /* Mediatek MT662 */
- { USB_DEVICE(0x0e8d, 0x7662) },
- { USB_DEVICE(0x0e8d, 0x7632) },
- { } /* Terminating entry */
-};
-
-static struct usb_driver btmtk_usb_driver = {
- .name = "btmtk_usb",
- .probe = btmtk_usb_probe,
- .disconnect = btmtk_usb_disconnect,
-#ifdef CONFIG_PM
- .suspend = btmtk_usb_suspend,
- .resume = btmtk_usb_resume,
-#endif
- .id_table = btmtk_usb_table,
- .supports_autosuspend = 1,
- .disable_hub_initiated_lpm = 1,
-};
-
-module_usb_driver(btmtk_usb_driver);
-
-MODULE_DESCRIPTION("Mediatek Bluetooth USB driver ver " VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(MT7650_FIRMWARE);
-MODULE_FIRMWARE(MT7662_FIRMWARE);
diff --git a/drivers/staging/btmtk_usb/btmtk_usb.h b/drivers/staging/btmtk_usb/btmtk_usb.h
deleted file mode 100644
index 12f0d3b..0000000
--- a/drivers/staging/btmtk_usb/btmtk_usb.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * MediaTek Bluetooth USB Driver
- *
- * Copyright (C) 2013, MediaTek co.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- * or on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- */
-
-#ifndef __BTMTK_USB_H__
-#define __BTMTK_USB_H_
-
-/* Memory map for MTK BT */
-
-/* SYS Control */
-#define SYSCTL 0x400000
-
-/* WLAN */
-#define WLAN 0x410000
-
-/* MCUCTL */
-#define INT_LEVEL 0x0718
-#define COM_REG0 0x0730
-#define SEMAPHORE_00 0x07B0
-#define SEMAPHORE_01 0x07B4
-#define SEMAPHORE_02 0x07B8
-#define SEMAPHORE_03 0x07BC
-
-/* Chip definition */
-
-#define CONTROL_TIMEOUT_JIFFIES ((300 * HZ) / 100)
-#define DEVICE_VENDOR_REQUEST_OUT 0x40
-#define DEVICE_VENDOR_REQUEST_IN 0xc0
-#define DEVICE_CLASS_REQUEST_OUT 0x20
-
-#define BTUSB_MAX_ISOC_FRAMES 10
-#define BTUSB_INTR_RUNNING 0
-#define BTUSB_BULK_RUNNING 1
-#define BTUSB_ISOC_RUNNING 2
-#define BTUSB_SUSPENDING 3
-#define BTUSB_DID_ISO_RESUME 4
-
-/* ROM Patch */
-#define PATCH_HCI_HEADER_SIZE 4
-#define PATCH_WMT_HEADER_SIZE 5
-#define PATCH_HEADER_SIZE (PATCH_HCI_HEADER_SIZE + PATCH_WMT_HEADER_SIZE)
-#define UPLOAD_PATCH_UNIT 2048
-#define PATCH_INFO_SIZE 30
-#define PATCH_PHASE1 1
-#define PATCH_PHASE2 2
-#define PATCH_PHASE3 3
-
-struct btmtk_usb_data {
- struct hci_dev *hdev;
- struct usb_device *udev;
- struct usb_interface *intf;
- struct usb_interface *isoc;
-
- spinlock_t lock;
-
- unsigned long flags;
- struct work_struct work;
- struct work_struct waker;
-
- struct usb_anchor tx_anchor;
- struct usb_anchor intr_anchor;
- struct usb_anchor bulk_anchor;
- struct usb_anchor isoc_anchor;
- struct usb_anchor deferred;
- int tx_in_flight;
- spinlock_t txlock;
-
- struct usb_endpoint_descriptor *intr_ep;
- struct usb_endpoint_descriptor *bulk_tx_ep;
- struct usb_endpoint_descriptor *bulk_rx_ep;
- struct usb_endpoint_descriptor *isoc_tx_ep;
- struct usb_endpoint_descriptor *isoc_rx_ep;
-
- __u8 cmdreq_type;
-
- unsigned int sco_num;
- int isoc_altsetting;
- int suspend_count;
-
- /* request for different io operation */
- u8 w_request;
- u8 r_request;
-
- /* io buffer for usb control transfer */
- char *io_buf;
-
- struct semaphore fw_upload_sem;
-
- /* unsigned char *fw_image; */
- /* unsigned char *rom_patch; */
- const struct firmware *firmware;
- u32 chip_id;
- u8 need_load_fw;
- u8 need_load_rom_patch;
- u32 rom_patch_offset;
- u32 rom_patch_len;
-};
-
-static inline int is_mt7630(struct btmtk_usb_data *data)
-{
- return ((data->chip_id & 0xffff0000) == 0x76300000);
-}
-
-static inline int is_mt7650(struct btmtk_usb_data *data)
-{
- return ((data->chip_id & 0xffff0000) == 0x76500000);
-}
-
-static inline int is_mt7632(struct btmtk_usb_data *data)
-{
- return ((data->chip_id & 0xffff0000) == 0x76320000);
-}
-
-static inline int is_mt7662(struct btmtk_usb_data *data)
-{
- return ((data->chip_id & 0xffff0000) == 0x76620000);
-}
-
-#endif
diff --git a/drivers/staging/ced1401/ced_ioc.c b/drivers/staging/ced1401/ced_ioc.c
index 62efd74..bf532b1 100644
--- a/drivers/staging/ced1401/ced_ioc.c
+++ b/drivers/staging/ced1401/ced_ioc.c
@@ -19,7 +19,6 @@
*/
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
@@ -630,7 +629,7 @@ int ClearArea(DEVICE_EXTENSION *pdx, int nArea)
}
spin_unlock_irq(&pdx->stagedLock);
- if (pPages) { /* if we decided to release the memory */
+ if (pPages) { /* if we decided to release the memory */
/* Now we must undo the pinning down of the pages. We will assume the worst and mark */
/* all the pages as dirty. Don't be tempted to move this up above as you must not be */
/* holding a spin lock to do this stuff as it is not atomic. */
diff --git a/drivers/staging/ced1401/usb1401.c b/drivers/staging/ced1401/usb1401.c
index 97c55f9..efc310c 100644
--- a/drivers/staging/ced1401/usb1401.c
+++ b/drivers/staging/ced1401/usb1401.c
@@ -89,7 +89,6 @@ synchronous non-Urb based transfers.
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/highmem.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index bfa27e7..89e25b4 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -884,6 +884,12 @@ config COMEDI_GSC_HPDI
To compile this driver as a module, choose M here: the module will be
called gsc_hpdi.
+config COMEDI_MF6X4
+ tristate "Humusoft MF634 and MF624 DAQ Card support"
+ ---help---
+ This driver supports both Humusoft MF634 and MF624 Data acquisition
+ cards. The legacy Humusoft MF614 card is not supported.
+
config COMEDI_ICP_MULTI
tristate "Inova ICP_MULTI support"
---help---
diff --git a/drivers/staging/comedi/Makefile b/drivers/staging/comedi/Makefile
index e6dfc98f..fae2d90 100644
--- a/drivers/staging/comedi/Makefile
+++ b/drivers/staging/comedi/Makefile
@@ -1,3 +1,5 @@
+ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
+
comedi-y := comedi_fops.o range.o drivers.o \
comedi_buf.o
comedi-$(CONFIG_COMEDI_PCI_DRIVERS) += comedi_pci.o
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index 4e26bd7..924fce9 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -16,6 +16,7 @@
*/
#include <linux/vmalloc.h>
+#include <linux/slab.h>
#include "comedidev.h"
#include "comedi_internal.h"
@@ -26,31 +27,21 @@
#define COMEDI_PAGE_PROTECTION PAGE_KERNEL
#endif
-static void __comedi_buf_free(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned n_pages)
+static void comedi_buf_map_kref_release(struct kref *kref)
{
- struct comedi_async *async = s->async;
+ struct comedi_buf_map *bm =
+ container_of(kref, struct comedi_buf_map, refcount);
struct comedi_buf_page *buf;
- unsigned i;
-
- if (async->prealloc_buf) {
- vunmap(async->prealloc_buf);
- async->prealloc_buf = NULL;
- async->prealloc_bufsz = 0;
- }
+ unsigned int i;
- if (!async->buf_page_list)
- return;
-
- for (i = 0; i < n_pages; ++i) {
- buf = &async->buf_page_list[i];
- if (buf->virt_addr) {
+ if (bm->page_list) {
+ for (i = 0; i < bm->n_pages; i++) {
+ buf = &bm->page_list[i];
clear_bit(PG_reserved,
&(virt_to_page(buf->virt_addr)->flags));
- if (s->async_dma_dir != DMA_NONE) {
+ if (bm->dma_dir != DMA_NONE) {
#ifdef CONFIG_HAS_DMA
- dma_free_coherent(dev->hw_dev,
+ dma_free_coherent(bm->dma_hw_dev,
PAGE_SIZE,
buf->virt_addr,
buf->dma_addr);
@@ -59,10 +50,26 @@ static void __comedi_buf_free(struct comedi_device *dev,
free_page((unsigned long)buf->virt_addr);
}
}
+ vfree(bm->page_list);
}
- vfree(async->buf_page_list);
- async->buf_page_list = NULL;
- async->n_buf_pages = 0;
+ if (bm->dma_dir != DMA_NONE)
+ put_device(bm->dma_hw_dev);
+ kfree(bm);
+}
+
+static void __comedi_buf_free(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct comedi_async *async = s->async;
+
+ if (async->prealloc_buf) {
+ vunmap(async->prealloc_buf);
+ async->prealloc_buf = NULL;
+ async->prealloc_bufsz = 0;
+ }
+
+ comedi_buf_map_put(async->buf_map);
+ async->buf_map = NULL;
}
static void __comedi_buf_alloc(struct comedi_device *dev,
@@ -71,6 +78,7 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
{
struct comedi_async *async = s->async;
struct page **pages = NULL;
+ struct comedi_buf_map *bm;
struct comedi_buf_page *buf;
unsigned i;
@@ -80,18 +88,29 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
return;
}
- async->buf_page_list = vzalloc(sizeof(*buf) * n_pages);
- if (async->buf_page_list)
+ bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
+ if (!bm)
+ return;
+
+ async->buf_map = bm;
+ kref_init(&bm->refcount);
+ bm->dma_dir = s->async_dma_dir;
+ if (bm->dma_dir != DMA_NONE)
+ /* Need ref to hardware device to free buffer later. */
+ bm->dma_hw_dev = get_device(dev->hw_dev);
+
+ bm->page_list = vzalloc(sizeof(*buf) * n_pages);
+ if (bm->page_list)
pages = vmalloc(sizeof(struct page *) * n_pages);
if (!pages)
return;
for (i = 0; i < n_pages; i++) {
- buf = &async->buf_page_list[i];
- if (s->async_dma_dir != DMA_NONE)
+ buf = &bm->page_list[i];
+ if (bm->dma_dir != DMA_NONE)
#ifdef CONFIG_HAS_DMA
- buf->virt_addr = dma_alloc_coherent(dev->hw_dev,
+ buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
PAGE_SIZE,
&buf->dma_addr,
GFP_KERNEL |
@@ -108,6 +127,7 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
pages[i] = virt_to_page(buf->virt_addr);
}
+ bm->n_pages = i;
/* vmap the prealloc_buf if all the pages were allocated */
if (i == n_pages)
@@ -117,6 +137,26 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
vfree(pages);
}
+void comedi_buf_map_get(struct comedi_buf_map *bm)
+{
+ if (bm)
+ kref_get(&bm->refcount);
+}
+
+int comedi_buf_map_put(struct comedi_buf_map *bm)
+{
+ if (bm)
+ return kref_put(&bm->refcount, comedi_buf_map_kref_release);
+ return 1;
+}
+
+bool comedi_buf_is_mmapped(struct comedi_async *async)
+{
+ struct comedi_buf_map *bm = async->buf_map;
+
+ return bm && (atomic_read(&bm->refcount.refcount) > 1);
+}
+
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long new_size)
{
@@ -130,7 +170,7 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
return 0;
/* deallocate old buffer */
- __comedi_buf_free(dev, s, async->n_buf_pages);
+ __comedi_buf_free(dev, s);
/* allocate new buffer */
if (new_size) {
@@ -140,10 +180,9 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
if (!async->prealloc_buf) {
/* allocation failed */
- __comedi_buf_free(dev, s, n_pages);
+ __comedi_buf_free(dev, s);
return -ENOMEM;
}
- async->n_buf_pages = n_pages;
}
async->prealloc_bufsz = new_size;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index f3d59e2..c22c617 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -16,8 +16,6 @@
GNU General Public License for more details.
*/
-#undef DEBUG
-
#include "comedi_compat32.h"
#include <linux/module.h>
@@ -47,15 +45,6 @@
#define COMEDI_NUM_SUBDEVICE_MINORS \
(COMEDI_NUM_MINORS - COMEDI_NUM_BOARD_MINORS)
-#ifdef CONFIG_COMEDI_DEBUG
-int comedi_debug;
-EXPORT_SYMBOL_GPL(comedi_debug);
-module_param(comedi_debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(comedi_debug,
- "enable comedi core and driver debugging if non-zero (default 0)"
- );
-#endif
-
static int comedi_num_legacy_minors;
module_param(comedi_num_legacy_minors, int, S_IRUGO);
MODULE_PARM_DESC(comedi_num_legacy_minors,
@@ -89,11 +78,38 @@ static struct cdev comedi_cdev;
static void comedi_device_init(struct comedi_device *dev)
{
+ kref_init(&dev->refcount);
spin_lock_init(&dev->spinlock);
mutex_init(&dev->mutex);
+ init_rwsem(&dev->attach_lock);
dev->minor = -1;
}
+static void comedi_dev_kref_release(struct kref *kref)
+{
+ struct comedi_device *dev =
+ container_of(kref, struct comedi_device, refcount);
+
+ mutex_destroy(&dev->mutex);
+ put_device(dev->class_dev);
+ kfree(dev);
+}
+
+int comedi_dev_put(struct comedi_device *dev)
+{
+ if (dev)
+ return kref_put(&dev->refcount, comedi_dev_kref_release);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(comedi_dev_put);
+
+static struct comedi_device *comedi_dev_get(struct comedi_device *dev)
+{
+ if (dev)
+ kref_get(&dev->refcount);
+ return dev;
+}
+
static void comedi_device_cleanup(struct comedi_device *dev)
{
struct module *driver_module = NULL;
@@ -104,14 +120,9 @@ static void comedi_device_cleanup(struct comedi_device *dev)
if (dev->attached)
driver_module = dev->driver->module;
comedi_device_detach(dev);
- while (dev->use_count > 0) {
- if (driver_module)
- module_put(driver_module);
- module_put(THIS_MODULE);
- dev->use_count--;
- }
+ if (driver_module && dev->use_count)
+ module_put(driver_module);
mutex_unlock(&dev->mutex);
- mutex_destroy(&dev->mutex);
}
static bool comedi_clear_board_dev(struct comedi_device *dev)
@@ -142,17 +153,17 @@ static struct comedi_device *comedi_clear_board_minor(unsigned minor)
static void comedi_free_board_dev(struct comedi_device *dev)
{
if (dev) {
+ comedi_device_cleanup(dev);
if (dev->class_dev) {
device_destroy(comedi_class,
MKDEV(COMEDI_MAJOR, dev->minor));
}
- comedi_device_cleanup(dev);
- kfree(dev);
+ comedi_dev_put(dev);
}
}
static struct comedi_subdevice
-*comedi_subdevice_from_minor(unsigned minor)
+*comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned minor)
{
struct comedi_subdevice *s;
unsigned int i = minor - COMEDI_NUM_BOARD_MINORS;
@@ -160,37 +171,45 @@ static struct comedi_subdevice
BUG_ON(i >= COMEDI_NUM_SUBDEVICE_MINORS);
mutex_lock(&comedi_subdevice_minor_table_lock);
s = comedi_subdevice_minor_table[i];
+ if (s && s->device != dev)
+ s = NULL;
mutex_unlock(&comedi_subdevice_minor_table_lock);
return s;
}
-static struct comedi_device *comedi_dev_from_board_minor(unsigned minor)
+static struct comedi_device *comedi_dev_get_from_board_minor(unsigned minor)
{
struct comedi_device *dev;
BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS);
mutex_lock(&comedi_board_minor_table_lock);
- dev = comedi_board_minor_table[minor];
+ dev = comedi_dev_get(comedi_board_minor_table[minor]);
mutex_unlock(&comedi_board_minor_table_lock);
return dev;
}
-static struct comedi_device *comedi_dev_from_subdevice_minor(unsigned minor)
+static struct comedi_device *comedi_dev_get_from_subdevice_minor(unsigned minor)
{
+ struct comedi_device *dev;
struct comedi_subdevice *s;
+ unsigned int i = minor - COMEDI_NUM_BOARD_MINORS;
- s = comedi_subdevice_from_minor(minor);
- return s ? s->device : NULL;
+ BUG_ON(i >= COMEDI_NUM_SUBDEVICE_MINORS);
+ mutex_lock(&comedi_subdevice_minor_table_lock);
+ s = comedi_subdevice_minor_table[i];
+ dev = comedi_dev_get(s ? s->device : NULL);
+ mutex_unlock(&comedi_subdevice_minor_table_lock);
+ return dev;
}
-struct comedi_device *comedi_dev_from_minor(unsigned minor)
+struct comedi_device *comedi_dev_get_from_minor(unsigned minor)
{
if (minor < COMEDI_NUM_BOARD_MINORS)
- return comedi_dev_from_board_minor(minor);
+ return comedi_dev_get_from_board_minor(minor);
else
- return comedi_dev_from_subdevice_minor(minor);
+ return comedi_dev_get_from_subdevice_minor(minor);
}
-EXPORT_SYMBOL_GPL(comedi_dev_from_minor);
+EXPORT_SYMBOL_GPL(comedi_dev_get_from_minor);
static struct comedi_subdevice *
comedi_read_subdevice(const struct comedi_device *dev, unsigned int minor)
@@ -198,10 +217,8 @@ comedi_read_subdevice(const struct comedi_device *dev, unsigned int minor)
struct comedi_subdevice *s;
if (minor >= COMEDI_NUM_BOARD_MINORS) {
- s = comedi_subdevice_from_minor(minor);
- if (!s || s->device != dev)
- return NULL;
- if (s->subdev_flags & SDF_CMD_READ)
+ s = comedi_subdevice_from_minor(dev, minor);
+ if (s == NULL || (s->subdev_flags & SDF_CMD_READ))
return s;
}
return dev->read_subdev;
@@ -213,10 +230,8 @@ comedi_write_subdevice(const struct comedi_device *dev, unsigned int minor)
struct comedi_subdevice *s;
if (minor >= COMEDI_NUM_BOARD_MINORS) {
- s = comedi_subdevice_from_minor(minor);
- if (!s || s->device != dev)
- return NULL;
- if (s->subdev_flags & SDF_CMD_WRITE)
+ s = comedi_subdevice_from_minor(dev, minor);
+ if (s == NULL || (s->subdev_flags & SDF_CMD_WRITE))
return s;
}
return dev->write_subdev;
@@ -232,11 +247,13 @@ static int resize_async_buffer(struct comedi_device *dev,
return -EPERM;
if (s->busy) {
- DPRINTK("subdevice is busy, cannot resize buffer\n");
+ dev_dbg(dev->class_dev,
+ "subdevice is busy, cannot resize buffer\n");
return -EBUSY;
}
- if (async->mmap_count) {
- DPRINTK("subdevice is mmapped, cannot resize buffer\n");
+ if (comedi_buf_is_mmapped(async)) {
+ dev_dbg(dev->class_dev,
+ "subdevice is mmapped, cannot resize buffer\n");
return -EBUSY;
}
@@ -254,8 +271,8 @@ static int resize_async_buffer(struct comedi_device *dev,
return retval;
}
- DPRINTK("comedi%i subd %d buffer resized to %i bytes\n",
- dev->minor, s->index, async->prealloc_bufsz);
+ dev_dbg(dev->class_dev, "subd %d buffer resized to %i bytes\n",
+ s->index, async->prealloc_bufsz);
return 0;
}
@@ -269,7 +286,7 @@ static ssize_t max_read_buffer_kb_show(struct device *csdev,
struct comedi_subdevice *s;
unsigned int size = 0;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -279,6 +296,7 @@ static ssize_t max_read_buffer_kb_show(struct device *csdev,
size = s->async->max_bufsize / 1024;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
@@ -299,7 +317,7 @@ static ssize_t max_read_buffer_kb_store(struct device *csdev,
return -EINVAL;
size *= 1024;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -311,6 +329,7 @@ static ssize_t max_read_buffer_kb_store(struct device *csdev,
err = -EINVAL;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return err ? err : count;
}
static DEVICE_ATTR_RW(max_read_buffer_kb);
@@ -323,7 +342,7 @@ static ssize_t read_buffer_kb_show(struct device *csdev,
struct comedi_subdevice *s;
unsigned int size = 0;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -333,6 +352,7 @@ static ssize_t read_buffer_kb_show(struct device *csdev,
size = s->async->prealloc_bufsz / 1024;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
@@ -353,7 +373,7 @@ static ssize_t read_buffer_kb_store(struct device *csdev,
return -EINVAL;
size *= 1024;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -365,6 +385,7 @@ static ssize_t read_buffer_kb_store(struct device *csdev,
err = -EINVAL;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return err ? err : count;
}
static DEVICE_ATTR_RW(read_buffer_kb);
@@ -378,7 +399,7 @@ static ssize_t max_write_buffer_kb_show(struct device *csdev,
struct comedi_subdevice *s;
unsigned int size = 0;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -388,6 +409,7 @@ static ssize_t max_write_buffer_kb_show(struct device *csdev,
size = s->async->max_bufsize / 1024;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
@@ -408,7 +430,7 @@ static ssize_t max_write_buffer_kb_store(struct device *csdev,
return -EINVAL;
size *= 1024;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -420,6 +442,7 @@ static ssize_t max_write_buffer_kb_store(struct device *csdev,
err = -EINVAL;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return err ? err : count;
}
static DEVICE_ATTR_RW(max_write_buffer_kb);
@@ -432,7 +455,7 @@ static ssize_t write_buffer_kb_show(struct device *csdev,
struct comedi_subdevice *s;
unsigned int size = 0;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -442,6 +465,7 @@ static ssize_t write_buffer_kb_show(struct device *csdev,
size = s->async->prealloc_bufsz / 1024;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
@@ -462,7 +486,7 @@ static ssize_t write_buffer_kb_store(struct device *csdev,
return -EINVAL;
size *= 1024;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -474,6 +498,7 @@ static ssize_t write_buffer_kb_store(struct device *csdev,
err = -EINVAL;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return err ? err : count;
}
static DEVICE_ATTR_RW(write_buffer_kb);
@@ -562,12 +587,13 @@ static void do_become_nonbusy(struct comedi_device *dev,
async->inttrig = NULL;
kfree(async->cmd.chanlist);
async->cmd.chanlist = NULL;
+ s->busy = NULL;
+ wake_up_interruptible_all(&s->async->wait_head);
} else {
dev_err(dev->class_dev,
"BUG: (?) do_become_nonbusy called with async=NULL\n");
+ s->busy = NULL;
}
-
- s->busy = NULL;
}
static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
@@ -582,6 +608,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return ret;
}
+void comedi_device_cancel_all(struct comedi_device *dev)
+{
+ struct comedi_subdevice *s;
+ int i;
+
+ if (!dev->attached)
+ return;
+
+ for (i = 0; i < dev->n_subdevices; i++) {
+ s = &dev->subdevices[i];
+ if (s->async)
+ do_cancel(dev, s);
+ }
+}
+
static int is_device_busy(struct comedi_device *dev)
{
struct comedi_subdevice *s;
@@ -594,7 +635,7 @@ static int is_device_busy(struct comedi_device *dev)
s = &dev->subdevices[i];
if (s->busy)
return 1;
- if (s->async && s->async->mmap_count)
+ if (s->async && comedi_buf_is_mmapped(s->async))
return 1;
}
@@ -684,7 +725,8 @@ static int do_bufconfig_ioctl(struct comedi_device *dev,
async = s->async;
if (!async) {
- DPRINTK("subdevice does not have async capability\n");
+ dev_dbg(dev->class_dev,
+ "subdevice does not have async capability\n");
bc.size = 0;
bc.maximum_size = 0;
goto copyback;
@@ -931,7 +973,8 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
async = s->async;
if (!async) {
- DPRINTK("subdevice does not have async capability\n");
+ dev_dbg(dev->class_dev,
+ "subdevice does not have async capability\n");
bi.buf_write_ptr = 0;
bi.buf_read_ptr = 0;
bi.buf_write_count = 0;
@@ -1083,19 +1126,20 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
break;
}
if (insn->subdev >= dev->n_subdevices) {
- DPRINTK("%d not usable subdevice\n",
+ dev_dbg(dev->class_dev,
+ "%d not usable subdevice\n",
insn->subdev);
ret = -EINVAL;
break;
}
s = &dev->subdevices[insn->subdev];
if (!s->async) {
- DPRINTK("no async\n");
+ dev_dbg(dev->class_dev, "no async\n");
ret = -EINVAL;
break;
}
if (!s->async->inttrig) {
- DPRINTK("no inttrig\n");
+ dev_dbg(dev->class_dev, "no inttrig\n");
ret = -EAGAIN;
break;
}
@@ -1104,7 +1148,7 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
ret = 1;
break;
default:
- DPRINTK("invalid insn\n");
+ dev_dbg(dev->class_dev, "invalid insn\n");
ret = -EINVAL;
break;
}
@@ -1113,21 +1157,23 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
unsigned int maxdata;
if (insn->subdev >= dev->n_subdevices) {
- DPRINTK("subdevice %d out of range\n", insn->subdev);
+ dev_dbg(dev->class_dev, "subdevice %d out of range\n",
+ insn->subdev);
ret = -EINVAL;
goto out;
}
s = &dev->subdevices[insn->subdev];
if (s->type == COMEDI_SUBD_UNUSED) {
- DPRINTK("%d not usable subdevice\n", insn->subdev);
+ dev_dbg(dev->class_dev, "%d not usable subdevice\n",
+ insn->subdev);
ret = -EIO;
goto out;
}
/* are we locked? (ioctl lock) */
if (s->lock && s->lock != file) {
- DPRINTK("device locked\n");
+ dev_dbg(dev->class_dev, "device locked\n");
ret = -EACCES;
goto out;
}
@@ -1135,7 +1181,7 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
ret = comedi_check_chanlist(s, 1, &insn->chanspec);
if (ret < 0) {
ret = -EINVAL;
- DPRINTK("bad chanspec\n");
+ dev_dbg(dev->class_dev, "bad chanspec\n");
goto out;
}
@@ -1156,7 +1202,8 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
for (i = 0; i < insn->n; ++i) {
if (data[i] > maxdata) {
ret = -EINVAL;
- DPRINTK("bad data value(s)\n");
+ dev_dbg(dev->class_dev,
+ "bad data value(s)\n");
break;
}
}
@@ -1238,35 +1285,35 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
data = kmalloc(sizeof(unsigned int) * MAX_SAMPLES, GFP_KERNEL);
if (!data) {
- DPRINTK("kmalloc failed\n");
ret = -ENOMEM;
goto error;
}
insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL);
if (!insns) {
- DPRINTK("kmalloc failed\n");
ret = -ENOMEM;
goto error;
}
if (copy_from_user(insns, insnlist.insns,
sizeof(*insns) * insnlist.n_insns)) {
- DPRINTK("copy_from_user failed\n");
+ dev_dbg(dev->class_dev, "copy_from_user failed\n");
ret = -EFAULT;
goto error;
}
for (i = 0; i < insnlist.n_insns; i++) {
if (insns[i].n > MAX_SAMPLES) {
- DPRINTK("number of samples too large\n");
+ dev_dbg(dev->class_dev,
+ "number of samples too large\n");
ret = -EINVAL;
goto error;
}
if (insns[i].insn & INSN_MASK_WRITE) {
if (copy_from_user(data, insns[i].data,
insns[i].n * sizeof(unsigned int))) {
- DPRINTK("copy_from_user failed\n");
+ dev_dbg(dev->class_dev,
+ "copy_from_user failed\n");
ret = -EFAULT;
goto error;
}
@@ -1277,7 +1324,8 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
if (insns[i].insn & INSN_MASK_READ) {
if (copy_to_user(insns[i].data, data,
insns[i].n * sizeof(unsigned int))) {
- DPRINTK("copy_to_user failed\n");
+ dev_dbg(dev->class_dev,
+ "copy_to_user failed\n");
ret = -EFAULT;
goto error;
}
@@ -1367,14 +1415,14 @@ static int do_cmd_ioctl(struct comedi_device *dev,
unsigned int __user *user_chanlist;
if (copy_from_user(&cmd, arg, sizeof(cmd))) {
- DPRINTK("bad cmd address\n");
+ dev_dbg(dev->class_dev, "bad cmd address\n");
return -EFAULT;
}
/* save user's chanlist pointer so it can be restored later */
user_chanlist = (unsigned int __user *)cmd.chanlist;
if (cmd.subdev >= dev->n_subdevices) {
- DPRINTK("%d no such subdevice\n", cmd.subdev);
+ dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd.subdev);
return -ENODEV;
}
@@ -1382,38 +1430,38 @@ static int do_cmd_ioctl(struct comedi_device *dev,
async = s->async;
if (s->type == COMEDI_SUBD_UNUSED) {
- DPRINTK("%d not valid subdevice\n", cmd.subdev);
+ dev_dbg(dev->class_dev, "%d not valid subdevice\n", cmd.subdev);
return -EIO;
}
if (!s->do_cmd || !s->do_cmdtest || !s->async) {
- DPRINTK("subdevice %i does not support commands\n",
- cmd.subdev);
+ dev_dbg(dev->class_dev,
+ "subdevice %i does not support commands\n", cmd.subdev);
return -EIO;
}
/* are we locked? (ioctl lock) */
if (s->lock && s->lock != file) {
- DPRINTK("subdevice locked\n");
+ dev_dbg(dev->class_dev, "subdevice locked\n");
return -EACCES;
}
/* are we busy? */
if (s->busy) {
- DPRINTK("subdevice busy\n");
+ dev_dbg(dev->class_dev, "subdevice busy\n");
return -EBUSY;
}
/* make sure channel/gain list isn't too long */
if (cmd.chanlist_len > s->len_chanlist) {
- DPRINTK("channel/gain list too long %u > %d\n",
+ dev_dbg(dev->class_dev, "channel/gain list too long %u > %d\n",
cmd.chanlist_len, s->len_chanlist);
return -EINVAL;
}
/* make sure channel/gain list isn't too short */
if (cmd.chanlist_len < 1) {
- DPRINTK("channel/gain list too short %u < 1\n",
+ dev_dbg(dev->class_dev, "channel/gain list too short %u < 1\n",
cmd.chanlist_len);
return -EINVAL;
}
@@ -1425,7 +1473,9 @@ static int do_cmd_ioctl(struct comedi_device *dev,
async->cmd.chanlist_len * sizeof(int));
if (IS_ERR(async->cmd.chanlist)) {
ret = PTR_ERR(async->cmd.chanlist);
- DPRINTK("memdup_user failed with code %d\n", ret);
+ async->cmd.chanlist = NULL;
+ dev_dbg(dev->class_dev, "memdup_user failed with code %d\n",
+ ret);
goto cleanup;
}
@@ -1434,20 +1484,20 @@ static int do_cmd_ioctl(struct comedi_device *dev,
async->cmd.chanlist_len,
async->cmd.chanlist);
if (ret < 0) {
- DPRINTK("bad chanlist\n");
+ dev_dbg(dev->class_dev, "bad chanlist\n");
goto cleanup;
}
ret = s->do_cmdtest(dev, s, &async->cmd);
if (async->cmd.flags & TRIG_BOGUS || ret) {
- DPRINTK("test returned %d\n", ret);
+ dev_dbg(dev->class_dev, "test returned %d\n", ret);
cmd = async->cmd;
/* restore chanlist pointer before copying back */
cmd.chanlist = (unsigned int __force *)user_chanlist;
cmd.data = NULL;
if (copy_to_user(arg, &cmd, sizeof(cmd))) {
- DPRINTK("fault writing cmd\n");
+ dev_dbg(dev->class_dev, "fault writing cmd\n");
ret = -EFAULT;
goto cleanup;
}
@@ -1457,7 +1507,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
if (!async->prealloc_bufsz) {
ret = -ENOMEM;
- DPRINTK("no buffer (?)\n");
+ dev_dbg(dev->class_dev, "no buffer (?)\n");
goto cleanup;
}
@@ -1469,8 +1519,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
if (async->cmd.flags & TRIG_WAKE_EOS)
async->cb_mask |= COMEDI_CB_EOS;
- comedi_set_subdevice_runflags(s, SRF_USER | SRF_ERROR | SRF_RUNNING,
- SRF_USER | SRF_RUNNING);
+ comedi_set_subdevice_runflags(s, SRF_ERROR | SRF_RUNNING, SRF_RUNNING);
/* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
* comedi_read() or comedi_write() */
@@ -1510,32 +1559,32 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
unsigned int __user *user_chanlist;
if (copy_from_user(&cmd, arg, sizeof(cmd))) {
- DPRINTK("bad cmd address\n");
+ dev_dbg(dev->class_dev, "bad cmd address\n");
return -EFAULT;
}
/* save user's chanlist pointer so it can be restored later */
user_chanlist = (unsigned int __user *)cmd.chanlist;
if (cmd.subdev >= dev->n_subdevices) {
- DPRINTK("%d no such subdevice\n", cmd.subdev);
+ dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd.subdev);
return -ENODEV;
}
s = &dev->subdevices[cmd.subdev];
if (s->type == COMEDI_SUBD_UNUSED) {
- DPRINTK("%d not valid subdevice\n", cmd.subdev);
+ dev_dbg(dev->class_dev, "%d not valid subdevice\n", cmd.subdev);
return -EIO;
}
if (!s->do_cmd || !s->do_cmdtest) {
- DPRINTK("subdevice %i does not support commands\n",
- cmd.subdev);
+ dev_dbg(dev->class_dev,
+ "subdevice %i does not support commands\n", cmd.subdev);
return -EIO;
}
/* make sure channel/gain list isn't too long */
if (cmd.chanlist_len > s->len_chanlist) {
- DPRINTK("channel/gain list too long %d > %d\n",
+ dev_dbg(dev->class_dev, "channel/gain list too long %d > %d\n",
cmd.chanlist_len, s->len_chanlist);
ret = -EINVAL;
goto cleanup;
@@ -1547,14 +1596,16 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
cmd.chanlist_len * sizeof(int));
if (IS_ERR(chanlist)) {
ret = PTR_ERR(chanlist);
- DPRINTK("memdup_user exited with code %d", ret);
+ chanlist = NULL;
+ dev_dbg(dev->class_dev,
+ "memdup_user exited with code %d", ret);
goto cleanup;
}
/* make sure each element in channel/gain list is valid */
ret = comedi_check_chanlist(s, cmd.chanlist_len, chanlist);
if (ret < 0) {
- DPRINTK("bad chanlist\n");
+ dev_dbg(dev->class_dev, "bad chanlist\n");
goto cleanup;
}
@@ -1567,7 +1618,7 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
cmd.chanlist = (unsigned int __force *)user_chanlist;
if (copy_to_user(arg, &cmd, sizeof(cmd))) {
- DPRINTK("bad cmd address\n");
+ dev_dbg(dev->class_dev, "bad cmd address\n");
ret = -EFAULT;
goto cleanup;
}
@@ -1700,8 +1751,6 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
return -EBUSY;
ret = do_cancel(dev, s);
- if (comedi_get_subdevice_runflags(s) & SRF_USER)
- wake_up_interruptible(&s->async->wait_head);
return ret;
}
@@ -1748,12 +1797,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = comedi_dev_from_minor(minor);
+ struct comedi_device *dev = file->private_data;
int rc;
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->mutex);
/* Device config is special, because it must work on
@@ -1782,7 +1828,7 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
}
if (!dev->attached) {
- DPRINTK("no driver configured on /dev/comedi%i\n", dev->minor);
+ dev_dbg(dev->class_dev, "no driver attached\n");
rc = -ENODEV;
goto done;
}
@@ -1852,28 +1898,18 @@ done:
static void comedi_vm_open(struct vm_area_struct *area)
{
- struct comedi_async *async;
- struct comedi_device *dev;
-
- async = area->vm_private_data;
- dev = async->subdevice->device;
+ struct comedi_buf_map *bm;
- mutex_lock(&dev->mutex);
- async->mmap_count++;
- mutex_unlock(&dev->mutex);
+ bm = area->vm_private_data;
+ comedi_buf_map_get(bm);
}
static void comedi_vm_close(struct vm_area_struct *area)
{
- struct comedi_async *async;
- struct comedi_device *dev;
-
- async = area->vm_private_data;
- dev = async->subdevice->device;
+ struct comedi_buf_map *bm;
- mutex_lock(&dev->mutex);
- async->mmap_count--;
- mutex_unlock(&dev->mutex);
+ bm = area->vm_private_data;
+ comedi_buf_map_put(bm);
}
static struct vm_operations_struct comedi_vm_ops = {
@@ -1884,22 +1920,20 @@ static struct vm_operations_struct comedi_vm_ops = {
static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
{
const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = comedi_dev_from_minor(minor);
+ struct comedi_device *dev = file->private_data;
struct comedi_subdevice *s;
struct comedi_async *async;
+ struct comedi_buf_map *bm;
unsigned long start = vma->vm_start;
unsigned long size;
int n_pages;
int i;
int retval;
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->mutex);
if (!dev->attached) {
- DPRINTK("no driver configured on comedi%i\n", dev->minor);
+ dev_dbg(dev->class_dev, "no driver attached\n");
retval = -ENODEV;
goto done;
}
@@ -1920,7 +1954,7 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
}
if (vma->vm_pgoff != 0) {
- DPRINTK("comedi: mmap() offset must be 0.\n");
+ dev_dbg(dev->class_dev, "mmap() offset must be 0.\n");
retval = -EINVAL;
goto done;
}
@@ -1936,8 +1970,13 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
}
n_pages = size >> PAGE_SHIFT;
+ bm = async->buf_map;
+ if (!bm || n_pages > bm->n_pages) {
+ retval = -EINVAL;
+ goto done;
+ }
for (i = 0; i < n_pages; ++i) {
- struct comedi_buf_page *buf = &async->buf_page_list[i];
+ struct comedi_buf_page *buf = &bm->page_list[i];
if (remap_pfn_range(vma, start,
page_to_pfn(virt_to_page(buf->virt_addr)),
@@ -1949,9 +1988,9 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
}
vma->vm_ops = &comedi_vm_ops;
- vma->vm_private_data = async;
+ vma->vm_private_data = bm;
- async->mmap_count++;
+ vma->vm_ops->open(vma);
retval = 0;
done:
@@ -1963,16 +2002,13 @@ static unsigned int comedi_poll(struct file *file, poll_table *wait)
{
unsigned int mask = 0;
const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = comedi_dev_from_minor(minor);
+ struct comedi_device *dev = file->private_data;
struct comedi_subdevice *s;
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->mutex);
if (!dev->attached) {
- DPRINTK("no driver configured on comedi%i\n", dev->minor);
+ dev_dbg(dev->class_dev, "no driver attached\n");
goto done;
}
@@ -2008,39 +2044,75 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = comedi_dev_from_minor(minor);
+ struct comedi_device *dev = file->private_data;
+ bool on_wait_queue = false;
+ bool attach_locked;
+ unsigned int old_detach_count;
- if (!dev)
- return -ENODEV;
+ /* Protect against device detachment during operation. */
+ down_read(&dev->attach_lock);
+ attach_locked = true;
+ old_detach_count = dev->detach_count;
if (!dev->attached) {
- DPRINTK("no driver configured on comedi%i\n", dev->minor);
- return -ENODEV;
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ retval = -ENODEV;
+ goto out;
}
s = comedi_write_subdevice(dev, minor);
- if (!s || !s->async)
- return -EIO;
+ if (!s || !s->async) {
+ retval = -EIO;
+ goto out;
+ }
async = s->async;
if (!s->busy || !nbytes)
- return 0;
- if (s->busy != file)
- return -EACCES;
+ goto out;
+ if (s->busy != file) {
+ retval = -EACCES;
+ goto out;
+ }
add_wait_queue(&async->wait_head, &wait);
+ on_wait_queue = true;
while (nbytes > 0 && !retval) {
set_current_state(TASK_INTERRUPTIBLE);
if (!comedi_is_subdevice_running(s)) {
if (count == 0) {
- mutex_lock(&dev->mutex);
+ struct comedi_subdevice *new_s;
+
if (comedi_is_subdevice_in_error(s))
retval = -EPIPE;
else
retval = 0;
- do_become_nonbusy(dev, s);
+ /*
+ * To avoid deadlock, cannot acquire dev->mutex
+ * while dev->attach_lock is held. Need to
+ * remove task from the async wait queue before
+ * releasing dev->attach_lock, as it might not
+ * be valid afterwards.
+ */
+ remove_wait_queue(&async->wait_head, &wait);
+ on_wait_queue = false;
+ up_read(&dev->attach_lock);
+ attach_locked = false;
+ mutex_lock(&dev->mutex);
+ /*
+ * Become non-busy unless things have changed
+ * behind our back. Checking that dev->detach_count
+ * is unchanged ought to be sufficient (unless
+ * there have been 2**32 detaches in the
+ * meantime!), but check the subdevice pointer
+ * as well just in case.
+ */
+ new_s = comedi_write_subdevice(dev, minor);
+ if (dev->attached &&
+ old_detach_count == dev->detach_count &&
+ s == new_s && new_s->async == async)
+ do_become_nonbusy(dev, s);
mutex_unlock(&dev->mutex);
}
break;
@@ -2090,8 +2162,12 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
buf += n;
break; /* makes device work like a pipe */
}
+out:
+ if (on_wait_queue)
+ remove_wait_queue(&async->wait_head, &wait);
set_current_state(TASK_RUNNING);
- remove_wait_queue(&async->wait_head, &wait);
+ if (attach_locked)
+ up_read(&dev->attach_lock);
return count ? count : retval;
}
@@ -2104,25 +2180,35 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = comedi_dev_from_minor(minor);
+ struct comedi_device *dev = file->private_data;
+ unsigned int old_detach_count;
+ bool become_nonbusy = false;
+ bool attach_locked;
- if (!dev)
- return -ENODEV;
+ /* Protect against device detachment during operation. */
+ down_read(&dev->attach_lock);
+ attach_locked = true;
+ old_detach_count = dev->detach_count;
if (!dev->attached) {
- DPRINTK("no driver configured on comedi%i\n", dev->minor);
- return -ENODEV;
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ retval = -ENODEV;
+ goto out;
}
s = comedi_read_subdevice(dev, minor);
- if (!s || !s->async)
- return -EIO;
+ if (!s || !s->async) {
+ retval = -EIO;
+ goto out;
+ }
async = s->async;
if (!s->busy || !nbytes)
- return 0;
- if (s->busy != file)
- return -EACCES;
+ goto out;
+ if (s->busy != file) {
+ retval = -EACCES;
+ goto out;
+ }
add_wait_queue(&async->wait_head, &wait);
while (nbytes > 0 && !retval) {
@@ -2140,13 +2226,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
if (n == 0) {
if (!comedi_is_subdevice_running(s)) {
- mutex_lock(&dev->mutex);
- do_become_nonbusy(dev, s);
if (comedi_is_subdevice_in_error(s))
retval = -EPIPE;
else
retval = 0;
- mutex_unlock(&dev->mutex);
+ become_nonbusy = true;
break;
}
if (file->f_flags & O_NONBLOCK) {
@@ -2184,14 +2268,37 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
buf += n;
break; /* makes device work like a pipe */
}
- if (comedi_is_subdevice_idle(s)) {
+ remove_wait_queue(&async->wait_head, &wait);
+ set_current_state(TASK_RUNNING);
+ if (become_nonbusy || comedi_is_subdevice_idle(s)) {
+ struct comedi_subdevice *new_s;
+
+ /*
+ * To avoid deadlock, cannot acquire dev->mutex
+ * while dev->attach_lock is held.
+ */
+ up_read(&dev->attach_lock);
+ attach_locked = false;
mutex_lock(&dev->mutex);
- if (async->buf_read_count - async->buf_write_count == 0)
- do_become_nonbusy(dev, s);
+ /*
+ * Check that the device hasn't become detached behind our back.
+ * Checking that dev->detach_count is unchanged ought to be
+ * sufficient (unless there have been 2**32 detaches in the
+ * meantime!), but check the subdevice pointer as well just in
+ * case.
+ */
+ new_s = comedi_read_subdevice(dev, minor);
+ if (dev->attached && old_detach_count == dev->detach_count &&
+ s == new_s && new_s->async == async) {
+ if (become_nonbusy ||
+ async->buf_read_count - async->buf_write_count == 0)
+ do_become_nonbusy(dev, s);
+ }
mutex_unlock(&dev->mutex);
}
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&async->wait_head, &wait);
+out:
+ if (attach_locked)
+ up_read(&dev->attach_lock);
return count ? count : retval;
}
@@ -2199,10 +2306,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
static int comedi_open(struct inode *inode, struct file *file)
{
const unsigned minor = iminor(inode);
- struct comedi_device *dev = comedi_dev_from_minor(minor);
+ struct comedi_device *dev = comedi_dev_get_from_minor(minor);
+ int rc;
if (!dev) {
- DPRINTK("invalid minor number\n");
+ pr_debug("invalid minor number\n");
return -ENODEV;
}
@@ -2223,9 +2331,9 @@ static int comedi_open(struct inode *inode, struct file *file)
if (dev->attached)
goto ok;
if (!capable(CAP_NET_ADMIN) && dev->in_request_module) {
- DPRINTK("in request module\n");
- mutex_unlock(&dev->mutex);
- return -ENODEV;
+ dev_dbg(dev->class_dev, "in request module\n");
+ rc = -ENODEV;
+ goto out;
}
if (capable(CAP_NET_ADMIN) && dev->in_request_module)
goto ok;
@@ -2241,59 +2349,49 @@ static int comedi_open(struct inode *inode, struct file *file)
dev->in_request_module = false;
if (!dev->attached && !capable(CAP_NET_ADMIN)) {
- DPRINTK("not attached and not CAP_NET_ADMIN\n");
- mutex_unlock(&dev->mutex);
- return -ENODEV;
+ dev_dbg(dev->class_dev, "not attached and not CAP_NET_ADMIN\n");
+ rc = -ENODEV;
+ goto out;
}
ok:
- __module_get(THIS_MODULE);
-
- if (dev->attached) {
+ if (dev->attached && dev->use_count == 0) {
if (!try_module_get(dev->driver->module)) {
- module_put(THIS_MODULE);
- mutex_unlock(&dev->mutex);
- return -ENOSYS;
+ rc = -ENOSYS;
+ goto out;
}
- }
-
- if (dev->attached && dev->use_count == 0 && dev->open) {
- int rc = dev->open(dev);
- if (rc < 0) {
- module_put(dev->driver->module);
- module_put(THIS_MODULE);
- mutex_unlock(&dev->mutex);
- return rc;
+ if (dev->open) {
+ rc = dev->open(dev);
+ if (rc < 0) {
+ module_put(dev->driver->module);
+ goto out;
+ }
}
}
dev->use_count++;
+ file->private_data = dev;
+ rc = 0;
+out:
mutex_unlock(&dev->mutex);
-
- return 0;
+ if (rc)
+ comedi_dev_put(dev);
+ return rc;
}
static int comedi_fasync(int fd, struct file *file, int on)
{
- const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = comedi_dev_from_minor(minor);
-
- if (!dev)
- return -ENODEV;
+ struct comedi_device *dev = file->private_data;
return fasync_helper(fd, file, on, &dev->async_queue);
}
static int comedi_close(struct inode *inode, struct file *file)
{
- const unsigned minor = iminor(inode);
- struct comedi_device *dev = comedi_dev_from_minor(minor);
+ struct comedi_device *dev = file->private_data;
struct comedi_subdevice *s = NULL;
int i;
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->mutex);
if (dev->subdevices) {
@@ -2306,16 +2404,16 @@ static int comedi_close(struct inode *inode, struct file *file)
s->lock = NULL;
}
}
- if (dev->attached && dev->use_count == 1 && dev->close)
- dev->close(dev);
-
- module_put(THIS_MODULE);
- if (dev->attached)
+ if (dev->attached && dev->use_count == 1) {
+ if (dev->close)
+ dev->close(dev);
module_put(dev->driver->module);
+ }
dev->use_count--;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return 0;
}
@@ -2346,8 +2444,6 @@ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
unsigned runflags = 0;
unsigned runflags_mask = 0;
- /* DPRINTK("comedi_event 0x%x\n",mask); */
-
if (!comedi_is_subdevice_running(s))
return;
@@ -2368,16 +2464,11 @@ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
}
if (async->cb_mask & s->async->events) {
- if (comedi_get_subdevice_runflags(s) & SRF_USER) {
- wake_up_interruptible(&async->wait_head);
- if (s->subdev_flags & SDF_CMD_READ)
- kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
- if (s->subdev_flags & SDF_CMD_WRITE)
- kill_fasync(&dev->async_queue, SIGIO, POLL_OUT);
- } else {
- if (async->cb_func)
- async->cb_func(s->async->events, async->cb_arg);
- }
+ wake_up_interruptible(&async->wait_head);
+ if (s->subdev_flags & SDF_CMD_READ)
+ kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
+ if (s->subdev_flags & SDF_CMD_WRITE)
+ kill_fasync(&dev->async_queue, SIGIO, POLL_OUT);
}
s->async->events = 0;
}
@@ -2408,7 +2499,7 @@ struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device)
if (i == COMEDI_NUM_BOARD_MINORS) {
mutex_unlock(&dev->mutex);
comedi_device_cleanup(dev);
- kfree(dev);
+ comedi_dev_put(dev);
pr_err("comedi: error: ran out of minor numbers for board device files.\n");
return ERR_PTR(-EBUSY);
}
@@ -2416,7 +2507,7 @@ struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device)
csdev = device_create(comedi_class, hardware_device,
MKDEV(COMEDI_MAJOR, i), NULL, "comedi%i", i);
if (!IS_ERR(csdev))
- dev->class_dev = csdev;
+ dev->class_dev = get_device(csdev);
/* Note: dev->mutex needs to be unlocked by the caller. */
return dev;
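The read()/write() paths above all follow the same locking rule: hold dev->attach_lock shared while the async buffers are in use, never take dev->mutex while attach_lock is held, and use detach_count to notice a detach that happened in the window between the two. A condensed sketch of that shape (illustrative only, not a drop-in function from the patch):

	static int demo_check_still_attached(struct comedi_device *dev)
	{
		unsigned int old_detach_count;
		int ret;

		down_read(&dev->attach_lock);
		old_detach_count = dev->detach_count;
		ret = dev->attached ? 0 : -ENODEV;
		/*
		 * dev->mutex must not be taken while attach_lock is held, so
		 * drop the semaphore first and re-validate under the mutex.
		 */
		up_read(&dev->attach_lock);
		if (ret)
			return ret;

		mutex_lock(&dev->mutex);
		if (!dev->attached || dev->detach_count != old_detach_count)
			ret = -ENODEV;	/* device was detached in the window */
		mutex_unlock(&dev->mutex);
		return ret;
	}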
diff --git a/drivers/staging/comedi/comedi_internal.h b/drivers/staging/comedi/comedi_internal.h
index fda1a7b..9a74657 100644
--- a/drivers/staging/comedi/comedi_internal.h
+++ b/drivers/staging/comedi/comedi_internal.h
@@ -16,7 +16,11 @@ void comedi_free_subdevice_minor(struct comedi_subdevice *s);
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long new_size);
void comedi_buf_reset(struct comedi_async *async);
+bool comedi_buf_is_mmapped(struct comedi_async *async);
+void comedi_buf_map_get(struct comedi_buf_map *bm);
+int comedi_buf_map_put(struct comedi_buf_map *bm);
unsigned int comedi_buf_write_n_allocated(struct comedi_async *async);
+void comedi_device_cancel_all(struct comedi_device *dev);
extern unsigned int comedi_default_buf_size_kb;
extern unsigned int comedi_default_buf_maxsize_kb;
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 143be80..f82bd42 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -20,14 +20,13 @@
#define _COMEDIDEV_H
#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
+#include <linux/rwsem.h>
+#include <linux/kref.h>
#include "comedi.h"
-#define DPRINTK(format, args...) do { \
- if (comedi_debug) \
- pr_debug("comedi: " format, ## args); \
-} while (0)
-
#define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
#define COMEDI_VERSION_CODE COMEDI_VERSION(COMEDI_MAJORVERSION, \
COMEDI_MINORVERSION, COMEDI_MICROVERSION)
@@ -100,18 +99,22 @@ struct comedi_buf_page {
dma_addr_t dma_addr;
};
+struct comedi_buf_map {
+ struct device *dma_hw_dev;
+ struct comedi_buf_page *page_list;
+ unsigned int n_pages;
+ enum dma_data_direction dma_dir;
+ struct kref refcount;
+};
+
struct comedi_async {
struct comedi_subdevice *subdevice;
void *prealloc_buf; /* pre-allocated buffer */
unsigned int prealloc_bufsz; /* buffer size, in bytes */
- /* virtual and dma address of each page */
- struct comedi_buf_page *buf_page_list;
- unsigned n_buf_pages; /* num elements in buf_page_list */
+ struct comedi_buf_map *buf_map; /* map of buffer pages */
unsigned int max_bufsize; /* maximum buffer size, bytes */
- /* current number of mmaps of prealloc_buf */
- unsigned int mmap_count;
/* byte count for writer (write completed) */
unsigned int buf_write_count;
@@ -141,10 +144,7 @@ struct comedi_async {
wait_queue_head_t wait_head;
- /* callback stuff */
unsigned int cb_mask;
- int (*cb_func) (unsigned int flags, void *);
- void *cb_arg;
int (*inttrig) (struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int x);
@@ -173,6 +173,7 @@ struct comedi_device {
struct device *class_dev;
int minor;
+ unsigned int detach_count;
/* hw_dev is passed to dma_alloc_coherent when allocating async buffers
* for subdevices that have async_dma_dir set to something other than
* DMA_NONE */
@@ -185,6 +186,8 @@ struct comedi_device {
bool ioenabled:1;
spinlock_t spinlock;
struct mutex mutex;
+ struct rw_semaphore attach_lock;
+ struct kref refcount;
int n_subdevices;
struct comedi_subdevice *subdevices;
@@ -208,12 +211,6 @@ static inline const void *comedi_board(const struct comedi_device *dev)
return dev->board_ptr;
}
-#ifdef CONFIG_COMEDI_DEBUG
-extern int comedi_debug;
-#else
-static const int comedi_debug;
-#endif
-
/*
* function prototypes
*/
@@ -231,7 +228,8 @@ enum comedi_minor_bits {
static const unsigned COMEDI_SUBDEVICE_MINOR_SHIFT = 4;
static const unsigned COMEDI_SUBDEVICE_MINOR_OFFSET = 1;
-struct comedi_device *comedi_dev_from_minor(unsigned minor);
+struct comedi_device *comedi_dev_get_from_minor(unsigned minor);
+int comedi_dev_put(struct comedi_device *dev);
void init_polling(void);
void cleanup_polling(void);
@@ -240,7 +238,6 @@ void stop_polling(struct comedi_device *);
/* subdevice runflags */
enum subdevice_runflags {
- SRF_USER = 0x00000001,
SRF_RT = 0x00000002,
/* indicates an COMEDI_CB_ERROR event has occurred since the last
* command was started */
@@ -410,6 +407,7 @@ void comedi_driver_unregister(struct comedi_driver *);
#define PCI_VENDOR_ID_IOTECH 0x1616
#define PCI_VENDOR_ID_CONTEC 0x1221
#define PCI_VENDOR_ID_RTD 0x1435
+#define PCI_VENDOR_ID_HUMUSOFT 0x186c
struct pci_dev;
struct pci_driver;
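With cb_func/cb_arg gone from struct comedi_async, drivers no longer distinguish "user" from "kernel" consumers; an interrupt handler simply flags events and calls comedi_event(), which now always wakes readers/writers and raises SIGIO as appropriate. A rough sketch of a handler under this model (illustrative; the demo_* name, the event mask, and the sample-transfer step are placeholders):

	#include <linux/interrupt.h>
	#include "../comedidev.h"

	static irqreturn_t demo_interrupt(int irq, void *d)
	{
		struct comedi_device *dev = d;
		struct comedi_subdevice *s = dev->read_subdev;

		/* ...transfer the acquired samples into s->async here... */

		s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
		comedi_event(dev, s);

		return IRQ_HANDLED;
	}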
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 4964d2a..2460803 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -95,7 +95,7 @@ int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices)
}
EXPORT_SYMBOL_GPL(comedi_alloc_subdevices);
-static void cleanup_device(struct comedi_device *dev)
+static void comedi_device_detach_cleanup(struct comedi_device *dev)
{
int i;
struct comedi_subdevice *s;
@@ -133,10 +133,14 @@ static void cleanup_device(struct comedi_device *dev)
void comedi_device_detach(struct comedi_device *dev)
{
+ comedi_device_cancel_all(dev);
+ down_write(&dev->attach_lock);
dev->attached = false;
+ dev->detach_count++;
if (dev->driver)
dev->driver->detach(dev);
- cleanup_device(dev);
+ comedi_device_detach_cleanup(dev);
+ up_write(&dev->attach_lock);
}
static int poll_invalid(struct comedi_device *dev, struct comedi_subdevice *s)
@@ -355,8 +359,9 @@ static int comedi_device_postconfig(struct comedi_device *dev)
ret = __comedi_device_postconfig(dev);
if (ret < 0)
return ret;
- smp_wmb();
+ down_write(&dev->attach_lock);
dev->attached = true;
+ up_write(&dev->attach_lock);
return 0;
}
@@ -598,8 +603,12 @@ int comedi_auto_config(struct device *hardware_device,
}
dev = comedi_alloc_board_minor(hardware_device);
- if (IS_ERR(dev))
+ if (IS_ERR(dev)) {
+ dev_warn(hardware_device,
+ "driver '%s' could not create device.\n",
+ driver->driver_name);
return PTR_ERR(dev);
+ }
/* Note: comedi_alloc_board_minor() locked dev->mutex. */
dev->driver = driver;
@@ -611,8 +620,20 @@ int comedi_auto_config(struct device *hardware_device,
comedi_device_detach(dev);
mutex_unlock(&dev->mutex);
- if (ret < 0)
+ if (ret < 0) {
+ dev_warn(hardware_device,
+ "driver '%s' failed to auto-configure device.\n",
+ driver->driver_name);
comedi_release_hardware_device(hardware_device);
+ } else {
+ /*
+ * class_dev should be set properly here
+ * after a successful auto config
+ */
+ dev_info(dev->class_dev,
+ "driver '%s' has successfully auto-configured '%s'.\n",
+ driver->driver_name, dev->board_name);
+ }
return ret;
}
EXPORT_SYMBOL_GPL(comedi_auto_config);
@@ -657,7 +678,7 @@ void comedi_driver_unregister(struct comedi_driver *driver)
/* check for devices using this driver */
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
- struct comedi_device *dev = comedi_dev_from_minor(i);
+ struct comedi_device *dev = comedi_dev_get_from_minor(i);
if (!dev)
continue;
@@ -671,6 +692,7 @@ void comedi_driver_unregister(struct comedi_driver *driver)
comedi_device_detach(dev);
}
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
}
}
EXPORT_SYMBOL_GPL(comedi_driver_unregister);
diff --git a/drivers/staging/comedi/drivers/8255.c b/drivers/staging/comedi/drivers/8255.c
index b4009e8..48817f0 100644
--- a/drivers/staging/comedi/drivers/8255.c
+++ b/drivers/staging/comedi/drivers/8255.c
@@ -94,7 +94,7 @@ I/O port base address can be found in the output of 'lspci -v'.
struct subdev_8255_private {
unsigned long iobase;
- int (*io) (int, int, int, unsigned long);
+ int (*io)(int, int, int, unsigned long);
};
static int subdev_8255_io(int dir, int port, int data, unsigned long iobase)
@@ -262,7 +262,7 @@ static int subdev_8255_cancel(struct comedi_device *dev,
}
int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io) (int, int, int, unsigned long),
+ int (*io)(int, int, int, unsigned long),
unsigned long iobase)
{
struct subdev_8255_private *spriv;
@@ -289,7 +289,7 @@ int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
EXPORT_SYMBOL_GPL(subdev_8255_init);
int subdev_8255_init_irq(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io) (int, int, int, unsigned long),
+ int (*io)(int, int, int, unsigned long),
unsigned long iobase)
{
int ret;
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index c55f234..8a57c3c 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -263,7 +263,7 @@ static int pci_8255_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &pci_8255_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
+static const struct pci_device_id pci_8255_pci_table[] = {
{ PCI_VDEVICE(ADLINK, 0x7224), BOARD_ADLINK_PCI7224 },
{ PCI_VDEVICE(ADLINK, 0x7248), BOARD_ADLINK_PCI7248 },
{ PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
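The DEFINE_PCI_DEVICE_TABLE() conversions repeated through the rest of this patch all reduce to the same shape: a plain const array of struct pci_device_id terminated by a zero entry. A minimal sketch (illustrative; the device ID below is a placeholder, not one from the patch):

	static const struct pci_device_id demo_pci_table[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, 0x1234) },	/* placeholder ID */
		{ 0 }
	};
	MODULE_DEVICE_TABLE(pci, demo_pci_table);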
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index 94cbd26..2706f58 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -1,5 +1,6 @@
# Makefile for individual comedi drivers
#
+ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
# Comedi "helper" modules
@@ -110,6 +111,7 @@ obj-$(CONFIG_COMEDI_NI_PCIMIO) += ni_pcimio.o
obj-$(CONFIG_COMEDI_RTD520) += rtd520.o
obj-$(CONFIG_COMEDI_S626) += s626.o
obj-$(CONFIG_COMEDI_SSV_DNP) += ssv_dnp.o
+obj-$(CONFIG_COMEDI_MF6X4) += mf6x4.o
# Comedi PCMCIA drivers
obj-$(CONFIG_COMEDI_CB_DAS16_CS) += cb_das16_cs.o
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
index 3c9eec8..bd05857 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
@@ -1414,7 +1414,7 @@ static void v_APCI3120_InterruptDma(int irq, void *d)
{
struct comedi_device *dev = d;
struct addi_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned int next_dma_buf, samplesinbuf;
unsigned long low_word, high_word, var;
unsigned int ui_Tmp;
@@ -1568,8 +1568,8 @@ static void v_APCI3120_InterruptDma(int irq, void *d)
static int i_APCI3120_InterruptHandleEos(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
int n_chan, i;
- struct comedi_subdevice *s = &dev->subdevices[0];
int err = 1;
n_chan = devpriv->ui_AiNbrofChannels;
@@ -1593,11 +1593,11 @@ static void v_APCI3120_Interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct addi_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned short int_daq;
unsigned int int_amcc, ui_Check, i;
unsigned short us_TmpValue;
unsigned char b_DummyRead;
- struct comedi_subdevice *s = &dev->subdevices[0];
ui_Check = 1;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
index dc73d4d..8c85a09 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
@@ -2590,8 +2590,8 @@ static int i_APCI3200_CommandAnalogInput(struct comedi_device *dev,
static int i_APCI3200_InterruptHandleEos(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned int ui_StatusRegister = 0;
- struct comedi_subdevice *s = &dev->subdevices[0];
/* BEGIN JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
/* comedi_async *async = s->async; */
diff --git a/drivers/staging/comedi/drivers/addi_apci_035.c b/drivers/staging/comedi/drivers/addi_apci_035.c
index 8d229b2..ccd4921 100644
--- a/drivers/staging/comedi/drivers/addi_apci_035.c
+++ b/drivers/staging/comedi/drivers/addi_apci_035.c
@@ -58,7 +58,7 @@ static int apci035_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci035_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci035_pci_table) = {
+static const struct pci_device_id apci035_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x0300) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
index 34ab067..0daa0ea 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
@@ -325,8 +325,8 @@ static int apci1032_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[1];
if (dev->irq) {
dev->read_subdev = s;
- s->type = COMEDI_SUBD_DI | SDF_CMD_READ;
- s->subdev_flags = SDF_READABLE;
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
s->n_chan = 1;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -364,7 +364,7 @@ static int apci1032_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci1032_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci1032_pci_table) = {
+static const struct pci_device_id apci1032_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1003) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index ae9ded6..74f7ace 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -57,7 +57,7 @@ static int apci1500_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci1500_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci1500_pci_table) = {
+static const struct pci_device_id apci1500_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMCC, 0x80fc) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_1516.c b/drivers/staging/comedi/drivers/addi_apci_1516.c
index 9d1b142..e9c5291 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1516.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1516.c
@@ -206,7 +206,7 @@ static int apci1516_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci1516_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci1516_pci_table) = {
+static const struct pci_device_id apci1516_pci_table[] = {
{ PCI_VDEVICE(ADDIDATA, 0x1000), BOARD_APCI1016 },
{ PCI_VDEVICE(ADDIDATA, 0x1001), BOARD_APCI1516 },
{ PCI_VDEVICE(ADDIDATA, 0x1002), BOARD_APCI2016 },
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index c5717d6..6248284 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -55,7 +55,7 @@ static int apci1564_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci1564_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci1564_pci_table) = {
+static const struct pci_device_id apci1564_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1006) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_16xx.c b/drivers/staging/comedi/drivers/addi_apci_16xx.c
index 5ee204bc..28df4b5 100644
--- a/drivers/staging/comedi/drivers/addi_apci_16xx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_16xx.c
@@ -168,7 +168,7 @@ static int apci16xx_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci16xx_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci16xx_pci_table) = {
+static const struct pci_device_id apci16xx_pci_table[] = {
{ PCI_VDEVICE(ADDIDATA, 0x1009), BOARD_APCI1648 },
{ PCI_VDEVICE(ADDIDATA, 0x100a), BOARD_APCI1696 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/addi_apci_2032.c b/drivers/staging/comedi/drivers/addi_apci_2032.c
index c77ee87..c9b933c 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2032.c
@@ -359,7 +359,7 @@ static int apci2032_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci2032_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci2032_pci_table) = {
+static const struct pci_device_id apci2032_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1004) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_2200.c b/drivers/staging/comedi/drivers/addi_apci_2200.c
index 7fb32e7..e1a9165 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2200.c
@@ -134,7 +134,7 @@ static int apci2200_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci2200_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci2200_pci_table) = {
+static const struct pci_device_id apci2200_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1005) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_3120.c b/drivers/staging/comedi/drivers/addi_apci_3120.c
index 67d09e8..1e6fa56 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3120.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3120.c
@@ -230,7 +230,7 @@ static int apci3120_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci3120_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci3120_pci_table) = {
+static const struct pci_device_id apci3120_pci_table[] = {
{ PCI_VDEVICE(AMCC, 0x818d), BOARD_APCI3120 },
{ PCI_VDEVICE(AMCC, 0x828d), BOARD_APCI3001 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/addi_apci_3200.c b/drivers/staging/comedi/drivers/addi_apci_3200.c
index 1213d5a..9868a53 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3200.c
@@ -109,7 +109,7 @@ static int apci3200_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci3200_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci3200_pci_table) = {
+static const struct pci_device_id apci3200_pci_table[] = {
{ PCI_VDEVICE(ADDIDATA, 0x3000), BOARD_APCI3200 },
{ PCI_VDEVICE(ADDIDATA, 0x3007), BOARD_APCI3300 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index 6138440..4cb69ec 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -428,7 +428,7 @@ static int apci3501_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci3501_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci3501_pci_table) = {
+static const struct pci_device_id apci3501_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3001) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index 761cbf8..ceadf8e 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -915,7 +915,7 @@ static int apci3xxx_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci3xxx_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci3xxx_pci_table) = {
+static const struct pci_device_id apci3xxx_pci_table[] = {
{ PCI_VDEVICE(ADDIDATA, 0x3010), BOARD_APCI3000_16 },
{ PCI_VDEVICE(ADDIDATA, 0x300f), BOARD_APCI3000_8 },
{ PCI_VDEVICE(ADDIDATA, 0x300e), BOARD_APCI3000_4 },
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
index dd092c795..5c1413a 100644
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ b/drivers/staging/comedi/drivers/adl_pci6208.c
@@ -242,7 +242,7 @@ static int adl_pci6208_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(adl_pci6208_pci_table) = {
+static const struct pci_device_id adl_pci6208_pci_table[] = {
{ PCI_VDEVICE(ADLINK, 0x6208), BOARD_PCI6208 },
{ PCI_VDEVICE(ADLINK, 0x6216), BOARD_PCI6216 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
index 5617f5c..6f622b4 100644
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
@@ -259,7 +259,7 @@ static int adl_pci7x3x_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(adl_pci7x3x_pci_table) = {
+static const struct pci_device_id adl_pci7x3x_pci_table[] = {
{ PCI_VDEVICE(ADLINK, 0x7230), BOARD_PCI7230 },
{ PCI_VDEVICE(ADLINK, 0x7233), BOARD_PCI7233 },
{ PCI_VDEVICE(ADLINK, 0x7234), BOARD_PCI7234 },
diff --git a/drivers/staging/comedi/drivers/adl_pci8164.c b/drivers/staging/comedi/drivers/adl_pci8164.c
index b3d0092..300df55 100644
--- a/drivers/staging/comedi/drivers/adl_pci8164.c
+++ b/drivers/staging/comedi/drivers/adl_pci8164.c
@@ -145,7 +145,7 @@ static int adl_pci8164_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(adl_pci8164_pci_table) = {
+static const struct pci_device_id adl_pci8164_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, 0x8164) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index eab8da2..363f2e4 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -125,8 +125,7 @@ TODO:
PLX9052_INTCSR_LI2STAT)
static const struct comedi_lrange pci9111_ai_range = {
- 5,
- {
+ 5, {
BIP_RANGE(10),
BIP_RANGE(5),
BIP_RANGE(2.5),
@@ -470,11 +469,6 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev,
struct pci9111_private_data *dev_private = dev->private;
struct comedi_cmd *async_cmd = &s->async->cmd;
- if (!dev->irq) {
- comedi_error(dev,
- "no irq assigned for PCI9111, cannot do hardware conversion");
- return -1;
- }
/* Set channel scan limit */
/* PCI9111 allows only scanning from channel 0 to channel n */
/* TODO: handle the case of an external multiplexer */
@@ -858,12 +852,11 @@ static int pci9111_auto_attach(struct comedi_device *dev,
pci9111_reset(dev);
- if (pcidev->irq > 0) {
- ret = request_irq(dev->irq, pci9111_interrupt,
+ if (pcidev->irq) {
+ ret = request_irq(pcidev->irq, pci9111_interrupt,
IRQF_SHARED, dev->board_name, dev);
- if (ret)
- return ret;
- dev->irq = pcidev->irq;
+ if (ret == 0)
+ dev->irq = pcidev->irq;
}
ret = comedi_alloc_subdevices(dev, 4);
@@ -871,18 +864,21 @@ static int pci9111_auto_attach(struct comedi_device *dev,
return ret;
s = &dev->subdevices[0];
- dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_COMMON;
s->n_chan = 16;
s->maxdata = 0xffff;
- s->len_chanlist = 16;
s->range_table = &pci9111_ai_range;
- s->cancel = pci9111_ai_cancel;
s->insn_read = pci9111_ai_insn_read;
- s->do_cmdtest = pci9111_ai_do_cmd_test;
- s->do_cmd = pci9111_ai_do_cmd;
- s->munge = pci9111_ai_munge;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmdtest = pci9111_ai_do_cmd_test;
+ s->do_cmd = pci9111_ai_do_cmd;
+ s->cancel = pci9111_ai_cancel;
+ s->munge = pci9111_ai_munge;
+ }
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
@@ -938,7 +934,7 @@ static int pci9111_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(pci9111_pci_table) = {
+static const struct pci_device_id pci9111_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HR_DEVICE_ID) },
/* { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HG_DEVICE_ID) }, */
{ 0 }
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index 9864896..4bdd972 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -194,28 +194,30 @@ Configuration options:
#define EXTTRG_AI 0 /* ext trg is used by AI */
-static const struct comedi_lrange range_pci9118dg_hr = { 8, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
+static const struct comedi_lrange range_pci9118dg_hr = {
+ 8, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_pci9118hg = { 8, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01)
- }
+static const struct comedi_lrange range_pci9118hg = {
+ 8, {
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.005),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01)
+ }
};
#define PCI9118_BIPOLAR_RANGES 4 /*
@@ -1126,7 +1128,7 @@ static irqreturn_t interrupt_pci9118(int irq, void *d)
}
}
- (devpriv->int_ai_func) (dev, &dev->subdevices[0], int_adstat,
+ (devpriv->int_ai_func) (dev, dev->read_subdev, int_adstat,
int_amcc, int_daq);
}
@@ -1965,7 +1967,6 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct comedi_subdevice *s;
int ret, pages, i;
- unsigned int irq;
u16 u16w;
dev->board_name = this_board->name;
@@ -2036,12 +2037,18 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
pci_write_config_word(pcidev, PCI_COMMAND, u16w | 64);
/* Enable parity check for parity error */
+ if (!disable_irq && pcidev->irq) {
+ ret = request_irq(pcidev->irq, interrupt_pci9118, IRQF_SHARED,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = pcidev->irq;
+ }
+
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
s = &dev->subdevices[0];
- dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF;
if (devpriv->usemux)
@@ -2050,11 +2057,17 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
s->n_chan = this_board->n_aichan;
s->maxdata = this_board->ai_maxdata;
- s->len_chanlist = this_board->n_aichanlist;
s->range_table = this_board->rangelist_ai;
- s->cancel = pci9118_ai_cancel;
s->insn_read = pci9118_insn_read_ai;
- s->munge = pci9118_ai_munge;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = this_board->n_aichanlist;
+ s->do_cmdtest = pci9118_ai_cmdtest;
+ s->do_cmd = pci9118_ai_cmd;
+ s->cancel = pci9118_ai_cancel;
+ s->munge = pci9118_ai_munge;
+ }
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
@@ -2100,27 +2113,7 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
break;
}
- if (disable_irq)
- irq = 0;
- else
- irq = pcidev->irq;
- if (irq > 0) {
- if (request_irq(irq, interrupt_pci9118, IRQF_SHARED,
- dev->board_name, dev)) {
- dev_warn(dev->class_dev,
- "unable to allocate IRQ %u, DISABLING IT\n",
- irq);
- } else {
- dev->irq = irq;
- /* Enable AI commands */
- s = &dev->subdevices[0];
- s->subdev_flags |= SDF_CMD_READ;
- s->do_cmdtest = pci9118_ai_cmdtest;
- s->do_cmd = pci9118_ai_cmd;
- }
- }
-
- pci9118_report_attach(dev, irq);
+ pci9118_report_attach(dev, dev->irq);
return 0;
}
@@ -2217,7 +2210,7 @@ static int adl_pci9118_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(adl_pci9118_pci_table) = {
+static const struct pci_device_id adl_pci9118_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMCC, 0x80d9) },
{ 0 }
};
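
Another change applied mechanically across these drivers is the replacement of the deprecated DEFINE_PCI_DEVICE_TABLE() macro with a plain const array of struct pci_device_id. A generic sketch of the preferred form (the device ID below is a placeholder, not one of the boards in this series):

	#include <linux/module.h>
	#include <linux/pci.h>

	static const struct pci_device_id my_driver_pci_table[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, 0x1234) },	/* placeholder ID */
		{ 0 }
	};
	MODULE_DEVICE_TABLE(pci, my_driver_pci_table);
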
diff --git a/drivers/staging/comedi/drivers/adq12b.c b/drivers/staging/comedi/drivers/adq12b.c
index 8150a67..3190ef7 100644
--- a/drivers/staging/comedi/drivers/adq12b.c
+++ b/drivers/staging/comedi/drivers/adq12b.c
@@ -97,21 +97,22 @@ If you do not specify any options, they will default to
#define TIMEOUT 20
/* available ranges through the PGA gains */
-static const struct comedi_lrange range_adq12b_ai_bipolar = { 4, {
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- BIP_RANGE(0.5)
- }
+static const struct comedi_lrange range_adq12b_ai_bipolar = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5)
+ }
};
-static const struct comedi_lrange range_adq12b_ai_unipolar = { 4, {
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1),
- UNI_RANGE
- (0.5)
- }
+static const struct comedi_lrange range_adq12b_ai_unipolar = {
+ 4, {
+ UNI_RANGE(5),
+ UNI_RANGE(2),
+ UNI_RANGE(1),
+ UNI_RANGE(0.5)
+ }
};
struct adq12b_private {
@@ -162,8 +163,6 @@ static int adq12b_ai_rinsn(struct comedi_device *dev,
hi = inb(dev->iobase + ADQ12B_ADHIG);
lo = inb(dev->iobase + ADQ12B_ADLOW);
- /* printk("debug: chan=%d range=%d status=%d hi=%d lo=%d\n",
- channel, range, status, hi, lo); */
data[n] = (hi << 8) | lo;
}
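
The comedi_lrange edits in this file, and in most of the files below, are purely cosmetic: the channel count moves onto the same line as the opening brace, the entries gain one consistent level of indentation, and the trailing comma before the closing brace is dropped. For reference, the target layout looks like this (a made-up two-entry table assuming the comedi range macros, not one of the drivers' real tables):

	static const struct comedi_lrange my_driver_ai_range = {
		2, {
			BIP_RANGE(10),
			UNI_RANGE(10)
		}
	};
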
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index c3fdcab..593676c 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -115,65 +115,70 @@ Configuration options:
/* D/A synchronized control (PCI1720_SYNCONT) */
#define Syncont_SC0 1 /* set synchronous output mode */
-static const struct comedi_lrange range_pci1710_3 = { 9, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(10),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
+static const struct comedi_lrange range_pci1710_3 = {
+ 9, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ BIP_RANGE(10),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
static const char range_codes_pci1710_3[] = { 0x00, 0x01, 0x02, 0x03, 0x04,
0x10, 0x11, 0x12, 0x13 };
-static const struct comedi_lrange range_pci1710hg = { 12, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01)
- }
+static const struct comedi_lrange range_pci1710hg = {
+ 12, {
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.005),
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.01),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01)
+ }
};
static const char range_codes_pci1710hg[] = { 0x00, 0x01, 0x02, 0x03, 0x04,
0x05, 0x06, 0x07, 0x10, 0x11,
0x12, 0x13 };
-static const struct comedi_lrange range_pci17x1 = { 5, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625)
- }
+static const struct comedi_lrange range_pci17x1 = {
+ 5, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
+ }
};
static const char range_codes_pci17x1[] = { 0x00, 0x01, 0x02, 0x03, 0x04 };
-static const struct comedi_lrange range_pci1720 = { 4, {
- UNI_RANGE(5),
- UNI_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(10)
- }
+static const struct comedi_lrange range_pci1720 = {
+ 4, {
+ UNI_RANGE(5),
+ UNI_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(10)
+ }
};
-static const struct comedi_lrange range_pci171x_da = { 2, {
- UNI_RANGE(5),
- UNI_RANGE(10),
- }
+static const struct comedi_lrange range_pci171x_da = {
+ 2, {
+ UNI_RANGE(5),
+ UNI_RANGE(10)
+ }
};
enum pci1710_boardid {
@@ -731,7 +736,7 @@ static void interrupt_pci1710_every_sample(void *d)
{
struct comedi_device *dev = d;
struct pci1710_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
int m;
#ifdef PCI171x_PARANOIDCHECK
const struct boardtype *this_board = comedi_board(dev);
@@ -740,16 +745,15 @@ static void interrupt_pci1710_every_sample(void *d)
m = inw(dev->iobase + PCI171x_STATUS);
if (m & Status_FE) {
- printk("comedi%d: A/D FIFO empty (%4x)\n", dev->minor, m);
+ dev_dbg(dev->class_dev, "A/D FIFO empty (%4x)\n", m);
pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
return;
}
if (m & Status_FF) {
- printk
- ("comedi%d: A/D FIFO Full status (Fatal Error!) (%4x)\n",
- dev->minor, m);
+ dev_dbg(dev->class_dev,
+ "A/D FIFO Full status (Fatal Error!) (%4x)\n", m);
pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
@@ -825,12 +829,12 @@ static int move_block_from_fifo(struct comedi_device *dev,
sampl = inw(dev->iobase + PCI171x_AD_DATA);
if (this_board->cardtype != TYPE_PCI1713)
if ((sampl & 0xf000) != devpriv->act_chanlist[j]) {
- printk
- ("comedi%d: A/D FIFO data dropout: received data from channel %d, expected %d! (%d/%d/%d/%d/%d/%4x)\n",
- dev->minor, (sampl & 0xf000) >> 12,
- (devpriv->act_chanlist[j] & 0xf000) >> 12,
- i, j, devpriv->ai_act_scan, n, turn,
- sampl);
+ dev_dbg(dev->class_dev,
+ "A/D FIFO data dropout: received data from channel %d, expected %d! (%d/%d/%d/%d/%d/%4x)\n",
+ (sampl & 0xf000) >> 12,
+ (devpriv->act_chanlist[j] & 0xf000) >> 12,
+ i, j, devpriv->ai_act_scan, n, turn,
+ sampl);
pci171x_ai_cancel(dev, s);
s->async->events |=
COMEDI_CB_EOA | COMEDI_CB_ERROR;
@@ -860,22 +864,20 @@ static void interrupt_pci1710_half_fifo(void *d)
struct comedi_device *dev = d;
const struct boardtype *this_board = comedi_board(dev);
struct pci1710_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
int m, samplesinbuf;
m = inw(dev->iobase + PCI171x_STATUS);
if (!(m & Status_FH)) {
- printk("comedi%d: A/D FIFO not half full! (%4x)\n",
- dev->minor, m);
+ dev_dbg(dev->class_dev, "A/D FIFO not half full! (%4x)\n", m);
pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
return;
}
if (m & Status_FF) {
- printk
- ("comedi%d: A/D FIFO Full status (Fatal Error!) (%4x)\n",
- dev->minor, m);
+ dev_dbg(dev->class_dev,
+ "A/D FIFO Full status (Fatal Error!) (%4x)\n", m);
pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
@@ -1267,21 +1269,21 @@ static int pci1710_auto_attach(struct comedi_device *dev,
if (this_board->n_aichan) {
s = &dev->subdevices[subdev];
- dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND;
if (this_board->n_aichand)
s->subdev_flags |= SDF_DIFF;
s->n_chan = this_board->n_aichan;
s->maxdata = this_board->ai_maxdata;
- s->len_chanlist = this_board->n_aichan;
s->range_table = this_board->rangelist_ai;
- s->cancel = pci171x_ai_cancel;
s->insn_read = pci171x_insn_read_ai;
if (dev->irq) {
+ dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
s->do_cmdtest = pci171x_ai_cmdtest;
s->do_cmd = pci171x_ai_cmd;
+ s->cancel = pci171x_ai_cancel;
}
devpriv->i8254_osc_base = I8254_OSC_BASE_10MHZ;
subdev++;
@@ -1374,7 +1376,7 @@ static int adv_pci1710_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(adv_pci1710_pci_table) = {
+static const struct pci_device_id adv_pci1710_pci_table[] = {
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710,
PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050),
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index bd4f781..7239426 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -306,7 +306,7 @@ static int adv_pci1723_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(adv_pci1723_pci_table) = {
+static const struct pci_device_id adv_pci1723_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1723) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/adv_pci1724.c b/drivers/staging/comedi/drivers/adv_pci1724.c
index 009a303..af670ac 100644
--- a/drivers/staging/comedi/drivers/adv_pci1724.c
+++ b/drivers/staging/comedi/drivers/adv_pci1724.c
@@ -116,8 +116,8 @@ enum board_id_contents {
BOARD_ID_MASK = 0xf
};
-static const struct comedi_lrange ao_ranges_1724 = { 4,
- {
+static const struct comedi_lrange ao_ranges_1724 = {
+ 4, {
BIP_RANGE(10),
RANGE_mA(0, 20),
RANGE_mA(4, 20),
@@ -381,7 +381,7 @@ static int adv_pci1724_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(adv_pci1724_pci_table) = {
+static const struct pci_device_id adv_pci1724_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1724) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 6bac665..d4bd61d 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -1188,7 +1188,7 @@ static int adv_pci_dio_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &adv_pci_dio_driver, cardtype);
}
-static DEFINE_PCI_DEVICE_TABLE(adv_pci_dio_pci_table) = {
+static const struct pci_device_id adv_pci_dio_pci_table[] = {
{ PCI_VDEVICE(ADVANTECH, 0x1730), TYPE_PCI1730 },
{ PCI_VDEVICE(ADVANTECH, 0x1733), TYPE_PCI1733 },
{ PCI_VDEVICE(ADVANTECH, 0x1734), TYPE_PCI1734 },
diff --git a/drivers/staging/comedi/drivers/aio_aio12_8.c b/drivers/staging/comedi/drivers/aio_aio12_8.c
index abb2849..68c3fb3 100644
--- a/drivers/staging/comedi/drivers/aio_aio12_8.c
+++ b/drivers/staging/comedi/drivers/aio_aio12_8.c
@@ -181,13 +181,12 @@ static int aio_aio12_8_ao_write(struct comedi_device *dev,
}
static const struct comedi_lrange range_aio_aio12_8 = {
- 4,
- {
- UNI_RANGE(5),
- BIP_RANGE(5),
- UNI_RANGE(10),
- BIP_RANGE(10),
- }
+ 4, {
+ UNI_RANGE(5),
+ BIP_RANGE(5),
+ UNI_RANGE(10),
+ BIP_RANGE(10)
+ }
};
static int aio_aio12_8_attach(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/amcc_s5933.h b/drivers/staging/comedi/drivers/amcc_s5933.h
index b810d5f..2ba7364 100644
--- a/drivers/staging/comedi/drivers/amcc_s5933.h
+++ b/drivers/staging/comedi/drivers/amcc_s5933.h
@@ -145,12 +145,12 @@
#define AINT_READ_COMPL 0x00008000
#define AINT_WRITE_COMPL 0x00004000
-#define AINT_OMB_ENABLE 0x00001000
-#define AINT_OMB_SELECT 0x00000c00
+#define AINT_OMB_ENABLE 0x00001000
+#define AINT_OMB_SELECT 0x00000c00
#define AINT_OMB_BYTE 0x00000300
-#define AINT_IMB_ENABLE 0x00000010
-#define AINT_IMB_SELECT 0x0000000c
+#define AINT_IMB_ENABLE 0x00000010
+#define AINT_IMB_SELECT 0x0000000c
#define AINT_IMB_BYTE 0x00000003
/* these are bits from various different registers, needs cleanup XXX */
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index 2e4bf28..9c9559f 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -513,7 +513,7 @@ static int dio200_subdev_intr_cmd(struct comedi_device *dev,
int event = 0;
spin_lock_irqsave(&subpriv->spinlock, flags);
- subpriv->active = 1;
+ subpriv->active = true;
/* Set up end of acquisition. */
switch (cmd->stop_src) {
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_pci.c b/drivers/staging/comedi/drivers/amplc_dio200_pci.c
index a810a24..e036738 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_pci.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_pci.c
@@ -439,7 +439,7 @@ static struct comedi_driver dio200_pci_comedi_driver = {
.detach = dio200_pci_detach,
};
-static DEFINE_PCI_DEVICE_TABLE(dio200_pci_table) = {
+static const struct pci_device_id dio200_pci_table[] = {
{
PCI_VDEVICE(AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI215),
pci215_model
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
index 98075f9..31734e1 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236.c
@@ -356,7 +356,7 @@ static int pc236_intr_cancel(struct comedi_device *dev,
static irqreturn_t pc236_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- struct comedi_subdevice *s = &dev->subdevices[1];
+ struct comedi_subdevice *s = dev->read_subdev;
int handled;
handled = pc236_intr_check(dev);
@@ -567,7 +567,7 @@ static struct comedi_driver amplc_pc236_driver = {
};
#if DO_PCI
-static DEFINE_PCI_DEVICE_TABLE(pc236_pci_table) = {
+static const struct pci_device_id pc236_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI236) },
{0}
};
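
Several interrupt handlers in this series (amplc_pc236 above, amplc_pci224 and adv_pci1710 below) stop hard-coding a subdevice index and instead use the dev->read_subdev or dev->write_subdev pointer, which is now only set when the subdevice actually supports commands. A stripped-down sketch of the handler side of that pattern, with hypothetical names:

	static irqreturn_t my_driver_interrupt(int irq, void *d)
	{
		struct comedi_device *dev = d;
		struct comedi_subdevice *s = dev->read_subdev;	/* was &dev->subdevices[N] */

		/* The IRQ is only requested when the command subdevice was set
		 * up, so 's' is valid here; read the hardware and push samples
		 * into s->async, then report any events.
		 */
		comedi_event(dev, s);
		return IRQ_HANDLED;
	}
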
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index 810e397..ae4048a 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -267,17 +267,16 @@ Caveats:
/* The software selectable internal ranges for PCI224 (option[2] == 0). */
static const struct comedi_lrange range_pci224_internal = {
- 8,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
static const unsigned short hwrange_pci224_internal[8] = {
@@ -293,11 +292,10 @@ static const unsigned short hwrange_pci224_internal[8] = {
/* The software selectable external ranges for PCI224 (option[2] == 1). */
static const struct comedi_lrange range_pci224_external = {
- 2,
- {
- RANGE_ext(-1, 1), /* bipolar [-Vref,+Vref] */
- RANGE_ext(0, 1), /* unipolar [0,+Vref] */
- }
+ 2, {
+ RANGE_ext(-1, 1), /* bipolar [-Vref,+Vref] */
+ RANGE_ext(0, 1) /* unipolar [0,+Vref] */
+ }
};
static const unsigned short hwrange_pci224_external[2] = {
@@ -308,19 +306,17 @@ static const unsigned short hwrange_pci224_external[2] = {
/* The hardware selectable Vref*2 external range for PCI234
* (option[2] == 1, option[3+n] == 0). */
static const struct comedi_lrange range_pci234_ext2 = {
- 1,
- {
- RANGE_ext(-2, 2),
- }
+ 1, {
+ RANGE_ext(-2, 2)
+ }
};
/* The hardware selectable Vref external range for PCI234
* (option[2] == 1, option[3+n] == 1). */
static const struct comedi_lrange range_pci234_ext = {
- 1,
- {
- RANGE_ext(-1, 1),
- }
+ 1, {
+ RANGE_ext(-1, 1)
+ }
};
/* This serves for all the PCI234 ranges. */
@@ -909,16 +905,14 @@ pci224_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
}
if (errors) {
if (errors & dupchan_err) {
- DPRINTK("comedi%d: " DRIVER_NAME
- ": ao_cmdtest: "
- "entries in chanlist must contain no "
- "duplicate channels\n", dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: entries in chanlist must contain no duplicate channels\n",
+ __func__);
}
if (errors & range_err) {
- DPRINTK("comedi%d: " DRIVER_NAME
- ": ao_cmdtest: "
- "entries in chanlist must all have "
- "the same range index\n", dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: entries in chanlist must all have the same range index\n",
+ __func__);
}
err++;
}
@@ -1142,7 +1136,7 @@ static irqreturn_t pci224_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct pci224_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->write_subdev;
struct comedi_cmd *cmd;
unsigned char intstat, valid_intstat;
unsigned char curenab;
@@ -1498,7 +1492,7 @@ static int amplc_pci224_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(amplc_pci224_pci_table) = {
+static const struct pci_device_id amplc_pci224_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI224) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI234) },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index a97bbd6..c08dfbb 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -546,15 +546,16 @@ static const unsigned int pci230_timebase[8] = {
};
/* PCI230 analogue input range table */
-static const struct comedi_lrange pci230_ai_range = { 7, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5)
- }
+static const struct comedi_lrange pci230_ai_range = {
+ 7, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5)
+ }
};
/* PCI230 analogue gain bits for each input range. */
@@ -564,10 +565,11 @@ static const unsigned char pci230_ai_gain[7] = { 0, 1, 2, 3, 1, 2, 3 };
static const unsigned char pci230_ai_bipolar[7] = { 1, 1, 1, 1, 0, 0, 0 };
/* PCI230 analogue output range table */
-static const struct comedi_lrange pci230_ao_range = { 2, {
- UNI_RANGE(10),
- BIP_RANGE(10)
- }
+static const struct comedi_lrange pci230_ao_range = {
+ 2, {
+ UNI_RANGE(10),
+ BIP_RANGE(10)
+ }
};
/* PCI230 daccon bipolar flag for each analogue output range. */
@@ -818,9 +820,9 @@ static int pci230_ai_rinsn(struct comedi_device *dev,
if (aref == AREF_DIFF) {
/* Differential. */
if (chan >= s->n_chan / 2) {
- DPRINTK("comedi%d: amplc_pci230: ai_rinsn: "
- "differential channel number out of range "
- "0 to %u\n", dev->minor, (s->n_chan / 2) - 1);
+ dev_dbg(dev->class_dev,
+ "%s: differential channel number out of range 0 to %u\n",
+ __func__, (s->n_chan / 2) - 1);
return -EINVAL;
}
}
@@ -1092,14 +1094,14 @@ static int pci230_ao_cmdtest(struct comedi_device *dev,
if (errors != 0) {
err++;
if ((errors & seq_err) != 0) {
- DPRINTK("comedi%d: amplc_pci230: ao_cmdtest: "
- "channel numbers must increase\n",
- dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: channel numbers must increase\n",
+ __func__);
}
if ((errors & range_err) != 0) {
- DPRINTK("comedi%d: amplc_pci230: ao_cmdtest: "
- "channels must have the same range\n",
- dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: channels must have the same range\n",
+ __func__);
}
}
}
@@ -1835,33 +1837,29 @@ static int pci230_ai_cmdtest(struct comedi_device *dev,
if (errors != 0) {
err++;
if ((errors & seq_err) != 0) {
- DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
- "channel numbers must increase or "
- "sequence must repeat exactly\n",
- dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: channel numbers must increase or sequence must repeat exactly\n",
+ __func__);
}
if ((errors & rangepair_err) != 0) {
- DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
- "single-ended channel pairs must "
- "have the same range\n", dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: single-ended channel pairs must have the same range\n",
+ __func__);
}
if ((errors & polarity_err) != 0) {
- DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
- "channel sequence ranges must be all "
- "bipolar or all unipolar\n",
- dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: channel sequence ranges must be all bipolar or all unipolar\n",
+ __func__);
}
if ((errors & aref_err) != 0) {
- DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
- "channel sequence analogue references "
- "must be all the same (single-ended "
- "or differential)\n", dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: channel sequence analogue references must be all the same (single-ended or differential)\n",
+ __func__);
}
if ((errors & diffchan_err) != 0) {
- DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
- "differential channel number out of "
- "range 0 to %u\n", dev->minor,
- (s->n_chan / 2) - 1);
+ dev_dbg(dev->class_dev,
+ "%s: differential channel number out of range 0 to %u\n",
+ __func__, (s->n_chan / 2) - 1);
}
if ((errors & buggy_chan0_err) != 0) {
dev_info(dev->class_dev,
@@ -2637,7 +2635,7 @@ static int pci230_attach_common(struct comedi_device *dev,
struct comedi_subdevice *s;
unsigned long iobase1, iobase2;
/* PCI230's I/O spaces 1 and 2 respectively. */
- int irq_hdl, rc;
+ int rc;
comedi_set_hw_dev(dev, &pci_dev->dev);
@@ -2709,16 +2707,12 @@ static int pci230_attach_common(struct comedi_device *dev,
outw(devpriv->adcg, dev->iobase + PCI230_ADCG);
outw(devpriv->adccon | PCI230_ADC_FIFO_RESET,
dev->iobase + PCI230_ADCCON);
- /* Register the interrupt handler. */
- irq_hdl = request_irq(pci_dev->irq, pci230_interrupt,
- IRQF_SHARED, "amplc_pci230", dev);
- if (irq_hdl < 0) {
- dev_warn(dev->class_dev,
- "unable to register irq %u, commands will not be available\n",
- pci_dev->irq);
- } else {
- dev->irq = pci_dev->irq;
- dev_dbg(dev->class_dev, "registered irq %u\n", pci_dev->irq);
+
+ if (pci_dev->irq) {
+ rc = request_irq(pci_dev->irq, pci230_interrupt, IRQF_SHARED,
+ dev->board_name, dev);
+ if (rc == 0)
+ dev->irq = pci_dev->irq;
}
rc = comedi_alloc_subdevices(dev, 3);
@@ -2734,14 +2728,14 @@ static int pci230_attach_common(struct comedi_device *dev,
s->range_table = &pci230_ai_range;
s->insn_read = &pci230_ai_rinsn;
s->len_chanlist = 256; /* but there are restrictions. */
- /* Only register commands if the interrupt handler is installed. */
- if (irq_hdl == 0) {
+ if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->do_cmd = &pci230_ai_cmd;
s->do_cmdtest = &pci230_ai_cmdtest;
s->cancel = pci230_ai_cancel;
}
+
s = &dev->subdevices[1];
/* analog output subdevice */
if (thisboard->ao_chans > 0) {
@@ -2753,9 +2747,7 @@ static int pci230_attach_common(struct comedi_device *dev,
s->insn_write = &pci230_ao_winsn;
s->insn_read = &pci230_ao_rinsn;
s->len_chanlist = thisboard->ao_chans;
- /* Only register commands if the interrupt handler is
- * installed. */
- if (irq_hdl == 0) {
+ if (dev->irq) {
dev->write_subdev = s;
s->subdev_flags |= SDF_CMD_WRITE;
s->do_cmd = &pci230_ao_cmd;
@@ -2765,6 +2757,7 @@ static int pci230_attach_common(struct comedi_device *dev,
} else {
s->type = COMEDI_SUBD_UNUSED;
}
+
s = &dev->subdevices[2];
/* digital i/o subdevice */
if (thisboard->have_dio) {
@@ -2856,7 +2849,7 @@ static int amplc_pci230_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(amplc_pci230_pci_table) = {
+static const struct pci_device_id amplc_pci230_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI230) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI260) },
{ 0 }
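
The amplc_pci230 and amplc_pci224 hunks also convert the drivers' private DPRINTK() wrappers into plain dev_dbg() calls keyed off the comedi class device, with __func__ replacing the hand-written "comedi%d: driver: function:" prefix. Schematically (an illustrative helper only; the real messages live in the cmdtest routines above):

	#include <linux/device.h>
	#include "../comedidev.h"

	static void my_driver_chanlist_error(struct comedi_device *dev)
	{
		dev_dbg(dev->class_dev,
			"%s: entries in chanlist must all have the same range index\n",
			__func__);
	}
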
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
index 4bd4ef8..be28e6c 100644
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ b/drivers/staging/comedi/drivers/amplc_pci263.c
@@ -96,7 +96,7 @@ static struct comedi_driver amplc_pci263_driver = {
.detach = comedi_pci_disable,
};
-static DEFINE_PCI_DEVICE_TABLE(pci263_pci_table) = {
+static const struct pci_device_id pci263_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI263) },
{0}
};
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index 217aa19c..5034f66 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -94,8 +94,6 @@ static void C6X_pwmInit(unsigned long baseAddr)
{
int timeout = 0;
-/* printk("Inside C6X_pwmInit\n"); */
-
WriteByteToHwPort(baseAddr, 0x70);
while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0)
&& (timeout < C6XDIGIO_TIME_OUT)) {
@@ -132,8 +130,6 @@ static void C6X_pwmOutput(unsigned long baseAddr, unsigned channel, int value)
int timeout = 0;
unsigned tmp;
- /* printk("Inside C6X_pwmOutput\n"); */
-
pwm.cmd = value;
if (pwm.cmd > 498)
pwm.cmd = 498;
@@ -200,8 +196,6 @@ static int C6X_encInput(unsigned long baseAddr, unsigned channel)
int timeout = 0;
int tmp;
- /* printk("Inside C6X_encInput\n"); */
-
enc.value = 0;
if (channel == 0)
ppcmd = 0x48;
@@ -295,8 +289,6 @@ static void C6X_encResetAll(unsigned long baseAddr)
{
unsigned timeout = 0;
-/* printk("Inside C6X_encResetAll\n"); */
-
WriteByteToHwPort(baseAddr, 0x68);
while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0)
&& (timeout < C6XDIGIO_TIME_OUT)) {
@@ -322,14 +314,6 @@ static void C6X_encResetAll(unsigned long baseAddr)
}
}
-static int c6xdigio_pwmo_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- printk(KERN_DEBUG "c6xdigio_pwmo_insn_read %x\n", insn->n);
- return insn->n;
-}
-
static int c6xdigio_pwmo_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -338,7 +322,6 @@ static int c6xdigio_pwmo_insn_write(struct comedi_device *dev,
int i;
int chan = CR_CHAN(insn->chanspec);
- /* printk("c6xdigio_pwmo_insn_write %x\n", insn->n); */
for (i = 0; i < insn->n; i++) {
C6X_pwmOutput(dev->iobase, chan, data[i]);
/* devpriv->ao_readback[chan] = data[i]; */
@@ -346,31 +329,10 @@ static int c6xdigio_pwmo_insn_write(struct comedi_device *dev,
return i;
}
-/* static int c6xdigio_ei_init_insn_read(struct comedi_device *dev, */
-/* struct comedi_subdevice *s, */
-/* struct comedi_insn *insn, */
-/* unsigned int *data) */
-/* { */
-/* printk("c6xdigio_ei_init_insn_read %x\n", insn->n); */
-/* return insn->n; */
-/* } */
-
-/* static int c6xdigio_ei_init_insn_write(struct comedi_device *dev, */
-/* struct comedi_subdevice *s, */
-/* struct comedi_insn *insn, */
-/* unsigned int *data) */
-/* { */
-/* int i; */
-/* int chan = CR_CHAN(insn->chanspec); */
- /* *//* C6X_encResetAll( dev->iobase ); */
- /* *//* return insn->n; */
-/* } */
-
static int c6xdigio_ei_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- /* printk("c6xdigio_ei__insn_read %x\n", insn->n); */
int n;
int chan = CR_CHAN(insn->chanspec);
@@ -382,12 +344,8 @@ static int c6xdigio_ei_insn_read(struct comedi_device *dev,
static void board_init(struct comedi_device *dev)
{
-
- /* printk("Inside board_init\n"); */
-
C6X_pwmInit(dev->iobase);
C6X_encResetAll(dev->iobase);
-
}
static const struct pnp_device_id c6xdigio_pnp_tbl[] = {
@@ -426,7 +384,6 @@ static int c6xdigio_attach(struct comedi_device *dev,
s->subdev_flags = SDF_WRITEABLE;
s->n_chan = 2;
/* s->trig[0] = c6xdigio_pwmo; */
- s->insn_read = c6xdigio_pwmo_insn_read;
s->insn_write = c6xdigio_pwmo_insn_write;
s->maxdata = 500;
s->range_table = &range_bipolar10; /* A suitable lie */
@@ -441,17 +398,6 @@ static int c6xdigio_attach(struct comedi_device *dev,
s->maxdata = 0xffffff;
s->range_table = &range_unknown;
- /* s = &dev->subdevices[2]; */
- /* pwm output subdevice */
- /* s->type = COMEDI_SUBD_COUNTER; // Not sure what to put here */
- /* s->subdev_flags = SDF_WRITEABLE; */
- /* s->n_chan = 1; */
- /* s->trig[0] = c6xdigio_ei_init; */
- /* s->insn_read = c6xdigio_ei_init_insn_read; */
- /* s->insn_write = c6xdigio_ei_init_insn_write; */
- /* s->maxdata = 0xFFFF; // Really just a don't care */
- /* s->range_table = &range_unknown; // Not sure what to put here */
-
/* I will call this init anyway but more than likely the DSP board */
/* will not be connected when device driver is loaded. */
board_init(dev);
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index e72a403..9819be0 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -181,43 +181,40 @@ static inline unsigned int DAC_DATA_REG(unsigned int channel)
/* analog input ranges for most boards */
static const struct comedi_lrange cb_pcidas_ranges = {
- 8,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
/* pci-das1001 input ranges */
static const struct comedi_lrange cb_pcidas_alt_ranges = {
- 8,
- {
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01)
- }
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.01),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01)
+ }
};
/* analog output ranges */
static const struct comedi_lrange cb_pcidas_ao_ranges = {
- 4,
- {
- BIP_RANGE(5),
- BIP_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(10),
- }
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(10)
+ }
};
enum trimpot_model {
@@ -1614,7 +1611,7 @@ static int cb_pcidas_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(cb_pcidas_pci_table) = {
+static const struct pci_device_id cb_pcidas_pci_table[] = {
{ PCI_VDEVICE(CB, 0x0001), BOARD_PCIDAS1602_16 },
{ PCI_VDEVICE(CB, 0x000f), BOARD_PCIDAS1200 },
{ PCI_VDEVICE(CB, 0x0010), BOARD_PCIDAS1602_12 },
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index ff52065..4fff173 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -94,15 +94,6 @@ TODO:
#include "plx9080.h"
#include "comedi_fc.h"
-#undef PCIDAS64_DEBUG /* disable debugging code */
-/* #define PCIDAS64_DEBUG enable debugging code */
-
-#ifdef PCIDAS64_DEBUG
-#define DEBUG_PRINT(format, args...) pr_debug(format, ## args)
-#else
-#define DEBUG_PRINT(format, args...) no_printk(format, ## args)
-#endif
-
#define TIMER_BASE 25 /* 40MHz master clock */
/* 100kHz 'prescaled' clock for slow acquisition,
* maybe I'll support this someday */
@@ -438,91 +429,85 @@ static inline uint8_t attenuate_bit(unsigned int channel)
/* analog input ranges for 64xx boards */
static const struct comedi_lrange ai_ranges_64xx = {
- 8,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
/* analog input ranges for 60xx boards */
static const struct comedi_lrange ai_ranges_60xx = {
- 4,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- }
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05)
+ }
};
/* analog input ranges for 6030, etc boards */
static const struct comedi_lrange ai_ranges_6030 = {
- 14,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.2),
- BIP_RANGE(0.1),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1),
- UNI_RANGE(0.5),
- UNI_RANGE(0.2),
- UNI_RANGE(0.1),
- }
+ 14, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.2),
+ BIP_RANGE(0.1),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2),
+ UNI_RANGE(1),
+ UNI_RANGE(0.5),
+ UNI_RANGE(0.2),
+ UNI_RANGE(0.1)
+ }
};
/* analog input ranges for 6052, etc boards */
static const struct comedi_lrange ai_ranges_6052 = {
- 15,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.25),
- BIP_RANGE(0.1),
- BIP_RANGE(0.05),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1),
- UNI_RANGE(0.5),
- UNI_RANGE(0.2),
- UNI_RANGE(0.1),
- }
+ 15, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.25),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.05),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2),
+ UNI_RANGE(1),
+ UNI_RANGE(0.5),
+ UNI_RANGE(0.2),
+ UNI_RANGE(0.1)
+ }
};
/* analog input ranges for 4020 board */
static const struct comedi_lrange ai_ranges_4020 = {
- 2,
- {
- BIP_RANGE(5),
- BIP_RANGE(1),
- }
+ 2, {
+ BIP_RANGE(5),
+ BIP_RANGE(1)
+ }
};
/* analog output ranges */
static const struct comedi_lrange ao_ranges_64xx = {
- 4,
- {
- BIP_RANGE(5),
- BIP_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(10),
- }
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(10)
+ }
};
static const int ao_range_code_64xx[] = {
@@ -537,11 +522,10 @@ static const int ao_range_code_60xx[] = {
};
static const struct comedi_lrange ao_ranges_6030 = {
- 2,
- {
- BIP_RANGE(10),
- UNI_RANGE(10),
- }
+ 2, {
+ BIP_RANGE(10),
+ UNI_RANGE(10)
+ }
};
static const int ao_range_code_6030[] = {
@@ -550,11 +534,10 @@ static const int ao_range_code_6030[] = {
};
static const struct comedi_lrange ao_ranges_4020 = {
- 2,
- {
- BIP_RANGE(5),
- BIP_RANGE(10),
- }
+ 2, {
+ BIP_RANGE(5),
+ BIP_RANGE(10)
+ }
};
static const int ao_range_code_4020[] = {
@@ -1252,8 +1235,6 @@ static void disable_ai_interrupts(struct comedi_device *dev)
writew(devpriv->intr_enable_bits,
devpriv->main_iobase + INTR_ENABLE_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
-
- DEBUG_PRINT("intr enable bits 0x%x\n", devpriv->intr_enable_bits);
}
static void enable_ai_interrupts(struct comedi_device *dev,
@@ -1277,7 +1258,6 @@ static void enable_ai_interrupts(struct comedi_device *dev,
devpriv->intr_enable_bits |= bits;
writew(devpriv->intr_enable_bits,
devpriv->main_iobase + INTR_ENABLE_REG);
- DEBUG_PRINT("intr enable bits 0x%x\n", devpriv->intr_enable_bits);
spin_unlock_irqrestore(&dev->spinlock, flags);
}
@@ -1292,38 +1272,6 @@ static void init_plx9080(struct comedi_device *dev)
devpriv->plx_control_bits =
readl(devpriv->plx9080_iobase + PLX_CONTROL_REG);
- /* plx9080 dump */
- DEBUG_PRINT(" plx interrupt status 0x%x\n",
- readl(plx_iobase + PLX_INTRCS_REG));
- DEBUG_PRINT(" plx id bits 0x%x\n", readl(plx_iobase + PLX_ID_REG));
- DEBUG_PRINT(" plx control reg 0x%x\n", devpriv->plx_control_bits);
- DEBUG_PRINT(" plx mode/arbitration reg 0x%x\n",
- readl(plx_iobase + PLX_MARB_REG));
- DEBUG_PRINT(" plx region0 reg 0x%x\n",
- readl(plx_iobase + PLX_REGION0_REG));
- DEBUG_PRINT(" plx region1 reg 0x%x\n",
- readl(plx_iobase + PLX_REGION1_REG));
-
- DEBUG_PRINT(" plx revision 0x%x\n",
- readl(plx_iobase + PLX_REVISION_REG));
- DEBUG_PRINT(" plx dma channel 0 mode 0x%x\n",
- readl(plx_iobase + PLX_DMA0_MODE_REG));
- DEBUG_PRINT(" plx dma channel 1 mode 0x%x\n",
- readl(plx_iobase + PLX_DMA1_MODE_REG));
- DEBUG_PRINT(" plx dma channel 0 pci address 0x%x\n",
- readl(plx_iobase + PLX_DMA0_PCI_ADDRESS_REG));
- DEBUG_PRINT(" plx dma channel 0 local address 0x%x\n",
- readl(plx_iobase + PLX_DMA0_LOCAL_ADDRESS_REG));
- DEBUG_PRINT(" plx dma channel 0 transfer size 0x%x\n",
- readl(plx_iobase + PLX_DMA0_TRANSFER_SIZE_REG));
- DEBUG_PRINT(" plx dma channel 0 descriptor 0x%x\n",
- readl(plx_iobase + PLX_DMA0_DESCRIPTOR_REG));
- DEBUG_PRINT(" plx dma channel 0 command status 0x%x\n",
- readb(plx_iobase + PLX_DMA0_CS_REG));
- DEBUG_PRINT(" plx dma channel 0 threshold 0x%x\n",
- readl(plx_iobase + PLX_DMA0_THRESHOLD_REG));
- DEBUG_PRINT(" plx bigend 0x%x\n", readl(plx_iobase + PLX_BIGEND_REG));
-
#ifdef __BIG_ENDIAN
bits = BIGEND_DMA0 | BIGEND_DMA1;
#else
@@ -1417,9 +1365,6 @@ static int set_ai_fifo_segment_length(struct comedi_device *dev,
devpriv->ai_fifo_segment_length = num_increments * increment_size;
- DEBUG_PRINT("set hardware fifo segment length to %i\n",
- devpriv->ai_fifo_segment_length);
-
return devpriv->ai_fifo_segment_length;
}
@@ -1441,8 +1386,6 @@ static int set_ai_fifo_size(struct comedi_device *dev, unsigned int num_samples)
num_samples = retval * fifo->num_segments * fifo->sample_packing_ratio;
- DEBUG_PRINT("set hardware fifo size to %i\n", num_samples);
-
return num_samples;
}
@@ -1538,8 +1481,6 @@ static int alloc_and_init_dma_members(struct comedi_device *dev)
if (devpriv->ai_dma_desc == NULL)
return -ENOMEM;
- DEBUG_PRINT("ai dma descriptors start at bus addr 0x%llx\n",
- (unsigned long long)devpriv->ai_dma_desc_bus_addr);
if (ao_cmd_is_supported(thisboard)) {
devpriv->ao_dma_desc =
pci_alloc_consistent(pcidev,
@@ -1548,9 +1489,6 @@ static int alloc_and_init_dma_members(struct comedi_device *dev)
&devpriv->ao_dma_desc_bus_addr);
if (devpriv->ao_dma_desc == NULL)
return -ENOMEM;
-
- DEBUG_PRINT("ao dma descriptors start at bus addr 0x%llx\n",
- (unsigned long long)devpriv->ao_dma_desc_bus_addr);
}
/* initialize dma descriptors */
for (i = 0; i < ai_dma_ring_count(thisboard); i++) {
@@ -1650,8 +1588,6 @@ static void i2c_write_byte(struct comedi_device *dev, uint8_t byte)
uint8_t bit;
unsigned int num_bits = 8;
- DEBUG_PRINT("writing to i2c byte 0x%x\n", byte);
-
for (bit = 1 << (num_bits - 1); bit; bit >>= 1) {
i2c_set_scl(dev, 0);
if ((byte & bit))
@@ -1738,7 +1674,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long flags;
static const int timeout = 100;
- DEBUG_PRINT("chanspec 0x%x\n", insn->chanspec);
channel = CR_CHAN(insn->chanspec);
range = CR_RANGE(insn->chanspec);
aref = CR_AREF(insn->chanspec);
@@ -1766,7 +1701,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
if (insn->chanspec & CR_ALT_SOURCE) {
unsigned int cal_en_bit;
- DEBUG_PRINT("reading calibration source\n");
if (thisboard->layout == LAYOUT_60XX)
cal_en_bit = CAL_EN_60XX_BIT;
else
@@ -1800,7 +1734,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
devpriv->i2c_cal_range_bits &= ~ADC_SRC_4020_MASK;
if (insn->chanspec & CR_ALT_SOURCE) {
- DEBUG_PRINT("reading calibration source\n");
devpriv->i2c_cal_range_bits |=
adc_src_4020_bits(devpriv->calibration_source);
} else { /* select BNC inputs */
@@ -1839,7 +1772,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
/* wait for data */
for (i = 0; i < timeout; i++) {
bits = readw(devpriv->main_iobase + HW_STATUS_REG);
- DEBUG_PRINT(" pipe bits 0x%x\n", pipe_full_bits(bits));
if (thisboard->layout == LAYOUT_4020) {
if (readw(devpriv->main_iobase +
ADC_WRITE_PNTR_REG))
@@ -1850,7 +1782,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
}
udelay(1);
}
- DEBUG_PRINT(" looped %i times waiting for data\n", i);
if (i == timeout) {
comedi_error(dev, " analog input read insn timed out");
dev_info(dev->class_dev, "status 0x%x\n", bits);
@@ -1884,7 +1815,6 @@ static int ai_config_calibration_source(struct comedi_device *dev,
return -EINVAL;
}
- DEBUG_PRINT("setting calibration source to %i\n", source);
devpriv->calibration_source = source;
return 2;
@@ -2368,7 +2298,6 @@ static void set_ai_pacing(struct comedi_device *dev, struct comedi_cmd *cmd)
/* load lower 16 bits of convert interval */
writew(convert_counter & 0xffff,
devpriv->main_iobase + ADC_SAMPLE_INTERVAL_LOWER_REG);
- DEBUG_PRINT("convert counter 0x%x\n", convert_counter);
/* load upper 8 bits of convert interval */
writew((convert_counter >> 16) & 0xff,
devpriv->main_iobase + ADC_SAMPLE_INTERVAL_UPPER_REG);
@@ -2378,7 +2307,6 @@ static void set_ai_pacing(struct comedi_device *dev, struct comedi_cmd *cmd)
/* load upper 8 bits of scan delay */
writew((scan_counter >> 16) & 0xff,
devpriv->main_iobase + ADC_DELAY_INTERVAL_UPPER_REG);
- DEBUG_PRINT("scan counter 0x%x\n", scan_counter);
}
static int use_internal_queue_6xxx(const struct comedi_cmd *cmd)
@@ -2469,9 +2397,6 @@ static int setup_channel_queue(struct comedi_device *dev,
writew(bits,
devpriv->main_iobase +
ADC_QUEUE_FIFO_REG);
- DEBUG_PRINT(
- "wrote 0x%x to external channel queue\n",
- bits);
}
/* doing a queue clear is not specified in board docs,
* but required for reliable operation */
@@ -2593,7 +2518,6 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
}
writew(devpriv->adc_control1_bits,
devpriv->main_iobase + ADC_CONTROL1_REG);
- DEBUG_PRINT("control1 bits 0x%x\n", devpriv->adc_control1_bits);
spin_unlock_irqrestore(&dev->spinlock, flags);
/* clear adc buffer */
@@ -2645,17 +2569,14 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (use_hw_sample_counter(cmd))
bits |= ADC_SAMPLE_COUNTER_EN_BIT;
writew(bits, devpriv->main_iobase + ADC_CONTROL0_REG);
- DEBUG_PRINT("control0 bits 0x%x\n", bits);
devpriv->ai_cmd_running = 1;
spin_unlock_irqrestore(&dev->spinlock, flags);
/* start acquisition */
- if (cmd->start_src == TRIG_NOW) {
+ if (cmd->start_src == TRIG_NOW)
writew(0, devpriv->main_iobase + ADC_START_REG);
- DEBUG_PRINT("soft trig\n");
- }
return 0;
}
@@ -2690,10 +2611,6 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev)
read_segment = adc_upper_read_ptr_code(prepost_bits);
write_segment = adc_upper_write_ptr_code(prepost_bits);
- DEBUG_PRINT(" rd seg %i, wrt seg %i, rd idx %i, wrt idx %i\n",
- read_segment, write_segment, read_index,
- write_index);
-
if (read_segment != write_segment)
num_samples =
devpriv->ai_fifo_segment_length - read_index;
@@ -2715,8 +2632,6 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev)
break;
}
- DEBUG_PRINT(" read %i samples from fifo\n", num_samples);
-
for (i = 0; i < num_samples; i++) {
cfc_write_to_buffer(s,
readw(devpriv->main_iobase +
@@ -2812,11 +2727,6 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
num_samples * sizeof(uint16_t));
devpriv->ai_dma_index = (devpriv->ai_dma_index + 1) %
ai_dma_ring_count(thisboard);
-
- DEBUG_PRINT("next buffer addr 0x%lx\n",
- (unsigned long)devpriv->
- ai_buffer_bus_addr[devpriv->ai_dma_index]);
- DEBUG_PRINT("pci addr reg 0x%x\n", next_transfer_addr);
}
/* XXX check for dma ring buffer overrun
* (use end-of-chain bit to mark last unused buffer) */
@@ -2845,24 +2755,17 @@ static void handle_ai_interrupt(struct comedi_device *dev,
if (plx_status & ICS_DMA1_A) { /* dma chan 1 interrupt */
writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
- DEBUG_PRINT("dma1 status 0x%x\n", dma1_status);
if (dma1_status & PLX_DMA_EN_BIT)
drain_dma_buffers(dev, 1);
-
- DEBUG_PRINT(" cleared dma ch1 interrupt\n");
}
spin_unlock_irqrestore(&dev->spinlock, flags);
- if (status & ADC_DONE_BIT)
- DEBUG_PRINT("adc done interrupt\n");
-
/* drain fifo with pio */
if ((status & ADC_DONE_BIT) ||
((cmd->flags & TRIG_WAKE_EOS) &&
(status & ADC_INTR_PENDING_BIT) &&
(thisboard->layout != LAYOUT_4020))) {
- DEBUG_PRINT("pio fifo drain\n");
spin_lock_irqsave(&dev->spinlock, flags);
if (devpriv->ai_cmd_running) {
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -2947,7 +2850,6 @@ static void restart_ao_dma(struct comedi_device *dev)
dma_desc_bits =
readl(devpriv->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
dma_desc_bits &= ~PLX_END_OF_CHAIN_BIT;
- DEBUG_PRINT("restarting ao dma, descriptor reg 0x%x\n", dma_desc_bits);
load_first_dma_descriptor(dev, 0, dma_desc_bits);
dma_start_sync(dev, 0);
@@ -2963,10 +2865,6 @@ static unsigned int load_ao_dma_buffer(struct comedi_device *dev,
buffer_index = devpriv->ao_dma_index;
prev_buffer_index = prev_ao_dma_index(dev);
- DEBUG_PRINT("attempting to load ao buffer %i (0x%llx)\n", buffer_index,
- (unsigned long long)devpriv->ao_buffer_bus_addr[
- buffer_index]);
-
num_bytes = comedi_buf_read_n_available(dev->write_subdev->async);
if (num_bytes > DMA_BUFFER_SIZE)
num_bytes = DMA_BUFFER_SIZE;
@@ -2977,8 +2875,6 @@ static unsigned int load_ao_dma_buffer(struct comedi_device *dev,
if (num_bytes == 0)
return 0;
- DEBUG_PRINT("loading %i bytes\n", num_bytes);
-
num_bytes = cfc_read_array_from_buffer(dev->write_subdev,
devpriv->
ao_buffer[buffer_index],
@@ -3052,14 +2948,12 @@ static void handle_ao_interrupt(struct comedi_device *dev,
writeb(PLX_CLEAR_DMA_INTR_BIT,
devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
- DEBUG_PRINT("dma0 status 0x%x\n", dma0_status);
if (dma0_status & PLX_DMA_EN_BIT) {
load_ao_dma(dev, cmd);
/* try to recover from dma end-of-chain event */
if (ao_dma_needs_restart(dev, dma0_status))
restart_ao_dma(dev);
}
- DEBUG_PRINT(" cleared dma ch0 interrupt\n");
} else {
spin_unlock_irqrestore(&dev->spinlock, flags);
}
@@ -3068,12 +2962,6 @@ static void handle_ao_interrupt(struct comedi_device *dev,
async->events |= COMEDI_CB_EOA;
if (ao_stopped_by_error(dev, cmd))
async->events |= COMEDI_CB_ERROR;
- DEBUG_PRINT("plx dma0 desc reg 0x%x\n",
- readl(devpriv->plx9080_iobase +
- PLX_DMA0_DESCRIPTOR_REG));
- DEBUG_PRINT("plx dma0 address reg 0x%x\n",
- readl(devpriv->plx9080_iobase +
- PLX_DMA0_PCI_ADDRESS_REG));
}
cfc_handle_events(dev, s);
}
@@ -3089,15 +2977,12 @@ static irqreturn_t handle_interrupt(int irq, void *d)
plx_status = readl(devpriv->plx9080_iobase + PLX_INTRCS_REG);
status = readw(devpriv->main_iobase + HW_STATUS_REG);
- DEBUG_PRINT("hw status 0x%x, plx status 0x%x\n", status, plx_status);
-
/* an interrupt before all the postconfig stuff gets done could
* cause a NULL dereference if we continue through the
* interrupt handler */
- if (!dev->attached) {
- DEBUG_PRINT("premature interrupt, ignoring\n");
+ if (!dev->attached)
return IRQ_HANDLED;
- }
+
handle_ai_interrupt(dev, status, plx_status);
handle_ao_interrupt(dev, status, plx_status);
@@ -3105,11 +2990,8 @@ static irqreturn_t handle_interrupt(int irq, void *d)
if (plx_status & ICS_LDIA) { /* clear local doorbell interrupt */
plx_bits = readl(devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
writel(plx_bits, devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
- DEBUG_PRINT(" cleared local doorbell bits 0x%x\n", plx_bits);
}
- DEBUG_PRINT("exiting handler\n");
-
return IRQ_HANDLED;
}
@@ -3130,7 +3012,6 @@ static int ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
abort_dma(dev, 1);
- DEBUG_PRINT("ai canceled\n");
return 0;
}
@@ -3458,7 +3339,6 @@ static int dio_callback(int dir, int port, int data, unsigned long arg)
void __iomem *iobase = (void __iomem *)arg;
if (dir) {
writeb(data, iobase + port);
- DEBUG_PRINT("wrote 0x%x to port %i\n", data, port);
return 0;
} else {
return readb(iobase + port);
@@ -4046,11 +3926,6 @@ static int auto_attach(struct comedi_device *dev,
return -ENOMEM;
}
- DEBUG_PRINT(" plx9080 remapped to 0x%p\n", devpriv->plx9080_iobase);
- DEBUG_PRINT(" main remapped to 0x%p\n", devpriv->main_iobase);
- DEBUG_PRINT(" diocounter remapped to 0x%p\n",
- devpriv->dio_counter_iobase);
-
/* figure out what local addresses are */
local_range = readl(devpriv->plx9080_iobase + PLX_LAS0RNG_REG) &
LRNG_MEM_MASK;
@@ -4065,9 +3940,6 @@ static int auto_attach(struct comedi_device *dev,
devpriv->local1_iobase = ((uint32_t)devpriv->dio_counter_phys_iobase &
~local_range) | local_decode;
- DEBUG_PRINT(" local 0 io addr 0x%x\n", devpriv->local0_iobase);
- DEBUG_PRINT(" local 1 io addr 0x%x\n", devpriv->local1_iobase);
-
retval = alloc_and_init_dma_members(dev);
if (retval < 0)
return retval;
@@ -4161,7 +4033,7 @@ static int cb_pcidas64_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(cb_pcidas64_pci_table) = {
+static const struct pci_device_id cb_pcidas64_pci_table[] = {
{ PCI_VDEVICE(CB, 0x001d), BOARD_PCIDAS6402_16 },
{ PCI_VDEVICE(CB, 0x001e), BOARD_PCIDAS6402_12 },
{ PCI_VDEVICE(CB, 0x0035), BOARD_PCIDAS64_M1_16 },
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index 94f1158..8cca051 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -407,7 +407,7 @@ static int cb_pcidda_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(cb_pcidda_pci_table) = {
+static const struct pci_device_id cb_pcidda_pci_table[] = {
{ PCI_VDEVICE(CB, 0x0020), BOARD_DDA02_12 },
{ PCI_VDEVICE(CB, 0x0021), BOARD_DDA04_12 },
{ PCI_VDEVICE(CB, 0x0022), BOARD_DDA08_12 },
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index 30520d4..57295d1 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -44,9 +44,6 @@ See http://www.mccdaq.com/PDFs/Manuals/pcim-das1602-16.pdf for more details.
#include "plx9052.h"
#include "8255.h"
-/* #define CBPCIMDAS_DEBUG */
-#undef CBPCIMDAS_DEBUG
-
/* Registers for the PCIM-DAS1602/16 */
/* sizes of io regions (bytes) */
@@ -145,10 +142,9 @@ static int cb_pcimdas_ai_rinsn(struct comedi_device *dev,
if (!busy)
break;
}
- if (i == TIMEOUT) {
- printk("timeout\n");
+ if (i == TIMEOUT)
return -ETIMEDOUT;
- }
+
/* read data */
data[n] = inw(dev->iobase + 0);
}
@@ -222,15 +218,6 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
devpriv->BADR3 = pci_resource_start(pcidev, 3);
iobase_8255 = pci_resource_start(pcidev, 4);
-/* Dont support IRQ yet */
-/* get irq */
-/* if(request_irq(pcidev->irq, cb_pcimdas_interrupt, IRQF_SHARED, "cb_pcimdas", dev )) */
-/* { */
-/* printk(" unable to allocate irq %u\n", pcidev->irq); */
-/* return -EINVAL; */
-/* } */
-/* dev->irq = pcidev->irq; */
-
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
return ret;
@@ -288,7 +275,7 @@ static int cb_pcimdas_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(cb_pcimdas_pci_table) = {
+static const struct pci_device_id cb_pcimdas_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0056) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index edf17b6..43a8663 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -206,7 +206,7 @@ static int cb_pcimdda_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(cb_pcimdda_pci_table) = {
+static const struct pci_device_id cb_pcimdda_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CB, PCI_ID_PCIM_DDA06_16) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index 16c0780..d539eaf 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -74,11 +74,10 @@ static const int nano_per_micro = 1000;
/* fake analog input ranges */
static const struct comedi_lrange waveform_ai_ranges = {
- 2,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- }
+ 2, {
+ BIP_RANGE(10),
+ BIP_RANGE(5)
+ }
};
static unsigned short fake_sawtooth(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
index 89836c0..323a7f3 100644
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ b/drivers/staging/comedi/drivers/contec_pci_dio.c
@@ -111,7 +111,7 @@ static int contec_pci_dio_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(contec_pci_dio_pci_table) = {
+static const struct pci_device_id contec_pci_dio_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CONTEC, PCI_DEVICE_ID_PIO1616L) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index de920cc..ce153fc 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -772,7 +772,7 @@ static int daqboard2000_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(daqboard2000_pci_table) = {
+static const struct pci_device_id daqboard2000_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IOTECH, 0x0409) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index 15dd33e..e5c0ee9 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -120,46 +120,49 @@
/* gainlist same as _pgx_ below */
-static const struct comedi_lrange range_das08_pgl = { 9, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
+static const struct comedi_lrange range_das08_pgl = {
+ 9, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_das08_pgh = { 12, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.1),
- BIP_RANGE(0.05),
- BIP_RANGE(0.01),
- BIP_RANGE(0.005),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01),
- }
+static const struct comedi_lrange range_das08_pgh = {
+ 12, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.01),
+ BIP_RANGE(0.005),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01)
+ }
};
-static const struct comedi_lrange range_das08_pgm = { 9, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.01),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01)
- }
+static const struct comedi_lrange range_das08_pgm = {
+ 9, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.01),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01)
+ }
}; /*
cio-das08jr.pdf
diff --git a/drivers/staging/comedi/drivers/das08_pci.c b/drivers/staging/comedi/drivers/das08_pci.c
index 3a6d372..d94af09 100644
--- a/drivers/staging/comedi/drivers/das08_pci.c
+++ b/drivers/staging/comedi/drivers/das08_pci.c
@@ -89,7 +89,7 @@ static int das08_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(das08_pci_table) = {
+static const struct pci_device_id das08_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CB, PCI_DEVICE_ID_PCIDAS08) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index fce9acf..fee5fac 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -110,18 +110,18 @@ irq can be omitted, although the cmd interface will not work without it.
#define DAS16M1_82C55 0x400
#define DAS16M1_8254_THIRD 0x404
-static const struct comedi_lrange range_das16m1 = { 9,
- {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- BIP_RANGE(10),
- }
+static const struct comedi_lrange range_das16m1 = {
+ 9, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25),
+ BIP_RANGE(10)
+ }
};
struct das16m1_private_struct {
@@ -269,11 +269,6 @@ static int das16m1_cmd_exec(struct comedi_device *dev,
struct comedi_cmd *cmd = &async->cmd;
unsigned int byte, i;
- if (dev->irq == 0) {
- comedi_error(dev, "irq required to execute comedi_cmd");
- return -1;
- }
-
/* disable interrupts and internal pacer */
devpriv->control_state &= ~INTE & ~PACER_MASK;
outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
@@ -508,38 +503,26 @@ static irqreturn_t das16m1_interrupt(int irq, void *d)
static int das16m1_irq_bits(unsigned int irq)
{
- int ret;
-
switch (irq) {
case 10:
- ret = 0x0;
- break;
+ return 0x0;
case 11:
- ret = 0x1;
- break;
+ return 0x1;
case 12:
- ret = 0x2;
- break;
+ return 0x2;
case 15:
- ret = 0x3;
- break;
+ return 0x3;
case 2:
- ret = 0x4;
- break;
+ return 0x4;
case 3:
- ret = 0x5;
- break;
+ return 0x5;
case 5:
- ret = 0x6;
- break;
+ return 0x6;
case 7:
- ret = 0x7;
- break;
+ return 0x7;
default:
- return -1;
- break;
+ return 0x0;
}
- return ret << 4;
}
/*
@@ -553,7 +536,6 @@ static int das16m1_attach(struct comedi_device *dev,
struct das16m1_private_struct *devpriv;
struct comedi_subdevice *s;
int ret;
- unsigned int irq;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -569,24 +551,12 @@ static int das16m1_attach(struct comedi_device *dev,
return ret;
devpriv->extra_iobase = dev->iobase + DAS16M1_82C55;
- /* now for the irq */
- irq = it->options[1];
- /* make sure it is valid */
- if (das16m1_irq_bits(irq) >= 0) {
- ret = request_irq(irq, das16m1_interrupt, 0,
- dev->driver->driver_name, dev);
- if (ret < 0)
- return ret;
- dev->irq = irq;
- printk
- ("irq %u\n", irq);
- } else if (irq == 0) {
- printk
- (", no irq\n");
- } else {
- comedi_error(dev, "invalid irq\n"
- " valid irqs are 2, 3, 5, 7, 10, 11, 12, or 15\n");
- return -EINVAL;
+ /* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */
+ if ((1 << it->options[1]) & 0xdcfc) {
+ ret = request_irq(it->options[1], das16m1_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
ret = comedi_alloc_subdevices(dev, 4);
@@ -594,20 +564,22 @@ static int das16m1_attach(struct comedi_device *dev,
return ret;
s = &dev->subdevices[0];
- dev->read_subdev = s;
/* ai */
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_DIFF;
s->n_chan = 8;
- s->subdev_flags = SDF_DIFF;
- s->len_chanlist = 256;
s->maxdata = (1 << 12) - 1;
s->range_table = &range_das16m1;
s->insn_read = das16m1_ai_rinsn;
- s->do_cmdtest = das16m1_cmd_test;
- s->do_cmd = das16m1_cmd_exec;
- s->cancel = das16m1_cancel;
- s->poll = das16m1_poll;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 256;
+ s->do_cmdtest = das16m1_cmd_test;
+ s->do_cmd = das16m1_cmd_exec;
+ s->cancel = das16m1_cancel;
+ s->poll = das16m1_poll;
+ }
s = &dev->subdevices[1];
/* di */
@@ -640,10 +612,7 @@ static int das16m1_attach(struct comedi_device *dev,
outb(0, dev->iobase + DAS16M1_DIO);
/* set the interrupt level */
- if (dev->irq)
- devpriv->control_state = das16m1_irq_bits(dev->irq);
- else
- devpriv->control_state = 0;
+ devpriv->control_state = das16m1_irq_bits(dev->irq) << 4;
outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
return 0;
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index 1880038..320d95a 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -178,31 +178,29 @@ enum {
/* analog input ranges */
static const struct comedi_lrange range_ai_das1801 = {
- 8,
- {
- RANGE(-5, 5),
- RANGE(-1, 1),
- RANGE(-0.1, 0.1),
- RANGE(-0.02, 0.02),
- RANGE(0, 5),
- RANGE(0, 1),
- RANGE(0, 0.1),
- RANGE(0, 0.02),
- }
+ 8, {
+ BIP_RANGE(5),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.02),
+ UNI_RANGE(5),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.02)
+ }
};
static const struct comedi_lrange range_ai_das1802 = {
- 8,
- {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-2.5, 2.5),
- RANGE(-1.25, 1.25),
- RANGE(0, 10),
- RANGE(0, 5),
- RANGE(0, 2.5),
- RANGE(0, 1.25),
- }
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
struct das1800_board {
@@ -445,10 +443,9 @@ struct das1800_private {
/* analog out range for 'ao' boards */
/*
static const struct comedi_lrange range_ao_2 = {
- 2,
- {
- RANGE(-10, 10),
- RANGE(-5, 5),
+ 2, {
+ BIP_RANGE(10),
+ BIP_RANGE(5)
}
};
*/
@@ -462,7 +459,7 @@ static inline uint16_t munge_bipolar_sample(const struct comedi_device *dev,
return sample;
}
-static void munge_data(struct comedi_device *dev, uint16_t * array,
+static void munge_data(struct comedi_device *dev, uint16_t *array,
unsigned int num_elements)
{
unsigned int i;
@@ -644,7 +641,7 @@ static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
static void das1800_ai_handler(struct comedi_device *dev)
{
struct das1800_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int status = inb(dev->iobase + DAS1800_STATUS);
@@ -1150,12 +1147,6 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
struct comedi_async *async = s->async;
const struct comedi_cmd *cmd = &async->cmd;
- if (!dev->irq) {
- comedi_error(dev,
- "no irq assigned for das-1800, cannot do hardware conversions");
- return -1;
- }
-
/* disable dma on TRIG_WAKE_EOS, or TRIG_RT
* (because dma in handler is unsafe at hard real-time priority) */
if (cmd->flags & (TRIG_WAKE_EOS | TRIG_RT))
@@ -1522,43 +1513,34 @@ static int das1800_attach(struct comedi_device *dev,
devpriv->iobase2 = iobase2;
}
- /* grab our IRQ */
- if (irq) {
- if (request_irq(irq, das1800_interrupt, 0,
- dev->driver->driver_name, dev)) {
- dev_dbg(dev->class_dev, "unable to allocate irq %u\n",
- irq);
- return -EINVAL;
- }
- }
- dev->irq = irq;
+ if (irq == 3 || irq == 5 || irq == 7 || irq == 10 || irq == 11 ||
+ irq == 15) {
+ ret = request_irq(irq, das1800_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0) {
+ dev->irq = irq;
- /* set bits that tell card which irq to use */
- switch (irq) {
- case 0:
- break;
- case 3:
- devpriv->irq_dma_bits |= 0x8;
- break;
- case 5:
- devpriv->irq_dma_bits |= 0x10;
- break;
- case 7:
- devpriv->irq_dma_bits |= 0x18;
- break;
- case 10:
- devpriv->irq_dma_bits |= 0x28;
- break;
- case 11:
- devpriv->irq_dma_bits |= 0x30;
- break;
- case 15:
- devpriv->irq_dma_bits |= 0x38;
- break;
- default:
- dev_err(dev->class_dev, "irq out of range\n");
- return -EINVAL;
- break;
+ switch (irq) {
+ case 3:
+ devpriv->irq_dma_bits |= 0x8;
+ break;
+ case 5:
+ devpriv->irq_dma_bits |= 0x10;
+ break;
+ case 7:
+ devpriv->irq_dma_bits |= 0x18;
+ break;
+ case 10:
+ devpriv->irq_dma_bits |= 0x28;
+ break;
+ case 11:
+ devpriv->irq_dma_bits |= 0x30;
+ break;
+ case 15:
+ devpriv->irq_dma_bits |= 0x38;
+ break;
+ }
+ }
}
ret = das1800_init_dma(dev, dma0, dma1);
@@ -1578,20 +1560,23 @@ static int das1800_attach(struct comedi_device *dev,
/* analog input subdevice */
s = &dev->subdevices[0];
- dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND;
if (thisboard->common)
s->subdev_flags |= SDF_COMMON;
s->n_chan = thisboard->qram_len;
- s->len_chanlist = thisboard->qram_len;
s->maxdata = (1 << thisboard->resolution) - 1;
s->range_table = thisboard->range_ai;
- s->do_cmd = das1800_ai_do_cmd;
- s->do_cmdtest = das1800_ai_do_cmdtest;
s->insn_read = das1800_ai_rinsn;
- s->poll = das1800_ai_poll;
- s->cancel = das1800_cancel;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmd = das1800_ai_do_cmd;
+ s->do_cmdtest = das1800_ai_do_cmdtest;
+ s->poll = das1800_ai_poll;
+ s->cancel = das1800_cancel;
+ }
/* analog out */
s = &dev->subdevices[1];
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index fb25cb8..43027ee 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -152,21 +152,12 @@ static irqreturn_t intr_handler(int irq, void *d)
dev_warn(dev->class_dev, "BUG: spurious interrupt\n");
return IRQ_HANDLED;
}
-#ifdef DEBUG
- printk("das6402: interrupt! das6402_irqcount=%i\n",
- devpriv->das6402_irqcount);
- printk("das6402: iobase+2=%i\n", inw_p(dev->iobase + 2));
-#endif
das6402_ai_fifo_dregs(dev, s);
if (s->async->buf_write_count >= devpriv->ai_bytes_to_read) {
outw_p(SCANL, dev->iobase + 2); /* clears the fifo */
outb(0x07, dev->iobase + 8); /* clears all flip-flops */
-#ifdef DEBUG
- printk("das6402: Got %i samples\n\n",
- devpriv->das6402_wordsread - diff);
-#endif
s->async->events |= COMEDI_CB_EOA;
comedi_event(dev, s);
}
@@ -211,7 +202,7 @@ static int das6402_ai_cancel(struct comedi_device *dev,
#ifdef unused
static int das6402_ai_mode2(struct comedi_device *dev,
- struct comedi_subdevice *s, comedi_trig * it)
+ struct comedi_subdevice *s, comedi_trig *it)
{
struct das6402_private *devpriv = dev->private;
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index b04a5633..78a1962 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -124,13 +124,12 @@ Configuration Options:
/* board AI ranges in comedi structure */
static const struct comedi_lrange dmm32at_airanges = {
- 4,
- {
- UNI_RANGE(10),
- UNI_RANGE(5),
- BIP_RANGE(10),
- BIP_RANGE(5),
- }
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ BIP_RANGE(10),
+ BIP_RANGE(5)
+ }
};
/* register values for above ranges */
@@ -145,13 +144,12 @@ static const unsigned char dmm32at_rangebits[] = {
* board. The application should only use the range set by the jumper
*/
static const struct comedi_lrange dmm32at_aoranges = {
- 4,
- {
- UNI_RANGE(10),
- UNI_RANGE(5),
- BIP_RANGE(10),
- BIP_RANGE(5),
- }
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ BIP_RANGE(10),
+ BIP_RANGE(5)
+ }
};
struct dmm32at_private {
@@ -182,8 +180,6 @@ static int dmm32at_ai_rinsn(struct comedi_device *dev,
chan = CR_CHAN(insn->chanspec) & (s->n_chan - 1);
range = CR_RANGE(insn->chanspec);
- /* printk("channel=0x%02x, range=%d\n",chan,range); */
-
/* zero scan and fifo control and reset fifo */
outb(DMM32AT_FIFORESET, dev->iobase + DMM32AT_FIFOCNTRL);
@@ -199,10 +195,8 @@ static int dmm32at_ai_rinsn(struct comedi_device *dev,
if ((status & DMM32AT_STATUS) == 0)
break;
}
- if (i == 40000) {
- printk(KERN_WARNING "dmm32at: timeout\n");
+ if (i == 40000)
return -ETIMEDOUT;
- }
/* convert n samples */
for (n = 0; n < insn->n; n++) {
@@ -214,10 +208,8 @@ static int dmm32at_ai_rinsn(struct comedi_device *dev,
if ((status & DMM32AT_STATUS) == 0)
break;
}
- if (i == 40000) {
- printk(KERN_WARNING "dmm32at: timeout\n");
+ if (i == 40000)
return -ETIMEDOUT;
- }
/* read data */
lsb = inb(dev->iobase + DMM32AT_AILSB);
@@ -453,10 +445,8 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if ((status & DMM32AT_STATUS) == 0)
break;
}
- if (i == 40000) {
- printk(KERN_WARNING "dmm32at: timeout\n");
+ if (i == 40000)
return -ETIMEDOUT;
- }
if (devpriv->ai_scans_left > 1) {
/* start the clock and enable the interrupts */
@@ -467,8 +457,6 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
outb(0xff, dev->iobase + DMM32AT_CONV);
}
-/* printk("dmmat32 in command\n"); */
-
/* for(i=0;i<cmd->chanlist_len;i++) */
/* comedi_buf_put(s->async,i*100); */
@@ -556,7 +544,6 @@ static int dmm32at_ao_winsn(struct comedi_device *dev,
lo = data[i] & 0x00ff;
/* high byte also contains channel number */
hi = (data[i] >> 8) + chan * (1 << 6);
- /* printk("writing 0x%02x 0x%02x\n",hi,lo); */
/* write the low and high values to the board */
outb(lo, dev->iobase + DMM32AT_DACLSB);
outb(hi, dev->iobase + DMM32AT_DACMSB);
@@ -567,10 +554,9 @@ static int dmm32at_ao_winsn(struct comedi_device *dev,
if ((status & DMM32AT_DACBUSY) == 0)
break;
}
- if (i == 40000) {
- printk(KERN_WARNING "dmm32at: timeout\n");
+ if (i == 40000)
return -ETIMEDOUT;
- }
+
/* dummy read to update trigger the output */
status = inb(dev->iobase + DMM32AT_DACMSB);
@@ -682,9 +668,6 @@ static int dmm32at_attach(struct comedi_device *dev,
int ret;
struct comedi_subdevice *s;
unsigned char aihi, ailo, fifostat, aistat, intstat, airback;
- unsigned int irq;
-
- irq = it->options[1];
ret = comedi_request_region(dev, it->options[0], DMM32AT_MEMSIZE);
if (ret)
@@ -723,26 +706,17 @@ static int dmm32at_attach(struct comedi_device *dev,
intstat = inb(dev->iobase + DMM32AT_INTCLOCK);
airback = inb(dev->iobase + DMM32AT_AIRBACK);
- printk(KERN_DEBUG "dmm32at: lo=0x%02x hi=0x%02x fifostat=0x%02x\n",
- ailo, aihi, fifostat);
- printk(KERN_DEBUG
- "dmm32at: aistat=0x%02x intstat=0x%02x airback=0x%02x\n",
- aistat, intstat, airback);
-
if ((ailo != 0x00) || (aihi != 0x1f) || (fifostat != 0x80) ||
(aistat != 0x60 || (intstat != 0x00) || airback != 0x0c)) {
- printk(KERN_ERR "dmmat32: board detection failed\n");
+ dev_err(dev->class_dev, "board detection failed\n");
return -EIO;
}
- /* board is there, register interrupt */
- if (irq) {
- ret = request_irq(irq, dmm32at_isr, 0, dev->board_name, dev);
- if (ret < 0) {
- printk(KERN_ERR "dmm32at: irq conflict\n");
- return ret;
- }
- dev->irq = irq;
+ if (it->options[1]) {
+ ret = request_irq(it->options[1], dmm32at_isr, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
@@ -754,20 +728,22 @@ static int dmm32at_attach(struct comedi_device *dev,
return ret;
s = &dev->subdevices[0];
- dev->read_subdev = s;
/* analog input subdevice */
s->type = COMEDI_SUBD_AI;
/* we support single-ended (ground) and differential */
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
s->n_chan = 32;
s->maxdata = 0xffff;
s->range_table = &dmm32at_airanges;
- s->len_chanlist = 32; /* This is the maximum chanlist length that
- the board can handle */
s->insn_read = dmm32at_ai_rinsn;
- s->do_cmd = dmm32at_ai_cmd;
- s->do_cmdtest = dmm32at_ai_cmdtest;
- s->cancel = dmm32at_ai_cancel;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 32;
+ s->do_cmd = dmm32at_ai_cmd;
+ s->do_cmdtest = dmm32at_ai_cmdtest;
+ s->cancel = dmm32at_ai_cancel;
+ }
s = &dev->subdevices[1];
/* analog output subdevice */
@@ -799,11 +775,7 @@ static int dmm32at_attach(struct comedi_device *dev,
s->insn_bits = dmm32at_dio_insn_bits;
s->insn_config = dmm32at_dio_insn_config;
- /* success */
- printk(KERN_INFO "comedi%d: dmm32at: attached\n", dev->minor);
-
- return 1;
-
+ return 0;
}
static struct comedi_driver dmm32at_driver = {
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 811c8c5..d4d4e4b 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -90,58 +90,42 @@ Configuration options:
#if 0
/* ignore 'defined but not used' warning */
-static const struct comedi_lrange range_dt2801_ai_pgh_bipolar = { 4, {
- RANGE(-10,
- 10),
- RANGE(-5,
- 5),
- RANGE
- (-2.5,
- 2.5),
- RANGE
- (-1.25,
- 1.25),
- }
+static const struct comedi_lrange range_dt2801_ai_pgh_bipolar = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25)
+ }
};
#endif
-static const struct comedi_lrange range_dt2801_ai_pgl_bipolar = { 4, {
- RANGE(-10,
- 10),
- RANGE(-1,
- 1),
- RANGE
- (-0.1,
- 0.1),
- RANGE
- (-0.02,
- 0.02),
- }
+static const struct comedi_lrange range_dt2801_ai_pgl_bipolar = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.02)
+ }
};
#if 0
/* ignore 'defined but not used' warning */
-static const struct comedi_lrange range_dt2801_ai_pgh_unipolar = { 4, {
- RANGE(0,
- 10),
- RANGE(0,
- 5),
- RANGE(0,
- 2.5),
- RANGE(0,
- 1.25),
- }
+static const struct comedi_lrange range_dt2801_ai_pgh_unipolar = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
#endif
-static const struct comedi_lrange range_dt2801_ai_pgl_unipolar = { 4, {
- RANGE(0,
- 10),
- RANGE(0,
- 1),
- RANGE(0,
- 0.1),
- RANGE(0,
- 0.02),
- }
+static const struct comedi_lrange range_dt2801_ai_pgl_unipolar = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.02)
+ }
};
struct dt2801_board {
@@ -289,13 +273,6 @@ static int dt2801_writedata(struct comedi_device *dev, unsigned int data)
outb_p(data & 0xff, dev->iobase + DT2801_DATA);
return 0;
}
-#if 0
- if (stat & DT_S_READY) {
- printk
- ("dt2801: ready flag set (bad!) in dt2801_writedata()\n");
- return -EIO;
- }
-#endif
} while (--timeout > 0);
return -ETIME;
@@ -343,11 +320,11 @@ static int dt2801_writecmd(struct comedi_device *dev, int command)
stat = inb_p(dev->iobase + DT2801_STATUS);
if (stat & DT_S_COMPOSITE_ERROR) {
- printk
- ("dt2801: composite-error in dt2801_writecmd(), ignoring\n");
+ dev_dbg(dev->class_dev,
+ "composite-error in %s, ignoring\n", __func__);
}
if (!(stat & DT_S_READY))
- printk("dt2801: !ready in dt2801_writecmd(), ignoring\n");
+ dev_dbg(dev->class_dev, "!ready in %s, ignoring\n", __func__);
outb_p(command, dev->iobase + DT2801_CMD);
return 0;
@@ -359,17 +336,12 @@ static int dt2801_reset(struct comedi_device *dev)
unsigned int stat;
int timeout;
- DPRINTK("dt2801: resetting board...\n");
- DPRINTK("fingerprint: 0x%02x 0x%02x\n", inb_p(dev->iobase),
- inb_p(dev->iobase + 1));
-
/* pull random data from data port */
inb_p(dev->iobase + DT2801_DATA);
inb_p(dev->iobase + DT2801_DATA);
inb_p(dev->iobase + DT2801_DATA);
inb_p(dev->iobase + DT2801_DATA);
- DPRINTK("dt2801: stop\n");
/* dt2801_writecmd(dev,DT_C_STOP); */
outb_p(DT_C_STOP, dev->iobase + DT2801_CMD);
@@ -382,12 +354,10 @@ static int dt2801_reset(struct comedi_device *dev)
break;
} while (timeout--);
if (!timeout)
- printk("dt2801: timeout 1 status=0x%02x\n", stat);
+ dev_dbg(dev->class_dev, "timeout 1 status=0x%02x\n", stat);
- /* printk("dt2801: reading dummy\n"); */
/* dt2801_readdata(dev,&board_code); */
- DPRINTK("dt2801: reset\n");
outb_p(DT_C_RESET, dev->iobase + DT2801_CMD);
/* dt2801_writecmd(dev,DT_C_RESET); */
@@ -399,13 +369,10 @@ static int dt2801_reset(struct comedi_device *dev)
break;
} while (timeout--);
if (!timeout)
- printk("dt2801: timeout 2 status=0x%02x\n", stat);
+ dev_dbg(dev->class_dev, "timeout 2 status=0x%02x\n", stat);
- DPRINTK("dt2801: reading code\n");
dt2801_readdata(dev, &board_code);
- DPRINTK("dt2801: ok. code=0x%02x\n", board_code);
-
return board_code;
}
@@ -465,12 +432,12 @@ static int dt2801_error(struct comedi_device *dev, int stat)
{
if (stat < 0) {
if (stat == -ETIME)
- printk("dt2801: timeout\n");
+ dev_dbg(dev->class_dev, "timeout\n");
else
- printk("dt2801: error %d\n", stat);
+ dev_dbg(dev->class_dev, "error %d\n", stat);
return stat;
}
- printk("dt2801: error status 0x%02x, resetting...\n", stat);
+ dev_dbg(dev->class_dev, "error status 0x%02x, resetting...\n", stat);
dt2801_reset(dev);
dt2801_reset(dev);
@@ -601,8 +568,8 @@ static int dt2801_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (boardtypes[type].boardcode == board_code)
goto havetype;
}
- printk("dt2801: unrecognized board code=0x%02x, contact author\n",
- board_code);
+ dev_dbg(dev->class_dev,
+ "unrecognized board code=0x%02x, contact author\n", board_code);
type = 0;
havetype:
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index 0ca02fa..4271903 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -42,60 +42,59 @@ Configuration options:
*/
#include <linux/module.h>
-#include <linux/interrupt.h>
#include "../comedidev.h"
static const struct comedi_lrange range_dt2811_pgh_ai_5_unipolar = {
4, {
- RANGE(0, 5),
- RANGE(0, 2.5),
- RANGE(0, 1.25),
- RANGE(0, 0.625)
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25),
+ UNI_RANGE(0.625)
}
};
static const struct comedi_lrange range_dt2811_pgh_ai_2_5_bipolar = {
4, {
- RANGE(-2.5, 2.5),
- RANGE(-1.25, 1.25),
- RANGE(-0.625, 0.625),
- RANGE(-0.3125, 0.3125)
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ BIP_RANGE(0.3125)
}
};
static const struct comedi_lrange range_dt2811_pgh_ai_5_bipolar = {
4, {
- RANGE(-5, 5),
- RANGE(-2.5, 2.5),
- RANGE(-1.25, 1.25),
- RANGE(-0.625, 0.625)
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
}
};
static const struct comedi_lrange range_dt2811_pgl_ai_5_unipolar = {
4, {
- RANGE(0, 5),
- RANGE(0, 0.5),
- RANGE(0, 0.05),
- RANGE(0, 0.01)
+ UNI_RANGE(5),
+ UNI_RANGE(0.5),
+ UNI_RANGE(0.05),
+ UNI_RANGE(0.01)
}
};
static const struct comedi_lrange range_dt2811_pgl_ai_2_5_bipolar = {
4, {
- RANGE(-2.5, 2.5),
- RANGE(-0.25, 0.25),
- RANGE(-0.025, 0.025),
- RANGE(-0.005, 0.005)
+ BIP_RANGE(2.5),
+ BIP_RANGE(0.25),
+ BIP_RANGE(0.025),
+ BIP_RANGE(0.005)
}
};
static const struct comedi_lrange range_dt2811_pgl_ai_5_bipolar = {
4, {
- RANGE(-5, 5),
- RANGE(-0.5, 0.5),
- RANGE(-0.05, 0.05),
- RANGE(-0.01, 0.01)
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.01)
}
};
@@ -227,33 +226,6 @@ static const struct comedi_lrange *dac_range_types[] = {
#define DT2811_TIMEOUT 5
-#if 0
-static irqreturn_t dt2811_interrupt(int irq, void *d)
-{
- int lo, hi;
- int data;
- struct comedi_device *dev = d;
- struct dt2811_private *devpriv = dev->private;
-
- if (!dev->attached) {
- comedi_error(dev, "spurious interrupt");
- return IRQ_HANDLED;
- }
-
- lo = inb(dev->iobase + DT2811_ADDATLO);
- hi = inb(dev->iobase + DT2811_ADDATHI);
-
- data = lo + (hi << 8);
-
- if (!(--devpriv->ntrig)) {
- /* how to turn off acquisition */
- s->async->events |= COMEDI_SB_EOA;
- }
- comedi_event(dev, s);
- return IRQ_HANDLED;
-}
-#endif
-
static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
@@ -278,35 +250,6 @@ static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
return i;
}
-#if 0
-/* Wow. This is code from the Comedi stone age. But it hasn't been
- * replaced, so I'll let it stay. */
-int dt2811_adtrig(kdev_t minor, comedi_adtrig *adtrig)
-{
- struct comedi_device *dev = comedi_devices + minor;
-
- if (adtrig->n < 1)
- return 0;
- dev->curadchan = adtrig->chan;
- switch (dev->i_admode) {
- case COMEDI_MDEMAND:
- dev->ntrig = adtrig->n - 1;
- /* not necessary */
- /*printk("dt2811: AD soft trigger\n"); */
- /*outb(DT2811_CLRERROR|DT2811_INTENB,
- dev->iobase+DT2811_ADCSR); */
- outb(dev->curadchan, dev->iobase + DT2811_ADGCR);
- do_gettimeofday(&trigtime);
- break;
- case COMEDI_MCONTS:
- dev->ntrig = adtrig->n;
- break;
- }
-
- return 0;
-}
-#endif
-
static int dt2811_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
@@ -386,10 +329,7 @@ static int dt2811_do_insn_bits(struct comedi_device *dev,
*/
static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
- /* int i, irq; */
- /* unsigned long irqs; */
- /* long flags; */
-
+ /* int i; */
const struct dt2811_board *board = comedi_board(dev);
struct dt2811_private *devpriv;
int ret;
@@ -406,45 +346,6 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
i = inb(dev->iobase + DT2811_ADDATHI);
#endif
-#if 0
- irq = it->options[1];
- if (irq < 0) {
- save_flags(flags);
- sti();
- irqs = probe_irq_on();
-
- outb(DT2811_CLRERROR | DT2811_INTENB,
- dev->iobase + DT2811_ADCSR);
- outb(0, dev->iobase + DT2811_ADGCR);
-
- udelay(100);
-
- irq = probe_irq_off(irqs);
- restore_flags(flags);
-
- /*outb(DT2811_CLRERROR|DT2811_INTENB,
- dev->iobase+DT2811_ADCSR);*/
-
- if (inb(dev->iobase + DT2811_ADCSR) & DT2811_ADERROR)
- printk(KERN_ERR "error probing irq (bad)\n");
- dev->irq = 0;
- if (irq > 0) {
- i = inb(dev->iobase + DT2811_ADDATLO);
- i = inb(dev->iobase + DT2811_ADDATHI);
- printk(KERN_INFO "(irq = %d)\n", irq);
- ret = request_irq(irq, dt2811_interrupt, 0,
- dev->board_name, dev);
- if (ret < 0)
- return -EIO;
- dev->irq = irq;
- } else if (irq == 0) {
- printk(KERN_INFO "(no irq)\n");
- } else {
- printk(KERN_ERR "( multiple irq's -- this is bad! )\n");
- }
- }
-#endif
-
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c
index 6514b9e..abad6e4 100644
--- a/drivers/staging/comedi/drivers/dt2814.c
+++ b/drivers/staging/comedi/drivers/dt2814.c
@@ -80,15 +80,12 @@ static int dt2814_ai_insn_read(struct comedi_device *dev,
outb(chan, dev->iobase + DT2814_CSR);
for (i = 0; i < DT2814_TIMEOUT; i++) {
status = inb(dev->iobase + DT2814_CSR);
- printk(KERN_INFO "dt2814: status: %02x\n", status);
udelay(10);
if (status & DT2814_FINISH)
break;
}
- if (i >= DT2814_TIMEOUT) {
- printk(KERN_INFO "dt2814: status: %02x\n", status);
+ if (i >= DT2814_TIMEOUT)
return -ETIMEDOUT;
- }
hi = inb(dev->iobase + DT2814_DATA);
lo = inb(dev->iobase + DT2814_DATA);
@@ -200,7 +197,7 @@ static irqreturn_t dt2814_interrupt(int irq, void *d)
int lo, hi;
struct comedi_device *dev = d;
struct dt2814_private *devpriv = dev->private;
- struct comedi_subdevice *s;
+ struct comedi_subdevice *s = dev->read_subdev;
int data;
if (!dev->attached) {
@@ -208,8 +205,6 @@ static irqreturn_t dt2814_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
- s = &dev->subdevices[0];
-
hi = inb(dev->iobase + DT2814_DATA);
lo = inb(dev->iobase + DT2814_DATA);
@@ -238,9 +233,9 @@ static irqreturn_t dt2814_interrupt(int irq, void *d)
static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct dt2814_private *devpriv;
- int i, irq;
- int ret;
struct comedi_subdevice *s;
+ int ret;
+ int i;
ret = comedi_request_region(dev, it->options[0], DT2814_SIZE);
if (ret)
@@ -249,49 +244,17 @@ static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
outb(0, dev->iobase + DT2814_CSR);
udelay(100);
if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR) {
- printk(KERN_ERR "reset error (fatal)\n");
+ dev_err(dev->class_dev, "reset error (fatal)\n");
return -EIO;
}
i = inb(dev->iobase + DT2814_DATA);
i = inb(dev->iobase + DT2814_DATA);
- irq = it->options[1];
-#if 0
- if (irq < 0) {
- save_flags(flags);
- sti();
- irqs = probe_irq_on();
-
- outb(0, dev->iobase + DT2814_CSR);
-
- udelay(100);
-
- irq = probe_irq_off(irqs);
- restore_flags(flags);
- if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR)
- printk(KERN_DEBUG "error probing irq (bad)\n");
-
-
- i = inb(dev->iobase + DT2814_DATA);
- i = inb(dev->iobase + DT2814_DATA);
- }
-#endif
- dev->irq = 0;
- if (irq > 0) {
- if (request_irq(irq, dt2814_interrupt, 0, "dt2814", dev)) {
- printk(KERN_WARNING "(irq %d unavailable)\n", irq);
- } else {
- printk(KERN_INFO "( irq = %d )\n", irq);
- dev->irq = irq;
- }
- } else if (irq == 0) {
- printk(KERN_WARNING "(no irq)\n");
- } else {
-#if 0
- printk(KERN_DEBUG "(probe returned multiple irqs--bad)\n");
-#else
- printk(KERN_WARNING "(irq probe not implemented)\n");
-#endif
+ if (it->options[1]) {
+ ret = request_irq(it->options[1], dt2814_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
ret = comedi_alloc_subdevices(dev, 1);
@@ -303,16 +266,19 @@ static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -ENOMEM;
s = &dev->subdevices[0];
- dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = 16; /* XXX */
- s->len_chanlist = 1;
s->insn_read = dt2814_ai_insn_read;
- s->do_cmd = dt2814_ai_cmd;
- s->do_cmdtest = dt2814_ai_cmdtest;
s->maxdata = 0xfff;
s->range_table = &range_unknown; /* XXX */
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 1;
+ s->do_cmd = dt2814_ai_cmd;
+ s->do_cmdtest = dt2814_ai_cmdtest;
+ }
return 0;
}
diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
index 34040f0..ee24717 100644
--- a/drivers/staging/comedi/drivers/dt2815.c
+++ b/drivers/staging/comedi/drivers/dt2815.c
@@ -107,8 +107,9 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
status = dt2815_wait_for_status(dev, 0x00);
if (status != 0) {
- printk(KERN_WARNING "dt2815: failed to write low byte "
- "on %d reason %x\n", chan, status);
+ dev_dbg(dev->class_dev,
+ "failed to write low byte on %d reason %x\n",
+ chan, status);
return -EBUSY;
}
@@ -116,8 +117,9 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
status = dt2815_wait_for_status(dev, 0x10);
if (status != 0x10) {
- printk(KERN_WARNING "dt2815: failed to write high byte "
- "on %d reason %x\n", chan, status);
+ dev_dbg(dev->class_dev,
+ "failed to write high byte on %d reason %x\n",
+ chan, status);
return -EBUSY;
}
devpriv->ao_readback[chan] = data[i];
@@ -200,12 +202,13 @@ static int dt2815_attach(struct comedi_device *dev, struct comedi_devconfig *it)
unsigned int program;
program = (it->options[4] & 0x3) << 3 | 0x7;
outb(program, dev->iobase + DT2815_DATA);
- printk(KERN_INFO ", program: 0x%x (@t=%d)\n",
- program, i);
+ dev_dbg(dev->class_dev, "program: 0x%x (@t=%d)\n",
+ program, i);
break;
} else if (status != 0x00) {
- printk(KERN_WARNING "dt2815: unexpected status 0x%x "
- "(@t=%d)\n", status, i);
+ dev_dbg(dev->class_dev,
+ "unexpected status 0x%x (@t=%d)\n",
+ status, i);
if (status & 0x60)
outb(0x00, dev->iobase + DT2815_STATUS);
}
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index a01e6b5..895f73a 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -63,8 +63,6 @@ Notes:
#include "comedi_fc.h"
-#define DEBUG
-
#define DT2821_TIMEOUT 100 /* 500 us */
#define DT2821_SIZE 0x10
@@ -156,55 +154,55 @@ Notes:
static const struct comedi_lrange range_dt282x_ai_lo_bipolar = {
4, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-2.5, 2.5),
- RANGE(-1.25, 1.25)
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25)
}
};
static const struct comedi_lrange range_dt282x_ai_lo_unipolar = {
4, {
- RANGE(0, 10),
- RANGE(0, 5),
- RANGE(0, 2.5),
- RANGE(0, 1.25)
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
}
};
static const struct comedi_lrange range_dt282x_ai_5_bipolar = {
4, {
- RANGE(-5, 5),
- RANGE(-2.5, 2.5),
- RANGE(-1.25, 1.25),
- RANGE(-0.625, 0.625)
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
}
};
static const struct comedi_lrange range_dt282x_ai_5_unipolar = {
4, {
- RANGE(0, 5),
- RANGE(0, 2.5),
- RANGE(0, 1.25),
- RANGE(0, 0.625),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25),
+ UNI_RANGE(0.625)
}
};
static const struct comedi_lrange range_dt282x_ai_hi_bipolar = {
4, {
- RANGE(-10, 10),
- RANGE(-1, 1),
- RANGE(-0.1, 0.1),
- RANGE(-0.02, 0.02)
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.02)
}
};
static const struct comedi_lrange range_dt282x_ai_hi_unipolar = {
4, {
- RANGE(0, 10),
- RANGE(0, 1),
- RANGE(0, 0.1),
- RANGE(0, 0.02)
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.02)
}
};
@@ -308,15 +306,15 @@ static void dt282x_munge(struct comedi_device *dev, unsigned short *buf,
static void dt282x_ao_dma_interrupt(struct comedi_device *dev)
{
struct dt282x_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->write_subdev;
void *ptr;
int size;
int i;
- struct comedi_subdevice *s = &dev->subdevices[1];
outw(devpriv->supcsr | DT2821_CLRDMADNE, dev->iobase + DT2821_SUPCSR);
if (!s->async->prealloc_buf) {
- printk(KERN_ERR "async->data disappeared. dang!\n");
+ dev_err(dev->class_dev, "no buffer in %s\n", __func__);
return;
}
@@ -329,7 +327,7 @@ static void dt282x_ao_dma_interrupt(struct comedi_device *dev)
size = cfc_read_array_from_buffer(s, ptr, devpriv->dma_maxsize);
if (size == 0) {
- printk(KERN_ERR "dt282x: AO underrun\n");
+ dev_err(dev->class_dev, "AO underrun\n");
dt282x_ao_cancel(dev, s);
s->async->events |= COMEDI_CB_OVERFLOW;
return;
@@ -341,16 +339,16 @@ static void dt282x_ao_dma_interrupt(struct comedi_device *dev)
static void dt282x_ai_dma_interrupt(struct comedi_device *dev)
{
struct dt282x_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
void *ptr;
int size;
int i;
int ret;
- struct comedi_subdevice *s = &dev->subdevices[0];
outw(devpriv->supcsr | DT2821_CLRDMADNE, dev->iobase + DT2821_SUPCSR);
if (!s->async->prealloc_buf) {
- printk(KERN_ERR "async->data disappeared. dang!\n");
+ dev_err(dev->class_dev, "no buffer in %s\n", __func__);
return;
}
@@ -371,7 +369,7 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev)
devpriv->nread -= size / 2;
if (devpriv->nread < 0) {
- printk(KERN_INFO "dt282x: off by one\n");
+ dev_info(dev->class_dev, "nread off by one\n");
devpriv->nread = 0;
}
if (!devpriv->nread) {
@@ -450,8 +448,8 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct dt282x_private *devpriv = dev->private;
- struct comedi_subdevice *s;
- struct comedi_subdevice *s_ao;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_subdevice *s_ao = dev->write_subdev;
unsigned int supcsr, adcsr, dacsr;
int handled = 0;
@@ -460,8 +458,6 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
- s = &dev->subdevices[0];
- s_ao = &dev->subdevices[1];
adcsr = inw(dev->iobase + DT2821_ADCSR);
dacsr = inw(dev->iobase + DT2821_DACSR);
supcsr = inw(dev->iobase + DT2821_SUPCSR);
@@ -481,13 +477,6 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
handled = 1;
}
if (dacsr & DT2821_DAERR) {
-#if 0
- static int warn = 5;
- if (--warn <= 0) {
- disable_irq(dev->irq);
- printk(KERN_INFO "disabling irq\n");
- }
-#endif
comedi_error(dev, "D/A error");
dt282x_ao_cancel(dev, s_ao);
s->async->events |= COMEDI_CB_ERROR;
@@ -520,8 +509,7 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
}
#endif
comedi_event(dev, s);
- /* printk("adcsr=0x%02x dacsr-0x%02x supcsr=0x%02x\n",
- adcsr, dacsr, supcsr); */
+
return IRQ_RETVAL(handled);
}
@@ -894,7 +882,7 @@ static int dt282x_ao_inttrig(struct comedi_device *dev,
size = cfc_read_array_from_buffer(s, devpriv->dma[0].buf,
devpriv->dma_maxsize);
if (size == 0) {
- printk(KERN_ERR "dt282x: AO underrun\n");
+ dev_err(dev->class_dev, "AO underrun\n");
return -EPIPE;
}
prep_ao_dma(dev, 0, size);
@@ -902,7 +890,7 @@ static int dt282x_ao_inttrig(struct comedi_device *dev,
size = cfc_read_array_from_buffer(s, devpriv->dma[1].buf,
devpriv->dma_maxsize);
if (size == 0) {
- printk(KERN_ERR "dt282x: AO underrun\n");
+ dev_err(dev->class_dev, "AO underrun\n");
return -EPIPE;
}
prep_ao_dma(dev, 1, size);
@@ -1062,10 +1050,8 @@ static int dt282x_grab_dma(struct comedi_device *dev, int dma1, int dma2)
devpriv->usedma = 0;
- if (!dma1 && !dma2) {
- printk(KERN_ERR " (no dma)");
+ if (!dma1 && !dma2)
return 0;
- }
if (dma1 == dma2 || dma1 < 5 || dma2 < 5 || dma1 > 7 || dma2 > 7)
return -EINVAL;
@@ -1090,12 +1076,8 @@ static int dt282x_grab_dma(struct comedi_device *dev, int dma1, int dma2)
devpriv->dma_maxsize = PAGE_SIZE;
devpriv->dma[0].buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
devpriv->dma[1].buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
- if (!devpriv->dma[0].buf || !devpriv->dma[1].buf) {
- printk(KERN_ERR " can't get DMA memory");
+ if (!devpriv->dma[0].buf || !devpriv->dma[1].buf)
return -ENOMEM;
- }
-
- printk(KERN_INFO " (dma=%d,%d)", dma1, dma2);
devpriv->usedma = 1;
@@ -1120,9 +1102,9 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct dt282x_board *board = comedi_board(dev);
struct dt282x_private *devpriv;
- int i, irq;
- int ret;
struct comedi_subdevice *s;
+ int ret;
+ int i;
ret = comedi_request_region(dev, it->options[0], DT2821_SIZE);
if (ret)
@@ -1130,14 +1112,6 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
outw(DT2821_BDINIT, dev->iobase + DT2821_SUPCSR);
i = inw(dev->iobase + DT2821_ADCSR);
-#ifdef DEBUG
- printk(KERN_DEBUG " fingerprint=%x,%x,%x,%x,%x",
- inw(dev->iobase + DT2821_ADCSR),
- inw(dev->iobase + DT2821_CHANCSR),
- inw(dev->iobase + DT2821_DACSR),
- inw(dev->iobase + DT2821_SUPCSR),
- inw(dev->iobase + DT2821_TMRCTR));
-#endif
if (((inw(dev->iobase + DT2821_ADCSR) & DT2821_ADCSR_MASK)
!= DT2821_ADCSR_VAL) ||
@@ -1149,58 +1123,28 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
!= DT2821_SUPCSR_VAL) ||
((inw(dev->iobase + DT2821_TMRCTR) & DT2821_TMRCTR_MASK)
!= DT2821_TMRCTR_VAL)) {
- printk(KERN_ERR " board not found");
+ dev_err(dev->class_dev, "board not found\n");
return -EIO;
}
/* should do board test */
- irq = it->options[opt_irq];
-#if 0
- if (irq < 0) {
- unsigned long flags;
- int irqs;
-
- save_flags(flags);
- sti();
- irqs = probe_irq_on();
-
- /* trigger interrupt */
-
- udelay(100);
-
- irq = probe_irq_off(irqs);
- restore_flags(flags);
- if (0 /* error */)
- printk(KERN_ERR " error probing irq (bad)");
- }
-#endif
- if (irq > 0) {
- printk(KERN_INFO " ( irq = %d )", irq);
- ret = request_irq(irq, dt282x_interrupt, 0,
+ if (it->options[opt_irq] > 0) {
+ ret = request_irq(it->options[opt_irq], dt282x_interrupt, 0,
dev->board_name, dev);
- if (ret < 0) {
- printk(KERN_ERR " failed to get irq\n");
- return -EIO;
- }
- dev->irq = irq;
- } else if (irq == 0) {
- printk(KERN_INFO " (no irq)");
- } else {
-#if 0
- printk(KERN_INFO " (probe returned multiple irqs--bad)");
-#else
- printk(KERN_INFO " (irq probe not implemented)");
-#endif
+ if (ret == 0)
+ dev->irq = it->options[opt_irq];
}
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- ret = dt282x_grab_dma(dev, it->options[opt_dma1],
- it->options[opt_dma2]);
- if (ret < 0)
- return ret;
+ if (dev->irq) {
+ ret = dt282x_grab_dma(dev, it->options[opt_dma1],
+ it->options[opt_dma2]);
+ if (ret < 0)
+ return ret;
+ }
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
@@ -1208,22 +1152,25 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s = &dev->subdevices[0];
- dev->read_subdev = s;
/* ai subdevice */
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ |
+ s->subdev_flags = SDF_READABLE |
((it->options[opt_diff]) ? SDF_DIFF : SDF_COMMON);
s->n_chan =
(it->options[opt_diff]) ? board->adchan_di : board->adchan_se;
s->insn_read = dt282x_ai_insn_read;
- s->do_cmdtest = dt282x_ai_cmdtest;
- s->do_cmd = dt282x_ai_cmd;
- s->cancel = dt282x_ai_cancel;
s->maxdata = (1 << board->adbits) - 1;
- s->len_chanlist = 16;
s->range_table =
opt_ai_range_lkup(board->ispgl, it->options[opt_ai_range]);
devpriv->ad_2scomp = it->options[opt_ai_twos];
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 16;
+ s->do_cmdtest = dt282x_ai_cmdtest;
+ s->do_cmd = dt282x_ai_cmd;
+ s->cancel = dt282x_ai_cancel;
+ }
s = &dev->subdevices[1];
@@ -1231,15 +1178,10 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (s->n_chan) {
/* ao subsystem */
s->type = COMEDI_SUBD_AO;
- dev->write_subdev = s;
- s->subdev_flags = SDF_WRITABLE | SDF_CMD_WRITE;
+ s->subdev_flags = SDF_WRITABLE;
s->insn_read = dt282x_ao_insn_read;
s->insn_write = dt282x_ao_insn_write;
- s->do_cmdtest = dt282x_ao_cmdtest;
- s->do_cmd = dt282x_ao_cmd;
- s->cancel = dt282x_ao_cancel;
s->maxdata = (1 << board->dabits) - 1;
- s->len_chanlist = 2;
s->range_table_list = devpriv->darangelist;
devpriv->darangelist[0] =
opt_ao_range_lkup(it->options[opt_ao0_range]);
@@ -1247,6 +1189,14 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
opt_ao_range_lkup(it->options[opt_ao1_range]);
devpriv->da0_2scomp = it->options[opt_ao0_twos];
devpriv->da1_2scomp = it->options[opt_ao1_twos];
+ if (dev->irq) {
+ dev->write_subdev = s;
+ s->subdev_flags |= SDF_CMD_WRITE;
+ s->len_chanlist = 2;
+ s->do_cmdtest = dt282x_ao_cmdtest;
+ s->do_cmd = dt282x_ao_cmd;
+ s->cancel = dt282x_ao_cancel;
+ }
} else {
s->type = COMEDI_SUBD_UNUSED;
}
@@ -1261,8 +1211,6 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 1;
s->range_table = &range_digital;
- printk(KERN_INFO "\n");
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 292226e..f52a447 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -48,8 +48,6 @@ AO commands are not supported.
you the docs without one, also.
*/
-#define DEBUG 1
-
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -253,24 +251,6 @@ struct dt3k_private {
unsigned int ai_rear;
};
-#ifdef DEBUG
-static char *intr_flags[] = {
- "AdFull", "AdSwError", "AdHwError", "DaEmpty",
- "DaSwError", "DaHwError", "CtDone", "CmDone",
-};
-
-static void debug_intr_flags(unsigned int flags)
-{
- int i;
- printk(KERN_DEBUG "dt3k: intr_flags:");
- for (i = 0; i < 8; i++) {
- if (flags & (1 << i))
- printk(KERN_CONT " %s", intr_flags[i]);
- }
- printk(KERN_CONT "\n");
-}
-#endif
-
#define TIMEOUT 100
static void dt3k_send_cmd(struct comedi_device *dev, unsigned int cmd)
@@ -372,17 +352,13 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct dt3k_private *devpriv = dev->private;
- struct comedi_subdevice *s;
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned int status;
if (!dev->attached)
return IRQ_NONE;
- s = &dev->subdevices[0];
status = readw(devpriv->io_addr + DPR_Intr_Flag);
-#ifdef DEBUG
- debug_intr_flags(status);
-#endif
if (status & DT3000_ADFULL) {
dt3k_ai_empty_fifo(dev, s);
@@ -725,29 +701,33 @@ static int dt3000_auto_attach(struct comedi_device *dev,
if (!devpriv->io_addr)
return -ENOMEM;
- ret = request_irq(pcidev->irq, dt3k_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret)
- return ret;
- dev->irq = pcidev->irq;
+ if (pcidev->irq) {
+ ret = request_irq(pcidev->irq, dt3k_interrupt, IRQF_SHARED,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = pcidev->irq;
+ }
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
s = &dev->subdevices[0];
- dev->read_subdev = s;
/* ai subdevice */
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
s->n_chan = this_board->adchan;
s->insn_read = dt3k_ai_insn;
s->maxdata = (1 << this_board->adbits) - 1;
- s->len_chanlist = 512;
s->range_table = &range_dt3000_ai; /* XXX */
- s->do_cmd = dt3k_ai_cmd;
- s->do_cmdtest = dt3k_ai_cmdtest;
- s->cancel = dt3k_ai_cancel;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 512;
+ s->do_cmd = dt3k_ai_cmd;
+ s->do_cmdtest = dt3k_ai_cmdtest;
+ s->cancel = dt3k_ai_cancel;
+ }
s = &dev->subdevices[1];
/* ao subsystem */
@@ -818,7 +798,7 @@ static int dt3000_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &dt3000_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(dt3000_pci_table) = {
+static const struct pci_device_id dt3000_pci_table[] = {
{ PCI_VDEVICE(DT, 0x0022), BOARD_DT3001 },
{ PCI_VDEVICE(DT, 0x0023), BOARD_DT3002 },
{ PCI_VDEVICE(DT, 0x0024), BOARD_DT3003 },
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 73af600..b3aeb6f 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -41,7 +41,6 @@ for my needs.
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
index f2a9f1c..f224825 100644
--- a/drivers/staging/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c
@@ -42,11 +42,12 @@
#define READ_TIMEOUT 50
-static const struct comedi_lrange range_pci1050_ai = { 3, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- UNI_RANGE(10)
- }
+static const struct comedi_lrange range_pci1050_ai = {
+ 3, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ UNI_RANGE(10)
+ }
};
static const char range_codes_pci1050_ai[] = { 0x00, 0x10, 0x30 };
@@ -90,8 +91,7 @@ static int dyna_pci10xx_insn_read_ai(struct comedi_device *dev,
goto conv_finish;
}
data[n] = 0;
- printk(KERN_DEBUG "comedi: dyna_pci10xx: "
- "timeout reading analog input\n");
+ dev_dbg(dev->class_dev, "timeout reading analog input\n");
continue;
conv_finish:
/* mask the first 4 bits - EOC bits */
@@ -260,7 +260,7 @@ static int dyna_pci10xx_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(dyna_pci10xx_pci_table) = {
+static const struct pci_device_id dyna_pci10xx_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PLX, 0x1050) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c
index e3ff4c4..a99ddf0 100644
--- a/drivers/staging/comedi/drivers/fl512.c
+++ b/drivers/staging/comedi/drivers/fl512.c
@@ -16,8 +16,6 @@ Configuration options:
[0] - I/O port base address
*/
-#define DEBUG 0
-
#include <linux/module.h>
#include "../comedidev.h"
@@ -28,15 +26,16 @@ struct fl512_private {
unsigned short ao_readback[2];
};
-static const struct comedi_lrange range_fl512 = { 4, {
- BIP_RANGE(0.5),
- BIP_RANGE(1),
- BIP_RANGE(5),
- BIP_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(5),
- UNI_RANGE(10),
- }
+static const struct comedi_lrange range_fl512 = {
+ 4, {
+ BIP_RANGE(0.5),
+ BIP_RANGE(1),
+ BIP_RANGE(5),
+ BIP_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(5),
+ UNI_RANGE(10)
+ }
};
/*
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index 559bf55..de60a28 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -60,15 +60,6 @@ static int hpdi_cancel(struct comedi_device *dev, struct comedi_subdevice *s);
static irqreturn_t handle_interrupt(int irq, void *d);
static int dio_config_block_size(struct comedi_device *dev, unsigned int *data);
-#undef HPDI_DEBUG /* disable debugging messages */
-/* #define HPDI_DEBUG enable debugging code */
-
-#ifdef HPDI_DEBUG
-#define DEBUG_PRINT(format, args...) pr_debug(format , ## args)
-#else
-#define DEBUG_PRINT(format, args...) no_printk(pr_fmt(format), ## args)
-#endif
-
#define TIMER_BASE 50 /* 20MHz master clock */
#define DMA_BUFFER_SIZE 0x10000
#define NUM_DMA_BUFFERS 4
@@ -260,32 +251,6 @@ static void init_plx9080(struct comedi_device *dev)
uint32_t bits;
void __iomem *plx_iobase = devpriv->plx9080_iobase;
- /* plx9080 dump */
- DEBUG_PRINT(" plx interrupt status 0x%x\n",
- readl(plx_iobase + PLX_INTRCS_REG));
- DEBUG_PRINT(" plx id bits 0x%x\n", readl(plx_iobase + PLX_ID_REG));
- DEBUG_PRINT(" plx control reg 0x%x\n",
- readl(devpriv->plx9080_iobase + PLX_CONTROL_REG));
-
- DEBUG_PRINT(" plx revision 0x%x\n",
- readl(plx_iobase + PLX_REVISION_REG));
- DEBUG_PRINT(" plx dma channel 0 mode 0x%x\n",
- readl(plx_iobase + PLX_DMA0_MODE_REG));
- DEBUG_PRINT(" plx dma channel 1 mode 0x%x\n",
- readl(plx_iobase + PLX_DMA1_MODE_REG));
- DEBUG_PRINT(" plx dma channel 0 pci address 0x%x\n",
- readl(plx_iobase + PLX_DMA0_PCI_ADDRESS_REG));
- DEBUG_PRINT(" plx dma channel 0 local address 0x%x\n",
- readl(plx_iobase + PLX_DMA0_LOCAL_ADDRESS_REG));
- DEBUG_PRINT(" plx dma channel 0 transfer size 0x%x\n",
- readl(plx_iobase + PLX_DMA0_TRANSFER_SIZE_REG));
- DEBUG_PRINT(" plx dma channel 0 descriptor 0x%x\n",
- readl(plx_iobase + PLX_DMA0_DESCRIPTOR_REG));
- DEBUG_PRINT(" plx dma channel 0 command status 0x%x\n",
- readb(plx_iobase + PLX_DMA0_CS_REG));
- DEBUG_PRINT(" plx dma channel 0 threshold 0x%x\n",
- readl(plx_iobase + PLX_DMA0_THRESHOLD_REG));
- DEBUG_PRINT(" plx bigend 0x%x\n", readl(plx_iobase + PLX_BIGEND_REG));
#ifdef __BIG_ENDIAN
bits = BIGEND_DMA0 | BIGEND_DMA1;
#else
@@ -395,10 +360,6 @@ static int setup_dma_descriptors(struct comedi_device *dev,
if (transfer_size == 0)
return -1;
- DEBUG_PRINT(" transfer_size %i\n", transfer_size);
- DEBUG_PRINT(" descriptors at 0x%lx\n",
- (unsigned long)devpriv->dma_desc_phys_addr);
-
buffer_offset = 0;
buffer_index = 0;
for (i = 0; i < NUM_DMA_DESCRIPTORS &&
@@ -423,21 +384,11 @@ static int setup_dma_descriptors(struct comedi_device *dev,
buffer_offset = 0;
buffer_index++;
}
-
- DEBUG_PRINT(" desc %i\n", i);
- DEBUG_PRINT(" start addr virt 0x%p, phys 0x%lx\n",
- devpriv->desc_dio_buffer[i],
- (unsigned long)devpriv->dma_desc[i].
- pci_start_addr);
- DEBUG_PRINT(" next 0x%lx\n",
- (unsigned long)devpriv->dma_desc[i].next);
}
devpriv->num_dma_descriptors = i;
/* fix last descriptor to point back to first */
devpriv->dma_desc[i - 1].next =
cpu_to_le32(devpriv->dma_desc_phys_addr | next_bits);
- DEBUG_PRINT(" desc %i next fixup 0x%lx\n", i - 1,
- (unsigned long)devpriv->dma_desc[i - 1].next);
devpriv->block_size = transfer_size;
@@ -489,9 +440,6 @@ static int hpdi_auto_attach(struct comedi_device *dev,
return -ENOMEM;
}
- DEBUG_PRINT(" plx9080 remapped to 0x%p\n", devpriv->plx9080_iobase);
- DEBUG_PRINT(" hpdi remapped to 0x%p\n", devpriv->hpdi_iobase);
-
init_plx9080(dev);
/* get irq */
@@ -510,9 +458,6 @@ static int hpdi_auto_attach(struct comedi_device *dev,
devpriv->dio_buffer[i] =
pci_alloc_consistent(pcidev, DMA_BUFFER_SIZE,
&devpriv->dio_buffer_phys_addr[i]);
- DEBUG_PRINT("dio_buffer at virt 0x%p, phys 0x%lx\n",
- devpriv->dio_buffer[i],
- (unsigned long)devpriv->dio_buffer_phys_addr[i]);
}
/* allocate dma descriptors */
devpriv->dma_desc = pci_alloc_consistent(pcidev,
@@ -687,8 +632,6 @@ static int di_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
hpdi_writel(dev, RX_FIFO_RESET_BIT, BOARD_CONTROL_REG);
- DEBUG_PRINT("hpdi: in di_cmd\n");
-
abort_dma(dev, 0);
devpriv->dma_desc_index = 0;
@@ -725,7 +668,6 @@ static int di_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
writel(intr_bit(RX_FULL_INTR),
devpriv->hpdi_iobase + INTERRUPT_CONTROL_REG);
- DEBUG_PRINT("hpdi: starting rx\n");
hpdi_writel(dev, RX_ENABLE_BIT, BOARD_CONTROL_REG);
return 0;
@@ -778,11 +720,6 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
num_samples * sizeof(uint32_t));
devpriv->dma_desc_index++;
devpriv->dma_desc_index %= devpriv->num_dma_descriptors;
-
- DEBUG_PRINT("next desc addr 0x%lx\n", (unsigned long)
- devpriv->dma_desc[devpriv->dma_desc_index].
- next);
- DEBUG_PRINT("pci addr reg 0x%x\n", next_transfer_addr);
}
/* XXX check for buffer overrun somehow */
}
@@ -812,7 +749,6 @@ static irqreturn_t handle_interrupt(int irq, void *d)
async->events = 0;
if (hpdi_intr_status) {
- DEBUG_PRINT("hpdi: intr status 0x%x, ", hpdi_intr_status);
writel(hpdi_intr_status,
devpriv->hpdi_iobase + INTERRUPT_STATUS_REG);
}
@@ -823,10 +759,8 @@ static irqreturn_t handle_interrupt(int irq, void *d)
writeb((dma0_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- DEBUG_PRINT("dma0 status 0x%x\n", dma0_status);
if (dma0_status & PLX_DMA_EN_BIT)
drain_dma_buffers(dev, 0);
- DEBUG_PRINT(" cleared dma ch0 interrupt\n");
}
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -836,9 +770,6 @@ static irqreturn_t handle_interrupt(int irq, void *d)
if (plx_status & ICS_DMA1_A) { /* XXX *//* dma chan 1 interrupt */
writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
- DEBUG_PRINT("dma1 status 0x%x\n", dma1_status);
-
- DEBUG_PRINT(" cleared dma ch1 interrupt\n");
}
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -846,15 +777,11 @@ static irqreturn_t handle_interrupt(int irq, void *d)
if (plx_status & ICS_LDIA) { /* clear local doorbell interrupt */
plx_bits = readl(devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
writel(plx_bits, devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
- DEBUG_PRINT(" cleared local doorbell bits 0x%x\n", plx_bits);
}
if (hpdi_board_status & RX_OVERRUN_BIT) {
comedi_error(dev, "rx fifo overrun");
async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- DEBUG_PRINT("dma0_status 0x%x\n",
- (int)readb(devpriv->plx9080_iobase +
- PLX_DMA0_CS_REG));
}
if (hpdi_board_status & RX_UNDERRUN_BIT) {
@@ -865,11 +792,6 @@ static irqreturn_t handle_interrupt(int irq, void *d)
if (devpriv->dio_count == 0)
async->events |= COMEDI_CB_EOA;
- DEBUG_PRINT("board status 0x%x, ", hpdi_board_status);
- DEBUG_PRINT("plx status 0x%x\n", plx_status);
- if (async->events)
- DEBUG_PRINT(" events 0x%x\n", async->events);
-
cfc_handle_events(dev, s);
return IRQ_HANDLED;
@@ -914,7 +836,7 @@ static int gsc_hpdi_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &gsc_hpdi_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(gsc_hpdi_pci_table) = {
+static const struct pci_device_id gsc_hpdi_pci_table[] = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9080, PCI_VENDOR_ID_PLX,
0x2400, 0, 0, 0},
{ 0 }
diff --git a/drivers/staging/comedi/drivers/icp_multi.c b/drivers/staging/comedi/drivers/icp_multi.c
index 1e16641..80539b2 100644
--- a/drivers/staging/comedi/drivers/icp_multi.c
+++ b/drivers/staging/comedi/drivers/icp_multi.c
@@ -91,12 +91,13 @@ Configuration options: not applicable, uses PCI auto config
#define Status_IRQ 0x00ff /* All interrupts */
/* Define analogue range */
-static const struct comedi_lrange range_analog = { 4, {
- UNI_RANGE(5),
- UNI_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(10)
- }
+static const struct comedi_lrange range_analog = {
+ 4, {
+ UNI_RANGE(5),
+ UNI_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(10)
+ }
};
static const char range_codes_analog[] = { 0x00, 0x20, 0x10, 0x30 };
@@ -597,7 +598,7 @@ static int icp_multi_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &icp_multi_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(icp_multi_pci_table) = {
+static const struct pci_device_id icp_multi_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ICP, PCI_DEVICE_ID_ICP_MULTI) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index b52d58e..6100c12 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -807,7 +807,7 @@ static int jr3_pci_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &jr3_pci_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(jr3_pci_pci_table) = {
+static const struct pci_device_id jr3_pci_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL) },
{ PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW) },
{ PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL) },
diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c
index 15589f6..6b9846f 100644
--- a/drivers/staging/comedi/drivers/ke_counter.c
+++ b/drivers/staging/comedi/drivers/ke_counter.c
@@ -139,7 +139,7 @@ static int ke_counter_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(ke_counter_pci_table) = {
+static const struct pci_device_id ke_counter_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index 3d12e91..e739bcd 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -328,13 +328,12 @@ static const struct me4000_board me4000_boards[] = {
};
static const struct comedi_lrange me4000_ai_range = {
- 4,
- {
- UNI_RANGE(2.5),
- UNI_RANGE(10),
- BIP_RANGE(2.5),
- BIP_RANGE(10),
- }
+ 4, {
+ UNI_RANGE(2.5),
+ UNI_RANGE(10),
+ BIP_RANGE(2.5),
+ BIP_RANGE(10)
+ }
};
#define FIRMWARE_NOT_AVAILABLE 1
@@ -1105,7 +1104,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
{
unsigned int tmp;
struct comedi_device *dev = dev_id;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
int i;
int c = 0;
unsigned int lval;
@@ -1116,12 +1115,6 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
/* Reset all events */
s->async->events = 0;
- /* Check if irq number is right */
- if (irq != dev->irq) {
- dev_err(dev->class_dev, "Incorrect interrupt num: %d\n", irq);
- return IRQ_HANDLED;
- }
-
if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
ME4000_IRQ_STATUS_BIT_AI_HF) {
/* Read status register to find out what happened */
@@ -1505,6 +1498,13 @@ static int me4000_auto_attach(struct comedi_device *dev,
me4000_reset(dev);
+ if (pcidev->irq > 0) {
+ result = request_irq(pcidev->irq, me4000_ai_isr, IRQF_SHARED,
+ dev->board_name, dev);
+ if (result == 0)
+ dev->irq = pcidev->irq;
+ }
+
result = comedi_alloc_subdevices(dev, 4);
if (result)
return result;
@@ -1525,22 +1525,12 @@ static int me4000_auto_attach(struct comedi_device *dev,
s->range_table = &me4000_ai_range;
s->insn_read = me4000_ai_insn_read;
- if (pcidev->irq > 0) {
- if (request_irq(pcidev->irq, me4000_ai_isr,
- IRQF_SHARED, dev->board_name, dev)) {
- dev_warn(dev->class_dev,
- "request_irq failed\n");
- } else {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->cancel = me4000_ai_cancel;
- s->do_cmdtest = me4000_ai_do_cmd_test;
- s->do_cmd = me4000_ai_do_cmd;
-
- dev->irq = pcidev->irq;
- }
- } else {
- dev_warn(dev->class_dev, "No interrupt available\n");
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->cancel = me4000_ai_cancel;
+ s->do_cmdtest = me4000_ai_do_cmd_test;
+ s->do_cmd = me4000_ai_do_cmd;
}
} else {
s->type = COMEDI_SUBD_UNUSED;
@@ -1635,7 +1625,7 @@ static int me4000_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &me4000_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(me4000_pci_table) = {
+static const struct pci_device_id me4000_pci_table[] = {
{ PCI_VDEVICE(MEILHAUS, 0x4650), BOARD_ME4650 },
{ PCI_VDEVICE(MEILHAUS, 0x4660), BOARD_ME4660 },
{ PCI_VDEVICE(MEILHAUS, 0x4661), BOARD_ME4660I },
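
Editor's note: the me4000 change above is more than formatting. request_irq() now runs early in auto_attach(), and the later subdevice setup keys off dev->irq alone to decide whether to advertise asynchronous command support. A hedged sketch of that pattern; example_isr and the single-subdevice layout are placeholders, not part of the patch.

/* Sketch only: example_isr is a hypothetical interrupt handler. */
static int example_auto_attach(struct comedi_device *dev,
			       unsigned long context)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);
	struct comedi_subdevice *s;
	int ret;

	/* The IRQ is optional; a failed request only disables commands. */
	if (pcidev->irq > 0) {
		ret = request_irq(pcidev->irq, example_isr, IRQF_SHARED,
				  dev->board_name, dev);
		if (ret == 0)
			dev->irq = pcidev->irq;
	}

	ret = comedi_alloc_subdevices(dev, 1);
	if (ret)
		return ret;

	s = &dev->subdevices[0];
	s->type = COMEDI_SUBD_AI;
	s->subdev_flags = SDF_READABLE | SDF_GROUND;
	/* ... ranges and insn handlers ... */
	if (dev->irq) {
		/* Only expose async commands when the interrupt is usable. */
		dev->read_subdev = s;
		s->subdev_flags |= SDF_CMD_READ;
	}

	return 0;
}
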
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index 24ec9ef..7f66878 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -576,7 +576,7 @@ static int me_daq_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &me_daq_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(me_daq_pci_table) = {
+static const struct pci_device_id me_daq_pci_table[] = {
{ PCI_VDEVICE(MEILHAUS, 0x2600), BOARD_ME2600 },
{ PCI_VDEVICE(MEILHAUS, 0x2000), BOARD_ME2000 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c
new file mode 100644
index 0000000..81b78e0
--- /dev/null
+++ b/drivers/staging/comedi/drivers/mf6x4.c
@@ -0,0 +1,354 @@
+/*
+ * comedi/drivers/mf6x4.c
+ * Driver for Humusoft MF634 and MF624 Data acquisition cards
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Driver: mf6x4
+ * Description: Humusoft MF634 and MF624 Data acquisition card driver
+ * Devices: Humusoft MF634, Humusoft MF624
+ * Author: Rostislav Lisovy <lisovy@gmail.com>
+ * Status: works
+ * Updated:
+ * Configuration Options: none
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "../comedidev.h"
+
+/* Registers present in BAR0 memory region */
+#define MF624_GPIOC_R 0x54
+
+#define MF6X4_GPIOC_EOLC /* End Of Last Conversion */ (1 << 17)
+#define MF6X4_GPIOC_LDAC /* Load DACs */ (1 << 23)
+#define MF6X4_GPIOC_DACEN (1 << 26)
+
+/* BAR1 registers */
+#define MF6X4_DIN_R 0x10
+#define MF6X4_DIN_M 0xff
+#define MF6X4_DOUT_R 0x10
+#define MF6X4_DOUT_M 0xff
+
+#define MF6X4_ADSTART_R 0x20
+#define MF6X4_ADDATA_R 0x00
+#define MF6X4_ADCTRL_R 0x00
+#define MF6X4_ADCTRL_M 0xff
+
+#define MF6X4_DA0_R 0x20
+#define MF6X4_DA1_R 0x22
+#define MF6X4_DA2_R 0x24
+#define MF6X4_DA3_R 0x26
+#define MF6X4_DA4_R 0x28
+#define MF6X4_DA5_R 0x2a
+#define MF6X4_DA6_R 0x2c
+#define MF6X4_DA7_R 0x2e
+/* Map DAC channel id to real HW-dependent offset value */
+#define MF6X4_DAC_R(x) (0x20 + ((x) * 2))
+#define MF6X4_DA_M 0x3fff
+
+/* BAR2 registers */
+#define MF634_GPIOC_R 0x68
+
+enum mf6x4_boardid {
+ BOARD_MF634,
+ BOARD_MF624,
+};
+
+struct mf6x4_board {
+ const char *name;
+ unsigned int bar_nums[3]; /* We need to keep track of the
+ order of BARs used by the cards */
+};
+
+static const struct mf6x4_board mf6x4_boards[] = {
+ [BOARD_MF634] = {
+ .name = "mf634",
+ .bar_nums = {0, 2, 3},
+ },
+ [BOARD_MF624] = {
+ .name = "mf624",
+ .bar_nums = {0, 2, 4},
+ },
+};
+
+struct mf6x4_private {
+ /*
+ * Documentation for both MF634 and MF624 describes registers
+ * present in BAR0, 1 and 2 regions.
+ * The real (i.e. in HW) BAR numbers are different for MF624
+ * and MF634 yet we will call them 0, 1, 2
+ */
+ void __iomem *bar0_mem;
+ void __iomem *bar1_mem;
+ void __iomem *bar2_mem;
+
+ /*
+ * This configuration register has the same function and fields
+ * for both cards however it lies in different BARs on different
+ * offsets -- this variable makes the access easier
+ */
+ void __iomem *gpioc_R;
+
+ /* DAC value cache -- used for insn_read function */
+ int ao_readback[8];
+};
+
+static int mf6x4_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ struct mf6x4_private *devpriv = dev->private;
+
+ data[1] = ioread16(devpriv->bar1_mem + MF6X4_DIN_R) & MF6X4_DIN_M;
+
+ return insn->n;
+}
+
+static int mf6x4_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ struct mf6x4_private *devpriv = dev->private;
+
+ if (comedi_dio_update_state(s, data))
+ iowrite16(s->state & MF6X4_DOUT_M,
+ devpriv->bar1_mem + MF6X4_DOUT_R);
+
+ data[1] = s->state;
+
+ return insn->n;
+}
+
+static int mf6x4_ai_wait_for_eoc(struct comedi_device *dev,
+ unsigned int timeout)
+{
+ struct mf6x4_private *devpriv = dev->private;
+ unsigned int eolc;
+
+ while (timeout--) {
+ eolc = ioread32(devpriv->gpioc_R) & MF6X4_GPIOC_EOLC;
+ if (eolc)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
+static int mf6x4_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ struct mf6x4_private *devpriv = dev->private;
+ int chan = CR_CHAN(insn->chanspec);
+ int ret;
+ int i;
+ int d;
+
+ /* Set the ADC channel number in the scan list */
+ iowrite16((1 << chan) & MF6X4_ADCTRL_M,
+ devpriv->bar1_mem + MF6X4_ADCTRL_R);
+
+ for (i = 0; i < insn->n; i++) {
+ /* Trigger ADC conversion by reading ADSTART */
+ ioread16(devpriv->bar1_mem + MF6X4_ADSTART_R);
+
+ ret = mf6x4_ai_wait_for_eoc(dev, 100);
+ if (ret)
+ return ret;
+
+ /* Read the actual value */
+ d = ioread16(devpriv->bar1_mem + MF6X4_ADDATA_R);
+ d &= s->maxdata;
+ data[i] = d;
+ }
+
+ iowrite16(0x0, devpriv->bar1_mem + MF6X4_ADCTRL_R);
+
+ return insn->n;
+}
+
+static int mf6x4_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ struct mf6x4_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ uint32_t gpioc;
+ int i;
+
+ /* Enable instantaneous update of converters outputs + Enable DACs */
+ gpioc = ioread32(devpriv->gpioc_R);
+ iowrite32((gpioc & ~MF6X4_GPIOC_LDAC) | MF6X4_GPIOC_DACEN,
+ devpriv->gpioc_R);
+
+ for (i = 0; i < insn->n; i++) {
+ iowrite16(data[i] & MF6X4_DA_M,
+ devpriv->bar1_mem + MF6X4_DAC_R(chan));
+
+ devpriv->ao_readback[chan] = data[i];
+ }
+
+ return insn->n;
+}
+
+static int mf6x4_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ struct mf6x4_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
+
+ return insn->n;
+}
+
+static int mf6x4_auto_attach(struct comedi_device *dev, unsigned long context)
+{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ const struct mf6x4_board *board = NULL;
+ struct mf6x4_private *devpriv;
+ struct comedi_subdevice *s;
+ int ret;
+
+ if (context < ARRAY_SIZE(mf6x4_boards))
+ board = &mf6x4_boards[context];
+ else
+ return -ENODEV;
+
+ dev->board_ptr = board;
+ dev->board_name = board->name;
+
+ ret = comedi_pci_enable(dev);
+ if (ret)
+ return ret;
+
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
+
+ devpriv->bar0_mem = pci_ioremap_bar(pcidev, board->bar_nums[0]);
+ if (!devpriv->bar0_mem)
+ return -ENODEV;
+
+ devpriv->bar1_mem = pci_ioremap_bar(pcidev, board->bar_nums[1]);
+ if (!devpriv->bar1_mem)
+ return -ENODEV;
+
+ devpriv->bar2_mem = pci_ioremap_bar(pcidev, board->bar_nums[2]);
+ if (!devpriv->bar2_mem)
+ return -ENODEV;
+
+ if (board == &mf6x4_boards[BOARD_MF634])
+ devpriv->gpioc_R = devpriv->bar2_mem + MF634_GPIOC_R;
+ else
+ devpriv->gpioc_R = devpriv->bar0_mem + MF624_GPIOC_R;
+
+
+ ret = comedi_alloc_subdevices(dev, 4);
+ if (ret)
+ return ret;
+
+ /* ADC */
+ s = &dev->subdevices[0];
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND;
+ s->n_chan = 8;
+ s->maxdata = 0x3fff; /* 14 bits ADC */
+ s->range_table = &range_bipolar10;
+ s->insn_read = mf6x4_ai_insn_read;
+
+ /* DAC */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 8;
+ s->maxdata = 0x3fff; /* 14 bits DAC */
+ s->range_table = &range_bipolar10;
+ s->insn_write = mf6x4_ao_insn_write;
+ s->insn_read = mf6x4_ao_insn_read;
+
+ /* DIN */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = mf6x4_di_insn_bits;
+
+ /* DOUT */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = mf6x4_do_insn_bits;
+
+ return 0;
+}
+
+static void mf6x4_detach(struct comedi_device *dev)
+{
+ struct mf6x4_private *devpriv = dev->private;
+
+ if (devpriv->bar0_mem)
+ iounmap(devpriv->bar0_mem);
+ if (devpriv->bar1_mem)
+ iounmap(devpriv->bar1_mem);
+ if (devpriv->bar2_mem)
+ iounmap(devpriv->bar2_mem);
+
+ comedi_pci_disable(dev);
+}
+
+static struct comedi_driver mf6x4_driver = {
+ .driver_name = "mf6x4",
+ .module = THIS_MODULE,
+ .auto_attach = mf6x4_auto_attach,
+ .detach = mf6x4_detach,
+};
+
+static int mf6x4_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ return comedi_pci_auto_config(dev, &mf6x4_driver, id->driver_data);
+}
+
+static const struct pci_device_id mf6x4_pci_table[] = {
+ { PCI_VDEVICE(HUMUSOFT, 0x0634), BOARD_MF634 },
+ { PCI_VDEVICE(HUMUSOFT, 0x0624), BOARD_MF624 },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, mf6x4_pci_table);
+
+static struct pci_driver mf6x4_pci_driver = {
+ .name = "mf6x4",
+ .id_table = mf6x4_pci_table,
+ .probe = mf6x4_pci_probe,
+ .remove = comedi_pci_auto_unconfig,
+};
+
+module_comedi_pci_driver(mf6x4_driver, mf6x4_pci_driver);
+
+MODULE_AUTHOR("Rostislav Lisovy <lisovy@gmail.com>");
+MODULE_DESCRIPTION("Comedi MF634 and MF624 DAQ cards driver");
+MODULE_LICENSE("GPL");
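
Editor's note: for orientation, the subdevice layout created by mf6x4_auto_attach() above (0 = AI, 1 = AO, 2 = DI, 3 = DO) is what user space sees through comedilib. A hedged user-space sketch reading one raw 14-bit sample from the AI subdevice; the /dev/comedi0 node and channel 0 are assumptions, not something the driver dictates.

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* placeholder node */
	lsampl_t sample;

	if (!dev)
		return 1;

	/* subdevice 0 (AI), channel 0, range 0 (+/-10 V), ground reference */
	if (comedi_data_read(dev, 0, 0, 0, AREF_GROUND, &sample) < 0) {
		comedi_perror("comedi_data_read");
		comedi_close(dev);
		return 1;
	}

	printf("raw sample: %u (0..0x3fff)\n", sample);
	comedi_close(dev);
	return 0;
}
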
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 35cb4ac..9c9a0ee 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -288,7 +288,6 @@ void mite_dma_arm(struct mite_channel *mite_chan)
int chor;
unsigned long flags;
- MDPRINTK("mite_dma_arm ch%i\n", mite_chan->channel);
/*
* memory barrier is intended to insure any twiddling with the buffer
* is done before writing to the mite to arm dma transfer
@@ -329,8 +328,6 @@ int mite_buf_change(struct mite_dma_descriptor_ring *ring,
n_links = async->prealloc_bufsz >> PAGE_SHIFT;
- MDPRINTK("ring->hw_dev=%p, n_links=0x%04x\n", ring->hw_dev, n_links);
-
ring->descriptors =
dma_alloc_coherent(ring->hw_dev,
n_links * sizeof(struct mite_dma_descriptor),
@@ -345,7 +342,7 @@ int mite_buf_change(struct mite_dma_descriptor_ring *ring,
for (i = 0; i < n_links; i++) {
ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
ring->descriptors[i].addr =
- cpu_to_le32(async->buf_page_list[i].dma_addr);
+ cpu_to_le32(async->buf_map->page_list[i].dma_addr);
ring->descriptors[i].next =
cpu_to_le32(ring->descriptors_dma_addr + (i +
1) *
@@ -368,8 +365,6 @@ void mite_prep_dma(struct mite_channel *mite_chan,
unsigned int chor, chcr, mcr, dcr, lkcr;
struct mite_struct *mite = mite_chan->mite;
- MDPRINTK("mite_prep_dma ch%i\n", mite_chan->channel);
-
/* reset DMA and FIFO */
chor = CHOR_DMARESET | CHOR_FRESET;
writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
@@ -448,8 +443,6 @@ void mite_prep_dma(struct mite_channel *mite_chan,
/* starting address for link chaining */
writel(mite_chan->ring->descriptors_dma_addr,
mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
-
- MDPRINTK("exit mite_prep_dma\n");
}
EXPORT_SYMBOL_GPL(mite_prep_dma);
@@ -515,8 +508,6 @@ unsigned mite_dma_tcr(struct mite_channel *mite_chan)
lkar = readl(mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
tcr = readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
- MDPRINTK("mite_dma_tcr ch%i, lkar=0x%08x tcr=%d\n", mite_chan->channel,
- lkar, tcr);
return tcr;
}
@@ -642,140 +633,6 @@ int mite_done(struct mite_channel *mite_chan)
}
EXPORT_SYMBOL_GPL(mite_done);
-#ifdef DEBUG_MITE
-
-/* names of bits in mite registers */
-
-static const char *const mite_CHOR_strings[] = {
- "start", "cont", "stop", "abort",
- "freset", "clrlc", "clrrb", "clrdone",
- "clr_lpause", "set_lpause", "clr_send_tc",
- "set_send_tc", "12", "13", "14",
- "15", "16", "17", "18",
- "19", "20", "21", "22",
- "23", "24", "25", "26",
- "27", "28", "29", "30",
- "dmareset",
-};
-
-static const char *const mite_CHCR_strings[] = {
- "continue", "ringbuff", "2", "3",
- "4", "5", "6", "7",
- "8", "9", "10", "11",
- "12", "13", "bursten", "fifodis",
- "clr_cont_rb_ie", "set_cont_rb_ie", "clr_lc_ie", "set_lc_ie",
- "clr_drdy_ie", "set_drdy_ie", "clr_mrdy_ie", "set_mrdy_ie",
- "clr_done_ie", "set_done_ie", "clr_sar_ie", "set_sar_ie",
- "clr_linkp_ie", "set_linkp_ie", "clr_dma_ie", "set_dma_ie",
-};
-
-static const char *const mite_MCR_strings[] = {
- "amdevice", "1", "2", "3",
- "4", "5", "portio", "portvxi",
- "psizebyte", "psizehalf (byte & half = word)", "aseqxp1", "11",
- "12", "13", "blocken", "berhand",
- "reqsintlim/reqs0", "reqs1", "reqs2", "rd32",
- "rd512", "rl1", "rl2", "rl8",
- "24", "25", "26", "27",
- "28", "29", "30", "stopen",
-};
-
-static const char *const mite_DCR_strings[] = {
- "amdevice", "1", "2", "3",
- "4", "5", "portio", "portvxi",
- "psizebyte", "psizehalf (byte & half = word)", "aseqxp1", "aseqxp2",
- "aseqxp8", "13", "blocken", "berhand",
- "reqsintlim", "reqs1", "reqs2", "rd32",
- "rd512", "rl1", "rl2", "rl8",
- "23", "24", "25", "27",
- "28", "wsdevc", "wsdevs", "rwdevpack",
-};
-
-static const char *const mite_LKCR_strings[] = {
- "amdevice", "1", "2", "3",
- "4", "5", "portio", "portvxi",
- "psizebyte", "psizehalf (byte & half = word)", "asequp", "aseqdown",
- "12", "13", "14", "berhand",
- "16", "17", "18", "rd32",
- "rd512", "rl1", "rl2", "rl8",
- "24", "25", "26", "27",
- "28", "29", "30", "chngend",
-};
-
-static const char *const mite_CHSR_strings[] = {
- "d.err0", "d.err1", "m.err0", "m.err1",
- "l.err0", "l.err1", "drq0", "drq1",
- "end", "xferr", "operr0", "operr1",
- "stops", "habort", "sabort", "error",
- "16", "conts_rb", "18", "linkc",
- "20", "drdy", "22", "mrdy",
- "24", "done", "26", "sars",
- "28", "lpauses", "30", "int",
-};
-
-static void mite_decode(const char *const *bit_str, unsigned int bits)
-{
- int i;
-
- for (i = 31; i >= 0; i--) {
- if (bits & (1 << i))
- pr_debug(" %s\n", bit_str[i]);
- }
-}
-
-void mite_dump_regs(struct mite_channel *mite_chan)
-{
- void __iomem *mite_io_addr = mite_chan->mite->mite_io_addr;
- unsigned int offset;
- unsigned int value;
- int channel = mite_chan->channel;
-
- pr_debug("mite_dump_regs ch%i\n", channel);
- pr_debug("mite address is =%p\n", mite_io_addr);
-
- offset = MITE_CHOR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[CHOR] at 0x%08x =0x%08x\n", offset, value);
- mite_decode(mite_CHOR_strings, value);
- offset = MITE_CHCR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[CHCR] at 0x%08x =0x%08x\n", offset, value);
- mite_decode(mite_CHCR_strings, value);
- offset = MITE_TCR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[TCR] at 0x%08x =0x%08x\n", offset, value);
- offset = MITE_MCR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[MCR] at 0x%08x =0x%08x\n", offset, value);
- mite_decode(mite_MCR_strings, value);
- offset = MITE_MAR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[MAR] at 0x%08x =0x%08x\n", offset, value);
- offset = MITE_DCR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[DCR] at 0x%08x =0x%08x\n", offset, value);
- mite_decode(mite_DCR_strings, value);
- offset = MITE_DAR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[DAR] at 0x%08x =0x%08x\n", offset, value);
- offset = MITE_LKCR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[LKCR] at 0x%08x =0x%08x\n", offset, value);
- mite_decode(mite_LKCR_strings, value);
- offset = MITE_LKAR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[LKAR] at 0x%08x =0x%08x\n", offset, value);
- offset = MITE_CHSR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[CHSR] at 0x%08x =0x%08x\n", offset, value);
- mite_decode(mite_CHSR_strings, value);
- offset = MITE_FCR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[FCR] at 0x%08x =0x%08x\n", offset, value);
-}
-EXPORT_SYMBOL_GPL(mite_dump_regs);
-#endif
-
static int __init mite_module_init(void)
{
return 0;
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
index 8423b8b..bcf2f97 100644
--- a/drivers/staging/comedi/drivers/mite.h
+++ b/drivers/staging/comedi/drivers/mite.h
@@ -24,15 +24,8 @@
#include <linux/slab.h>
#include "../comedidev.h"
-/* #define DEBUG_MITE */
#define PCIMIO_COMPAT
-#ifdef DEBUG_MITE
-#define MDPRINTK(format, args...) pr_debug(format , ## args)
-#else
-#define MDPRINTK(format, args...) do { } while (0)
-#endif
-
#define MAX_MITE_DMA_CHANNELS 8
struct mite_dma_descriptor {
@@ -129,11 +122,6 @@ void mite_prep_dma(struct mite_channel *mite_chan,
int mite_buf_change(struct mite_dma_descriptor_ring *ring,
struct comedi_async *async);
-#ifdef DEBUG_MITE
-void mite_print_chsr(unsigned int chsr);
-void mite_dump_regs(struct mite_channel *mite_chan);
-#endif
-
static inline int CHAN_OFFSET(int channel)
{
return 0x500 + 0x100 * channel;
diff --git a/drivers/staging/comedi/drivers/mpc624.c b/drivers/staging/comedi/drivers/mpc624.c
index acbaeee..fe4621e 100644
--- a/drivers/staging/comedi/drivers/mpc624.c
+++ b/drivers/staging/comedi/drivers/mpc624.c
@@ -159,11 +159,6 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
* We always write 0 to GNSWA bit, so the channel range is +-/10.1Vdc
*/
outb(insn->chanspec, dev->iobase + MPC624_GNMUXCH);
-/* printk("Channel %d:\n", insn->chanspec); */
- if (!insn->n) {
- printk(KERN_INFO "MPC624: Warning, no data to acquire\n");
- return 0;
- }
for (n = 0; n < insn->n; n++) {
/* Trigger the conversion */
@@ -182,11 +177,9 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
else
break;
}
- if (i == TIMEOUT) {
- printk(KERN_ERR "MPC624: timeout (%dms)\n", TIMEOUT);
- data[n] = 0;
+ if (i == TIMEOUT)
return -ETIMEDOUT;
- }
+
/* Start reading data */
data_in = 0;
data_out = devpriv->ulConvertionRate;
@@ -245,11 +238,11 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
*/
if (data_in & MPC624_EOC_BIT)
- printk(KERN_INFO "MPC624:EOC bit is set (data_in=%lu)!",
- data_in);
+ dev_dbg(dev->class_dev,
+ "EOC bit is set (data_in=%lu)!", data_in);
if (data_in & MPC624_DMY_BIT)
- printk(KERN_INFO "MPC624:DMY bit is set (data_in=%lu)!",
- data_in);
+ dev_dbg(dev->class_dev,
+ "DMY bit is set (data_in=%lu)!", data_in);
if (data_in & MPC624_SGN_BIT) { /* Voltage is positive */
/*
* comedi operates on unsigned numbers, so mask off EOC
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 85aa960..860fc81 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -455,7 +455,7 @@ static int ni6527_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni6527_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(ni6527_pci_table) = {
+static const struct pci_device_id ni6527_pci_table[] = {
{ PCI_VDEVICE(NI, 0x2b10), BOARD_PXI6527 },
{ PCI_VDEVICE(NI, 0x2b20), BOARD_PCI6527 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 853f62b..6e42001 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -43,9 +43,6 @@ except maybe the 6514.
*/
-#define DEBUG 1
-#define DEBUG_FLAGS
-
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -430,7 +427,7 @@ static irqreturn_t ni_65xx_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct ni_65xx_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[2];
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned int status;
status = readb(devpriv->mite->daq_io_addr + Change_Status);
@@ -741,7 +738,7 @@ static int ni_65xx_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_65xx_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(ni_65xx_pci_table) = {
+static const struct pci_device_id ni_65xx_pci_table[] = {
{ PCI_VDEVICE(NI, 0x1710), BOARD_PXI6509 },
{ PCI_VDEVICE(NI, 0x7085), BOARD_PCI6509 },
{ PCI_VDEVICE(NI, 0x7086), BOARD_PXI6528 },
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 8a991dc..df42e39 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -55,112 +55,112 @@ for 4 */
#define MAX_DMA_CHANNEL 4
/* See Register-Level Programmer Manual page 3.1 */
-enum NI_660x_Register {
- G0InterruptAcknowledge,
- G0StatusRegister,
- G1InterruptAcknowledge,
- G1StatusRegister,
- G01StatusRegister,
- G0CommandRegister,
- STCDIOParallelInput,
- G1CommandRegister,
- G0HWSaveRegister,
- G1HWSaveRegister,
- STCDIOOutput,
- STCDIOControl,
- G0SWSaveRegister,
- G1SWSaveRegister,
- G0ModeRegister,
- G01JointStatus1Register,
- G1ModeRegister,
- STCDIOSerialInput,
- G0LoadARegister,
- G01JointStatus2Register,
- G0LoadBRegister,
- G1LoadARegister,
- G1LoadBRegister,
- G0InputSelectRegister,
- G1InputSelectRegister,
- G0AutoincrementRegister,
- G1AutoincrementRegister,
- G01JointResetRegister,
- G0InterruptEnable,
- G1InterruptEnable,
- G0CountingModeRegister,
- G1CountingModeRegister,
- G0SecondGateRegister,
- G1SecondGateRegister,
- G0DMAConfigRegister,
- G0DMAStatusRegister,
- G1DMAConfigRegister,
- G1DMAStatusRegister,
- G2InterruptAcknowledge,
- G2StatusRegister,
- G3InterruptAcknowledge,
- G3StatusRegister,
- G23StatusRegister,
- G2CommandRegister,
- G3CommandRegister,
- G2HWSaveRegister,
- G3HWSaveRegister,
- G2SWSaveRegister,
- G3SWSaveRegister,
- G2ModeRegister,
- G23JointStatus1Register,
- G3ModeRegister,
- G2LoadARegister,
- G23JointStatus2Register,
- G2LoadBRegister,
- G3LoadARegister,
- G3LoadBRegister,
- G2InputSelectRegister,
- G3InputSelectRegister,
- G2AutoincrementRegister,
- G3AutoincrementRegister,
- G23JointResetRegister,
- G2InterruptEnable,
- G3InterruptEnable,
- G2CountingModeRegister,
- G3CountingModeRegister,
- G3SecondGateRegister,
- G2SecondGateRegister,
- G2DMAConfigRegister,
- G2DMAStatusRegister,
- G3DMAConfigRegister,
- G3DMAStatusRegister,
- DIO32Input,
- DIO32Output,
- ClockConfigRegister,
- GlobalInterruptStatusRegister,
- DMAConfigRegister,
- GlobalInterruptConfigRegister,
- IOConfigReg0_1,
- IOConfigReg2_3,
- IOConfigReg4_5,
- IOConfigReg6_7,
- IOConfigReg8_9,
- IOConfigReg10_11,
- IOConfigReg12_13,
- IOConfigReg14_15,
- IOConfigReg16_17,
- IOConfigReg18_19,
- IOConfigReg20_21,
- IOConfigReg22_23,
- IOConfigReg24_25,
- IOConfigReg26_27,
- IOConfigReg28_29,
- IOConfigReg30_31,
- IOConfigReg32_33,
- IOConfigReg34_35,
- IOConfigReg36_37,
- IOConfigReg38_39,
- NumRegisters,
+enum ni_660x_register {
+ NI660X_G0_INT_ACK,
+ NI660X_G0_STATUS,
+ NI660X_G1_INT_ACK,
+ NI660X_G1_STATUS,
+ NI660X_G01_STATUS,
+ NI660X_G0_CMD,
+ NI660X_STC_DIO_PARALLEL_INPUT,
+ NI660X_G1_CMD,
+ NI660X_G0_HW_SAVE,
+ NI660X_G1_HW_SAVE,
+ NI660X_STC_DIO_OUTPUT,
+ NI660X_STC_DIO_CONTROL,
+ NI660X_G0_SW_SAVE,
+ NI660X_G1_SW_SAVE,
+ NI660X_G0_MODE,
+ NI660X_G01_STATUS1,
+ NI660X_G1_MODE,
+ NI660X_STC_DIO_SERIAL_INPUT,
+ NI660X_G0_LOADA,
+ NI660X_G01_STATUS2,
+ NI660X_G0_LOADB,
+ NI660X_G1_LOADA,
+ NI660X_G1_LOADB,
+ NI660X_G0_INPUT_SEL,
+ NI660X_G1_INPUT_SEL,
+ NI660X_G0_AUTO_INC,
+ NI660X_G1_AUTO_INC,
+ NI660X_G01_RESET,
+ NI660X_G0_INT_ENA,
+ NI660X_G1_INT_ENA,
+ NI660X_G0_CNT_MODE,
+ NI660X_G1_CNT_MODE,
+ NI660X_G0_GATE2,
+ NI660X_G1_GATE2,
+ NI660X_G0_DMA_CFG,
+ NI660X_G0_DMA_STATUS,
+ NI660X_G1_DMA_CFG,
+ NI660X_G1_DMA_STATUS,
+ NI660X_G2_INT_ACK,
+ NI660X_G2_STATUS,
+ NI660X_G3_INT_ACK,
+ NI660X_G3_STATUS,
+ NI660X_G23_STATUS,
+ NI660X_G2_CMD,
+ NI660X_G3_CMD,
+ NI660X_G2_HW_SAVE,
+ NI660X_G3_HW_SAVE,
+ NI660X_G2_SW_SAVE,
+ NI660X_G3_SW_SAVE,
+ NI660X_G2_MODE,
+ NI660X_G23_STATUS1,
+ NI660X_G3_MODE,
+ NI660X_G2_LOADA,
+ NI660X_G23_STATUS2,
+ NI660X_G2_LOADB,
+ NI660X_G3_LOADA,
+ NI660X_G3_LOADB,
+ NI660X_G2_INPUT_SEL,
+ NI660X_G3_INPUT_SEL,
+ NI660X_G2_AUTO_INC,
+ NI660X_G3_AUTO_INC,
+ NI660X_G23_RESET,
+ NI660X_G2_INT_ENA,
+ NI660X_G3_INT_ENA,
+ NI660X_G2_CNT_MODE,
+ NI660X_G3_CNT_MODE,
+ NI660X_G3_GATE2,
+ NI660X_G2_GATE2,
+ NI660X_G2_DMA_CFG,
+ NI660X_G2_DMA_STATUS,
+ NI660X_G3_DMA_CFG,
+ NI660X_G3_DMA_STATUS,
+ NI660X_DIO32_INPUT,
+ NI660X_DIO32_OUTPUT,
+ NI660X_CLK_CFG,
+ NI660X_GLOBAL_INT_STATUS,
+ NI660X_DMA_CFG,
+ NI660X_GLOBAL_INT_CFG,
+ NI660X_IO_CFG_0_1,
+ NI660X_IO_CFG_2_3,
+ NI660X_IO_CFG_4_5,
+ NI660X_IO_CFG_6_7,
+ NI660X_IO_CFG_8_9,
+ NI660X_IO_CFG_10_11,
+ NI660X_IO_CFG_12_13,
+ NI660X_IO_CFG_14_15,
+ NI660X_IO_CFG_16_17,
+ NI660X_IO_CFG_18_19,
+ NI660X_IO_CFG_20_21,
+ NI660X_IO_CFG_22_23,
+ NI660X_IO_CFG_24_25,
+ NI660X_IO_CFG_26_27,
+ NI660X_IO_CFG_28_29,
+ NI660X_IO_CFG_30_31,
+ NI660X_IO_CFG_32_33,
+ NI660X_IO_CFG_34_35,
+ NI660X_IO_CFG_36_37,
+ NI660X_IO_CFG_38_39,
+ NI660X_NUM_REGS,
};
static inline unsigned IOConfigReg(unsigned pfi_channel)
{
- unsigned reg = IOConfigReg0_1 + pfi_channel / 2;
- BUG_ON(reg > IOConfigReg38_39);
+ unsigned reg = NI660X_IO_CFG_0_1 + pfi_channel / 2;
+ BUG_ON(reg > NI660X_IO_CFG_38_39);
return reg;
}
@@ -200,7 +200,7 @@ struct NI_660xRegisterData {
enum ni_660x_register_width size; /* 1 byte, 2 bytes, or 4 bytes */
};
-static const struct NI_660xRegisterData registerData[NumRegisters] = {
+static const struct NI_660xRegisterData registerData[NI660X_NUM_REGS] = {
{"G0 Interrupt Acknowledge", 0x004, NI_660x_WRITE, DATA_2B},
{"G0 Status Register", 0x004, NI_660x_READ, DATA_2B},
{"G1 Interrupt Acknowledge", 0x006, NI_660x_WRITE, DATA_2B},
@@ -347,11 +347,6 @@ static inline unsigned dma_select_mask(unsigned dma_channel)
enum dma_selection {
dma_selection_none = 0x1f,
};
-static inline unsigned dma_selection_counter(unsigned counter_index)
-{
- BUG_ON(counter_index >= counters_per_chip);
- return counter_index;
-}
static inline unsigned dma_select_bits(unsigned dma_channel, unsigned selection)
{
@@ -444,229 +439,158 @@ static inline unsigned ni_660x_num_counters(struct comedi_device *dev)
return board->n_chips * counters_per_chip;
}
-static enum NI_660x_Register ni_gpct_to_660x_register(enum ni_gpct_register reg)
+static enum ni_660x_register ni_gpct_to_660x_register(enum ni_gpct_register reg)
{
- enum NI_660x_Register ni_660x_register;
switch (reg) {
- case NITIO_G0_Autoincrement_Reg:
- ni_660x_register = G0AutoincrementRegister;
- break;
- case NITIO_G1_Autoincrement_Reg:
- ni_660x_register = G1AutoincrementRegister;
- break;
- case NITIO_G2_Autoincrement_Reg:
- ni_660x_register = G2AutoincrementRegister;
- break;
- case NITIO_G3_Autoincrement_Reg:
- ni_660x_register = G3AutoincrementRegister;
- break;
- case NITIO_G0_Command_Reg:
- ni_660x_register = G0CommandRegister;
- break;
- case NITIO_G1_Command_Reg:
- ni_660x_register = G1CommandRegister;
- break;
- case NITIO_G2_Command_Reg:
- ni_660x_register = G2CommandRegister;
- break;
- case NITIO_G3_Command_Reg:
- ni_660x_register = G3CommandRegister;
- break;
- case NITIO_G0_HW_Save_Reg:
- ni_660x_register = G0HWSaveRegister;
- break;
- case NITIO_G1_HW_Save_Reg:
- ni_660x_register = G1HWSaveRegister;
- break;
- case NITIO_G2_HW_Save_Reg:
- ni_660x_register = G2HWSaveRegister;
- break;
- case NITIO_G3_HW_Save_Reg:
- ni_660x_register = G3HWSaveRegister;
- break;
- case NITIO_G0_SW_Save_Reg:
- ni_660x_register = G0SWSaveRegister;
- break;
- case NITIO_G1_SW_Save_Reg:
- ni_660x_register = G1SWSaveRegister;
- break;
- case NITIO_G2_SW_Save_Reg:
- ni_660x_register = G2SWSaveRegister;
- break;
- case NITIO_G3_SW_Save_Reg:
- ni_660x_register = G3SWSaveRegister;
- break;
- case NITIO_G0_Mode_Reg:
- ni_660x_register = G0ModeRegister;
- break;
- case NITIO_G1_Mode_Reg:
- ni_660x_register = G1ModeRegister;
- break;
- case NITIO_G2_Mode_Reg:
- ni_660x_register = G2ModeRegister;
- break;
- case NITIO_G3_Mode_Reg:
- ni_660x_register = G3ModeRegister;
- break;
- case NITIO_G0_LoadA_Reg:
- ni_660x_register = G0LoadARegister;
- break;
- case NITIO_G1_LoadA_Reg:
- ni_660x_register = G1LoadARegister;
- break;
- case NITIO_G2_LoadA_Reg:
- ni_660x_register = G2LoadARegister;
- break;
- case NITIO_G3_LoadA_Reg:
- ni_660x_register = G3LoadARegister;
- break;
- case NITIO_G0_LoadB_Reg:
- ni_660x_register = G0LoadBRegister;
- break;
- case NITIO_G1_LoadB_Reg:
- ni_660x_register = G1LoadBRegister;
- break;
- case NITIO_G2_LoadB_Reg:
- ni_660x_register = G2LoadBRegister;
- break;
- case NITIO_G3_LoadB_Reg:
- ni_660x_register = G3LoadBRegister;
- break;
- case NITIO_G0_Input_Select_Reg:
- ni_660x_register = G0InputSelectRegister;
- break;
- case NITIO_G1_Input_Select_Reg:
- ni_660x_register = G1InputSelectRegister;
- break;
- case NITIO_G2_Input_Select_Reg:
- ni_660x_register = G2InputSelectRegister;
- break;
- case NITIO_G3_Input_Select_Reg:
- ni_660x_register = G3InputSelectRegister;
- break;
- case NITIO_G01_Status_Reg:
- ni_660x_register = G01StatusRegister;
- break;
- case NITIO_G23_Status_Reg:
- ni_660x_register = G23StatusRegister;
- break;
- case NITIO_G01_Joint_Reset_Reg:
- ni_660x_register = G01JointResetRegister;
- break;
- case NITIO_G23_Joint_Reset_Reg:
- ni_660x_register = G23JointResetRegister;
- break;
- case NITIO_G01_Joint_Status1_Reg:
- ni_660x_register = G01JointStatus1Register;
- break;
- case NITIO_G23_Joint_Status1_Reg:
- ni_660x_register = G23JointStatus1Register;
- break;
- case NITIO_G01_Joint_Status2_Reg:
- ni_660x_register = G01JointStatus2Register;
- break;
- case NITIO_G23_Joint_Status2_Reg:
- ni_660x_register = G23JointStatus2Register;
- break;
- case NITIO_G0_Counting_Mode_Reg:
- ni_660x_register = G0CountingModeRegister;
- break;
- case NITIO_G1_Counting_Mode_Reg:
- ni_660x_register = G1CountingModeRegister;
- break;
- case NITIO_G2_Counting_Mode_Reg:
- ni_660x_register = G2CountingModeRegister;
- break;
- case NITIO_G3_Counting_Mode_Reg:
- ni_660x_register = G3CountingModeRegister;
- break;
- case NITIO_G0_Second_Gate_Reg:
- ni_660x_register = G0SecondGateRegister;
- break;
- case NITIO_G1_Second_Gate_Reg:
- ni_660x_register = G1SecondGateRegister;
- break;
- case NITIO_G2_Second_Gate_Reg:
- ni_660x_register = G2SecondGateRegister;
- break;
- case NITIO_G3_Second_Gate_Reg:
- ni_660x_register = G3SecondGateRegister;
- break;
- case NITIO_G0_DMA_Config_Reg:
- ni_660x_register = G0DMAConfigRegister;
- break;
- case NITIO_G0_DMA_Status_Reg:
- ni_660x_register = G0DMAStatusRegister;
- break;
- case NITIO_G1_DMA_Config_Reg:
- ni_660x_register = G1DMAConfigRegister;
- break;
- case NITIO_G1_DMA_Status_Reg:
- ni_660x_register = G1DMAStatusRegister;
- break;
- case NITIO_G2_DMA_Config_Reg:
- ni_660x_register = G2DMAConfigRegister;
- break;
- case NITIO_G2_DMA_Status_Reg:
- ni_660x_register = G2DMAStatusRegister;
- break;
- case NITIO_G3_DMA_Config_Reg:
- ni_660x_register = G3DMAConfigRegister;
- break;
- case NITIO_G3_DMA_Status_Reg:
- ni_660x_register = G3DMAStatusRegister;
- break;
- case NITIO_G0_Interrupt_Acknowledge_Reg:
- ni_660x_register = G0InterruptAcknowledge;
- break;
- case NITIO_G1_Interrupt_Acknowledge_Reg:
- ni_660x_register = G1InterruptAcknowledge;
- break;
- case NITIO_G2_Interrupt_Acknowledge_Reg:
- ni_660x_register = G2InterruptAcknowledge;
- break;
- case NITIO_G3_Interrupt_Acknowledge_Reg:
- ni_660x_register = G3InterruptAcknowledge;
- break;
- case NITIO_G0_Status_Reg:
- ni_660x_register = G0StatusRegister;
- break;
- case NITIO_G1_Status_Reg:
- ni_660x_register = G1StatusRegister;
- break;
- case NITIO_G2_Status_Reg:
- ni_660x_register = G2StatusRegister;
- break;
- case NITIO_G3_Status_Reg:
- ni_660x_register = G3StatusRegister;
- break;
- case NITIO_G0_Interrupt_Enable_Reg:
- ni_660x_register = G0InterruptEnable;
- break;
- case NITIO_G1_Interrupt_Enable_Reg:
- ni_660x_register = G1InterruptEnable;
- break;
- case NITIO_G2_Interrupt_Enable_Reg:
- ni_660x_register = G2InterruptEnable;
- break;
- case NITIO_G3_Interrupt_Enable_Reg:
- ni_660x_register = G3InterruptEnable;
- break;
+ case NITIO_G0_AUTO_INC:
+ return NI660X_G0_AUTO_INC;
+ case NITIO_G1_AUTO_INC:
+ return NI660X_G1_AUTO_INC;
+ case NITIO_G2_AUTO_INC:
+ return NI660X_G2_AUTO_INC;
+ case NITIO_G3_AUTO_INC:
+ return NI660X_G3_AUTO_INC;
+ case NITIO_G0_CMD:
+ return NI660X_G0_CMD;
+ case NITIO_G1_CMD:
+ return NI660X_G1_CMD;
+ case NITIO_G2_CMD:
+ return NI660X_G2_CMD;
+ case NITIO_G3_CMD:
+ return NI660X_G3_CMD;
+ case NITIO_G0_HW_SAVE:
+ return NI660X_G0_HW_SAVE;
+ case NITIO_G1_HW_SAVE:
+ return NI660X_G1_HW_SAVE;
+ case NITIO_G2_HW_SAVE:
+ return NI660X_G2_HW_SAVE;
+ case NITIO_G3_HW_SAVE:
+ return NI660X_G3_HW_SAVE;
+ case NITIO_G0_SW_SAVE:
+ return NI660X_G0_SW_SAVE;
+ case NITIO_G1_SW_SAVE:
+ return NI660X_G1_SW_SAVE;
+ case NITIO_G2_SW_SAVE:
+ return NI660X_G2_SW_SAVE;
+ case NITIO_G3_SW_SAVE:
+ return NI660X_G3_SW_SAVE;
+ case NITIO_G0_MODE:
+ return NI660X_G0_MODE;
+ case NITIO_G1_MODE:
+ return NI660X_G1_MODE;
+ case NITIO_G2_MODE:
+ return NI660X_G2_MODE;
+ case NITIO_G3_MODE:
+ return NI660X_G3_MODE;
+ case NITIO_G0_LOADA:
+ return NI660X_G0_LOADA;
+ case NITIO_G1_LOADA:
+ return NI660X_G1_LOADA;
+ case NITIO_G2_LOADA:
+ return NI660X_G2_LOADA;
+ case NITIO_G3_LOADA:
+ return NI660X_G3_LOADA;
+ case NITIO_G0_LOADB:
+ return NI660X_G0_LOADB;
+ case NITIO_G1_LOADB:
+ return NI660X_G1_LOADB;
+ case NITIO_G2_LOADB:
+ return NI660X_G2_LOADB;
+ case NITIO_G3_LOADB:
+ return NI660X_G3_LOADB;
+ case NITIO_G0_INPUT_SEL:
+ return NI660X_G0_INPUT_SEL;
+ case NITIO_G1_INPUT_SEL:
+ return NI660X_G1_INPUT_SEL;
+ case NITIO_G2_INPUT_SEL:
+ return NI660X_G2_INPUT_SEL;
+ case NITIO_G3_INPUT_SEL:
+ return NI660X_G3_INPUT_SEL;
+ case NITIO_G01_STATUS:
+ return NI660X_G01_STATUS;
+ case NITIO_G23_STATUS:
+ return NI660X_G23_STATUS;
+ case NITIO_G01_RESET:
+ return NI660X_G01_RESET;
+ case NITIO_G23_RESET:
+ return NI660X_G23_RESET;
+ case NITIO_G01_STATUS1:
+ return NI660X_G01_STATUS1;
+ case NITIO_G23_STATUS1:
+ return NI660X_G23_STATUS1;
+ case NITIO_G01_STATUS2:
+ return NI660X_G01_STATUS2;
+ case NITIO_G23_STATUS2:
+ return NI660X_G23_STATUS2;
+ case NITIO_G0_CNT_MODE:
+ return NI660X_G0_CNT_MODE;
+ case NITIO_G1_CNT_MODE:
+ return NI660X_G1_CNT_MODE;
+ case NITIO_G2_CNT_MODE:
+ return NI660X_G2_CNT_MODE;
+ case NITIO_G3_CNT_MODE:
+ return NI660X_G3_CNT_MODE;
+ case NITIO_G0_GATE2:
+ return NI660X_G0_GATE2;
+ case NITIO_G1_GATE2:
+ return NI660X_G1_GATE2;
+ case NITIO_G2_GATE2:
+ return NI660X_G2_GATE2;
+ case NITIO_G3_GATE2:
+ return NI660X_G3_GATE2;
+ case NITIO_G0_DMA_CFG:
+ return NI660X_G0_DMA_CFG;
+ case NITIO_G0_DMA_STATUS:
+ return NI660X_G0_DMA_STATUS;
+ case NITIO_G1_DMA_CFG:
+ return NI660X_G1_DMA_CFG;
+ case NITIO_G1_DMA_STATUS:
+ return NI660X_G1_DMA_STATUS;
+ case NITIO_G2_DMA_CFG:
+ return NI660X_G2_DMA_CFG;
+ case NITIO_G2_DMA_STATUS:
+ return NI660X_G2_DMA_STATUS;
+ case NITIO_G3_DMA_CFG:
+ return NI660X_G3_DMA_CFG;
+ case NITIO_G3_DMA_STATUS:
+ return NI660X_G3_DMA_STATUS;
+ case NITIO_G0_INT_ACK:
+ return NI660X_G0_INT_ACK;
+ case NITIO_G1_INT_ACK:
+ return NI660X_G1_INT_ACK;
+ case NITIO_G2_INT_ACK:
+ return NI660X_G2_INT_ACK;
+ case NITIO_G3_INT_ACK:
+ return NI660X_G3_INT_ACK;
+ case NITIO_G0_STATUS:
+ return NI660X_G0_STATUS;
+ case NITIO_G1_STATUS:
+ return NI660X_G1_STATUS;
+ case NITIO_G2_STATUS:
+ return NI660X_G2_STATUS;
+ case NITIO_G3_STATUS:
+ return NI660X_G3_STATUS;
+ case NITIO_G0_INT_ENA:
+ return NI660X_G0_INT_ENA;
+ case NITIO_G1_INT_ENA:
+ return NI660X_G1_INT_ENA;
+ case NITIO_G2_INT_ENA:
+ return NI660X_G2_INT_ENA;
+ case NITIO_G3_INT_ENA:
+ return NI660X_G3_INT_ENA;
default:
BUG();
return 0;
- break;
}
- return ni_660x_register;
}
static inline void ni_660x_write_register(struct comedi_device *dev,
- unsigned chip_index, unsigned bits,
- enum NI_660x_Register reg)
+ unsigned chip, unsigned bits,
+ enum ni_660x_register reg)
{
struct ni_660x_private *devpriv = dev->private;
void __iomem *write_address =
- devpriv->mite->daq_io_addr + GPCT_OFFSET[chip_index] +
+ devpriv->mite->daq_io_addr + GPCT_OFFSET[chip] +
registerData[reg].offset;
switch (registerData[reg].size) {
@@ -683,12 +607,12 @@ static inline void ni_660x_write_register(struct comedi_device *dev,
}
static inline unsigned ni_660x_read_register(struct comedi_device *dev,
- unsigned chip_index,
- enum NI_660x_Register reg)
+ unsigned chip,
+ enum ni_660x_register reg)
{
struct ni_660x_private *devpriv = dev->private;
void __iomem *read_address =
- devpriv->mite->daq_io_addr + GPCT_OFFSET[chip_index] +
+ devpriv->mite->daq_io_addr + GPCT_OFFSET[chip] +
registerData[reg].offset;
switch (registerData[reg].size) {
@@ -709,18 +633,20 @@ static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
- enum NI_660x_Register ni_660x_register = ni_gpct_to_660x_register(reg);
- ni_660x_write_register(dev, counter->chip_index, bits,
- ni_660x_register);
+ enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg);
+ unsigned chip = counter->chip_index;
+
+ ni_660x_write_register(dev, chip, bits, ni_660x_register);
}
static unsigned ni_gpct_read_register(struct ni_gpct *counter,
enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
- enum NI_660x_Register ni_660x_register = ni_gpct_to_660x_register(reg);
- return ni_660x_read_register(dev, counter->chip_index,
- ni_660x_register);
+ enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg);
+ unsigned chip = counter->chip_index;
+
+ return ni_660x_read_register(dev, chip, ni_660x_register);
}
static inline struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private
@@ -728,7 +654,9 @@ static inline struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private
struct ni_gpct
*counter)
{
- return priv->mite_rings[counter->chip_index][counter->counter_index];
+ unsigned chip = counter->chip_index;
+
+ return priv->mite_rings[chip][counter->counter_index];
}
static inline void ni_660x_set_dma_channel(struct comedi_device *dev,
@@ -736,18 +664,17 @@ static inline void ni_660x_set_dma_channel(struct comedi_device *dev,
struct ni_gpct *counter)
{
struct ni_660x_private *devpriv = dev->private;
+ unsigned chip = counter->chip_index;
unsigned long flags;
spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->dma_configuration_soft_copies[counter->chip_index] &=
- ~dma_select_mask(mite_channel);
- devpriv->dma_configuration_soft_copies[counter->chip_index] |=
- dma_select_bits(mite_channel,
- dma_selection_counter(counter->counter_index));
- ni_660x_write_register(dev, counter->chip_index,
- devpriv->dma_configuration_soft_copies
- [counter->chip_index] |
- dma_reset_bit(mite_channel), DMAConfigRegister);
+ devpriv->dma_configuration_soft_copies[chip] &=
+ ~dma_select_mask(mite_channel);
+ devpriv->dma_configuration_soft_copies[chip] |=
+ dma_select_bits(mite_channel, counter->counter_index);
+ ni_660x_write_register(dev, chip,
+ devpriv->dma_configuration_soft_copies[chip] |
+ dma_reset_bit(mite_channel), NI660X_DMA_CFG);
mmiowb();
spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
@@ -757,16 +684,17 @@ static inline void ni_660x_unset_dma_channel(struct comedi_device *dev,
struct ni_gpct *counter)
{
struct ni_660x_private *devpriv = dev->private;
+ unsigned chip = counter->chip_index;
unsigned long flags;
spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->dma_configuration_soft_copies[counter->chip_index] &=
+ devpriv->dma_configuration_soft_copies[chip] &=
~dma_select_mask(mite_channel);
- devpriv->dma_configuration_soft_copies[counter->chip_index] |=
+ devpriv->dma_configuration_soft_copies[chip] |=
dma_select_bits(mite_channel, dma_selection_none);
- ni_660x_write_register(dev, counter->chip_index,
- devpriv->dma_configuration_soft_copies
- [counter->chip_index], DMAConfigRegister);
+ ni_660x_write_register(dev, chip,
+ devpriv->dma_configuration_soft_copies[chip],
+ NI660X_DMA_CFG);
mmiowb();
spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
@@ -815,11 +743,9 @@ static void ni_660x_release_mite_channel(struct comedi_device *dev,
static int ni_660x_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct ni_gpct *counter = s->private;
int retval;
- struct ni_gpct *counter = subdev_to_counter(s);
-/* const struct comedi_cmd *cmd = &s->async->cmd; */
-
retval = ni_660x_request_mite_channel(dev, counter, COMEDI_INPUT);
if (retval) {
comedi_error(dev,
@@ -827,22 +753,13 @@ static int ni_660x_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return retval;
}
ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
- retval = ni_tio_cmd(counter, s->async);
-
- return retval;
-}
-
-static int ni_660x_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- struct ni_gpct *counter = subdev_to_counter(s);
- return ni_tio_cmdtest(counter, cmd);
+ return ni_tio_cmd(dev, s);
}
static int ni_660x_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct ni_gpct *counter = subdev_to_counter(s);
+ struct ni_gpct *counter = s->private;
int retval;
retval = ni_tio_cancel(counter);
@@ -850,23 +767,28 @@ static int ni_660x_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return retval;
}
-static void set_tio_counterswap(struct comedi_device *dev, int chipset)
+static void set_tio_counterswap(struct comedi_device *dev, int chip)
{
- /* See P. 3.5 of the Register-Level Programming manual. The
- CounterSwap bit has to be set on the second chip, otherwise
- it will try to use the same pins as the first chip.
+ unsigned bits = 0;
+
+ /*
+ * See P. 3.5 of the Register-Level Programming manual.
+ * The CounterSwap bit has to be set on the second chip,
+ * otherwise it will try to use the same pins as the
+ * first chip.
*/
- if (chipset)
- ni_660x_write_register(dev, chipset, CounterSwap,
- ClockConfigRegister);
- else
- ni_660x_write_register(dev, chipset, 0, ClockConfigRegister);
+ if (chip)
+ bits = CounterSwap;
+
+ ni_660x_write_register(dev, chip, bits, NI660X_CLK_CFG);
}
static void ni_660x_handle_gpct_interrupt(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- ni_tio_handle_interrupt(subdev_to_counter(s), s);
+ struct ni_gpct *counter = s->private;
+
+ ni_tio_handle_interrupt(counter, s);
if (s->async->events) {
if (s->async->events & (COMEDI_CB_EOA | COMEDI_CB_ERROR |
COMEDI_CB_OVERFLOW)) {
@@ -901,11 +823,12 @@ static int ni_660x_input_poll(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct ni_660x_private *devpriv = dev->private;
+ struct ni_gpct *counter = s->private;
unsigned long flags;
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&devpriv->interrupt_lock, flags);
- mite_sync_input_dma(subdev_to_counter(s)->mite_chan, s->async);
+ mite_sync_input_dma(counter->mite_chan, s->async);
spin_unlock_irqrestore(&devpriv->interrupt_lock, flags);
return comedi_buf_read_n_available(s->async);
}
@@ -915,10 +838,10 @@ static int ni_660x_buf_change(struct comedi_device *dev,
unsigned long new_size)
{
struct ni_660x_private *devpriv = dev->private;
+ struct ni_gpct *counter = s->private;
int ret;
- ret = mite_buf_change(mite_ring(devpriv, subdev_to_counter(s)),
- s->async);
+ ret = mite_buf_change(mite_ring(devpriv, counter), s->async);
if (ret < 0)
return ret;
@@ -974,13 +897,6 @@ static void ni_660x_free_mite_rings(struct comedi_device *dev)
}
}
-static int
-ni_660x_GPCT_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- return ni_tio_rinsn(subdev_to_counter(s), insn, data);
-}
-
static void init_tio_chip(struct comedi_device *dev, int chipset)
{
struct ni_660x_private *devpriv = dev->private;
@@ -994,25 +910,11 @@ static void init_tio_chip(struct comedi_device *dev, int chipset)
}
ni_660x_write_register(dev, chipset,
devpriv->dma_configuration_soft_copies[chipset],
- DMAConfigRegister);
+ NI660X_DMA_CFG);
for (i = 0; i < NUM_PFI_CHANNELS; ++i)
ni_660x_write_register(dev, chipset, 0, IOConfigReg(i));
}
-static int
-ni_660x_GPCT_insn_config(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- return ni_tio_insn_config(subdev_to_counter(s), insn, data);
-}
-
-static int ni_660x_GPCT_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- return ni_tio_winsn(subdev_to_counter(s), insn, data);
-}
-
static int ni_660x_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -1024,13 +926,13 @@ static int ni_660x_dio_insn_bits(struct comedi_device *dev,
s->state &= ~(data[0] << base_bitfield_channel);
s->state |= (data[0] & data[1]) << base_bitfield_channel;
/* Write out the new digital output lines */
- ni_660x_write_register(dev, 0, s->state, DIO32Output);
+ ni_660x_write_register(dev, 0, s->state, NI660X_DIO32_OUTPUT);
}
/* on return, data[1] contains the value of the digital
* input and output lines. */
- data[1] =
- (ni_660x_read_register(dev, 0,
- DIO32Input) >> base_bitfield_channel);
+ data[1] = (ni_660x_read_register(dev, 0, NI660X_DIO32_INPUT) >>
+ base_bitfield_channel);
+
return insn->n;
}
@@ -1215,7 +1117,7 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
s->insn_config = ni_660x_dio_insn_config;
/* we use the ioconfig registers to control dio direction, so zero
output enables in stc dio control reg */
- ni_660x_write_register(dev, 0, 0, STCDIOControl);
+ ni_660x_write_register(dev, 0, 0, NI660X_STC_DIO_CONTROL);
devpriv->counter_dev = ni_gpct_device_construct(dev,
&ni_gpct_write_register,
@@ -1234,12 +1136,12 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
SDF_CMD_READ /* | SDF_CMD_WRITE */ ;
s->n_chan = 3;
s->maxdata = 0xffffffff;
- s->insn_read = ni_660x_GPCT_rinsn;
- s->insn_write = ni_660x_GPCT_winsn;
- s->insn_config = ni_660x_GPCT_insn_config;
+ s->insn_read = ni_tio_insn_read;
+ s->insn_write = ni_tio_insn_write;
+ s->insn_config = ni_tio_insn_config;
s->do_cmd = &ni_660x_cmd;
s->len_chanlist = 1;
- s->do_cmdtest = &ni_660x_cmdtest;
+ s->do_cmdtest = ni_tio_cmdtest;
s->cancel = &ni_660x_cancel;
s->poll = &ni_660x_input_poll;
s->async_dma_dir = DMA_BIDIRECTIONAL;
@@ -1284,7 +1186,7 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
if (board->n_chips > 1)
global_interrupt_config_bits |= Cascade_Int_Enable_Bit;
ni_660x_write_register(dev, 0, global_interrupt_config_bits,
- GlobalInterruptConfigRegister);
+ NI660X_GLOBAL_INT_CFG);
dev_info(dev->class_dev, "ni_660x: %s attached\n", dev->board_name);
return 0;
}
@@ -1320,7 +1222,7 @@ static int ni_660x_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_660x_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(ni_660x_pci_table) = {
+static const struct pci_device_id ni_660x_pci_table[] = {
{ PCI_VDEVICE(NI, 0x1310), BOARD_PCI6602 },
{ PCI_VDEVICE(NI, 0x1360), BOARD_PXI6602 },
{ PCI_VDEVICE(NI, 0x2c60), BOARD_PCI6601 },
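
Editor's note: much of the ni_660x churn above comes from dropping the subdev_to_counter() wrappers; the struct ni_gpct pointer now lives in s->private, so the generic ni_tio handlers (ni_tio_insn_read() and friends) can be wired up directly. A minimal sketch of that comedi idiom with hypothetical names, independent of the NI hardware.

/* Sketch only: example_state and example_insn_read are placeholders. */
struct example_state {
	unsigned int last_value;
};

static int example_insn_read(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_insn *insn, unsigned int *data)
{
	struct example_state *st = s->private;	/* stored at attach time */
	int i;

	for (i = 0; i < insn->n; i++)
		data[i] = st->last_value;

	return insn->n;
}

/* In the attach routine:
 *	s->private   = st;
 *	s->insn_read = example_insn_read;
 */
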
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index e4414cf1..8550fdc 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -282,7 +282,7 @@ static int ni_670x_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_670x_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(ni_670x_pci_table) = {
+static const struct pci_device_id ni_670x_pci_table[] = {
{ PCI_VDEVICE(NI, 0x1290), BOARD_PCI6704 },
{ PCI_VDEVICE(NI, 0x1920), BOARD_PXI6704 },
{ PCI_VDEVICE(NI, 0x2c90), BOARD_PCI6703 },
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index 63c8479..f83eb9e 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -74,9 +74,6 @@ TRIG_WAKE_EOS
#define A2150_SIZE 28
#define A2150_DMA_BUFFER_SIZE 0xff00 /* size in bytes of dma buffer */
-/* #define A2150_DEBUG enable debugging code */
-#undef A2150_DEBUG /* disable debugging code */
-
/* Registers and bits */
#define CONFIG_REG 0x0
#define CHANNEL_BITS(x) ((x) & 0x7)
@@ -127,10 +124,9 @@ struct a2150_board {
/* analog input range */
static const struct comedi_lrange range_a2150 = {
- 1,
- {
- RANGE(-2.828, 2.828),
- }
+ 1, {
+ BIP_RANGE(2.828)
+ }
};
/* enum must match board indices */
@@ -167,19 +163,6 @@ static int a2150_get_timing(struct comedi_device *dev, unsigned int *period,
static int a2150_set_chanlist(struct comedi_device *dev,
unsigned int start_channel,
unsigned int num_channels);
-#ifdef A2150_DEBUG
-
-static void ni_dump_regs(struct comedi_device *dev)
-{
- struct a2150_private *devpriv = dev->private;
-
- printk("config bits 0x%x\n", devpriv->config_bits);
- printk("irq dma bits 0x%x\n", devpriv->irq_dma_bits);
- printk("status bits 0x%x\n", inw(dev->iobase + STATUS_REG));
-}
-
-#endif
-
/* interrupt service routine */
static irqreturn_t a2150_interrupt(int irq, void *d)
{
@@ -411,11 +394,6 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
unsigned int old_config_bits = devpriv->config_bits;
unsigned int trigger_bits;
- if (!dev->irq || !devpriv->dma) {
- comedi_error(dev,
- " irq and dma required, cannot do hardware conversions");
- return -1;
- }
if (cmd->flags & TRIG_RT) {
comedi_error(dev,
" dma incompatible with hard real-time interrupt (TRIG_RT), aborting");
@@ -506,9 +484,6 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* start acquisition for soft trigger */
if (cmd->start_src == TRIG_NOW)
outw(0, dev->iobase + FIFO_START_REG);
-#ifdef A2150_DEBUG
- ni_dump_regs(dev);
-#endif
return 0;
}
@@ -573,13 +548,7 @@ static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
comedi_error(dev, "timeout");
return -ETIME;
}
-#ifdef A2150_DEBUG
- ni_dump_regs(dev);
-#endif
data[n] = inw(dev->iobase + FIFO_DATA_REG);
-#ifdef A2150_DEBUG
- printk(" data is %i\n", data[n]);
-#endif
data[n] ^= 0x8000;
}
@@ -728,46 +697,35 @@ static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- /* grab our IRQ */
- if (irq) {
- /* check that irq is supported */
- if (irq < 3 || irq == 8 || irq == 13 || irq > 15) {
- printk(" invalid irq line %u\n", irq);
- return -EINVAL;
- }
- if (request_irq(irq, a2150_interrupt, 0,
- dev->driver->driver_name, dev)) {
- printk("unable to allocate irq %u\n", irq);
- return -EINVAL;
+ dev->board_ptr = a2150_boards + a2150_probe(dev);
+ thisboard = comedi_board(dev);
+ dev->board_name = thisboard->name;
+
+ if ((irq >= 3 && irq <= 7) || (irq >= 9 && irq <= 12) ||
+ irq == 14 || irq == 15) {
+ ret = request_irq(irq, a2150_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0) {
+ devpriv->irq_dma_bits |= IRQ_LVL_BITS(irq);
+ dev->irq = irq;
}
- devpriv->irq_dma_bits |= IRQ_LVL_BITS(irq);
- dev->irq = irq;
}
- /* initialize dma */
- if (dma) {
- if (dma == 4 || dma > 7) {
- printk(" invalid dma channel %u\n", dma);
- return -EINVAL;
- }
- if (request_dma(dma, dev->driver->driver_name)) {
- printk(" failed to allocate dma channel %u\n", dma);
- return -EINVAL;
- }
- devpriv->dma = dma;
- devpriv->dma_buffer =
- kmalloc(A2150_DMA_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
- if (devpriv->dma_buffer == NULL)
- return -ENOMEM;
- disable_dma(dma);
- set_dma_mode(dma, DMA_MODE_READ);
+ if (dev->irq && dma <= 7 && dma != 4) {
+ ret = request_dma(dma, dev->board_name);
+ if (ret == 0) {
+ devpriv->dma = dma;
+ devpriv->dma_buffer = kmalloc(A2150_DMA_BUFFER_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!devpriv->dma_buffer)
+ return -ENOMEM;
- devpriv->irq_dma_bits |= DMA_CHAN_BITS(dma);
- }
+ disable_dma(dma);
+ set_dma_mode(dma, DMA_MODE_READ);
- dev->board_ptr = a2150_boards + a2150_probe(dev);
- thisboard = comedi_board(dev);
- dev->board_name = thisboard->name;
+ devpriv->irq_dma_bits |= DMA_CHAN_BITS(dma);
+ }
+ }
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
@@ -775,17 +733,20 @@ static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* analog input subdevice */
s = &dev->subdevices[0];
- dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_OTHER | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_OTHER;
s->n_chan = 4;
- s->len_chanlist = 4;
s->maxdata = 0xffff;
s->range_table = &range_a2150;
- s->do_cmd = a2150_ai_cmd;
- s->do_cmdtest = a2150_ai_cmdtest;
s->insn_read = a2150_ai_rinsn;
- s->cancel = a2150_cancel;
+ if (dev->irq && devpriv->dma) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmd = a2150_ai_cmd;
+ s->do_cmdtest = a2150_ai_cmdtest;
+ s->cancel = a2150_cancel;
+ }
/* need to do this for software counting of completed conversions, to
* prevent hardware count from stopping acquisition */
diff --git a/drivers/staging/comedi/drivers/ni_atmio.c b/drivers/staging/comedi/drivers/ni_atmio.c
index 856c73d..d039352 100644
--- a/drivers/staging/comedi/drivers/ni_atmio.c
+++ b/drivers/staging/comedi/drivers/ni_atmio.c
@@ -98,8 +98,6 @@ are not supported.
#include "ni_stc.h"
#include "8255.h"
-#undef DEBUG
-
#define ATMIO 1
#undef PCIMIO
@@ -437,19 +435,6 @@ static int ni_atmio_attach(struct comedi_device *dev,
if (ret)
return ret;
-#ifdef DEBUG
- /* board existence sanity check */
- {
- int i;
-
- printk(" board fingerprint:");
- for (i = 0; i < 16; i += 2) {
- printk(" %04x %02x", inw(dev->iobase + i),
- inb(dev->iobase + i + 1));
- }
- }
-#endif
-
/* get board type */
board = ni_getboardtype(dev);
diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c
index a9f7d40..e8cd5dd 100644
--- a/drivers/staging/comedi/drivers/ni_atmio16d.c
+++ b/drivers/staging/comedi/drivers/ni_atmio16d.c
@@ -105,40 +105,31 @@ struct atmio16_board_t {
};
/* range structs */
-static const struct comedi_lrange range_atmio16d_ai_10_bipolar = { 4, {
- BIP_RANGE
- (10),
- BIP_RANGE
- (1),
- BIP_RANGE
- (0.1),
- BIP_RANGE
- (0.02)
- }
+static const struct comedi_lrange range_atmio16d_ai_10_bipolar = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.02)
+ }
};
-static const struct comedi_lrange range_atmio16d_ai_5_bipolar = { 4, {
- BIP_RANGE
- (5),
- BIP_RANGE
- (0.5),
- BIP_RANGE
- (0.05),
- BIP_RANGE
- (0.01)
- }
+static const struct comedi_lrange range_atmio16d_ai_5_bipolar = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.01)
+ }
};
-static const struct comedi_lrange range_atmio16d_ai_unipolar = { 4, {
- UNI_RANGE
- (10),
- UNI_RANGE
- (1),
- UNI_RANGE
- (0.1),
- UNI_RANGE
- (0.02)
- }
+static const struct comedi_lrange range_atmio16d_ai_unipolar = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.02)
+ }
};
/* private data struct */
@@ -229,7 +220,7 @@ static void reset_atmio16d(struct comedi_device *dev)
static irqreturn_t atmio16d_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
comedi_buf_put(s->async, inw(dev->iobase + AD_FIFO_REG));
@@ -495,18 +486,13 @@ static int atmio16d_ai_insn_read(struct comedi_device *dev,
break;
}
if (status & STAT_AD_OVERFLOW) {
- printk(KERN_INFO "atmio16d: a/d FIFO overflow\n");
outw(0, dev->iobase + AD_CLEAR_REG);
-
return -ETIME;
}
}
/* end waiting, now check if it timed out */
- if (t == ATMIO16D_TIMEOUT) {
- printk(KERN_INFO "atmio16d: timeout\n");
-
+ if (t == ATMIO16D_TIMEOUT)
return -ETIME;
- }
}
return i;
@@ -636,7 +622,6 @@ static int atmio16d_attach(struct comedi_device *dev,
const struct atmio16_board_t *board = comedi_board(dev);
struct atmio16d_private *devpriv;
struct comedi_subdevice *s;
- unsigned int irq;
int ret;
ret = comedi_request_region(dev, it->options[0], ATMIO16D_SIZE);
@@ -654,19 +639,11 @@ static int atmio16d_attach(struct comedi_device *dev,
/* reset the atmio16d hardware */
reset_atmio16d(dev);
- /* check if our interrupt is available and get it */
- irq = it->options[1];
- if (irq) {
-
- ret = request_irq(irq, atmio16d_interrupt, 0, "atmio16d", dev);
- if (ret < 0) {
- printk(KERN_INFO "failed to allocate irq %u\n", irq);
- return ret;
- }
- dev->irq = irq;
- printk(KERN_INFO "( irq = %u )\n", irq);
- } else {
- printk(KERN_INFO "( no irq )");
+ if (it->options[1]) {
+ ret = request_irq(it->options[1], atmio16d_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
/* set device options */
@@ -682,16 +659,11 @@ static int atmio16d_attach(struct comedi_device *dev,
/* setup sub-devices */
s = &dev->subdevices[0];
- dev->read_subdev = s;
/* ai subdevice */
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = (devpriv->adc_mux ? 16 : 8);
- s->len_chanlist = 16;
s->insn_read = atmio16d_ai_insn_read;
- s->do_cmdtest = atmio16d_ai_cmdtest;
- s->do_cmd = atmio16d_ai_cmd;
- s->cancel = atmio16d_ai_cancel;
s->maxdata = 0xfff; /* 4095 decimal */
switch (devpriv->adc_range) {
case adc_bipolar10:
@@ -704,6 +676,14 @@ static int atmio16d_attach(struct comedi_device *dev,
s->range_table = &range_atmio16d_ai_unipolar;
break;
}
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 16;
+ s->do_cmdtest = atmio16d_ai_cmdtest;
+ s->do_cmd = atmio16d_ai_cmd;
+ s->cancel = atmio16d_ai_cancel;
+ }
/* ao subdevice */
s = &dev->subdevices[1];
@@ -756,7 +736,6 @@ static int atmio16d_attach(struct comedi_device *dev,
s->n_chan = 0;
s->maxdata = 0
#endif
- printk("\n");
return 0;
}
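
The atmio16d attach changes above show the pattern this series applies to the legacy NI drivers: the IRQ passed in it->options[1] is optional, a request_irq() failure is no longer fatal, and the asynchronous command interface is registered only when an IRQ was actually obtained. A condensed sketch of that pattern, assuming "../comedidev.h" and <linux/interrupt.h> are included and using a hypothetical my_irq_handler; the rest of the subdevice setup is omitted:

static int my_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	struct comedi_subdevice *s;
	int ret;

	/* the IRQ in options[1] is optional */
	if (it->options[1]) {
		ret = request_irq(it->options[1], my_irq_handler, 0,
				  dev->board_name, dev);
		if (ret == 0)
			dev->irq = it->options[1];
	}

	s = &dev->subdevices[0];
	s->type = COMEDI_SUBD_AI;
	s->subdev_flags = SDF_READABLE | SDF_GROUND;

	/* async commands need the interrupt, so enable them only with one */
	if (dev->irq) {
		dev->read_subdev = s;
		s->subdev_flags |= SDF_CMD_READ;
		s->len_chanlist = 16;
	}

	return 0;
}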
diff --git a/drivers/staging/comedi/drivers/ni_labpc_pci.c b/drivers/staging/comedi/drivers/ni_labpc_pci.c
index 8be681f..7395970 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_pci.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_pci.c
@@ -107,7 +107,7 @@ static struct comedi_driver labpc_pci_comedi_driver = {
.detach = labpc_pci_detach,
};
-static DEFINE_PCI_DEVICE_TABLE(labpc_pci_table) = {
+static const struct pci_device_id labpc_pci_table[] = {
{ PCI_VDEVICE(NI, 0x161), BOARD_NI_PCI1200 },
{ 0 }
};
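
The labpc change is the DEFINE_PCI_DEVICE_TABLE() conversion that recurs through the rest of this series; the macro expands to little more than a const struct pci_device_id array, so the open-coded form is preferred. A sketch of the resulting style, with my_pci_table as a placeholder name and the entry copied from the hunk above:

static const struct pci_device_id my_pci_table[] = {
	{ PCI_VDEVICE(NI, 0x161), BOARD_NI_PCI1200 },
	{ 0 }	/* terminating entry */
};
/* exports the IDs so the module can be autoloaded on device match */
MODULE_DEVICE_TABLE(pci, my_pci_table);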
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 5113397..457b884 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -52,10 +52,6 @@
fully tested as yet. Terry Barnaby, BEAM Ltd.
*/
-/* #define DEBUG_INTERRUPT */
-/* #define DEBUG_STATUS_A */
-/* #define DEBUG_STATUS_B */
-
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -63,10 +59,6 @@
#include "mite.h"
#include "comedi_fc.h"
-#ifndef MDPRINTK
-#define MDPRINTK(format, args...)
-#endif
-
/* A timeout count */
#define NI_TIMEOUT 1000
static const unsigned old_RTSI_clock_channel = 7;
@@ -86,111 +78,109 @@ static const short ni_gainlkup[][16] = {
[ai_gain_6143] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
};
-static const struct comedi_lrange range_ni_E_ai = { 16, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-2.5, 2.5),
- RANGE(-1, 1),
- RANGE(-0.5, 0.5),
- RANGE(-0.25, 0.25),
- RANGE(-0.1, 0.1),
- RANGE(-0.05, 0.05),
- RANGE(0, 20),
- RANGE(0, 10),
- RANGE(0, 5),
- RANGE(0, 2),
- RANGE(0, 1),
- RANGE(0, 0.5),
- RANGE(0, 0.2),
- RANGE(0, 0.1),
- }
+static const struct comedi_lrange range_ni_E_ai = {
+ 16, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.25),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.05),
+ UNI_RANGE(20),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2),
+ UNI_RANGE(1),
+ UNI_RANGE(0.5),
+ UNI_RANGE(0.2),
+ UNI_RANGE(0.1)
+ }
};
-static const struct comedi_lrange range_ni_E_ai_limited = { 8, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-1, 1),
- RANGE(-0.1,
- 0.1),
- RANGE(0, 10),
- RANGE(0, 5),
- RANGE(0, 1),
- RANGE(0, 0.1),
- }
+static const struct comedi_lrange range_ni_E_ai_limited = {
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1)
+ }
};
-static const struct comedi_lrange range_ni_E_ai_limited14 = { 14, {
- RANGE(-10,
- 10),
- RANGE(-5, 5),
- RANGE(-2, 2),
- RANGE(-1, 1),
- RANGE(-0.5,
- 0.5),
- RANGE(-0.2,
- 0.2),
- RANGE(-0.1,
- 0.1),
- RANGE(0, 10),
- RANGE(0, 5),
- RANGE(0, 2),
- RANGE(0, 1),
- RANGE(0,
- 0.5),
- RANGE(0,
- 0.2),
- RANGE(0,
- 0.1),
- }
+static const struct comedi_lrange range_ni_E_ai_limited14 = {
+ 14, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.2),
+ BIP_RANGE(0.1),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2),
+ UNI_RANGE(1),
+ UNI_RANGE(0.5),
+ UNI_RANGE(0.2),
+ UNI_RANGE(0.1)
+ }
};
-static const struct comedi_lrange range_ni_E_ai_bipolar4 = { 4, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-0.5,
- 0.5),
- RANGE(-0.05,
- 0.05),
- }
+static const struct comedi_lrange range_ni_E_ai_bipolar4 = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05)
+ }
};
-static const struct comedi_lrange range_ni_E_ai_611x = { 8, {
- RANGE(-50, 50),
- RANGE(-20, 20),
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-2, 2),
- RANGE(-1, 1),
- RANGE(-0.5, 0.5),
- RANGE(-0.2, 0.2),
- }
+static const struct comedi_lrange range_ni_E_ai_611x = {
+ 8, {
+ BIP_RANGE(50),
+ BIP_RANGE(20),
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.2)
+ }
};
-static const struct comedi_lrange range_ni_M_ai_622x = { 4, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-1, 1),
- RANGE(-0.2, 0.2),
- }
+static const struct comedi_lrange range_ni_M_ai_622x = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(1),
+ BIP_RANGE(0.2)
+ }
};
-static const struct comedi_lrange range_ni_M_ai_628x = { 7, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-2, 2),
- RANGE(-1, 1),
- RANGE(-0.5, 0.5),
- RANGE(-0.2, 0.2),
- RANGE(-0.1, 0.1),
- }
+static const struct comedi_lrange range_ni_M_ai_628x = {
+ 7, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.2),
+ BIP_RANGE(0.1)
+ }
};
-static const struct comedi_lrange range_ni_E_ao_ext = { 4, {
- RANGE(-10, 10),
- RANGE(0, 10),
- RANGE_ext(-1, 1),
- RANGE_ext(0, 1),
- }
+static const struct comedi_lrange range_ni_E_ao_ext = {
+ 4, {
+ BIP_RANGE(10),
+ UNI_RANGE(10),
+ RANGE_ext(-1, 1),
+ RANGE_ext(0, 1)
+ }
};
static const struct comedi_lrange *const ni_range_lkup[] = {
@@ -266,17 +256,6 @@ static int ni_rtsi_insn_config(struct comedi_device *dev,
static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s);
static int ni_read_eeprom(struct comedi_device *dev, int addr);
-#ifdef DEBUG_STATUS_A
-static void ni_mio_print_status_a(int status);
-#else
-#define ni_mio_print_status_a(a)
-#endif
-#ifdef DEBUG_STATUS_B
-static void ni_mio_print_status_b(int status);
-#else
-#define ni_mio_print_status_b(a)
-#endif
-
static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s);
#ifndef PCIDMA
static void ni_handle_fifo_half_full(struct comedi_device *dev);
@@ -297,19 +276,8 @@ static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s);
static int ni_8255_callback(int dir, int port, int data, unsigned long arg);
-static int ni_gpct_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
-static int ni_gpct_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
-static int ni_gpct_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
#ifdef PCIDMA
static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
-static int ni_gpct_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd);
#endif
static int ni_gpct_cancel(struct comedi_device *dev,
struct comedi_subdevice *s);
@@ -322,10 +290,6 @@ static int cs5529_do_conversion(struct comedi_device *dev,
static int cs5529_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
-#ifdef NI_CS5529_DEBUG
-static unsigned int cs5529_config_read(struct comedi_device *dev,
- unsigned int reg_select_bits);
-#endif
static void cs5529_config_write(struct comedi_device *dev, unsigned int value,
unsigned int reg_select_bits);
@@ -487,11 +451,10 @@ static inline void ni_set_gpct_dma_channel(struct comedi_device *dev,
{
unsigned bitfield;
- if (mite_channel >= 0) {
+ if (mite_channel >= 0)
bitfield = GPCT_DMA_Select_Bits(gpct_index, mite_channel);
- } else {
+ else
bitfield = 0;
- }
ni_set_bitfield(dev, G0_G1_Select, GPCT_DMA_Select_Mask(gpct_index),
bitfield);
}
@@ -907,9 +870,8 @@ static void mite_handle_b_linkc(struct mite_struct *mite,
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->ao_mite_chan) {
+ if (devpriv->ao_mite_chan)
mite_sync_output_dma(devpriv->ao_mite_chan, s->async);
- }
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
@@ -957,9 +919,8 @@ static void ni_handle_eos(struct comedi_device *dev, struct comedi_subdevice *s)
#endif
}
/* handle special case of single scan using AI_End_On_End_Of_Scan */
- if ((devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) {
+ if ((devpriv->ai_cmd2 & AI_End_On_End_Of_Scan))
shutdown_ai_command(dev);
- }
}
static void shutdown_ai_command(struct comedi_device *dev)
@@ -1023,19 +984,15 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status)
struct ni_private *devpriv = dev->private;
unsigned short ack = 0;
- if (a_status & AI_SC_TC_St) {
+ if (a_status & AI_SC_TC_St)
ack |= AI_SC_TC_Interrupt_Ack;
- }
- if (a_status & AI_START1_St) {
+ if (a_status & AI_START1_St)
ack |= AI_START1_Interrupt_Ack;
- }
- if (a_status & AI_START_St) {
+ if (a_status & AI_START_St)
ack |= AI_START_Interrupt_Ack;
- }
- if (a_status & AI_STOP_St) {
+ if (a_status & AI_STOP_St)
/* not sure why we used to ack the START here also, instead of doing it independently. Frank Hess 2007-07-06 */
- ack |= AI_STOP_Interrupt_Ack /*| AI_START_Interrupt_Ack */ ;
- }
+ ack |= AI_STOP_Interrupt_Ack /*| AI_START_Interrupt_Ack */;
if (ack)
devpriv->stc_writew(dev, ack, Interrupt_A_Ack_Register);
}
@@ -1050,16 +1007,9 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (s->type == COMEDI_SUBD_UNUSED)
return;
-#ifdef DEBUG_INTERRUPT
- printk
- ("ni_mio_common: interrupt: a_status=%04x ai_mite_status=%08x\n",
- status, ai_mite_status);
- ni_mio_print_status_a(status);
-#endif
#ifdef PCIDMA
- if (ai_mite_status & CHSR_LINKC) {
+ if (ai_mite_status & CHSR_LINKC)
ni_sync_ai_dma(dev);
- }
if (ai_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
@@ -1067,7 +1017,6 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
printk
("unknown mite interrupt, ack! (ai_mite_status=%08x)\n",
ai_mite_status);
- /* mite_print_chsr(ai_mite_status); */
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
/* disable_irq(dev->irq); */
}
@@ -1092,7 +1041,6 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
AI_SC_TC_Error_St)) {
printk("ni_mio_common: ai error a_status=%04x\n",
status);
- ni_mio_print_status_a(status);
shutdown_ai_command(dev);
@@ -1105,12 +1053,8 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
return;
}
if (status & AI_SC_TC_St) {
-#ifdef DEBUG_INTERRUPT
- printk("ni_mio_common: SC_TC interrupt\n");
-#endif
- if (!devpriv->ai_continuous) {
+ if (!devpriv->ai_continuous)
shutdown_ai_command(dev);
- }
}
}
#ifndef PCIDMA
@@ -1129,20 +1073,10 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
}
#endif /* !PCIDMA */
- if ((status & AI_STOP_St)) {
+ if ((status & AI_STOP_St))
ni_handle_eos(dev, s);
- }
ni_event(dev, s);
-
-#ifdef DEBUG_INTERRUPT
- status = devpriv->stc_readw(dev, AI_Status_1_Register);
- if (status & Interrupt_A_St) {
- printk
- ("handle_a_interrupt: didn't clear interrupt? status=0x%x\n",
- status);
- }
-#endif
}
static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
@@ -1150,27 +1084,20 @@ static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
struct ni_private *devpriv = dev->private;
unsigned short ack = 0;
- if (b_status & AO_BC_TC_St) {
+ if (b_status & AO_BC_TC_St)
ack |= AO_BC_TC_Interrupt_Ack;
- }
- if (b_status & AO_Overrun_St) {
+ if (b_status & AO_Overrun_St)
ack |= AO_Error_Interrupt_Ack;
- }
- if (b_status & AO_START_St) {
+ if (b_status & AO_START_St)
ack |= AO_START_Interrupt_Ack;
- }
- if (b_status & AO_START1_St) {
+ if (b_status & AO_START1_St)
ack |= AO_START1_Interrupt_Ack;
- }
- if (b_status & AO_UC_TC_St) {
+ if (b_status & AO_UC_TC_St)
ack |= AO_UC_TC_Interrupt_Ack;
- }
- if (b_status & AO_UI2_TC_St) {
+ if (b_status & AO_UI2_TC_St)
ack |= AO_UI2_TC_Interrupt_Ack;
- }
- if (b_status & AO_UPDATE_St) {
+ if (b_status & AO_UPDATE_St)
ack |= AO_UPDATE_Interrupt_Ack;
- }
if (ack)
devpriv->stc_writew(dev, ack, Interrupt_B_Ack_Register);
}
@@ -1182,17 +1109,10 @@ static void handle_b_interrupt(struct comedi_device *dev,
struct comedi_subdevice *s = &dev->subdevices[NI_AO_SUBDEV];
/* unsigned short ack=0; */
-#ifdef DEBUG_INTERRUPT
- printk("ni_mio_common: interrupt: b_status=%04x m1_status=%08x\n",
- b_status, ao_mite_status);
- ni_mio_print_status_b(b_status);
-#endif
-
#ifdef PCIDMA
/* Currently, mite.c requires us to handle LINKC */
- if (ao_mite_status & CHSR_LINKC) {
+ if (ao_mite_status & CHSR_LINKC)
mite_handle_b_linkc(devpriv->mite, dev);
- }
if (ao_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
@@ -1200,7 +1120,6 @@ static void handle_b_interrupt(struct comedi_device *dev,
printk
("unknown mite interrupt, ack! (ao_mite_status=%08x)\n",
ao_mite_status);
- /* mite_print_chsr(ao_mite_status); */
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
}
#endif
@@ -1214,12 +1133,9 @@ static void handle_b_interrupt(struct comedi_device *dev,
s->async->events |= COMEDI_CB_OVERFLOW;
}
- if (b_status & AO_BC_TC_St) {
- MDPRINTK
- ("ni_mio_common: AO BC_TC status=0x%04x status2=0x%04x\n",
- b_status, devpriv->stc_readw(dev, AO_Status_2_Register));
+ if (b_status & AO_BC_TC_St)
s->async->events |= COMEDI_CB_EOA;
- }
+
#ifndef PCIDMA
if (b_status & AO_FIFO_Request_St) {
int ret;
@@ -1238,50 +1154,6 @@ static void handle_b_interrupt(struct comedi_device *dev,
ni_event(dev, s);
}
-#ifdef DEBUG_STATUS_A
-static const char *const status_a_strings[] = {
- "passthru0", "fifo", "G0_gate", "G0_TC",
- "stop", "start", "sc_tc", "start1",
- "start2", "sc_tc_error", "overflow", "overrun",
- "fifo_empty", "fifo_half_full", "fifo_full", "interrupt_a"
-};
-
-static void ni_mio_print_status_a(int status)
-{
- int i;
-
- printk("A status:");
- for (i = 15; i >= 0; i--) {
- if (status & (1 << i)) {
- printk(" %s", status_a_strings[i]);
- }
- }
- printk("\n");
-}
-#endif
-
-#ifdef DEBUG_STATUS_B
-static const char *const status_b_strings[] = {
- "passthru1", "fifo", "G1_gate", "G1_TC",
- "UI2_TC", "UPDATE", "UC_TC", "BC_TC",
- "start1", "overrun", "start", "bc_tc_error",
- "fifo_empty", "fifo_half_full", "fifo_full", "interrupt_b"
-};
-
-static void ni_mio_print_status_b(int status)
-{
- int i;
-
- printk("B status:");
- for (i = 15; i >= 0; i--) {
- if (status & (1 << i)) {
- printk(" %s", status_b_strings[i]);
- }
- }
- printk("\n");
-}
-#endif
-
#ifndef PCIDMA
static void ni_ao_fifo_load(struct comedi_device *dev,
@@ -1324,9 +1196,8 @@ static void ni_ao_fifo_load(struct comedi_device *dev,
chan %= cmd->chanlist_len;
}
async->cur_chan = chan;
- if (err == 0) {
+ if (err == 0)
async->events |= COMEDI_CB_OVERFLOW;
- }
}
/*
@@ -2392,7 +2263,6 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
unsigned int stop_count;
int interrupt_a_enable = 0;
- MDPRINTK("ni_ai_cmd\n");
if (dev->irq == 0) {
comedi_error(dev, "cannot run command without an irq");
return -EIO;
@@ -2630,15 +2500,11 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
ni_set_bits(dev, Interrupt_A_Enable_Register,
interrupt_a_enable, 1);
-
- MDPRINTK("Interrupt_A_Enable_Register = 0x%04x\n",
- devpriv->int_a_enable_reg);
} else {
/* interrupt on nothing */
ni_set_bits(dev, Interrupt_A_Enable_Register, ~0, 0);
/* XXX start polling if necessary */
- MDPRINTK("interrupting on nothing\n");
}
/* end configuration */
@@ -2664,7 +2530,6 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (retval)
return retval;
}
- /* mite_dump_regs(devpriv->mite); */
#endif
switch (cmd->start_src) {
@@ -2682,8 +2547,6 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
break;
}
- MDPRINTK("exit ni_ai_cmd\n");
-
return 0;
}
@@ -3248,11 +3111,10 @@ static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
devpriv->ao_mode2 &= ~AO_BC_Initial_Load_Source;
devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
- if (cmd->stop_src == TRIG_NONE) {
+ if (cmd->stop_src == TRIG_NONE)
devpriv->stc_writel(dev, 0xffffff, AO_BC_Load_A_Register);
- } else {
+ else
devpriv->stc_writel(dev, 0, AO_BC_Load_A_Register);
- }
devpriv->stc_writew(dev, AO_BC_Load, AO_Command_1_Register);
devpriv->ao_mode2 &= ~AO_UC_Initial_Load_Source;
devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
@@ -3513,9 +3375,8 @@ static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s)
if (board->reg_type & ni_reg_6xxx_mask) {
unsigned immediate_bits = 0;
unsigned i;
- for (i = 0; i < s->n_chan; ++i) {
+ for (i = 0; i < s->n_chan; ++i)
immediate_bits |= 1 << i;
- }
ao_win_out(immediate_bits, AO_Immediate_671x);
ao_win_out(CLEAR_WG, AO_Misc_611x);
}
@@ -3689,9 +3550,8 @@ static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return -EIO;
}
retval = ni_request_cdo_mite_channel(dev);
- if (retval < 0) {
+ if (retval < 0)
return retval;
- }
s->async->inttrig = &ni_cdo_inttrig;
return 0;
}
@@ -3773,9 +3633,8 @@ static void handle_cdio_interrupt(struct comedi_device *dev)
unsigned long flags;
#endif
- if ((board->reg_type & ni_reg_m_series_mask) == 0) {
+ if ((board->reg_type & ni_reg_m_series_mask) == 0)
return;
- }
#ifdef PCIDMA
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->cdo_mite_chan) {
@@ -3793,15 +3652,15 @@ static void handle_cdio_interrupt(struct comedi_device *dev)
cdio_status = ni_readl(M_Offset_CDIO_Status);
if (cdio_status & (CDO_Overrun_Bit | CDO_Underflow_Bit)) {
-/* printk("cdio error: statux=0x%x\n", cdio_status); */
+ /* printk("cdio error: statux=0x%x\n", cdio_status); */
ni_writel(CDO_Error_Interrupt_Confirm_Bit, M_Offset_CDIO_Command); /* XXX just guessing this is needed and does something useful */
s->async->events |= COMEDI_CB_OVERFLOW;
}
if (cdio_status & CDO_FIFO_Empty_Bit) {
-/* printk("cdio fifo empty\n"); */
+ /* printk("cdio fifo empty\n"); */
ni_writel(CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit,
M_Offset_CDIO_Command);
-/* s->async->events |= COMEDI_CB_EOA; */
+ /* s->async->events |= COMEDI_CB_EOA; */
}
ni_event(dev, s);
}
@@ -3819,10 +3678,6 @@ static int ni_serial_insn_config(struct comedi_device *dev,
switch (data[0]) {
case INSN_CONFIG_SERIAL_CLOCK:
-
-#ifdef DEBUG_DIO
- printk("SPI serial clock Config cd\n", data[1]);
-#endif
devpriv->serial_hw_mode = 1;
devpriv->dio_control |= DIO_HW_Serial_Enable;
@@ -3874,9 +3729,8 @@ static int ni_serial_insn_config(struct comedi_device *dev,
case INSN_CONFIG_BIDIRECTIONAL_DATA:
- if (devpriv->serial_interval_ns == 0) {
+ if (devpriv->serial_interval_ns == 0)
return -EINVAL;
- }
byte_out = data[1] & 0xFF;
@@ -3911,10 +3765,6 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
unsigned int status1;
int err = 0, count = 20;
-#ifdef DEBUG_DIO
- printk("ni_serial_hw_readwrite8: outputting 0x%x\n", data_out);
-#endif
-
devpriv->dio_output &= ~DIO_Serial_Data_Mask;
devpriv->dio_output |= DIO_Serial_Data_Out(data_out);
devpriv->stc_writew(dev, devpriv->dio_output, DIO_Output_Register);
@@ -3948,12 +3798,8 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
DIO_Serial_IO_In_Progress_St goes high one bit too early. */
udelay((devpriv->serial_interval_ns + 999) / 1000);
- if (data_in != NULL) {
+ if (data_in != NULL)
*data_in = devpriv->stc_readw(dev, DIO_Serial_Input_Register);
-#ifdef DEBUG_DIO
- printk("ni_serial_hw_readwrite8: inputted 0x%x\n", *data_in);
-#endif
- }
Error:
devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
@@ -3969,10 +3815,6 @@ static int ni_serial_sw_readwrite8(struct comedi_device *dev,
struct ni_private *devpriv = dev->private;
unsigned char mask, input = 0;
-#ifdef DEBUG_DIO
- printk("ni_serial_sw_readwrite8: outputting 0x%x\n", data_out);
-#endif
-
/* Wait for one bit before transfer */
udelay((devpriv->serial_interval_ns + 999) / 1000);
@@ -3981,9 +3823,8 @@ static int ni_serial_sw_readwrite8(struct comedi_device *dev,
because it is a per-subdevice field, and serial is
a separate subdevice from DIO. */
devpriv->dio_output &= ~DIO_SDOUT;
- if (data_out & mask) {
+ if (data_out & mask)
devpriv->dio_output |= DIO_SDOUT;
- }
devpriv->stc_writew(dev, devpriv->dio_output,
DIO_Output_Register);
@@ -4003,15 +3844,12 @@ static int ni_serial_sw_readwrite8(struct comedi_device *dev,
/* Input current bit */
if (devpriv->stc_readw(dev,
- DIO_Parallel_Input_Register) & DIO_SDIN)
- {
-/* printk("DIO_P_I_R: 0x%x\n", devpriv->stc_readw(dev, DIO_Parallel_Input_Register)); */
+ DIO_Parallel_Input_Register) & DIO_SDIN) {
+ /* printk("DIO_P_I_R: 0x%x\n", devpriv->stc_readw(dev, DIO_Parallel_Input_Register)); */
input |= mask;
}
}
-#ifdef DEBUG_DIO
- printk("ni_serial_sw_readwrite8: inputted 0x%x\n", input);
-#endif
+
if (data_in)
*data_in = input;
@@ -4023,9 +3861,8 @@ static void mio_common_detach(struct comedi_device *dev)
struct ni_private *devpriv = dev->private;
if (devpriv) {
- if (devpriv->counter_dev) {
+ if (devpriv->counter_dev)
ni_gpct_device_destroy(devpriv->counter_dev);
- }
}
}
@@ -4044,82 +3881,82 @@ static unsigned ni_gpct_to_stc_register(enum ni_gpct_register reg)
{
unsigned stc_register;
switch (reg) {
- case NITIO_G0_Autoincrement_Reg:
+ case NITIO_G0_AUTO_INC:
stc_register = G_Autoincrement_Register(0);
break;
- case NITIO_G1_Autoincrement_Reg:
+ case NITIO_G1_AUTO_INC:
stc_register = G_Autoincrement_Register(1);
break;
- case NITIO_G0_Command_Reg:
+ case NITIO_G0_CMD:
stc_register = G_Command_Register(0);
break;
- case NITIO_G1_Command_Reg:
+ case NITIO_G1_CMD:
stc_register = G_Command_Register(1);
break;
- case NITIO_G0_HW_Save_Reg:
+ case NITIO_G0_HW_SAVE:
stc_register = G_HW_Save_Register(0);
break;
- case NITIO_G1_HW_Save_Reg:
+ case NITIO_G1_HW_SAVE:
stc_register = G_HW_Save_Register(1);
break;
- case NITIO_G0_SW_Save_Reg:
+ case NITIO_G0_SW_SAVE:
stc_register = G_Save_Register(0);
break;
- case NITIO_G1_SW_Save_Reg:
+ case NITIO_G1_SW_SAVE:
stc_register = G_Save_Register(1);
break;
- case NITIO_G0_Mode_Reg:
+ case NITIO_G0_MODE:
stc_register = G_Mode_Register(0);
break;
- case NITIO_G1_Mode_Reg:
+ case NITIO_G1_MODE:
stc_register = G_Mode_Register(1);
break;
- case NITIO_G0_LoadA_Reg:
+ case NITIO_G0_LOADA:
stc_register = G_Load_A_Register(0);
break;
- case NITIO_G1_LoadA_Reg:
+ case NITIO_G1_LOADA:
stc_register = G_Load_A_Register(1);
break;
- case NITIO_G0_LoadB_Reg:
+ case NITIO_G0_LOADB:
stc_register = G_Load_B_Register(0);
break;
- case NITIO_G1_LoadB_Reg:
+ case NITIO_G1_LOADB:
stc_register = G_Load_B_Register(1);
break;
- case NITIO_G0_Input_Select_Reg:
+ case NITIO_G0_INPUT_SEL:
stc_register = G_Input_Select_Register(0);
break;
- case NITIO_G1_Input_Select_Reg:
+ case NITIO_G1_INPUT_SEL:
stc_register = G_Input_Select_Register(1);
break;
- case NITIO_G01_Status_Reg:
+ case NITIO_G01_STATUS:
stc_register = G_Status_Register;
break;
- case NITIO_G01_Joint_Reset_Reg:
+ case NITIO_G01_RESET:
stc_register = Joint_Reset_Register;
break;
- case NITIO_G01_Joint_Status1_Reg:
+ case NITIO_G01_STATUS1:
stc_register = Joint_Status_1_Register;
break;
- case NITIO_G01_Joint_Status2_Reg:
+ case NITIO_G01_STATUS2:
stc_register = Joint_Status_2_Register;
break;
- case NITIO_G0_Interrupt_Acknowledge_Reg:
+ case NITIO_G0_INT_ACK:
stc_register = Interrupt_A_Ack_Register;
break;
- case NITIO_G1_Interrupt_Acknowledge_Reg:
+ case NITIO_G1_INT_ACK:
stc_register = Interrupt_B_Ack_Register;
break;
- case NITIO_G0_Status_Reg:
+ case NITIO_G0_STATUS:
stc_register = AI_Status_1_Register;
break;
- case NITIO_G1_Status_Reg:
+ case NITIO_G1_STATUS:
stc_register = AO_Status_1_Register;
break;
- case NITIO_G0_Interrupt_Enable_Reg:
+ case NITIO_G0_INT_ENA:
stc_register = Interrupt_A_Enable_Register;
break;
- case NITIO_G1_Interrupt_Enable_Reg:
+ case NITIO_G1_INT_ENA:
stc_register = Interrupt_B_Enable_Register;
break;
default:
@@ -4147,52 +3984,52 @@ static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
switch (reg) {
/* m-series-only registers */
- case NITIO_G0_Counting_Mode_Reg:
+ case NITIO_G0_CNT_MODE:
ni_writew(bits, M_Offset_G0_Counting_Mode);
break;
- case NITIO_G1_Counting_Mode_Reg:
+ case NITIO_G1_CNT_MODE:
ni_writew(bits, M_Offset_G1_Counting_Mode);
break;
- case NITIO_G0_Second_Gate_Reg:
+ case NITIO_G0_GATE2:
ni_writew(bits, M_Offset_G0_Second_Gate);
break;
- case NITIO_G1_Second_Gate_Reg:
+ case NITIO_G1_GATE2:
ni_writew(bits, M_Offset_G1_Second_Gate);
break;
- case NITIO_G0_DMA_Config_Reg:
+ case NITIO_G0_DMA_CFG:
ni_writew(bits, M_Offset_G0_DMA_Config);
break;
- case NITIO_G1_DMA_Config_Reg:
+ case NITIO_G1_DMA_CFG:
ni_writew(bits, M_Offset_G1_DMA_Config);
break;
- case NITIO_G0_ABZ_Reg:
+ case NITIO_G0_ABZ:
ni_writew(bits, M_Offset_G0_MSeries_ABZ);
break;
- case NITIO_G1_ABZ_Reg:
+ case NITIO_G1_ABZ:
ni_writew(bits, M_Offset_G1_MSeries_ABZ);
break;
/* 32 bit registers */
- case NITIO_G0_LoadA_Reg:
- case NITIO_G1_LoadA_Reg:
- case NITIO_G0_LoadB_Reg:
- case NITIO_G1_LoadB_Reg:
+ case NITIO_G0_LOADA:
+ case NITIO_G1_LOADA:
+ case NITIO_G0_LOADB:
+ case NITIO_G1_LOADB:
stc_register = ni_gpct_to_stc_register(reg);
devpriv->stc_writel(dev, bits, stc_register);
break;
/* 16 bit registers */
- case NITIO_G0_Interrupt_Enable_Reg:
+ case NITIO_G0_INT_ENA:
BUG_ON(bits & ~gpct_interrupt_a_enable_mask);
ni_set_bitfield(dev, Interrupt_A_Enable_Register,
gpct_interrupt_a_enable_mask, bits);
break;
- case NITIO_G1_Interrupt_Enable_Reg:
+ case NITIO_G1_INT_ENA:
BUG_ON(bits & ~gpct_interrupt_b_enable_mask);
ni_set_bitfield(dev, Interrupt_B_Enable_Register,
gpct_interrupt_b_enable_mask, bits);
break;
- case NITIO_G01_Joint_Reset_Reg:
+ case NITIO_G01_RESET:
BUG_ON(bits & ~gpct_joint_reset_mask);
/* fall-through */
default:
@@ -4210,21 +4047,18 @@ static unsigned ni_gpct_read_register(struct ni_gpct *counter,
switch (reg) {
/* m-series only registers */
- case NITIO_G0_DMA_Status_Reg:
+ case NITIO_G0_DMA_STATUS:
return ni_readw(M_Offset_G0_DMA_Status);
- break;
- case NITIO_G1_DMA_Status_Reg:
+ case NITIO_G1_DMA_STATUS:
return ni_readw(M_Offset_G1_DMA_Status);
- break;
/* 32 bit registers */
- case NITIO_G0_HW_Save_Reg:
- case NITIO_G1_HW_Save_Reg:
- case NITIO_G0_SW_Save_Reg:
- case NITIO_G1_SW_Save_Reg:
+ case NITIO_G0_HW_SAVE:
+ case NITIO_G1_HW_SAVE:
+ case NITIO_G0_SW_SAVE:
+ case NITIO_G1_SW_SAVE:
stc_register = ni_gpct_to_stc_register(reg);
return devpriv->stc_readl(dev, stc_register);
- break;
/* 16 bit registers */
default:
@@ -4391,11 +4225,10 @@ static int ni_E_init(struct comedi_device *dev)
s->maxdata = (1 << board->aobits) - 1;
s->range_table = board->ao_range_table;
s->insn_read = &ni_ao_insn_read;
- if (board->reg_type & ni_reg_6xxx_mask) {
+ if (board->reg_type & ni_reg_6xxx_mask)
s->insn_write = &ni_ao_insn_write_671x;
- } else {
+ else
s->insn_write = &ni_ao_insn_write;
- }
s->insn_config = &ni_ao_insn_config;
#ifdef PCIDMA
if (board->n_aochan) {
@@ -4429,7 +4262,7 @@ static int ni_E_init(struct comedi_device *dev)
s->n_chan = board->num_p0_dio_channels;
if (board->reg_type & ni_reg_m_series_mask) {
s->subdev_flags |=
- SDF_LSAMPL | SDF_CMD_WRITE /* | SDF_CMD_READ */ ;
+ SDF_LSAMPL | SDF_CMD_WRITE /* | SDF_CMD_READ */;
s->insn_bits = &ni_m_series_dio_insn_bits;
s->insn_config = &ni_m_series_dio_insn_config;
s->do_cmd = &ni_cdio_cmd;
@@ -4449,11 +4282,10 @@ static int ni_E_init(struct comedi_device *dev)
/* 8255 device */
s = &dev->subdevices[NI_8255_DIO_SUBDEV];
- if (board->has_8255) {
+ if (board->has_8255)
subdev_8255_init(dev, s, ni_8255_callback, (unsigned long)dev);
- } else {
+ else
s->type = COMEDI_SUBD_UNUSED;
- }
/* formerly general purpose counter/timer device, but no longer used */
s = &dev->subdevices[NI_UNUSED_SUBDEV];
@@ -4511,9 +4343,8 @@ static int ni_E_init(struct comedi_device *dev)
s->n_chan = 10;
}
s->maxdata = 1;
- if (board->reg_type & ni_reg_m_series_mask) {
+ if (board->reg_type & ni_reg_m_series_mask)
s->insn_bits = &ni_pfi_insn_bits;
- }
s->insn_config = &ni_pfi_insn_config;
ni_set_bits(dev, IO_Bidirection_Pin_Register, ~0, 0);
@@ -4553,11 +4384,10 @@ static int ni_E_init(struct comedi_device *dev)
s->insn_config = ni_rtsi_insn_config;
ni_rtsi_init(dev);
- if (board->reg_type & ni_reg_m_series_mask) {
+ if (board->reg_type & ni_reg_m_series_mask)
counter_variant = ni_gpct_variant_m_series;
- } else {
+ else
counter_variant = ni_gpct_variant_e_series;
- }
devpriv->counter_dev = ni_gpct_device_construct(dev,
&ni_gpct_write_register,
&ni_gpct_read_register,
@@ -4573,14 +4403,14 @@ static int ni_E_init(struct comedi_device *dev)
s->maxdata = 0xffffffff;
else
s->maxdata = 0xffffff;
- s->insn_read = &ni_gpct_insn_read;
- s->insn_write = &ni_gpct_insn_write;
- s->insn_config = &ni_gpct_insn_config;
+ s->insn_read = ni_tio_insn_read;
+ s->insn_write = ni_tio_insn_write;
+ s->insn_config = ni_tio_insn_config;
#ifdef PCIDMA
s->subdev_flags |= SDF_CMD_READ /* | SDF_CMD_WRITE */;
s->do_cmd = &ni_gpct_cmd;
s->len_chanlist = 1;
- s->do_cmdtest = &ni_gpct_cmdtest;
+ s->do_cmdtest = ni_tio_cmdtest;
s->cancel = &ni_gpct_cancel;
s->async_dma_dir = DMA_BIDIRECTIONAL;
#endif
@@ -4901,7 +4731,7 @@ static int pack_ad8842(int addr, int val, int *bitstring);
struct caldac_struct {
int n_chans;
int n_bits;
- int (*packbits) (int, int, int *);
+ int (*packbits)(int, int, int *);
};
static struct caldac_struct caldacs[] = {
@@ -4944,9 +4774,8 @@ static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s)
if (diffbits) {
unsigned int *maxdata_list;
- if (n_chans > MAX_N_CALDACS) {
+ if (n_chans > MAX_N_CALDACS)
printk("BUG! MAX_N_CALDACS too small\n");
- }
s->maxdata_list = maxdata_list = devpriv->caldac_maxdata_list;
chan = 0;
for (i = 0; i < n_dacs; i++) {
@@ -5143,36 +4972,11 @@ static void GPCT_Reset(struct comedi_device *dev, int chan)
#endif
-static int ni_gpct_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct ni_gpct *counter = s->private;
- return ni_tio_insn_config(counter, insn, data);
-}
-
-static int ni_gpct_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct ni_gpct *counter = s->private;
- return ni_tio_rinsn(counter, insn, data);
-}
-
-static int ni_gpct_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct ni_gpct *counter = s->private;
- return ni_tio_winsn(counter, insn, data);
-}
-
#ifdef PCIDMA
static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- int retval;
struct ni_gpct *counter = s->private;
-/* const struct comedi_cmd *cmd = &s->async->cmd; */
+ int retval;
retval = ni_request_gpct_mite_channel(dev, counter->counter_index,
COMEDI_INPUT);
@@ -5183,19 +4987,8 @@ static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
}
ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
ni_e_series_enable_second_irq(dev, counter->counter_index, 1);
- retval = ni_tio_cmd(counter, s->async);
- return retval;
-}
-#endif
-#ifdef PCIDMA
-static int ni_gpct_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- struct ni_gpct *counter = s->private;
-
- return ni_tio_cmdtest(counter, cmd);
- return -ENOTSUPP;
+ return ni_tio_cmd(dev, s);
}
#endif
@@ -5330,9 +5123,8 @@ static int ni_config_filter(struct comedi_device *dev, unsigned pfi_channel,
struct ni_private *devpriv __maybe_unused = dev->private;
unsigned bits;
- if ((board->reg_type & ni_reg_m_series_mask) == 0) {
+ if ((board->reg_type & ni_reg_m_series_mask) == 0)
return -ENOTSUPP;
- }
bits = ni_readl(M_Offset_PFI_Filter);
bits &= ~MSeries_PFI_Filter_Select_Mask(pfi_channel);
bits |= MSeries_PFI_Filter_Select_Bits(pfi_channel, filter);
@@ -5413,9 +5205,8 @@ static void ni_rtsi_init(struct comedi_device *dev)
/* Set clock mode to internal */
devpriv->clock_and_fout2 = MSeries_RTSI_10MHz_Bit;
- if (ni_set_master_clock(dev, NI_MIO_INTERNAL_CLOCK, 0) < 0) {
+ if (ni_set_master_clock(dev, NI_MIO_INTERNAL_CLOCK, 0) < 0)
printk("ni_set_master_clock failed, bug?");
- }
/* default internal lines routing to RTSI bus lines */
devpriv->rtsi_trig_a_output_reg =
RTSI_Trig_Output_Bits(0,
@@ -5598,9 +5389,8 @@ static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
devpriv->clock_source = source;
/* it seems to typically take a few hundred microseconds for PLL to lock */
for (i = 0; i < timeout; ++i) {
- if (ni_readw(M_Offset_PLL_Status) & MSeries_PLL_Locked_Bit) {
+ if (ni_readw(M_Offset_PLL_Status) & MSeries_PLL_Locked_Bit)
break;
- }
udelay(1);
}
if (i == timeout) {
@@ -5822,13 +5612,11 @@ static int cs5529_wait_for_idle(struct comedi_device *dev)
for (i = 0; i < timeout; i++) {
status = ni_ao_win_inw(dev, CAL_ADC_Status_67xx);
- if ((status & CSS_ADC_BUSY) == 0) {
+ if ((status & CSS_ADC_BUSY) == 0)
break;
- }
set_current_state(TASK_INTERRUPTIBLE);
- if (schedule_timeout(1)) {
+ if (schedule_timeout(1))
return -EIO;
- }
}
/* printk("looped %i times waiting for idle\n", i); */
if (i == timeout) {
@@ -5854,9 +5642,8 @@ static void cs5529_command(struct comedi_device *dev, unsigned short value)
udelay(1);
}
/* printk("looped %i times writing command to cs5529\n", i); */
- if (i == timeout) {
+ if (i == timeout)
comedi_error(dev, "possible problem - never saw adc go busy?");
- }
}
/* write to cs5529 register */
@@ -5873,25 +5660,6 @@ static void cs5529_config_write(struct comedi_device *dev, unsigned int value,
comedi_error(dev, "time or signal in cs5529_config_write()");
}
-#ifdef NI_CS5529_DEBUG
-/* read from cs5529 register */
-static unsigned int cs5529_config_read(struct comedi_device *dev,
- unsigned int reg_select_bits)
-{
- unsigned int value;
-
- reg_select_bits &= CSCMD_REGISTER_SELECT_MASK;
- cs5529_command(dev, CSCMD_COMMAND | CSCMD_READ | reg_select_bits);
- if (cs5529_wait_for_idle(dev))
- comedi_error(dev, "timeout or signal in cs5529_config_read()");
- value = (ni_ao_win_inw(dev,
- CAL_ADC_Config_Data_High_Word_67xx) << 16) &
- 0xff0000;
- value |= ni_ao_win_inw(dev, CAL_ADC_Config_Data_Low_Word_67xx) & 0xffff;
- return value;
-}
-#endif
-
static int cs5529_do_conversion(struct comedi_device *dev, unsigned short *data)
{
int retval;
@@ -5968,12 +5736,5 @@ static int init_cs5529(struct comedi_device *dev)
if (cs5529_wait_for_idle(dev))
comedi_error(dev, "timeout or signal in init_cs5529()\n");
#endif
-#ifdef NI_CS5529_DEBUG
- printk("config: 0x%x\n", cs5529_config_read(dev,
- CSCMD_CONFIG_REGISTER));
- printk("gain: 0x%x\n", cs5529_config_read(dev, CSCMD_GAIN_REGISTER));
- printk("offset: 0x%x\n", cs5529_config_read(dev,
- CSCMD_OFFSET_REGISTER));
-#endif
return 0;
}
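
Most of the ni_mio_common.c churn above is mechanical: the comedi_lrange tables are rewritten with the BIP_RANGE()/UNI_RANGE() helpers instead of raw RANGE(min, max) pairs, the ad-hoc DEBUG/MDPRINTK macros go away, and the counter register names switch to the shorter NITIO_* enums. For the range tables the target style is simply the following; my_ai_range is a placeholder name and the helpers come from "../comedidev.h":

static const struct comedi_lrange my_ai_range = {
	4, {
		BIP_RANGE(10),	/* equivalent to RANGE(-10, 10) */
		BIP_RANGE(5),	/* equivalent to RANGE(-5, 5) */
		UNI_RANGE(10),	/* equivalent to RANGE(0, 10) */
		UNI_RANGE(5)	/* equivalent to RANGE(0, 5) */
	}
};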
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index 229a273..de42148 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -47,8 +47,6 @@ See the notes in the ni_atmio.o driver.
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
-#undef DEBUG
-
#define ATMIO 1
#undef PCIMIO
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index e3a8fa9..30c46a3 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -47,8 +47,6 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
*/
#define USE_DMA
-/* #define DEBUG 1 */
-/* #define DEBUG_FLAGS */
#include <linux/module.h>
#include <linux/delay.h>
@@ -60,13 +58,6 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
#include "comedi_fc.h"
#include "mite.h"
-#undef DPRINTK
-#ifdef DEBUG
-#define DPRINTK(format, args...) pr_debug(format, ## args)
-#else
-#define DPRINTK(format, args...) do { } while (0)
-#endif
-
#define PCI_DIO_SIZE 4096
#define PCI_MITE_SIZE 4096
@@ -319,14 +310,6 @@ static int ni_pcidio_ns_to_timer(int *nanosec, int round_mode);
static int setup_mite_dma(struct comedi_device *dev,
struct comedi_subdevice *s);
-#ifdef DEBUG_FLAGS
-static void ni_pcidio_print_flags(unsigned int flags);
-static void ni_pcidio_print_status(unsigned int status);
-#else
-#define ni_pcidio_print_flags(x)
-#define ni_pcidio_print_status(x)
-#endif
-
static int ni_pcidio_request_di_mite_channel(struct comedi_device *dev)
{
struct nidio96_private *devpriv = dev->private;
@@ -401,7 +384,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct nidio96_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct mite_struct *mite = devpriv->mite;
@@ -427,19 +410,10 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
Interrupt_And_Window_Status);
flags = readb(devpriv->mite->daq_io_addr + Group_1_Flags);
- DPRINTK("ni_pcidio_interrupt: status=0x%02x,flags=0x%02x\n",
- status, flags);
- ni_pcidio_print_flags(flags);
- ni_pcidio_print_status(status);
-
spin_lock(&devpriv->mite_channel_lock);
if (devpriv->di_mite_chan)
m_status = mite_get_status(devpriv->di_mite_chan);
-#ifdef MITE_DEBUG
- mite_print_chsr(m_status);
-#endif
- /* mite_dump_regs(mite); */
if (m_status & CHSR_INT) {
if (m_status & CHSR_LINKC) {
writel(CHOR_CLRLC,
@@ -450,7 +424,8 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
}
if (m_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_DRDY |
CHSR_DRQ1 | CHSR_MRDY)) {
- DPRINTK("unknown mite interrupt, disabling IRQ\n");
+ dev_dbg(dev->class_dev,
+ "unknown mite interrupt, disabling IRQ\n");
async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
disable_irq(dev->irq);
}
@@ -460,7 +435,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
while (status & DataLeft) {
work++;
if (work > 20) {
- DPRINTK("too much work in interrupt\n");
+ dev_dbg(dev->class_dev, "too much work in interrupt\n");
writeb(0x00,
devpriv->mite->daq_io_addr +
Master_DMA_And_Interrupt_Control);
@@ -470,11 +445,11 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
flags &= IntEn;
if (flags & TransferReady) {
- /* DPRINTK("TransferReady\n"); */
while (flags & TransferReady) {
work++;
if (work > 100) {
- DPRINTK("too much work in interrupt\n");
+ dev_dbg(dev->class_dev,
+ "too much work in interrupt\n");
writeb(0x00,
devpriv->mite->daq_io_addr +
Master_DMA_And_Interrupt_Control
@@ -488,21 +463,13 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
data2 = (auxdata & 0xffff0000) >> 16;
comedi_buf_put(async, data1);
comedi_buf_put(async, data2);
- /* DPRINTK("read:%d, %d\n",data1,data2); */
flags = readb(devpriv->mite->daq_io_addr +
Group_1_Flags);
}
- /* DPRINTK("buf_int_count: %d\n",
- async->buf_int_count); */
- /* DPRINTK("1) IntEn=%d,flags=%d,status=%d\n",
- IntEn,flags,status); */
- /* ni_pcidio_print_flags(flags); */
- /* ni_pcidio_print_status(status); */
async->events |= COMEDI_CB_BLOCK;
}
if (flags & CountExpired) {
- DPRINTK("CountExpired\n");
writeb(ClearExpired,
devpriv->mite->daq_io_addr +
Group_1_Second_Clear);
@@ -511,41 +478,26 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
writeb(0x00, devpriv->mite->daq_io_addr + OpMode);
break;
} else if (flags & Waited) {
- DPRINTK("Waited\n");
writeb(ClearWaited,
devpriv->mite->daq_io_addr +
Group_1_First_Clear);
async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
break;
} else if (flags & PrimaryTC) {
- DPRINTK("PrimaryTC\n");
writeb(ClearPrimaryTC,
devpriv->mite->daq_io_addr +
Group_1_First_Clear);
async->events |= COMEDI_CB_EOA;
} else if (flags & SecondaryTC) {
- DPRINTK("SecondaryTC\n");
writeb(ClearSecondaryTC,
devpriv->mite->daq_io_addr +
Group_1_First_Clear);
async->events |= COMEDI_CB_EOA;
}
-#if 0
- else {
- DPRINTK("ni_pcidio: unknown interrupt\n");
- async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- writeb(0x00,
- devpriv->mite->daq_io_addr +
- Master_DMA_And_Interrupt_Control);
- }
-#endif
+
flags = readb(devpriv->mite->daq_io_addr + Group_1_Flags);
status = readb(devpriv->mite->daq_io_addr +
Interrupt_And_Window_Status);
- /* DPRINTK("loop end: IntEn=0x%02x,flags=0x%02x,"
- "status=0x%02x\n", IntEn, flags, status); */
- /* ni_pcidio_print_flags(flags); */
- /* ni_pcidio_print_status(status); */
}
out:
@@ -562,82 +514,6 @@ out:
return IRQ_HANDLED;
}
-#ifdef DEBUG_FLAGS
-static const char *bit_set_string(unsigned int bits, unsigned int bit,
- const char *const strings[])
-{
- return (bits & (1U << bit)) ? strings[bit] : "";
-}
-
-static const char *const flags_strings[] = {
- " TransferReady", " CountExpired", " 2", " 3",
- " 4", " Waited", " PrimaryTC", " SecondaryTC",
-};
-
-
-static void ni_pcidio_print_flags(unsigned int flags)
-{
- pr_debug("group_1_flags:%s%s%s%s%s%s%s%s\n",
- bit_set_string(flags, 7, flags_strings),
- bit_set_string(flags, 6, flags_strings),
- bit_set_string(flags, 5, flags_strings),
- bit_set_string(flags, 4, flags_strings),
- bit_set_string(flags, 3, flags_strings),
- bit_set_string(flags, 2, flags_strings),
- bit_set_string(flags, 1, flags_strings),
- bit_set_string(flags, 0, flags_strings));
-}
-
-static const char *const status_strings[] = {
- " DataLeft1", " Reserved1", " Req1", " StopTrig1",
- " DataLeft2", " Reserved2", " Req2", " StopTrig2",
-};
-
-static void ni_pcidio_print_status(unsigned int flags)
-{
- pr_debug("group_status:%s%s%s%s%s%s%s%s\n",
- bit_set_string(flags, 7, status_strings),
- bit_set_string(flags, 6, status_strings),
- bit_set_string(flags, 5, status_strings),
- bit_set_string(flags, 4, status_strings),
- bit_set_string(flags, 3, status_strings),
- bit_set_string(flags, 2, status_strings),
- bit_set_string(flags, 1, status_strings),
- bit_set_string(flags, 0, status_strings));
-}
-#endif
-
-#ifdef unused
-static void debug_int(struct comedi_device *dev)
-{
- struct nidio96_private *devpriv = dev->private;
- int a, b;
- static int n_int;
- struct timeval tv;
-
- do_gettimeofday(&tv);
- a = readb(devpriv->mite->daq_io_addr + Group_Status);
- b = readb(devpriv->mite->daq_io_addr + Group_1_Flags);
-
- if (n_int < 10) {
- DPRINTK("status 0x%02x flags 0x%02x time %06d\n", a, b,
- (int)tv.tv_usec);
- }
-
- while (b & 1) {
- writew(0xff, devpriv->mite->daq_io_addr + Group_1_FIFO);
- b = readb(devpriv->mite->daq_io_addr + Group_1_Flags);
- }
-
- b = readb(devpriv->mite->daq_io_addr + Group_1_Flags);
-
- if (n_int < 10) {
- DPRINTK("new status 0x%02x\n", b);
- n_int++;
- }
-}
-#endif
-
static int ni_pcidio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -883,7 +759,6 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
s->async->inttrig = ni_pcidio_inttrig;
}
- DPRINTK("ni_pcidio: command started\n");
return 0;
}
@@ -1074,6 +949,19 @@ static int pci_6534_upload_firmware(struct comedi_device *dev)
return ret;
}
+static void nidio_reset_board(struct comedi_device *dev)
+{
+ struct nidio96_private *devpriv = dev->private;
+ void __iomem *daq_mmio = devpriv->mite->daq_io_addr;
+
+ writel(0, daq_mmio + Port_IO(0));
+ writel(0, daq_mmio + Port_Pin_Directions(0));
+ writel(0, daq_mmio + Port_Pin_Mask(0));
+
+ /* disable interrupts on board */
+ writeb(0, daq_mmio + Master_DMA_And_Interrupt_Control);
+}
+
static int nidio_auto_attach(struct comedi_device *dev,
unsigned long context)
{
@@ -1115,13 +1003,14 @@ static int nidio_auto_attach(struct comedi_device *dev,
if (devpriv->di_mite_ring == NULL)
return -ENOMEM;
- irq = mite_irq(devpriv->mite);
if (board->uses_firmware) {
ret = pci_6534_upload_firmware(dev);
if (ret < 0)
return ret;
}
+ nidio_reset_board(dev);
+
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
return ret;
@@ -1149,21 +1038,13 @@ static int nidio_auto_attach(struct comedi_device *dev,
s->async_dma_dir = DMA_BIDIRECTIONAL;
s->poll = &ni_pcidio_poll;
- writel(0, devpriv->mite->daq_io_addr + Port_IO(0));
- writel(0, devpriv->mite->daq_io_addr + Port_Pin_Directions(0));
- writel(0, devpriv->mite->daq_io_addr + Port_Pin_Mask(0));
-
- /* disable interrupts on board */
- writeb(0x00,
- devpriv->mite->daq_io_addr +
- Master_DMA_And_Interrupt_Control);
-
- ret = request_irq(irq, nidio_interrupt, IRQF_SHARED,
- "ni_pcidio", dev);
- if (ret < 0)
- dev_warn(dev->class_dev, "irq not available\n");
-
- dev->irq = irq;
+ irq = mite_irq(devpriv->mite);
+ if (irq) {
+ ret = request_irq(irq, nidio_interrupt, IRQF_SHARED,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = irq;
+ }
return 0;
}
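
The interrupt handler above also loses its private DPRINTK() wrapper in favour of dev_dbg(). A minimal sketch of that logging style; the helper name and message are illustrative only:

static void my_dbg_group_state(struct comedi_device *dev,
			       unsigned int status, unsigned int flags)
{
	/* emitted only when DEBUG or CONFIG_DYNAMIC_DEBUG enables it */
	dev_dbg(dev->class_dev, "status=0x%02x flags=0x%02x\n", status, flags);
}

With dynamic debug built in, the messages can then be switched on per module at runtime through /sys/kernel/debug/dynamic_debug/control.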
@@ -1200,7 +1081,7 @@ static int ni_pcidio_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_pcidio_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(ni_pcidio_pci_table) = {
+static const struct pci_device_id ni_pcidio_pci_table[] = {
{ PCI_VDEVICE(NI, 0x1150), BOARD_PCIDIO_32HS },
{ PCI_VDEVICE(NI, 0x12b0), BOARD_PCI6534 },
{ PCI_VDEVICE(NI, 0x1320), BOARD_PXI6533 },
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 536be83..0ed9804 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -116,8 +116,6 @@ Bugs:
#include "ni_stc.h"
#include "mite.h"
-/* #define PCI_DEBUG */
-
#define PCIDMA
#define PCIMIO 1
@@ -134,24 +132,26 @@ Bugs:
63 different possibilities. An AO channel
can not act as it's own OFFSET or REFERENCE.
*/
-static const struct comedi_lrange range_ni_M_628x_ao = { 8, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-2, 2),
- RANGE(-1, 1),
- RANGE(-5, 15),
- RANGE(0, 10),
- RANGE(3, 7),
- RANGE(4, 6),
- RANGE_ext(-1, 1)
- }
+static const struct comedi_lrange range_ni_M_628x_ao = {
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ RANGE(-5, 15),
+ UNI_RANGE(10),
+ RANGE(3, 7),
+ RANGE(4, 6),
+ RANGE_ext(-1, 1)
+ }
};
-static const struct comedi_lrange range_ni_M_625x_ao = { 3, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE_ext(-1, 1)
- }
+static const struct comedi_lrange range_ni_M_625x_ao = {
+ 3, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ RANGE_ext(-1, 1)
+ }
};
enum ni_pcimio_boardid {
@@ -1178,9 +1178,9 @@ static void m_series_stc_writew(struct comedi_device *dev, uint16_t data,
offset = M_Offset_AO_FIFO_Clear;
break;
case DIO_Control_Register:
- printk
- ("%s: FIXME: register 0x%x does not map cleanly on to m-series boards.\n",
- __func__, reg);
+ dev_dbg(dev->class_dev,
+ "%s: FIXME: register 0x%x does not map cleanly on to m-series boards.\n",
+ __func__, reg);
return;
break;
case G_Autoincrement_Register(0):
@@ -1471,6 +1471,7 @@ static int pcimio_auto_attach(struct comedi_device *dev,
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct ni_board_struct *board = NULL;
struct ni_private *devpriv;
+ unsigned int irq;
int ret;
if (context < ARRAY_SIZE(ni_boards))
@@ -1532,18 +1533,12 @@ static int pcimio_auto_attach(struct comedi_device *dev,
if (board->reg_type == ni_reg_6143)
init_6143(dev);
- dev->irq = mite_irq(devpriv->mite);
-
- if (dev->irq == 0) {
- pr_warn("unknown irq (bad)\n");
- } else {
- pr_debug("( irq = %u )\n", dev->irq);
- ret = request_irq(dev->irq, ni_E_interrupt, NI_E_IRQ_FLAGS,
- DRV_NAME, dev);
- if (ret < 0) {
- pr_warn("irq not available\n");
- dev->irq = 0;
- }
+ irq = mite_irq(devpriv->mite);
+ if (irq) {
+ ret = request_irq(irq, ni_E_interrupt, NI_E_IRQ_FLAGS,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = irq;
}
ret = ni_E_init(dev);
@@ -1639,7 +1634,7 @@ static int ni_pcimio_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_pcimio_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(ni_pcimio_pci_table) = {
+static const struct pci_device_id ni_pcimio_pci_table[] = {
{ PCI_VDEVICE(NI, 0x0162), BOARD_PCIMIO_16XE_50 }, /* 0x1620? */
{ PCI_VDEVICE(NI, 0x1170), BOARD_PCIMIO_16XE_10 },
{ PCI_VDEVICE(NI, 0x1180), BOARD_PCIMIO_16E_1 },
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 9b120c7..92691b4 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -53,10 +53,6 @@ static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
unsigned generic_clock_source);
static unsigned ni_tio_generic_clock_src_select(const struct ni_gpct *counter);
-MODULE_AUTHOR("Comedi <comedi@comedi.org>");
-MODULE_DESCRIPTION("Comedi support for NI general-purpose counters");
-MODULE_LICENSE("GPL");
-
static inline enum Gi_Counting_Mode_Reg_Bits Gi_Alternate_Sync_Bit(enum
ni_gpct_variant
variant)
@@ -277,19 +273,6 @@ static inline unsigned NI_660x_RTSI_Second_Gate_Select(unsigned n)
static const unsigned int counter_status_mask =
COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING;
-static int __init ni_tio_init_module(void)
-{
- return 0;
-}
-
-module_init(ni_tio_init_module);
-
-static void __exit ni_tio_cleanup_module(void)
-{
-}
-
-module_exit(ni_tio_cleanup_module);
-
struct ni_gpct_device *ni_gpct_device_construct(struct comedi_device *dev,
void (*write_register) (struct
ni_gpct
@@ -362,74 +345,64 @@ static int ni_tio_second_gate_registers_present(const struct ni_gpct_device
static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter)
{
- write_register(counter, Gi_Reset_Bit(counter->counter_index),
- NITIO_Gxx_Joint_Reset_Reg(counter->counter_index));
+ unsigned cidx = counter->counter_index;
+
+ write_register(counter, Gi_Reset_Bit(cidx), NITIO_RESET_REG(cidx));
}
void ni_tio_init_counter(struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned cidx = counter->counter_index;
ni_tio_reset_count_and_disarm(counter);
+
/* initialize counter registers */
- counter_dev->regs[NITIO_Gi_Autoincrement_Reg(counter->counter_index)] =
- 0x0;
- write_register(counter,
- counter_dev->
- regs[NITIO_Gi_Autoincrement_Reg(counter->counter_index)],
- NITIO_Gi_Autoincrement_Reg(counter->counter_index));
- ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
+ counter_dev->regs[NITIO_AUTO_INC_REG(cidx)] = 0x0;
+ write_register(counter, counter_dev->regs[NITIO_AUTO_INC_REG(cidx)],
+ NITIO_AUTO_INC_REG(cidx));
+
+ ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
~0, Gi_Synchronize_Gate_Bit);
- ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index), ~0,
- 0);
- counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] = 0x0;
- write_register(counter,
- counter_dev->
- regs[NITIO_Gi_LoadA_Reg(counter->counter_index)],
- NITIO_Gi_LoadA_Reg(counter->counter_index));
- counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] = 0x0;
- write_register(counter,
- counter_dev->
- regs[NITIO_Gi_LoadB_Reg(counter->counter_index)],
- NITIO_Gi_LoadB_Reg(counter->counter_index));
- ni_tio_set_bits(counter,
- NITIO_Gi_Input_Select_Reg(counter->counter_index), ~0,
- 0);
- if (ni_tio_counting_mode_registers_present(counter_dev)) {
- ni_tio_set_bits(counter,
- NITIO_Gi_Counting_Mode_Reg(counter->
- counter_index), ~0,
- 0);
- }
+
+ ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), ~0, 0);
+
+ counter_dev->regs[NITIO_LOADA_REG(cidx)] = 0x0;
+ write_register(counter, counter_dev->regs[NITIO_LOADA_REG(cidx)],
+ NITIO_LOADA_REG(cidx));
+
+ counter_dev->regs[NITIO_LOADB_REG(cidx)] = 0x0;
+ write_register(counter, counter_dev->regs[NITIO_LOADB_REG(cidx)],
+ NITIO_LOADB_REG(cidx));
+
+ ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), ~0, 0);
+
+ if (ni_tio_counting_mode_registers_present(counter_dev))
+ ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx), ~0, 0);
+
if (ni_tio_second_gate_registers_present(counter_dev)) {
- counter_dev->
- regs[NITIO_Gi_Second_Gate_Reg(counter->counter_index)] =
- 0x0;
+ counter_dev->regs[NITIO_GATE2_REG(cidx)] = 0x0;
write_register(counter,
- counter_dev->
- regs[NITIO_Gi_Second_Gate_Reg
- (counter->counter_index)],
- NITIO_Gi_Second_Gate_Reg(counter->
- counter_index));
+ counter_dev->regs[NITIO_GATE2_REG(cidx)],
+ NITIO_GATE2_REG(cidx));
}
- ni_tio_set_bits(counter,
- NITIO_Gi_DMA_Config_Reg(counter->counter_index), ~0,
- 0x0);
- ni_tio_set_bits(counter,
- NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index),
- ~0, 0x0);
+
+ ni_tio_set_bits(counter, NITIO_DMA_CFG_REG(cidx), ~0, 0x0);
+
+ ni_tio_set_bits(counter, NITIO_INT_ENA_REG(cidx), ~0, 0x0);
}
EXPORT_SYMBOL_GPL(ni_tio_init_counter);
static unsigned int ni_tio_counter_status(struct ni_gpct *counter)
{
- unsigned int status = 0;
+ unsigned cidx = counter->counter_index;
const unsigned bits = read_register(counter,
- NITIO_Gxx_Status_Reg(counter->
- counter_index));
- if (bits & Gi_Armed_Bit(counter->counter_index)) {
+ NITIO_SHARED_STATUS_REG(cidx));
+ unsigned int status = 0;
+
+ if (bits & Gi_Armed_Bit(cidx)) {
status |= COMEDI_COUNTER_ARMED;
- if (bits & Gi_Counting_Bit(counter->counter_index))
+ if (bits & Gi_Counting_Bit(cidx))
status |= COMEDI_COUNTER_COUNTING;
}
return status;
@@ -438,8 +411,8 @@ static unsigned int ni_tio_counter_status(struct ni_gpct *counter)
static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned counting_mode_reg =
- NITIO_Gi_Counting_Mode_Reg(counter->counter_index);
+ unsigned cidx = counter->counter_index;
+ const unsigned counting_mode_reg = NITIO_CNT_MODE_REG(cidx);
static const uint64_t min_normal_sync_period_ps = 25000;
const uint64_t clock_period_ps = ni_tio_clock_period_ps(counter,
ni_tio_generic_clock_src_select
@@ -476,6 +449,7 @@ static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync)
static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned cidx = counter->counter_index;
unsigned mode_reg_mask;
unsigned mode_reg_values;
unsigned input_select_bits = 0;
@@ -502,7 +476,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
default:
break;
}
- ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
mode_reg_mask, mode_reg_values);
if (ni_tio_counting_mode_registers_present(counter_dev)) {
@@ -515,15 +489,13 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
Gi_Index_Phase_Bitshift) & Gi_Index_Phase_Mask;
if (mode & NI_GPCT_INDEX_ENABLE_BIT)
counting_mode_bits |= Gi_Index_Mode_Bit;
- ni_tio_set_bits(counter,
- NITIO_Gi_Counting_Mode_Reg(counter->
- counter_index),
+ ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
Gi_Counting_Mode_Mask | Gi_Index_Phase_Mask |
Gi_Index_Mode_Bit, counting_mode_bits);
ni_tio_set_sync_mode(counter, 0);
}
- ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
Gi_Up_Down_Mask,
(mode >> NI_GPCT_COUNTING_DIRECTION_SHIFT) <<
Gi_Up_Down_Shift);
@@ -532,8 +504,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
input_select_bits |= Gi_Or_Gate_Bit;
if (mode & NI_GPCT_INVERT_OUTPUT_BIT)
input_select_bits |= Gi_Output_Polarity_Bit;
- ni_tio_set_bits(counter,
- NITIO_Gi_Input_Select_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
Gi_Gate_Select_Load_Source_Bit | Gi_Or_Gate_Bit |
Gi_Output_Polarity_Bit, input_select_bits);
@@ -543,7 +514,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
-
+ unsigned cidx = counter->counter_index;
unsigned command_transient_bits = 0;
if (arm) {
@@ -581,9 +552,7 @@ int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger)
}
break;
}
- ni_tio_set_bits(counter,
- NITIO_Gi_Counting_Mode_Reg
- (counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
Gi_HW_Arm_Select_Mask
(counter_dev->variant) |
Gi_HW_Arm_Enable_Bit,
@@ -592,8 +561,7 @@ int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger)
} else {
command_transient_bits |= Gi_Disarm_Bit;
}
- ni_tio_set_bits_transient(counter,
- NITIO_Gi_Command_Reg(counter->counter_index),
+ ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
0, 0, command_transient_bits);
return 0;
}
@@ -717,8 +685,8 @@ static void ni_tio_set_source_subselect(struct ni_gpct *counter,
unsigned int clock_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned second_gate_reg =
- NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+ unsigned cidx = counter->counter_index;
+ const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
if (counter_dev->variant != ni_gpct_variant_m_series)
return;
@@ -747,6 +715,7 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
unsigned int period_ns)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned cidx = counter->counter_index;
unsigned input_select_bits = 0;
static const uint64_t pico_per_nano = 1000;
@@ -766,8 +735,7 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
}
if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT)
input_select_bits |= Gi_Source_Polarity_Bit;
- ni_tio_set_bits(counter,
- NITIO_Gi_Input_Select_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
Gi_Source_Select_Mask | Gi_Source_Polarity_Bit,
input_select_bits);
ni_tio_set_source_subselect(counter, clock_source);
@@ -791,9 +759,7 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
return -EINVAL;
break;
}
- ni_tio_set_bits(counter,
- NITIO_Gi_Counting_Mode_Reg(counter->
- counter_index),
+ ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
Gi_Prescale_X2_Bit(counter_dev->variant) |
Gi_Prescale_X8_Bit(counter_dev->variant),
counting_mode_bits);
@@ -806,15 +772,12 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned counting_mode_bits = ni_tio_get_soft_copy(counter,
- NITIO_Gi_Counting_Mode_Reg
- (counter->
- counter_index));
+ unsigned cidx = counter->counter_index;
+ const unsigned counting_mode_bits =
+ ni_tio_get_soft_copy(counter, NITIO_CNT_MODE_REG(cidx));
unsigned bits = 0;
- if (ni_tio_get_soft_copy(counter,
- NITIO_Gi_Input_Select_Reg
- (counter->counter_index)) &
+ if (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) &
Gi_Source_Polarity_Bit)
bits |= NI_GPCT_INVERT_CLOCK_SRC_BIT;
if (counting_mode_bits & Gi_Prescale_X2_Bit(counter_dev->variant))
@@ -827,15 +790,13 @@ static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned second_gate_reg =
- NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+ unsigned cidx = counter->counter_index;
+ const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
unsigned clock_source = 0;
unsigned i;
- const unsigned input_select = (ni_tio_get_soft_copy(counter,
- NITIO_Gi_Input_Select_Reg
- (counter->counter_index))
- & Gi_Source_Select_Mask) >>
- Gi_Source_Select_Shift;
+ const unsigned input_select =
+ (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) &
+ Gi_Source_Select_Mask) >> Gi_Source_Select_Shift;
switch (input_select) {
case NI_M_Series_Timebase_1_Clock:
@@ -895,12 +856,11 @@ static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter)
static unsigned ni_660x_clock_src_select(const struct ni_gpct *counter)
{
unsigned clock_source = 0;
+ unsigned cidx = counter->counter_index;
+ const unsigned input_select =
+ (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) &
+ Gi_Source_Select_Mask) >> Gi_Source_Select_Shift;
unsigned i;
- const unsigned input_select = (ni_tio_get_soft_copy(counter,
- NITIO_Gi_Input_Select_Reg
- (counter->counter_index))
- & Gi_Source_Select_Mask) >>
- Gi_Source_Select_Shift;
switch (input_select) {
case NI_660x_Timebase_1_Clock:
@@ -1022,6 +982,7 @@ static void ni_tio_set_first_gate_modifiers(struct ni_gpct *counter,
unsigned int gate_source)
{
const unsigned mode_mask = Gi_Gate_Polarity_Bit | Gi_Gating_Mode_Mask;
+ unsigned cidx = counter->counter_index;
unsigned mode_values = 0;
if (gate_source & CR_INVERT)
@@ -1030,7 +991,7 @@ static void ni_tio_set_first_gate_modifiers(struct ni_gpct *counter,
mode_values |= Gi_Rising_Edge_Gating_Bits;
else
mode_values |= Gi_Level_Gating_Bits;
- ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
mode_mask, mode_values);
}
@@ -1038,6 +999,7 @@ static int ni_660x_set_first_gate(struct ni_gpct *counter,
unsigned int gate_source)
{
const unsigned selected_gate = CR_CHAN(gate_source);
+ unsigned cidx = counter->counter_index;
/* bits of selected_gate that may be meaningful to input select register */
const unsigned selected_gate_mask = 0x1f;
unsigned ni_660x_gate_select;
@@ -1075,8 +1037,7 @@ static int ni_660x_set_first_gate(struct ni_gpct *counter,
return -EINVAL;
break;
}
- ni_tio_set_bits(counter,
- NITIO_Gi_Input_Select_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
Gi_Gate_Select_Mask,
Gi_Gate_Select_Bits(ni_660x_gate_select));
return 0;
@@ -1086,6 +1047,7 @@ static int ni_m_series_set_first_gate(struct ni_gpct *counter,
unsigned int gate_source)
{
const unsigned selected_gate = CR_CHAN(gate_source);
+ unsigned cidx = counter->counter_index;
/* bits of selected_gate that may be meaningful to input select register */
const unsigned selected_gate_mask = 0x1f;
unsigned ni_m_series_gate_select;
@@ -1124,8 +1086,7 @@ static int ni_m_series_set_first_gate(struct ni_gpct *counter,
return -EINVAL;
break;
}
- ni_tio_set_bits(counter,
- NITIO_Gi_Input_Select_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
Gi_Gate_Select_Mask,
Gi_Gate_Select_Bits(ni_m_series_gate_select));
return 0;
@@ -1135,8 +1096,8 @@ static int ni_660x_set_second_gate(struct ni_gpct *counter,
unsigned int gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned second_gate_reg =
- NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+ unsigned cidx = counter->counter_index;
+ const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
const unsigned selected_second_gate = CR_CHAN(gate_source);
/* bits of second_gate that may be meaningful to second gate register */
static const unsigned selected_second_gate_mask = 0x1f;
@@ -1194,8 +1155,8 @@ static int ni_m_series_set_second_gate(struct ni_gpct *counter,
unsigned int gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned second_gate_reg =
- NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+ unsigned cidx = counter->counter_index;
+ const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
const unsigned selected_second_gate = CR_CHAN(gate_source);
/* bits of second_gate that may be meaningful to second gate register */
static const unsigned selected_second_gate_mask = 0x1f;
@@ -1222,15 +1183,13 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
unsigned int gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned second_gate_reg =
- NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+ unsigned cidx = counter->counter_index;
+ const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
switch (gate_index) {
case 0:
if (CR_CHAN(gate_source) == NI_GPCT_DISABLED_GATE_SELECT) {
- ni_tio_set_bits(counter,
- NITIO_Gi_Mode_Reg(counter->
- counter_index),
+ ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
Gi_Gating_Mode_Mask,
Gi_Gating_Disabled_Bits);
return 0;
@@ -1292,11 +1251,12 @@ static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index,
unsigned int source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned cidx = counter->counter_index;
if (counter_dev->variant == ni_gpct_variant_m_series) {
unsigned int abz_reg, shift, mask;
- abz_reg = NITIO_Gi_ABZ_Reg(counter->counter_index);
+ abz_reg = NITIO_ABZ_REG(cidx);
switch (index) {
case NI_GPCT_SOURCE_ENCODER_A:
shift = 10;
@@ -1319,7 +1279,6 @@ static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index,
counter_dev->regs[abz_reg] &= ~mask;
counter_dev->regs[abz_reg] |= (source << shift) & mask;
write_register(counter, counter_dev->regs[abz_reg], abz_reg);
-/* printk("%s %x %d %d\n", __func__, counter_dev->regs[abz_reg], index, source); */
return 0;
}
return -EINVAL;
@@ -1491,12 +1450,10 @@ static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index,
unsigned int *gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned mode_bits = ni_tio_get_soft_copy(counter,
- NITIO_Gi_Mode_Reg
- (counter->
- counter_index));
- const unsigned second_gate_reg =
- NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+ unsigned cidx = counter->counter_index;
+ const unsigned mode_bits =
+ ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx));
+ const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
unsigned gate_select_bits;
switch (gate_index) {
@@ -1508,8 +1465,7 @@ static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index,
} else {
gate_select_bits =
(ni_tio_get_soft_copy(counter,
- NITIO_Gi_Input_Select_Reg
- (counter->counter_index)) &
+ NITIO_INPUT_SEL_REG(cidx)) &
Gi_Gate_Select_Mask) >> Gi_Gate_Select_Shift;
}
switch (counter_dev->variant) {
@@ -1577,9 +1533,13 @@ static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index,
return 0;
}
-int ni_tio_insn_config(struct ni_gpct *counter,
- struct comedi_insn *insn, unsigned int *data)
+int ni_tio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ struct ni_gpct *counter = s->private;
+
switch (data[0]) {
case INSN_CONFIG_SET_COUNTER_MODE:
return ni_tio_set_counter_mode(counter, data[1]);
@@ -1623,11 +1583,15 @@ int ni_tio_insn_config(struct ni_gpct *counter,
}
EXPORT_SYMBOL_GPL(ni_tio_insn_config);
-int ni_tio_rinsn(struct ni_gpct *counter, struct comedi_insn *insn,
- unsigned int *data)
+int ni_tio_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
const unsigned channel = CR_CHAN(insn->chanspec);
+ unsigned cidx = counter->counter_index;
unsigned first_read;
unsigned second_read;
unsigned correct_read;
@@ -1636,65 +1600,57 @@ int ni_tio_rinsn(struct ni_gpct *counter, struct comedi_insn *insn,
return 0;
switch (channel) {
case 0:
- ni_tio_set_bits(counter,
- NITIO_Gi_Command_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
Gi_Save_Trace_Bit, 0);
- ni_tio_set_bits(counter,
- NITIO_Gi_Command_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
Gi_Save_Trace_Bit, Gi_Save_Trace_Bit);
/* The count doesn't get latched until the next clock edge, so it is possible the count
may change (once) while we are reading. Since the read of the SW_Save_Reg isn't
atomic (apparently even when it's a 32 bit register according to 660x docs),
we need to read twice and make sure the reading hasn't changed. If it has,
a third read will be correct since the count value will definitely have latched by then. */
- first_read =
- read_register(counter,
- NITIO_Gi_SW_Save_Reg(counter->counter_index));
- second_read =
- read_register(counter,
- NITIO_Gi_SW_Save_Reg(counter->counter_index));
+ first_read = read_register(counter, NITIO_SW_SAVE_REG(cidx));
+ second_read = read_register(counter, NITIO_SW_SAVE_REG(cidx));
if (first_read != second_read)
correct_read =
- read_register(counter,
- NITIO_Gi_SW_Save_Reg(counter->
- counter_index));
+ read_register(counter, NITIO_SW_SAVE_REG(cidx));
else
correct_read = first_read;
data[0] = correct_read;
return 0;
break;
case 1:
- data[0] =
- counter_dev->
- regs[NITIO_Gi_LoadA_Reg(counter->counter_index)];
+ data[0] = counter_dev->regs[NITIO_LOADA_REG(cidx)];
break;
case 2:
- data[0] =
- counter_dev->
- regs[NITIO_Gi_LoadB_Reg(counter->counter_index)];
+ data[0] = counter_dev->regs[NITIO_LOADB_REG(cidx)];
break;
}
return 0;
}
-EXPORT_SYMBOL_GPL(ni_tio_rinsn);
+EXPORT_SYMBOL_GPL(ni_tio_insn_read);
static unsigned ni_tio_next_load_register(struct ni_gpct *counter)
{
- const unsigned bits = read_register(counter,
- NITIO_Gxx_Status_Reg(counter->
- counter_index));
+ unsigned cidx = counter->counter_index;
+ const unsigned bits =
+ read_register(counter, NITIO_SHARED_STATUS_REG(cidx));
- if (bits & Gi_Next_Load_Source_Bit(counter->counter_index))
- return NITIO_Gi_LoadB_Reg(counter->counter_index);
+ if (bits & Gi_Next_Load_Source_Bit(cidx))
+ return NITIO_LOADB_REG(cidx);
else
- return NITIO_Gi_LoadA_Reg(counter->counter_index);
+ return NITIO_LOADA_REG(cidx);
}
-int ni_tio_winsn(struct ni_gpct *counter, struct comedi_insn *insn,
- unsigned int *data)
+int ni_tio_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
const unsigned channel = CR_CHAN(insn->chanspec);
+ unsigned cidx = counter->counter_index;
unsigned load_reg;
if (insn->n < 1)
@@ -1705,24 +1661,18 @@ int ni_tio_winsn(struct ni_gpct *counter, struct comedi_insn *insn,
/* Don't disturb load source select, just use whichever load register is already selected. */
load_reg = ni_tio_next_load_register(counter);
write_register(counter, data[0], load_reg);
- ni_tio_set_bits_transient(counter,
- NITIO_Gi_Command_Reg(counter->
- counter_index),
+ ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
0, 0, Gi_Load_Bit);
/* restore state of load reg to whatever the user last set it to */
write_register(counter, counter_dev->regs[load_reg], load_reg);
break;
case 1:
- counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] =
- data[0];
- write_register(counter, data[0],
- NITIO_Gi_LoadA_Reg(counter->counter_index));
+ counter_dev->regs[NITIO_LOADA_REG(cidx)] = data[0];
+ write_register(counter, data[0], NITIO_LOADA_REG(cidx));
break;
case 2:
- counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] =
- data[0];
- write_register(counter, data[0],
- NITIO_Gi_LoadB_Reg(counter->counter_index));
+ counter_dev->regs[NITIO_LOADB_REG(cidx)] = data[0];
+ write_register(counter, data[0], NITIO_LOADB_REG(cidx));
break;
default:
return -EINVAL;
@@ -1730,4 +1680,19 @@ int ni_tio_winsn(struct ni_gpct *counter, struct comedi_insn *insn,
}
return 0;
}
-EXPORT_SYMBOL_GPL(ni_tio_winsn);
+EXPORT_SYMBOL_GPL(ni_tio_insn_write);
+
+static int __init ni_tio_init_module(void)
+{
+ return 0;
+}
+module_init(ni_tio_init_module);
+
+static void __exit ni_tio_cleanup_module(void)
+{
+}
+module_exit(ni_tio_cleanup_module);
+
+MODULE_AUTHOR("Comedi <comedi@comedi.org>");
+MODULE_DESCRIPTION("Comedi support for NI general-purpose counters");
+MODULE_LICENSE("GPL");
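
The exported handlers above now take the generic comedi callback arguments and recover the counter from s->private, so a board driver can plug them straight into a counter subdevice. A minimal sketch of that wiring (not part of this patch; the helper name and the flags/maxdata values are illustrative, and "ni_tio.h" is assumed to be included):

/*
 * Sketch only: register one NI general-purpose counter as a comedi
 * subdevice using the new handler signatures.
 */
static void example_setup_counter_subdev(struct comedi_subdevice *s,
					 struct ni_gpct *counter)
{
	s->type		= COMEDI_SUBD_COUNTER;
	s->subdev_flags	= SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL |
			  SDF_CMD_READ;
	s->n_chan	= 3;
	s->maxdata	= 0xffffffff;
	s->len_chanlist	= 1;
	s->private	= counter;	/* handlers fetch the counter from here */
	s->insn_read	= ni_tio_insn_read;
	s->insn_write	= ni_tio_insn_write;
	s->insn_config	= ni_tio_insn_config;
	s->do_cmd	= ni_tio_cmd;
	s->do_cmdtest	= ni_tio_cmdtest;
}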
diff --git a/drivers/staging/comedi/drivers/ni_tio.h b/drivers/staging/comedi/drivers/ni_tio.h
index 7e13697..68378da 100644
--- a/drivers/staging/comedi/drivers/ni_tio.h
+++ b/drivers/staging/comedi/drivers/ni_tio.h
@@ -25,77 +25,77 @@ struct mite_struct;
struct ni_gpct_device;
enum ni_gpct_register {
- NITIO_G0_Autoincrement_Reg,
- NITIO_G1_Autoincrement_Reg,
- NITIO_G2_Autoincrement_Reg,
- NITIO_G3_Autoincrement_Reg,
- NITIO_G0_Command_Reg,
- NITIO_G1_Command_Reg,
- NITIO_G2_Command_Reg,
- NITIO_G3_Command_Reg,
- NITIO_G0_HW_Save_Reg,
- NITIO_G1_HW_Save_Reg,
- NITIO_G2_HW_Save_Reg,
- NITIO_G3_HW_Save_Reg,
- NITIO_G0_SW_Save_Reg,
- NITIO_G1_SW_Save_Reg,
- NITIO_G2_SW_Save_Reg,
- NITIO_G3_SW_Save_Reg,
- NITIO_G0_Mode_Reg,
- NITIO_G1_Mode_Reg,
- NITIO_G2_Mode_Reg,
- NITIO_G3_Mode_Reg,
- NITIO_G0_LoadA_Reg,
- NITIO_G1_LoadA_Reg,
- NITIO_G2_LoadA_Reg,
- NITIO_G3_LoadA_Reg,
- NITIO_G0_LoadB_Reg,
- NITIO_G1_LoadB_Reg,
- NITIO_G2_LoadB_Reg,
- NITIO_G3_LoadB_Reg,
- NITIO_G0_Input_Select_Reg,
- NITIO_G1_Input_Select_Reg,
- NITIO_G2_Input_Select_Reg,
- NITIO_G3_Input_Select_Reg,
- NITIO_G0_Counting_Mode_Reg,
- NITIO_G1_Counting_Mode_Reg,
- NITIO_G2_Counting_Mode_Reg,
- NITIO_G3_Counting_Mode_Reg,
- NITIO_G0_Second_Gate_Reg,
- NITIO_G1_Second_Gate_Reg,
- NITIO_G2_Second_Gate_Reg,
- NITIO_G3_Second_Gate_Reg,
- NITIO_G01_Status_Reg,
- NITIO_G23_Status_Reg,
- NITIO_G01_Joint_Reset_Reg,
- NITIO_G23_Joint_Reset_Reg,
- NITIO_G01_Joint_Status1_Reg,
- NITIO_G23_Joint_Status1_Reg,
- NITIO_G01_Joint_Status2_Reg,
- NITIO_G23_Joint_Status2_Reg,
- NITIO_G0_DMA_Config_Reg,
- NITIO_G1_DMA_Config_Reg,
- NITIO_G2_DMA_Config_Reg,
- NITIO_G3_DMA_Config_Reg,
- NITIO_G0_DMA_Status_Reg,
- NITIO_G1_DMA_Status_Reg,
- NITIO_G2_DMA_Status_Reg,
- NITIO_G3_DMA_Status_Reg,
- NITIO_G0_ABZ_Reg,
- NITIO_G1_ABZ_Reg,
- NITIO_G0_Interrupt_Acknowledge_Reg,
- NITIO_G1_Interrupt_Acknowledge_Reg,
- NITIO_G2_Interrupt_Acknowledge_Reg,
- NITIO_G3_Interrupt_Acknowledge_Reg,
- NITIO_G0_Status_Reg,
- NITIO_G1_Status_Reg,
- NITIO_G2_Status_Reg,
- NITIO_G3_Status_Reg,
- NITIO_G0_Interrupt_Enable_Reg,
- NITIO_G1_Interrupt_Enable_Reg,
- NITIO_G2_Interrupt_Enable_Reg,
- NITIO_G3_Interrupt_Enable_Reg,
- NITIO_Num_Registers,
+ NITIO_G0_AUTO_INC,
+ NITIO_G1_AUTO_INC,
+ NITIO_G2_AUTO_INC,
+ NITIO_G3_AUTO_INC,
+ NITIO_G0_CMD,
+ NITIO_G1_CMD,
+ NITIO_G2_CMD,
+ NITIO_G3_CMD,
+ NITIO_G0_HW_SAVE,
+ NITIO_G1_HW_SAVE,
+ NITIO_G2_HW_SAVE,
+ NITIO_G3_HW_SAVE,
+ NITIO_G0_SW_SAVE,
+ NITIO_G1_SW_SAVE,
+ NITIO_G2_SW_SAVE,
+ NITIO_G3_SW_SAVE,
+ NITIO_G0_MODE,
+ NITIO_G1_MODE,
+ NITIO_G2_MODE,
+ NITIO_G3_MODE,
+ NITIO_G0_LOADA,
+ NITIO_G1_LOADA,
+ NITIO_G2_LOADA,
+ NITIO_G3_LOADA,
+ NITIO_G0_LOADB,
+ NITIO_G1_LOADB,
+ NITIO_G2_LOADB,
+ NITIO_G3_LOADB,
+ NITIO_G0_INPUT_SEL,
+ NITIO_G1_INPUT_SEL,
+ NITIO_G2_INPUT_SEL,
+ NITIO_G3_INPUT_SEL,
+ NITIO_G0_CNT_MODE,
+ NITIO_G1_CNT_MODE,
+ NITIO_G2_CNT_MODE,
+ NITIO_G3_CNT_MODE,
+ NITIO_G0_GATE2,
+ NITIO_G1_GATE2,
+ NITIO_G2_GATE2,
+ NITIO_G3_GATE2,
+ NITIO_G01_STATUS,
+ NITIO_G23_STATUS,
+ NITIO_G01_RESET,
+ NITIO_G23_RESET,
+ NITIO_G01_STATUS1,
+ NITIO_G23_STATUS1,
+ NITIO_G01_STATUS2,
+ NITIO_G23_STATUS2,
+ NITIO_G0_DMA_CFG,
+ NITIO_G1_DMA_CFG,
+ NITIO_G2_DMA_CFG,
+ NITIO_G3_DMA_CFG,
+ NITIO_G0_DMA_STATUS,
+ NITIO_G1_DMA_STATUS,
+ NITIO_G2_DMA_STATUS,
+ NITIO_G3_DMA_STATUS,
+ NITIO_G0_ABZ,
+ NITIO_G1_ABZ,
+ NITIO_G0_INT_ACK,
+ NITIO_G1_INT_ACK,
+ NITIO_G2_INT_ACK,
+ NITIO_G3_INT_ACK,
+ NITIO_G0_STATUS,
+ NITIO_G1_STATUS,
+ NITIO_G2_STATUS,
+ NITIO_G3_STATUS,
+ NITIO_G0_INT_ENA,
+ NITIO_G1_INT_ENA,
+ NITIO_G2_INT_ENA,
+ NITIO_G3_INT_ENA,
+ NITIO_NUM_REGS,
};
enum ni_gpct_variant {
@@ -122,48 +122,35 @@ struct ni_gpct_device {
enum ni_gpct_variant variant;
struct ni_gpct *counters;
unsigned num_counters;
- unsigned regs[NITIO_Num_Registers];
+ unsigned regs[NITIO_NUM_REGS];
spinlock_t regs_lock;
};
-extern struct ni_gpct_device *ni_gpct_device_construct(struct comedi_device
- *dev,
- void (*write_register)
- (struct ni_gpct *
- counter, unsigned bits,
- enum ni_gpct_register
- reg),
- unsigned (*read_register)
- (struct ni_gpct *
- counter,
- enum ni_gpct_register
- reg),
- enum ni_gpct_variant
- variant,
- unsigned num_counters);
-extern void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev);
-extern void ni_tio_init_counter(struct ni_gpct *counter);
-extern int ni_tio_rinsn(struct ni_gpct *counter,
- struct comedi_insn *insn, unsigned int *data);
-extern int ni_tio_insn_config(struct ni_gpct *counter,
- struct comedi_insn *insn, unsigned int *data);
-extern int ni_tio_winsn(struct ni_gpct *counter,
- struct comedi_insn *insn, unsigned int *data);
-extern int ni_tio_cmd(struct ni_gpct *counter, struct comedi_async *async);
-extern int ni_tio_cmdtest(struct ni_gpct *counter, struct comedi_cmd *cmd);
-extern int ni_tio_cancel(struct ni_gpct *counter);
-extern void ni_tio_handle_interrupt(struct ni_gpct *counter,
- struct comedi_subdevice *s);
-extern void ni_tio_set_mite_channel(struct ni_gpct *counter,
- struct mite_channel *mite_chan);
-extern void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
- int *gate_error, int *tc_error,
- int *perm_stale_data,
- int *stale_data);
-
-static inline struct ni_gpct *subdev_to_counter(struct comedi_subdevice *s)
-{
- return s->private;
-}
+struct ni_gpct_device *
+ni_gpct_device_construct(struct comedi_device *,
+ void (*write_register)(struct ni_gpct *,
+ unsigned bits,
+ enum ni_gpct_register),
+ unsigned (*read_register)(struct ni_gpct *,
+ enum ni_gpct_register),
+ enum ni_gpct_variant,
+ unsigned num_counters);
+void ni_gpct_device_destroy(struct ni_gpct_device *);
+void ni_tio_init_counter(struct ni_gpct *);
+int ni_tio_insn_read(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned int *data);
+int ni_tio_insn_config(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned int *data);
+int ni_tio_insn_write(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned int *data);
+int ni_tio_cmd(struct comedi_device *, struct comedi_subdevice *);
+int ni_tio_cmdtest(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_cmd *);
+int ni_tio_cancel(struct ni_gpct *);
+void ni_tio_handle_interrupt(struct ni_gpct *, struct comedi_subdevice *);
+void ni_tio_set_mite_channel(struct ni_gpct *, struct mite_channel *);
+void ni_tio_acknowledge_and_confirm(struct ni_gpct *,
+ int *gate_error, int *tc_error,
+ int *perm_stale_data, int *stale_data);
#endif /* _COMEDI_NI_TIO_H */
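
The constructor keeps its callback-based interface (only the prototype formatting changed). A minimal caller sketch, assuming hypothetical board-specific register accessors and an illustrative counter count:

/*
 * Sketch only (assumed names): the accessors translate the abstract
 * enum ni_gpct_register into reads/writes of the board's registers.
 */
static void board_gpct_write(struct ni_gpct *counter, unsigned bits,
			     enum ni_gpct_register reg)
{
	/* map 'reg' to a device register offset and write 'bits' */
}

static unsigned board_gpct_read(struct ni_gpct *counter,
				enum ni_gpct_register reg)
{
	return 0;	/* map 'reg' and read back the device register */
}

static int example_attach_counters(struct comedi_device *dev,
				   struct ni_gpct_device **out)
{
	struct ni_gpct_device *counter_dev;

	counter_dev = ni_gpct_device_construct(dev, board_gpct_write,
					       board_gpct_read,
					       ni_gpct_variant_m_series,
					       4 /* counters on this board */);
	if (!counter_dev)
		return -ENOMEM;
	*out = counter_dev;
	return 0;
}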
diff --git a/drivers/staging/comedi/drivers/ni_tio_internal.h b/drivers/staging/comedi/drivers/ni_tio_internal.h
index b009876..15b81b8 100644
--- a/drivers/staging/comedi/drivers/ni_tio_internal.h
+++ b/drivers/staging/comedi/drivers/ni_tio_internal.h
@@ -21,409 +21,26 @@
#include "ni_tio.h"
-static inline enum ni_gpct_register NITIO_Gi_Autoincrement_Reg(unsigned
- counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Autoincrement_Reg;
- break;
- case 1:
- return NITIO_G1_Autoincrement_Reg;
- break;
- case 2:
- return NITIO_G2_Autoincrement_Reg;
- break;
- case 3:
- return NITIO_G3_Autoincrement_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Command_Reg(unsigned counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Command_Reg;
- break;
- case 1:
- return NITIO_G1_Command_Reg;
- break;
- case 2:
- return NITIO_G2_Command_Reg;
- break;
- case 3:
- return NITIO_G3_Command_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Counting_Mode_Reg(unsigned
- counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Counting_Mode_Reg;
- break;
- case 1:
- return NITIO_G1_Counting_Mode_Reg;
- break;
- case 2:
- return NITIO_G2_Counting_Mode_Reg;
- break;
- case 3:
- return NITIO_G3_Counting_Mode_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Input_Select_Reg(unsigned
- counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Input_Select_Reg;
- break;
- case 1:
- return NITIO_G1_Input_Select_Reg;
- break;
- case 2:
- return NITIO_G2_Input_Select_Reg;
- break;
- case 3:
- return NITIO_G3_Input_Select_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gxx_Joint_Reset_Reg(unsigned
- counter_index)
-{
- switch (counter_index) {
- case 0:
- case 1:
- return NITIO_G01_Joint_Reset_Reg;
- break;
- case 2:
- case 3:
- return NITIO_G23_Joint_Reset_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gxx_Joint_Status1_Reg(unsigned
- counter_index)
-{
- switch (counter_index) {
- case 0:
- case 1:
- return NITIO_G01_Joint_Status1_Reg;
- break;
- case 2:
- case 3:
- return NITIO_G23_Joint_Status1_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gxx_Joint_Status2_Reg(unsigned
- counter_index)
-{
- switch (counter_index) {
- case 0:
- case 1:
- return NITIO_G01_Joint_Status2_Reg;
- break;
- case 2:
- case 3:
- return NITIO_G23_Joint_Status2_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gxx_Status_Reg(unsigned counter_index)
-{
- switch (counter_index) {
- case 0:
- case 1:
- return NITIO_G01_Status_Reg;
- break;
- case 2:
- case 3:
- return NITIO_G23_Status_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_LoadA_Reg(unsigned counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_LoadA_Reg;
- break;
- case 1:
- return NITIO_G1_LoadA_Reg;
- break;
- case 2:
- return NITIO_G2_LoadA_Reg;
- break;
- case 3:
- return NITIO_G3_LoadA_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_LoadB_Reg(unsigned counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_LoadB_Reg;
- break;
- case 1:
- return NITIO_G1_LoadB_Reg;
- break;
- case 2:
- return NITIO_G2_LoadB_Reg;
- break;
- case 3:
- return NITIO_G3_LoadB_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Mode_Reg(unsigned counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Mode_Reg;
- break;
- case 1:
- return NITIO_G1_Mode_Reg;
- break;
- case 2:
- return NITIO_G2_Mode_Reg;
- break;
- case 3:
- return NITIO_G3_Mode_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_SW_Save_Reg(int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_SW_Save_Reg;
- break;
- case 1:
- return NITIO_G1_SW_Save_Reg;
- break;
- case 2:
- return NITIO_G2_SW_Save_Reg;
- break;
- case 3:
- return NITIO_G3_SW_Save_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Second_Gate_Reg(int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Second_Gate_Reg;
- break;
- case 1:
- return NITIO_G1_Second_Gate_Reg;
- break;
- case 2:
- return NITIO_G2_Second_Gate_Reg;
- break;
- case 3:
- return NITIO_G3_Second_Gate_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_DMA_Config_Reg(int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_DMA_Config_Reg;
- break;
- case 1:
- return NITIO_G1_DMA_Config_Reg;
- break;
- case 2:
- return NITIO_G2_DMA_Config_Reg;
- break;
- case 3:
- return NITIO_G3_DMA_Config_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_DMA_Status_Reg(int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_DMA_Status_Reg;
- break;
- case 1:
- return NITIO_G1_DMA_Status_Reg;
- break;
- case 2:
- return NITIO_G2_DMA_Status_Reg;
- break;
- case 3:
- return NITIO_G3_DMA_Status_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_ABZ_Reg(int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_ABZ_Reg;
- break;
- case 1:
- return NITIO_G1_ABZ_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Interrupt_Acknowledge_Reg(
- int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Interrupt_Acknowledge_Reg;
- break;
- case 1:
- return NITIO_G1_Interrupt_Acknowledge_Reg;
- break;
- case 2:
- return NITIO_G2_Interrupt_Acknowledge_Reg;
- break;
- case 3:
- return NITIO_G3_Interrupt_Acknowledge_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Status_Reg(int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Status_Reg;
- break;
- case 1:
- return NITIO_G1_Status_Reg;
- break;
- case 2:
- return NITIO_G2_Status_Reg;
- break;
- case 3:
- return NITIO_G3_Status_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Interrupt_Enable_Reg(
- int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Interrupt_Enable_Reg;
- break;
- case 1:
- return NITIO_G1_Interrupt_Enable_Reg;
- break;
- case 2:
- return NITIO_G2_Interrupt_Enable_Reg;
- break;
- case 3:
- return NITIO_G3_Interrupt_Enable_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
+#define NITIO_AUTO_INC_REG(x) (NITIO_G0_AUTO_INC + (x))
+#define NITIO_CMD_REG(x) (NITIO_G0_CMD + (x))
+#define NITIO_HW_SAVE_REG(x) (NITIO_G0_HW_SAVE + (x))
+#define NITIO_SW_SAVE_REG(x) (NITIO_G0_SW_SAVE + (x))
+#define NITIO_MODE_REG(x) (NITIO_G0_MODE + (x))
+#define NITIO_LOADA_REG(x) (NITIO_G0_LOADA + (x))
+#define NITIO_LOADB_REG(x) (NITIO_G0_LOADB + (x))
+#define NITIO_INPUT_SEL_REG(x) (NITIO_G0_INPUT_SEL + (x))
+#define NITIO_CNT_MODE_REG(x) (NITIO_G0_CNT_MODE + (x))
+#define NITIO_GATE2_REG(x) (NITIO_G0_GATE2 + (x))
+#define NITIO_SHARED_STATUS_REG(x) (NITIO_G01_STATUS + ((x) / 2))
+#define NITIO_RESET_REG(x) (NITIO_G01_RESET + ((x) / 2))
+#define NITIO_STATUS1_REG(x) (NITIO_G01_STATUS1 + ((x) / 2))
+#define NITIO_STATUS2_REG(x) (NITIO_G01_STATUS2 + ((x) / 2))
+#define NITIO_DMA_CFG_REG(x) (NITIO_G0_DMA_CFG + (x))
+#define NITIO_DMA_STATUS_REG(x) (NITIO_G0_DMA_STATUS + (x))
+#define NITIO_ABZ_REG(x) (NITIO_G0_ABZ + (x))
+#define NITIO_INT_ACK_REG(x) (NITIO_G0_INT_ACK + (x))
+#define NITIO_STATUS_REG(x) (NITIO_G0_STATUS + (x))
+#define NITIO_INT_ENA_REG(x) (NITIO_G0_INT_ENA + (x))
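
These index-arithmetic macros only work because the renamed enum keeps the per-counter registers in consecutive G0..G3 order and the shared registers in G01/G23 order, which is why the pair-shared ones divide the counter index by two. For example, with counter index 2:

	NITIO_CNT_MODE_REG(2)      /* NITIO_G0_CNT_MODE + 2   == NITIO_G2_CNT_MODE */
	NITIO_SHARED_STATUS_REG(2) /* NITIO_G01_STATUS + 2/2  == NITIO_G23_STATUS  */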
enum Gi_Auto_Increment_Reg_Bits {
Gi_Auto_Increment_Mask = 0xff
@@ -699,14 +316,14 @@ static inline unsigned Gi_Gate_Interrupt_Enable_Bit(unsigned counter_index)
static inline void write_register(struct ni_gpct *counter, unsigned bits,
enum ni_gpct_register reg)
{
- BUG_ON(reg >= NITIO_Num_Registers);
+ BUG_ON(reg >= NITIO_NUM_REGS);
counter->counter_dev->write_register(counter, bits, reg);
}
static inline unsigned read_register(struct ni_gpct *counter,
enum ni_gpct_register reg)
{
- BUG_ON(reg >= NITIO_Num_Registers);
+ BUG_ON(reg >= NITIO_NUM_REGS);
return counter->counter_dev->read_register(counter, reg);
}
@@ -738,7 +355,7 @@ static inline void ni_tio_set_bits_transient(struct ni_gpct *counter,
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned long flags;
- BUG_ON(register_index >= NITIO_Num_Registers);
+ BUG_ON(register_index >= NITIO_NUM_REGS);
spin_lock_irqsave(&counter_dev->regs_lock, flags);
counter_dev->regs[register_index] &= ~bit_mask;
counter_dev->regs[register_index] |= (bit_values & bit_mask);
@@ -773,7 +390,7 @@ static inline unsigned ni_tio_get_soft_copy(const struct ni_gpct *counter,
unsigned long flags;
unsigned value;
- BUG_ON(register_index >= NITIO_Num_Registers);
+ BUG_ON(register_index >= NITIO_NUM_REGS);
spin_lock_irqsave(&counter_dev->regs_lock, flags);
value = counter_dev->regs[register_index];
spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 45691ef..7d64f88 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -49,14 +49,11 @@ TODO:
#include "ni_tio_internal.h"
#include "mite.h"
-MODULE_AUTHOR("Comedi <comedi@comedi.org>");
-MODULE_DESCRIPTION("Comedi command support for NI general-purpose counters");
-MODULE_LICENSE("GPL");
-
static void ni_tio_configure_dma(struct ni_gpct *counter, short enable,
short read_not_write)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned cidx = counter->counter_index;
unsigned input_select_bits = 0;
if (enable) {
@@ -65,8 +62,7 @@ static void ni_tio_configure_dma(struct ni_gpct *counter, short enable,
else
input_select_bits |= Gi_Write_Acknowledges_Irq;
}
- ni_tio_set_bits(counter,
- NITIO_Gi_Input_Select_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
Gi_Read_Acknowledges_Irq | Gi_Write_Acknowledges_Irq,
input_select_bits);
switch (counter_dev->variant) {
@@ -83,9 +79,7 @@ static void ni_tio_configure_dma(struct ni_gpct *counter, short enable,
}
if (read_not_write == 0)
gi_dma_config_bits |= Gi_DMA_Write_Bit;
- ni_tio_set_bits(counter,
- NITIO_Gi_DMA_Config_Reg(counter->
- counter_index),
+ ni_tio_set_bits(counter, NITIO_DMA_CFG_REG(cidx),
Gi_DMA_Enable_Bit | Gi_DMA_Int_Bit |
Gi_DMA_Write_Bit, gi_dma_config_bits);
}
@@ -122,6 +116,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
static int ni_tio_input_cmd(struct ni_gpct *counter, struct comedi_async *async)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned cidx = counter->counter_index;
struct comedi_cmd *cmd = &async->cmd;
int retval = 0;
@@ -140,8 +135,7 @@ static int ni_tio_input_cmd(struct ni_gpct *counter, struct comedi_async *async)
BUG();
break;
}
- ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
- Gi_Save_Trace_Bit, 0);
+ ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), Gi_Save_Trace_Bit, 0);
ni_tio_configure_dma(counter, 1, 1);
switch (cmd->start_src) {
case TRIG_NOW:
@@ -185,6 +179,7 @@ static int ni_tio_output_cmd(struct ni_gpct *counter,
static int ni_tio_cmd_setup(struct ni_gpct *counter, struct comedi_async *async)
{
struct comedi_cmd *cmd = &async->cmd;
+ unsigned cidx = counter->counter_index;
int set_gate_source = 0;
unsigned gate_source;
int retval = 0;
@@ -199,19 +194,17 @@ static int ni_tio_cmd_setup(struct ni_gpct *counter, struct comedi_async *async)
if (set_gate_source)
retval = ni_tio_set_gate_src(counter, 0, gate_source);
if (cmd->flags & TRIG_WAKE_EOS) {
- ni_tio_set_bits(counter,
- NITIO_Gi_Interrupt_Enable_Reg(counter->
- counter_index),
- Gi_Gate_Interrupt_Enable_Bit(counter->
- counter_index),
- Gi_Gate_Interrupt_Enable_Bit(counter->
- counter_index));
+ ni_tio_set_bits(counter, NITIO_INT_ENA_REG(cidx),
+ Gi_Gate_Interrupt_Enable_Bit(cidx),
+ Gi_Gate_Interrupt_Enable_Bit(cidx));
}
return retval;
}
-int ni_tio_cmd(struct ni_gpct *counter, struct comedi_async *async)
+int ni_tio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct ni_gpct *counter = s->private;
+ struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
int retval = 0;
unsigned long flags;
@@ -237,8 +230,11 @@ int ni_tio_cmd(struct ni_gpct *counter, struct comedi_async *async)
}
EXPORT_SYMBOL_GPL(ni_tio_cmd);
-int ni_tio_cmdtest(struct ni_gpct *counter, struct comedi_cmd *cmd)
+int ni_tio_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
+ struct ni_gpct *counter = s->private;
int err = 0;
unsigned int sources;
@@ -301,6 +297,7 @@ EXPORT_SYMBOL_GPL(ni_tio_cmdtest);
int ni_tio_cancel(struct ni_gpct *counter)
{
+ unsigned cidx = counter->counter_index;
unsigned long flags;
ni_tio_arm(counter, 0, 0);
@@ -310,10 +307,8 @@ int ni_tio_cancel(struct ni_gpct *counter)
spin_unlock_irqrestore(&counter->lock, flags);
ni_tio_configure_dma(counter, 0, 0);
- ni_tio_set_bits(counter,
- NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index),
- Gi_Gate_Interrupt_Enable_Bit(counter->counter_index),
- 0x0);
+ ni_tio_set_bits(counter, NITIO_INT_ENA_REG(cidx),
+ Gi_Gate_Interrupt_Enable_Bit(cidx), 0x0);
return 0;
}
EXPORT_SYMBOL_GPL(ni_tio_cancel);
@@ -353,14 +348,11 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error,
int *tc_error, int *perm_stale_data,
int *stale_data)
{
+ unsigned cidx = counter->counter_index;
const unsigned short gxx_status = read_register(counter,
- NITIO_Gxx_Status_Reg
- (counter->
- counter_index));
+ NITIO_SHARED_STATUS_REG(cidx));
const unsigned short gi_status = read_register(counter,
- NITIO_Gi_Status_Reg
- (counter->
- counter_index));
+ NITIO_STATUS_REG(cidx));
unsigned ack = 0;
if (gate_error)
@@ -372,8 +364,8 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error,
if (stale_data)
*stale_data = 0;
- if (gxx_status & Gi_Gate_Error_Bit(counter->counter_index)) {
- ack |= Gi_Gate_Error_Confirm_Bit(counter->counter_index);
+ if (gxx_status & Gi_Gate_Error_Bit(cidx)) {
+ ack |= Gi_Gate_Error_Confirm_Bit(cidx);
if (gate_error) {
/*660x don't support automatic acknowledgement
of gate interrupt via dma read/write
@@ -384,8 +376,8 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error,
}
}
}
- if (gxx_status & Gi_TC_Error_Bit(counter->counter_index)) {
- ack |= Gi_TC_Error_Confirm_Bit(counter->counter_index);
+ if (gxx_status & Gi_TC_Error_Bit(cidx)) {
+ ack |= Gi_TC_Error_Confirm_Bit(cidx);
if (tc_error)
*tc_error = 1;
}
@@ -396,21 +388,15 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error,
ack |= Gi_Gate_Interrupt_Ack_Bit;
}
if (ack)
- write_register(counter, ack,
- NITIO_Gi_Interrupt_Acknowledge_Reg
- (counter->counter_index));
- if (ni_tio_get_soft_copy
- (counter,
- NITIO_Gi_Mode_Reg(counter->counter_index)) &
+ write_register(counter, ack, NITIO_INT_ACK_REG(cidx));
+ if (ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx)) &
Gi_Loading_On_Gate_Bit) {
- if (gxx_status & Gi_Stale_Data_Bit(counter->counter_index)) {
+ if (gxx_status & Gi_Stale_Data_Bit(cidx)) {
if (stale_data)
*stale_data = 1;
}
- if (read_register(counter,
- NITIO_Gxx_Joint_Status2_Reg
- (counter->counter_index)) &
- Gi_Permanent_Stale_Bit(counter->counter_index)) {
+ if (read_register(counter, NITIO_STATUS2_REG(cidx)) &
+ Gi_Permanent_Stale_Bit(cidx)) {
dev_info(counter->counter_dev->dev->class_dev,
"%s: Gi_Permanent_Stale_Data detected.\n",
__func__);
@@ -424,6 +410,7 @@ EXPORT_SYMBOL_GPL(ni_tio_acknowledge_and_confirm);
void ni_tio_handle_interrupt(struct ni_gpct *counter,
struct comedi_subdevice *s)
{
+ unsigned cidx = counter->counter_index;
unsigned gpct_mite_status;
unsigned long flags;
int gate_error;
@@ -442,9 +429,8 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter,
switch (counter->counter_dev->variant) {
case ni_gpct_variant_m_series:
case ni_gpct_variant_660x:
- if (read_register(counter,
- NITIO_Gi_DMA_Status_Reg
- (counter->counter_index)) & Gi_DRQ_Error_Bit) {
+ if (read_register(counter, NITIO_DMA_STATUS_REG(cidx)) &
+ Gi_DRQ_Error_Bit) {
dev_notice(counter->counter_dev->dev->class_dev,
"%s: Gi_DRQ_Error detected.\n", __func__);
s->async->events |= COMEDI_CB_OVERFLOW;
@@ -484,11 +470,13 @@ static int __init ni_tiocmd_init_module(void)
{
return 0;
}
-
module_init(ni_tiocmd_init_module);
static void __exit ni_tiocmd_cleanup_module(void)
{
}
-
module_exit(ni_tiocmd_cleanup_module);
+
+MODULE_AUTHOR("Comedi <comedi@comedi.org>");
+MODULE_DESCRIPTION("Comedi command support for NI general-purpose counters");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c
index 03315ab..53613b3 100644
--- a/drivers/staging/comedi/drivers/pcl812.c
+++ b/drivers/staging/comedi/drivers/pcl812.c
@@ -162,156 +162,172 @@
#define MAX_CHANLIST_LEN 256 /* length of scan list */
-static const struct comedi_lrange range_pcl812pg_ai = { 5, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(0.3125),
- }
+static const struct comedi_lrange range_pcl812pg_ai = {
+ 5, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ BIP_RANGE(0.3125)
+ }
};
-static const struct comedi_lrange range_pcl812pg2_ai = { 5, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- }
+static const struct comedi_lrange range_pcl812pg2_ai = {
+ 5, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
+ }
};
-static const struct comedi_lrange range812_bipolar1_25 = { 1, {
- BIP_RANGE(1.25),
- }
+static const struct comedi_lrange range812_bipolar1_25 = {
+ 1, {
+ BIP_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range812_bipolar0_625 = { 1, {
- BIP_RANGE
- (0.625),
- }
+static const struct comedi_lrange range812_bipolar0_625 = {
+ 1, {
+ BIP_RANGE(0.625)
+ }
};
-static const struct comedi_lrange range812_bipolar0_3125 = { 1, {
- BIP_RANGE
- (0.3125),
- }
+static const struct comedi_lrange range812_bipolar0_3125 = {
+ 1, {
+ BIP_RANGE(0.3125)
+ }
};
-static const struct comedi_lrange range_pcl813b_ai = { 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- }
+static const struct comedi_lrange range_pcl813b_ai = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
+ }
};
-static const struct comedi_lrange range_pcl813b2_ai = { 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+static const struct comedi_lrange range_pcl813b2_ai = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_iso813_1_ai = { 5, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(0.3125),
- }
+static const struct comedi_lrange range_iso813_1_ai = {
+ 5, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ BIP_RANGE(0.3125)
+ }
};
-static const struct comedi_lrange range_iso813_1_2_ai = { 5, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- UNI_RANGE(0.625),
- }
+static const struct comedi_lrange range_iso813_1_2_ai = {
+ 5, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25),
+ UNI_RANGE(0.625)
+ }
};
-static const struct comedi_lrange range_iso813_2_ai = { 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- }
+static const struct comedi_lrange range_iso813_2_ai = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
+ }
};
-static const struct comedi_lrange range_iso813_2_2_ai = { 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+static const struct comedi_lrange range_iso813_2_2_ai = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_acl8113_1_ai = { 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- }
+static const struct comedi_lrange range_acl8113_1_ai = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
+ }
};
-static const struct comedi_lrange range_acl8113_1_2_ai = { 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+static const struct comedi_lrange range_acl8113_1_2_ai = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_acl8113_2_ai = { 3, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- }
+static const struct comedi_lrange range_acl8113_2_ai = {
+ 3, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_acl8113_2_2_ai = { 3, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- }
+static const struct comedi_lrange range_acl8113_2_2_ai = {
+ 3, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5)
+ }
};
-static const struct comedi_lrange range_acl8112dg_ai = { 9, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- BIP_RANGE(10),
- }
+static const struct comedi_lrange range_acl8112dg_ai = {
+ 9, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25),
+ BIP_RANGE(10)
+ }
};
-static const struct comedi_lrange range_acl8112hg_ai = { 12, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01),
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01),
- }
+static const struct comedi_lrange range_acl8112hg_ai = {
+ 12, {
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.005),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01),
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.01)
+ }
};
-static const struct comedi_lrange range_a821pgh_ai = { 4, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- }
+static const struct comedi_lrange range_a821pgh_ai = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.005)
+ }
};
struct pcl812_board {
@@ -404,9 +420,7 @@ static int pcl812_ai_insn_read(struct comedi_device *dev,
goto conv_finish;
udelay(1);
}
- printk
- ("comedi%d: pcl812: (%s at 0x%lx) A/D insn read timeout\n",
- dev->minor, dev->board_name, dev->iobase);
+ dev_dbg(dev->class_dev, "A/D insn read timeout\n");
outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
return -ETIME;
@@ -441,9 +455,7 @@ static int acl8216_ai_insn_read(struct comedi_device *dev,
goto conv_finish;
udelay(1);
}
- printk
- ("comedi%d: pcl812: (%s at 0x%lx) A/D insn read timeout\n",
- dev->minor, dev->board_name, dev->iobase);
+ dev_dbg(dev->class_dev, "A/D insn read timeout\n");
outb(0, dev->iobase + PCL812_MODE);
return -ETIME;
@@ -759,7 +771,7 @@ static irqreturn_t interrupt_pcl812_ai_int(int irq, void *d)
unsigned int mask, timeout;
struct comedi_device *dev = d;
struct pcl812_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned int next_chan;
s->async->events = 0;
@@ -786,10 +798,7 @@ static irqreturn_t interrupt_pcl812_ai_int(int irq, void *d)
}
if (err) {
- printk
- ("comedi%d: pcl812: (%s at 0x%lx) "
- "A/D cmd IRQ without DRDY!\n",
- dev->minor, dev->board_name, dev->iobase);
+ dev_dbg(dev->class_dev, "A/D cmd IRQ without DRDY!\n");
pcl812_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
@@ -865,7 +874,7 @@ static irqreturn_t interrupt_pcl812_ai_dma(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcl812_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned long dma_flags;
int len, bufptr;
unsigned short *ptr;
@@ -1095,7 +1104,6 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
const struct pcl812_board *board = comedi_board(dev);
struct pcl812_private *devpriv;
int ret, subdev;
- unsigned int irq;
unsigned int dma;
unsigned long pages;
struct comedi_subdevice *s;
@@ -1109,31 +1117,13 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!devpriv)
return -ENOMEM;
- irq = 0;
- if (board->IRQbits != 0) { /* board support IRQ */
- irq = it->options[1];
- if (irq) { /* we want to use IRQ */
- if (((1 << irq) & board->IRQbits) == 0) {
- printk
- (", IRQ %u is out of allowed range, "
- "DISABLING IT", irq);
- irq = 0; /* Bad IRQ */
- } else {
- if (request_irq(irq, interrupt_pcl812, 0,
- dev->board_name, dev)) {
- printk
- (", unable to allocate IRQ %u, "
- "DISABLING IT", irq);
- irq = 0; /* Can't use IRQ */
- } else {
- printk(KERN_INFO ", irq=%u", irq);
- }
- }
- }
+ if ((1 << it->options[1]) & board->IRQbits) {
+ ret = request_irq(it->options[1], interrupt_pcl812, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
- dev->irq = irq;
-
dma = 0;
devpriv->dma = dma;
if (!dev->irq)
@@ -1141,21 +1131,22 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (board->DMAbits != 0) { /* board support DMA */
dma = it->options[2];
if (((1 << dma) & board->DMAbits) == 0) {
- printk(", DMA is out of allowed range, FAIL!\n");
+ dev_err(dev->class_dev,
+ "DMA is out of allowed range, FAIL!\n");
return -EINVAL; /* Bad DMA */
}
ret = request_dma(dma, dev->board_name);
if (ret) {
- printk(KERN_ERR ", unable to allocate DMA %u, FAIL!\n",
- dma);
+ dev_err(dev->class_dev,
+ "unable to allocate DMA %u, FAIL!\n", dma);
return -EBUSY; /* DMA isn't free */
}
devpriv->dma = dma;
- printk(KERN_INFO ", dma=%u", dma);
pages = 1; /* we want 8KB */
devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[0]) {
- printk(", unable to allocate DMA buffer, FAIL!\n");
+ dev_err(dev->class_dev,
+ "unable to allocate DMA buffer, FAIL!\n");
/*
* maybe experiment with try_to_free_pages()
* will help ....
@@ -1167,7 +1158,8 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->hwdmasize[0] = PAGE_SIZE * (1 << pages);
devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[1]) {
- printk(KERN_ERR ", unable to allocate DMA buffer, FAIL!\n");
+ dev_err(dev->class_dev,
+ "unable to allocate DMA buffer, FAIL!\n");
return -EBUSY;
}
devpriv->dmapages[1] = pages;
@@ -1225,7 +1217,6 @@ no_dma:
break;
}
s->maxdata = board->ai_maxdata;
- s->len_chanlist = MAX_CHANLIST_LEN;
s->range_table = board->rangelist_ai;
if (board->board_type == boardACL8216)
s->insn_read = acl8216_ai_insn_read;
@@ -1233,13 +1224,14 @@ no_dma:
s->insn_read = pcl812_ai_insn_read;
devpriv->use_MPC = board->haveMPC508;
- s->cancel = pcl812_ai_cancel;
if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = MAX_CHANLIST_LEN;
s->do_cmdtest = pcl812_ai_cmdtest;
s->do_cmd = pcl812_ai_cmd;
s->poll = pcl812_ai_poll;
+ s->cancel = pcl812_ai_cancel;
}
switch (board->board_type) {
case boardPCL812PG:
@@ -1269,10 +1261,6 @@ no_dma:
default:
s->range_table = &range_bipolar10;
break;
- printk
- (", incorrect range number %d, changing "
- "to 0 (+/-10V)", it->options[4]);
- break;
}
break;
break;
@@ -1299,10 +1287,6 @@ no_dma:
default:
s->range_table = &range_iso813_1_ai;
break;
- printk
- (", incorrect range number %d, "
- "changing to 0 ", it->options[1]);
- break;
}
break;
case boardACL8113:
@@ -1324,10 +1308,6 @@ no_dma:
default:
s->range_table = &range_acl8113_1_ai;
break;
- printk
- (", incorrect range number %d, "
- "changing to 0 ", it->options[1]);
- break;
}
break;
}
@@ -1341,7 +1321,6 @@ no_dma:
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = board->n_aochan;
s->maxdata = 0xfff;
- s->len_chanlist = 1;
s->range_table = board->rangelist_ao;
s->insn_read = pcl812_ao_insn_read;
s->insn_write = pcl812_ao_insn_write;
@@ -1370,7 +1349,6 @@ no_dma:
s->subdev_flags = SDF_READABLE;
s->n_chan = board->n_dichan;
s->maxdata = 1;
- s->len_chanlist = board->n_dichan;
s->range_table = &range_digital;
s->insn_bits = pcl812_di_insn_bits;
subdev++;
@@ -1383,7 +1361,6 @@ no_dma:
s->subdev_flags = SDF_WRITABLE;
s->n_chan = board->n_dochan;
s->maxdata = 1;
- s->len_chanlist = board->n_dochan;
s->range_table = &range_digital;
s->insn_bits = pcl812_do_insn_bits;
subdev++;
@@ -1402,7 +1379,7 @@ no_dma:
break;
case boardA821:
devpriv->max_812_ai_mode0_rangewait = 1;
- devpriv->mode_reg_int = (irq << 4) & 0xf0;
+ devpriv->mode_reg_int = (dev->irq << 4) & 0xf0;
break;
case boardPCL813B:
case boardPCL813:
@@ -1413,7 +1390,6 @@ no_dma:
break;
}
- printk(KERN_INFO "\n");
devpriv->valid = 1;
pcl812_reset(dev);
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index ab9d2bd..e9d4704 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -44,14 +44,10 @@ Configuration Options:
#include "comedi_fc.h"
#include "8253.h"
-#define DEBUG(x) x
-
/* boards constants */
/* IO space len */
#define PCLx1x_RANGE 16
-/* #define outb(x,y) printk("OUTB(%x, 200+%d)\n", x,y-0x200); outb(x,y) */
-
/* INTEL 8254 counters */
#define PCL816_CTR0 4
#define PCL816_CTR1 5
@@ -85,16 +81,17 @@ Configuration Options:
#define MAGIC_DMA_WORD 0x5a5a
-static const struct comedi_lrange range_pcl816 = { 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+static const struct comedi_lrange range_pcl816 = {
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
struct pcl816_board {
@@ -132,7 +129,6 @@ struct pcl816_private {
unsigned int ai_scans; /* len of scanlist */
unsigned char ai_neverending; /* if=1, then we do neverending record (you must use cancel()) */
- int irq_free; /* 1=have allocated IRQ */
int irq_blocked; /* 1=IRQ now uses any subdev */
int irq_was_now_closed; /* when IRQ finish, there's stored int816_mode for last interrupt */
int int816_mode; /* who now uses IRQ - 1=AI1 int, 2=AI1 dma, 3=AI3 int, 4AI3 dma */
@@ -143,7 +139,6 @@ struct pcl816_private {
unsigned int ai_act_chanlist_pos; /* actual position in MUX list */
unsigned int ai_n_chan; /* how many channels per scan */
unsigned int ai_poll_ptr; /* how many samples transferred by poll */
- struct comedi_subdevice *sub_ai; /* ptr to AI subdevice */
};
/*
@@ -176,7 +171,6 @@ static int pcl816_ai_insn_read(struct comedi_device *dev,
int n;
int timeout;
- DPRINTK("mode 0 analog input\n");
/* software trigger, DMA and INT off */
outb(0, dev->iobase + PCL816_CONTROL);
/* clear INT (conversion end) flag */
@@ -228,7 +222,7 @@ static irqreturn_t interrupt_pcl816_ai_mode13_int(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcl816_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned char low, hi;
int timeout = 50; /* wait max 50us */
@@ -322,7 +316,7 @@ static irqreturn_t interrupt_pcl816_ai_mode13_dma(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcl816_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
int len, bufptr, this_dma_buf;
unsigned long dma_flags;
unsigned short *ptr;
@@ -372,8 +366,6 @@ static irqreturn_t interrupt_pcl816(int irq, void *d)
struct comedi_device *dev = d;
struct pcl816_private *devpriv = dev->private;
- DPRINTK("<I>");
-
if (!dev->attached) {
comedi_error(dev, "premature interrupt");
return IRQ_HANDLED;
@@ -389,8 +381,7 @@ static irqreturn_t interrupt_pcl816(int irq, void *d)
}
outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
- if (!dev->irq || !devpriv->irq_free || !devpriv->irq_blocked ||
- !devpriv->int816_mode) {
+ if (!dev->irq || !devpriv->irq_blocked || !devpriv->int816_mode) {
if (devpriv->irq_was_now_closed) {
devpriv->irq_was_now_closed = 0;
/* comedi_error(dev,"last IRQ.."); */
@@ -405,22 +396,6 @@ static irqreturn_t interrupt_pcl816(int irq, void *d)
/*
==============================================================================
- COMMAND MODE
-*/
-static void pcl816_cmdtest_out(int e, struct comedi_cmd *cmd)
-{
- printk(KERN_INFO "pcl816 e=%d startsrc=%x scansrc=%x convsrc=%x\n", e,
- cmd->start_src, cmd->scan_begin_src, cmd->convert_src);
- printk(KERN_INFO "pcl816 e=%d startarg=%d scanarg=%d convarg=%d\n", e,
- cmd->start_arg, cmd->scan_begin_arg, cmd->convert_arg);
- printk(KERN_INFO "pcl816 e=%d stopsrc=%x scanend=%x\n", e,
- cmd->stop_src, cmd->scan_end_src);
- printk(KERN_INFO "pcl816 e=%d stoparg=%d scanendarg=%d chanlistlen=%d\n",
- e, cmd->stop_arg, cmd->scan_end_arg, cmd->chanlist_len);
-}
-
-/*
-==============================================================================
*/
static int pcl816_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
@@ -429,10 +404,6 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
int err = 0;
int tmp, divisor1 = 0, divisor2 = 0;
- DEBUG(printk(KERN_INFO "pcl816 pcl812_ai_cmdtest\n");
- pcl816_cmdtest_out(-1, cmd);
- );
-
/* Step 1 : check if triggers are trivially valid */
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
@@ -566,15 +537,6 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_neverending = 1;
}
- /* don't we want wake up every scan? */
- if ((cmd->flags & TRIG_WAKE_EOS)) {
- printk(KERN_INFO
- "pl816: You wankt WAKE_EOS but I dont want handle it");
- /* devpriv->ai_eos=1; */
- /* if (devpriv->ai_n_chan==1) */
- /* devpriv->dma=0; // DMA is useless for this situation */
- }
-
if (devpriv->dma) {
bytes = devpriv->hwdmasize[0];
if (!devpriv->ai_neverending) {
@@ -630,7 +592,6 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
break;
}
- DPRINTK("pcl816 END: pcl812_ai_cmd()\n");
return 0;
}
@@ -685,8 +646,6 @@ static int pcl816_ai_cancel(struct comedi_device *dev,
{
struct pcl816_private *devpriv = dev->private;
-/* DEBUG(printk("pcl816_ai_cancel()\n");) */
-
if (devpriv->irq_blocked > 0) {
switch (devpriv->int816_mode) {
case INT_TYPE_AI1_DMA:
@@ -719,9 +678,7 @@ static int pcl816_ai_cancel(struct comedi_device *dev,
break;
}
}
-
- DEBUG(printk("comedi: pcl816_ai_cancel() successful\n");)
- return 0;
+ return 0;
}
/*
@@ -788,8 +745,8 @@ start_pacer(struct comedi_device *dev, int mode, unsigned int divisor1,
udelay(1);
if (mode == 1) {
- DPRINTK("mode %d, divisor1 %d, divisor2 %d\n", mode, divisor1,
- divisor2);
+ dev_dbg(dev->class_dev, "mode %d, divisor1 %d, divisor2 %d\n",
+ mode, divisor1, divisor2);
outb(divisor2 & 0xff, dev->iobase + PCL816_CTR2);
outb((divisor2 >> 8) & 0xff, dev->iobase + PCL816_CTR2);
outb(divisor1 & 0xff, dev->iobase + PCL816_CTR1);
@@ -823,11 +780,6 @@ check_channel_list(struct comedi_device *dev,
/* first channel is always OK */
chansegment[0] = chanlist[0];
for (i = 1, seglen = 1; i < chanlen; i++, seglen++) {
- /* build part of chanlist */
- DEBUG(printk(KERN_INFO "%d. %d %d\n", i,
- CR_CHAN(chanlist[i]),
- CR_RANGE(chanlist[i]));)
-
/* if we detect a loop, the segment build must finish */
if (chanlist[0] == chanlist[i])
break;
@@ -835,12 +787,10 @@ check_channel_list(struct comedi_device *dev,
(CR_CHAN(chansegment[i - 1]) + 1) % chanlen;
if (nowmustbechan != CR_CHAN(chanlist[i])) {
/* channel list isn't continuous :-( */
- printk(KERN_WARNING
- "comedi%d: pcl816: channel list must "
- "be continuous! chanlist[%i]=%d but "
- "must be %d or %d!\n", dev->minor,
- i, CR_CHAN(chanlist[i]), nowmustbechan,
- CR_CHAN(chanlist[0]));
+ dev_dbg(dev->class_dev,
+ "channel list must be continuous! chanlist[%i]=%d but must be %d or %d!\n",
+ i, CR_CHAN(chanlist[i]), nowmustbechan,
+ CR_CHAN(chanlist[0]));
return 0;
}
/* well, this is next correct channel in list */
@@ -849,22 +799,15 @@ check_channel_list(struct comedi_device *dev,
/* check whole chanlist */
for (i = 0, segpos = 0; i < chanlen; i++) {
- DEBUG(printk("%d %d=%d %d\n",
- CR_CHAN(chansegment[i % seglen]),
- CR_RANGE(chansegment[i % seglen]),
- CR_CHAN(chanlist[i]),
- CR_RANGE(chanlist[i]));)
if (chanlist[i] != chansegment[i % seglen]) {
- printk(KERN_WARNING
- "comedi%d: pcl816: bad channel or range"
- " number! chanlist[%i]=%d,%d,%d and not"
- " %d,%d,%d!\n", dev->minor, i,
- CR_CHAN(chansegment[i]),
- CR_RANGE(chansegment[i]),
- CR_AREF(chansegment[i]),
- CR_CHAN(chanlist[i % seglen]),
- CR_RANGE(chanlist[i % seglen]),
- CR_AREF(chansegment[i % seglen]));
+ dev_dbg(dev->class_dev,
+ "bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n",
+ i, CR_CHAN(chansegment[i]),
+ CR_RANGE(chansegment[i]),
+ CR_AREF(chansegment[i]),
+ CR_CHAN(chanlist[i % seglen]),
+ CR_RANGE(chanlist[i % seglen]),
+ CR_AREF(chansegment[i % seglen]));
return 0; /* chan/gain list is strange */
}
}
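
To make the continuity rule above concrete: the channel numbers must increase consecutively within one segment, and the whole list must be that segment repeated verbatim. A minimal illustration of a chanlist this check accepts versus one it rejects, assuming the usual CR_PACK() helper from comedi.h (range and aref values are arbitrary here):

	/* Accepted: the consecutive segment 1,2,3 repeated twice (seglen = 3). */
	static const unsigned int good_chanlist[] = {
		CR_PACK(1, 0, AREF_DIFF), CR_PACK(2, 0, AREF_DIFF),
		CR_PACK(3, 0, AREF_DIFF), CR_PACK(1, 0, AREF_DIFF),
		CR_PACK(2, 0, AREF_DIFF), CR_PACK(3, 0, AREF_DIFF),
	};

	/* Rejected: 1,2,4 skips channel 3, so check_channel_list() returns 0. */
	static const unsigned int bad_chanlist[] = {
		CR_PACK(1, 0, AREF_DIFF), CR_PACK(2, 0, AREF_DIFF),
		CR_PACK(4, 0, AREF_DIFF), CR_PACK(1, 0, AREF_DIFF),
		CR_PACK(2, 0, AREF_DIFF), CR_PACK(4, 0, AREF_DIFF),
	};
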
@@ -909,7 +852,7 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
const struct pcl816_board *board = comedi_board(dev);
struct pcl816_private *devpriv;
int ret;
- unsigned int irq, dma;
+ unsigned int dma;
unsigned long pages;
/* int i; */
struct comedi_subdevice *s;
@@ -919,7 +862,7 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return ret;
if (pcl816_check(dev->iobase)) {
- printk(KERN_ERR ", I cann't detect board. FAIL!\n");
+ dev_err(dev->class_dev, "I can't detect board. FAIL!\n");
return -EIO;
}
@@ -927,43 +870,20 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!devpriv)
return -ENOMEM;
- /* grab our IRQ */
- irq = 0;
- if (board->IRQbits != 0) { /* board support IRQ */
- irq = it->options[1];
- if (irq) { /* we want to use IRQ */
- if (((1 << irq) & board->IRQbits) == 0) {
- printk
- (", IRQ %u is out of allowed range, "
- "DISABLING IT", irq);
- irq = 0; /* Bad IRQ */
- } else {
- if (request_irq(irq, interrupt_pcl816, 0,
- dev->board_name, dev)) {
- printk
- (", unable to allocate IRQ %u, "
- "DISABLING IT", irq);
- irq = 0; /* Can't use IRQ */
- } else {
- printk(KERN_INFO ", irq=%u", irq);
- }
- }
- }
+ if ((1 << it->options[1]) & board->IRQbits) {
+ ret = request_irq(it->options[1], interrupt_pcl816, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
- dev->irq = irq;
- if (irq) /* 1=we have allocated irq */
- devpriv->irq_free = 1;
- else
- devpriv->irq_free = 0;
-
devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */
devpriv->int816_mode = 0; /* mode of irq */
/* grab our DMA */
dma = 0;
devpriv->dma = dma;
- if (!devpriv->irq_free)
+ if (!dev->irq)
goto no_dma; /* if we haven't IRQ, we can't use DMA */
if (board->DMAbits != 0) { /* board support DMA */
@@ -972,23 +892,24 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
goto no_dma; /* DMA disabled */
if (((1 << dma) & board->DMAbits) == 0) {
- printk(", DMA is out of allowed range, FAIL!\n");
+ dev_err(dev->class_dev,
+ "DMA is out of allowed range, FAIL!\n");
return -EINVAL; /* Bad DMA */
}
ret = request_dma(dma, dev->board_name);
if (ret) {
- printk(KERN_ERR
- ", unable to allocate DMA %u, FAIL!\n", dma);
+ dev_err(dev->class_dev,
+ "unable to allocate DMA %u, FAIL!\n", dma);
return -EBUSY; /* DMA isn't free */
}
devpriv->dma = dma;
- printk(KERN_INFO ", dma=%u", dma);
pages = 2; /* we need 16KB */
devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[0]) {
- printk(", unable to allocate DMA buffer, FAIL!\n");
+ dev_err(dev->class_dev,
+ "unable to allocate DMA buffer, FAIL!\n");
/*
* maybe experiment with try_to_free_pages()
* will help ....
@@ -998,13 +919,11 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->dmapages[0] = pages;
devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE;
- /* printk("%d %d %ld, ",devpriv->dmapages[0],devpriv->hwdmasize[0],PAGE_SIZE); */
devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[1]) {
- printk(KERN_ERR
- ", unable to allocate DMA buffer, "
- "FAIL!\n");
+ dev_err(dev->class_dev,
+ "unable to allocate DMA buffer, FAIL!\n");
return -EBUSY;
}
devpriv->dmapages[1] = pages;
@@ -1029,20 +948,20 @@ no_dma:
s = &dev->subdevices[0];
if (board->n_aichan > 0) {
s->type = COMEDI_SUBD_AI;
- devpriv->sub_ai = s;
- dev->read_subdev = s;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_DIFF;
s->n_chan = board->n_aichan;
- s->subdev_flags |= SDF_DIFF;
- /* printk (", %dchans DIFF DAC - %d", s->n_chan, i); */
s->maxdata = board->ai_maxdata;
- s->len_chanlist = board->ai_chanlist;
s->range_table = board->ai_range_type;
- s->cancel = pcl816_ai_cancel;
- s->do_cmdtest = pcl816_ai_cmdtest;
- s->do_cmd = pcl816_ai_cmd;
- s->poll = pcl816_ai_poll;
s->insn_read = pcl816_ai_insn_read;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = board->ai_chanlist;
+ s->do_cmdtest = pcl816_ai_cmdtest;
+ s->do_cmd = pcl816_ai_cmd;
+ s->poll = pcl816_ai_poll;
+ s->cancel = pcl816_ai_cancel;
+ }
} else {
s->type = COMEDI_SUBD_UNUSED;
}
@@ -1075,8 +994,6 @@ case COMEDI_SUBD_DO:
pcl816_reset(dev);
- printk("\n");
-
return 0;
}
@@ -1085,7 +1002,7 @@ static void pcl816_detach(struct comedi_device *dev)
struct pcl816_private *devpriv = dev->private;
if (dev->private) {
- pcl816_ai_cancel(dev, devpriv->sub_ai);
+ pcl816_ai_cancel(dev, dev->read_subdev);
pcl816_reset(dev);
if (devpriv->dma)
free_dma(devpriv->dma);
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index 9e4d7e8..fa1758a 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -188,56 +188,78 @@ A word or two about DMA. Driver support DMA operations at two ways:
#define MAGIC_DMA_WORD 0x5a5a
-static const struct comedi_lrange range_pcl818h_ai = { 9, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- BIP_RANGE(10),
- }
+static const struct comedi_lrange range_pcl818h_ai = {
+ 9, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25),
+ BIP_RANGE(10)
+ }
};
-static const struct comedi_lrange range_pcl818hg_ai = { 10, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01),
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01),
- }
+static const struct comedi_lrange range_pcl818hg_ai = {
+ 10, {
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.005),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01),
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.01)
+ }
};
-static const struct comedi_lrange range_pcl818l_l_ai = { 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- }
+static const struct comedi_lrange range_pcl818l_l_ai = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
+ }
};
-static const struct comedi_lrange range_pcl818l_h_ai = { 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- }
+static const struct comedi_lrange range_pcl818l_h_ai = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25)
+ }
+};
+
+static const struct comedi_lrange range718_bipolar1 = {
+ 1, {
+ BIP_RANGE(1)
+ }
};
-static const struct comedi_lrange range718_bipolar1 = { 1, {BIP_RANGE(1),} };
static const struct comedi_lrange range718_bipolar0_5 = {
- 1, {BIP_RANGE(0.5),} };
-static const struct comedi_lrange range718_unipolar2 = { 1, {UNI_RANGE(2),} };
-static const struct comedi_lrange range718_unipolar1 = { 1, {BIP_RANGE(1),} };
+ 1, {
+ BIP_RANGE(0.5)
+ }
+};
+
+static const struct comedi_lrange range718_unipolar2 = {
+ 1, {
+ UNI_RANGE(2)
+ }
+};
+
+static const struct comedi_lrange range718_unipolar1 = {
+ 1, {
+ BIP_RANGE(1)
+ }
+};
struct pcl818_board {
@@ -274,7 +296,6 @@ struct pcl818_private {
unsigned char neverending_ai; /* if=1, then we do neverending record (you must use cancel()) */
unsigned int ns_min; /* manimal allowed delay between samples (in us) for actual card */
int i8253_osc_base; /* 1/frequency of on board oscilator in ns */
- int irq_free; /* 1=have allocated IRQ */
int irq_blocked; /* 1=IRQ now uses any subdev */
int irq_was_now_closed; /* when IRQ finish, there's stored int818_mode for last interrupt */
int ai_mode; /* who now uses IRQ - 1=AI1 int, 2=AI1 dma, 3=AI3 int, 4AI3 dma */
@@ -291,7 +312,6 @@ struct pcl818_private {
unsigned int ai_data_len; /* len of data buffer */
unsigned int ai_timer1; /* timers */
unsigned int ai_timer2;
- struct comedi_subdevice *sub_ai; /* ptr to AI subdevice */
unsigned char usefifo; /* 1=use fifo */
unsigned int ao_readback[2];
};
@@ -441,7 +461,7 @@ static irqreturn_t interrupt_pcl818_ai_mode13_int(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcl818_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned char low;
int timeout = 50; /* wait max 50us */
@@ -463,10 +483,10 @@ conv_finish:
outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
if ((low & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */
- printk
- ("comedi: A/D mode1/3 IRQ - channel dropout %x!=%x !\n",
- (low & 0xf),
- devpriv->act_chanlist[devpriv->act_chanlist_pos]);
+ dev_dbg(dev->class_dev,
+ "A/D mode1/3 IRQ - channel dropout %x!=%x !\n",
+ (low & 0xf),
+ devpriv->act_chanlist[devpriv->act_chanlist_pos]);
pcl818_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
@@ -478,7 +498,6 @@ conv_finish:
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
- /* printk("E"); */
s->async->cur_chan = 0;
devpriv->ai_act_scan--;
}
@@ -501,7 +520,7 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcl818_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
int i, len, bufptr;
unsigned long flags;
unsigned short *ptr;
@@ -523,7 +542,6 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
release_dma_lock(flags);
enable_dma(devpriv->dma);
}
- printk("comedi: A/D mode1/3 IRQ \n");
devpriv->dma_runs_to_end--;
outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
@@ -534,11 +552,11 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
for (i = 0; i < len; i++) {
if ((ptr[bufptr] & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */
- printk
- ("comedi: A/D mode1/3 DMA - channel dropout %d(card)!=%d(chanlist) at %d !\n",
- (ptr[bufptr] & 0xf),
- devpriv->act_chanlist[devpriv->act_chanlist_pos],
- devpriv->act_chanlist_pos);
+ dev_dbg(dev->class_dev,
+ "A/D mode1/3 DMA - channel dropout %d(card)!=%d(chanlist) at %d !\n",
+ (ptr[bufptr] & 0xf),
+ devpriv->act_chanlist[devpriv->act_chanlist_pos],
+ devpriv->act_chanlist_pos);
pcl818_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
@@ -562,7 +580,6 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
pcl818_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
comedi_event(dev, s);
- /* printk("done int ai13 dma\n"); */
return IRQ_HANDLED;
}
}
@@ -580,7 +597,7 @@ static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcl818_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
int i, len;
unsigned char lo;
@@ -612,10 +629,10 @@ static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d)
for (i = 0; i < len; i++) {
lo = inb(dev->iobase + PCL818_FI_DATALO);
if ((lo & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */
- printk
- ("comedi: A/D mode1/3 FIFO - channel dropout %d!=%d !\n",
- (lo & 0xf),
- devpriv->act_chanlist[devpriv->act_chanlist_pos]);
+ dev_dbg(dev->class_dev,
+ "A/D mode1/3 FIFO - channel dropout %d!=%d !\n",
+ (lo & 0xf),
+ devpriv->act_chanlist[devpriv->act_chanlist_pos]);
pcl818_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
@@ -661,7 +678,6 @@ static irqreturn_t interrupt_pcl818(int irq, void *d)
comedi_error(dev, "premature interrupt");
return IRQ_HANDLED;
}
- /* printk("I\n"); */
if (devpriv->irq_blocked && devpriv->irq_was_now_closed) {
if ((devpriv->neverending_ai || (!devpriv->neverending_ai &&
@@ -673,10 +689,9 @@ static irqreturn_t interrupt_pcl818(int irq, void *d)
being reprogrammed while a DMA transfer is in
progress.
*/
- struct comedi_subdevice *s = &dev->subdevices[0];
devpriv->ai_act_scan = 0;
devpriv->neverending_ai = 0;
- pcl818_ai_cancel(dev, s);
+ pcl818_ai_cancel(dev, dev->read_subdev);
}
outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
@@ -705,8 +720,7 @@ static irqreturn_t interrupt_pcl818(int irq, void *d)
outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
- if ((!dev->irq) || (!devpriv->irq_free) || (!devpriv->irq_blocked)
- || (!devpriv->ai_mode)) {
+ if (!devpriv->irq_blocked || !devpriv->ai_mode) {
comedi_error(dev, "bad IRQ!");
return IRQ_NONE;
}
@@ -726,7 +740,6 @@ static void pcl818_ai_mode13dma_int(int mode, struct comedi_device *dev,
unsigned int flags;
unsigned int bytes;
- printk("mode13dma_int, mode: %d\n", mode);
disable_dma(devpriv->dma); /* disable dma */
bytes = devpriv->hwdmasize[0];
if (!devpriv->neverending_ai) {
@@ -753,7 +766,7 @@ static void pcl818_ai_mode13dma_int(int mode, struct comedi_device *dev,
} else {
devpriv->ai_mode = INT_TYPE_AI3_DMA;
outb(0x86 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); /* Ext trig+IRQ+DMA */
- };
+ }
}
/*
@@ -768,12 +781,6 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
int divisor1 = 0, divisor2 = 0;
unsigned int seglen;
- dev_dbg(dev->class_dev, "pcl818_ai_cmd_mode()\n");
- if (!dev->irq) {
- comedi_error(dev, "IRQ not defined!");
- return -EINVAL;
- }
-
if (devpriv->irq_blocked)
return -EBUSY;
@@ -824,7 +831,6 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
case 0:
if (!devpriv->usefifo) {
/* IRQ */
- /* printk("IRQ\n"); */
if (mode == 1) {
devpriv->ai_mode = INT_TYPE_AI1_INT;
/* Pacer+IRQ */
@@ -853,7 +859,6 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
start_pacer(dev, mode, divisor1, divisor2);
- dev_dbg(dev->class_dev, "pcl818_ai_cmd_mode() end\n");
return 0;
}
@@ -899,10 +904,6 @@ static int check_channel_list(struct comedi_device *dev,
chansegment[0] = chanlist[0];
/* build part of chanlist */
for (i = 1, seglen = 1; i < n_chan; i++, seglen++) {
-
- /* printk("%d. %d * %d\n",i,
- * CR_CHAN(it->chanlist[i]),CR_RANGE(it->chanlist[i]));*/
-
/* we detect loop, this must by finish */
if (chanlist[0] == chanlist[i])
@@ -910,10 +911,10 @@ static int check_channel_list(struct comedi_device *dev,
nowmustbechan =
(CR_CHAN(chansegment[i - 1]) + 1) % s->n_chan;
if (nowmustbechan != CR_CHAN(chanlist[i])) { /* channel list isn't continuous :-( */
- printk
- ("comedi%d: pcl818: channel list must be continuous! chanlist[%i]=%d but must be %d or %d!\n",
- dev->minor, i, CR_CHAN(chanlist[i]),
- nowmustbechan, CR_CHAN(chanlist[0]));
+ dev_dbg(dev->class_dev,
+ "channel list must be continuous! chanlist[%i]=%d but must be %d or %d!\n",
+ i, CR_CHAN(chanlist[i]), nowmustbechan,
+ CR_CHAN(chanlist[0]));
return 0;
}
/* well, this is next correct channel in list */
@@ -922,23 +923,21 @@ static int check_channel_list(struct comedi_device *dev,
/* check whole chanlist */
for (i = 0, segpos = 0; i < n_chan; i++) {
- /* printk("%d %d=%d %d\n",CR_CHAN(chansegment[i%seglen]),CR_RANGE(chansegment[i%seglen]),CR_CHAN(it->chanlist[i]),CR_RANGE(it->chanlist[i])); */
if (chanlist[i] != chansegment[i % seglen]) {
- printk
- ("comedi%d: pcl818: bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n",
- dev->minor, i, CR_CHAN(chansegment[i]),
- CR_RANGE(chansegment[i]),
- CR_AREF(chansegment[i]),
- CR_CHAN(chanlist[i % seglen]),
- CR_RANGE(chanlist[i % seglen]),
- CR_AREF(chansegment[i % seglen]));
+ dev_dbg(dev->class_dev,
+ "bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n",
+ i, CR_CHAN(chansegment[i]),
+ CR_RANGE(chansegment[i]),
+ CR_AREF(chansegment[i]),
+ CR_CHAN(chanlist[i % seglen]),
+ CR_RANGE(chanlist[i % seglen]),
+ CR_AREF(chansegment[i % seglen]));
return 0; /* chan/gain list is strange */
}
}
} else {
seglen = 1;
}
- printk("check_channel_list: seglen %d\n", seglen);
return seglen;
}
@@ -1067,7 +1066,6 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
struct comedi_cmd *cmd = &s->async->cmd;
int retval;
- dev_dbg(dev->class_dev, "pcl818_ai_cmd()\n");
devpriv->ai_n_chan = cmd->chanlist_len;
devpriv->ai_chanlist = cmd->chanlist;
devpriv->ai_flags = cmd->flags;
@@ -1084,7 +1082,6 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (cmd->convert_src == TRIG_TIMER) { /* mode 1 */
devpriv->ai_timer1 = cmd->convert_arg;
retval = pcl818_ai_cmd_mode(1, dev, s);
- dev_dbg(dev->class_dev, "pcl818_ai_cmd() end\n");
return retval;
}
if (cmd->convert_src == TRIG_EXT) { /* mode 3 */
@@ -1105,7 +1102,6 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
struct pcl818_private *devpriv = dev->private;
if (devpriv->irq_blocked > 0) {
- dev_dbg(dev->class_dev, "pcl818_ai_cancel()\n");
devpriv->irq_was_now_closed = 1;
switch (devpriv->ai_mode) {
@@ -1149,7 +1145,6 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
}
end:
- dev_dbg(dev->class_dev, "pcl818_ai_cancel() end\n");
return 0;
}
@@ -1216,7 +1211,6 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
const struct pcl818_board *board = comedi_board(dev);
struct pcl818_private *devpriv;
int ret;
- unsigned int irq;
int dma;
unsigned long pages;
struct comedi_subdevice *s;
@@ -1240,50 +1234,28 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -EIO;
}
- /* grab our IRQ */
- irq = 0;
- if (board->IRQbits != 0) { /* board support IRQ */
- irq = it->options[1];
- if (irq) { /* we want to use IRQ */
- if (((1 << irq) & board->IRQbits) == 0) {
- printk
- (", IRQ %u is out of allowed range, DISABLING IT",
- irq);
- irq = 0; /* Bad IRQ */
- } else {
- if (request_irq(irq, interrupt_pcl818, 0,
- dev->board_name, dev)) {
- printk
- (", unable to allocate IRQ %u, DISABLING IT",
- irq);
- irq = 0; /* Can't use IRQ */
- } else {
- printk(KERN_DEBUG "irq=%u", irq);
- }
- }
- }
+ if ((1 << it->options[1]) & board->IRQbits) {
+ ret = request_irq(it->options[1], interrupt_pcl818, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
- dev->irq = irq;
- if (irq)
- devpriv->irq_free = 1; /* 1=we have allocated irq */
- else
- devpriv->irq_free = 0;
-
devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */
devpriv->ai_mode = 0; /* mode of irq */
/* grab our DMA */
dma = 0;
devpriv->dma = dma;
- if (!devpriv->irq_free)
+ if (!dev->irq)
goto no_dma; /* if we haven't IRQ, we can't use DMA */
if (board->DMAbits != 0) { /* board support DMA */
dma = it->options[2];
if (dma < 1)
goto no_dma; /* DMA disabled */
if (((1 << dma) & board->DMAbits) == 0) {
- printk(KERN_ERR "DMA is out of allowed range, FAIL!\n");
+ dev_err(dev->class_dev,
+ "DMA is out of allowed range, FAIL!\n");
return -EINVAL; /* Bad DMA */
}
ret = request_dma(dma, dev->board_name);
@@ -1298,7 +1270,6 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->dmapages[0] = pages;
devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE;
- /* printk("%d %d %ld, ",devpriv->dmapages[0],devpriv->hwdmasize[0],PAGE_SIZE); */
devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[1])
return -EBUSY;
@@ -1318,27 +1289,24 @@ no_dma:
s->type = COMEDI_SUBD_UNUSED;
} else {
s->type = COMEDI_SUBD_AI;
- devpriv->sub_ai = s;
s->subdev_flags = SDF_READABLE;
if (check_single_ended(dev->iobase)) {
s->n_chan = board->n_aichan_se;
s->subdev_flags |= SDF_COMMON | SDF_GROUND;
- printk(", %dchans S.E. DAC", s->n_chan);
} else {
s->n_chan = board->n_aichan_diff;
s->subdev_flags |= SDF_DIFF;
- printk(", %dchans DIFF DAC", s->n_chan);
}
s->maxdata = board->ai_maxdata;
- s->len_chanlist = s->n_chan;
s->range_table = board->ai_range_type;
- s->cancel = pcl818_ai_cancel;
s->insn_read = pcl818_ai_insn_read;
- if (irq) {
+ if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
s->do_cmdtest = ai_cmdtest;
s->do_cmd = ai_cmd;
+ s->cancel = pcl818_ai_cancel;
}
if (board->is_818) {
if ((it->options[4] == 1) || (it->options[4] == 10))
@@ -1387,7 +1355,6 @@ no_dma:
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = board->n_aochan;
s->maxdata = board->ao_maxdata;
- s->len_chanlist = board->n_aochan;
s->range_table = board->ao_range_type;
s->insn_read = pcl818_ao_insn_read;
s->insn_write = pcl818_ao_insn_write;
@@ -1412,7 +1379,6 @@ no_dma:
s->subdev_flags = SDF_READABLE;
s->n_chan = board->n_dichan;
s->maxdata = 1;
- s->len_chanlist = board->n_dichan;
s->range_table = &range_digital;
s->insn_bits = pcl818_di_insn_bits;
}
@@ -1425,7 +1391,6 @@ no_dma:
s->subdev_flags = SDF_WRITABLE;
s->n_chan = board->n_dochan;
s->maxdata = 1;
- s->len_chanlist = board->n_dochan;
s->range_table = &range_digital;
s->insn_bits = pcl818_do_insn_bits;
}
@@ -1446,8 +1411,6 @@ no_dma:
pcl818_reset(dev);
- printk("\n");
-
return 0;
}
@@ -1456,7 +1419,7 @@ static void pcl818_detach(struct comedi_device *dev)
struct pcl818_private *devpriv = dev->private;
if (devpriv) {
- pcl818_ai_cancel(dev, devpriv->sub_ai);
+ pcl818_ai_cancel(dev, dev->read_subdev);
pcl818_reset(dev);
if (devpriv->dma)
free_dma(devpriv->dma);
diff --git a/drivers/staging/comedi/drivers/pcm3724.c b/drivers/staging/comedi/drivers/pcm3724.c
index cc1dc7f..f4a49bd 100644
--- a/drivers/staging/comedi/drivers/pcm3724.c
+++ b/drivers/staging/comedi/drivers/pcm3724.c
@@ -70,14 +70,11 @@ static int subdev_8255_cb(int dir, int port, int data, unsigned long arg)
{
unsigned long iobase = arg;
unsigned char inbres;
- /* printk("8255cb %d %d %d %lx\n", dir,port,data,arg); */
if (dir) {
- /* printk("8255 cb outb(%x, %lx)\n", data, iobase+port); */
outb(data, iobase + port);
return 0;
} else {
inbres = inb(iobase + port);
- /* printk("8255 cb inb(%lx) = %x\n", iobase+port, inbres); */
return inbres;
}
}
@@ -137,8 +134,6 @@ static void do_3724_config(struct comedi_device *dev,
port_8255_cfg = dev->iobase + SIZE_8255 + _8255_CR;
outb(buffer_config, dev->iobase + 8); /* update buffer register */
- /* printk("pcm3724 buffer_config (%lx) %d, %x\n",
- dev->iobase + _8255_CR, chanspec, buffer_config); */
outb(config, port_8255_cfg);
}
@@ -177,7 +172,6 @@ static void enable_chan(struct comedi_device *dev, struct comedi_subdevice *s,
if (priv->dio_2 & 0xff)
gatecfg |= GATE_A1;
- /* printk("gate control %x\n", gatecfg); */
outb(gatecfg, dev->iobase + 9);
}
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index 14cee3a..c388f7f 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -1,76 +1,76 @@
/*
- comedi/drivers/pcmmio.c
- Driver for Winsystems PC-104 based multifunction IO board.
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2007 Calin A. Culianu <calin@ajvar.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * pcmmio.c
+ * Driver for Winsystems PC-104 based multifunction IO board.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2007 Calin A. Culianu <calin@ajvar.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
/*
-Driver: pcmmio
-Description: A driver for the PCM-MIO multifunction board
-Devices: [Winsystems] PCM-MIO (pcmmio)
-Author: Calin Culianu <calin@ajvar.org>
-Updated: Wed, May 16 2007 16:21:10 -0500
-Status: works
-
-A driver for the relatively new PCM-MIO multifunction board from
-Winsystems. This board is a PC-104 based I/O board. It contains
-four subdevices:
- subdevice 0 - 16 channels of 16-bit AI
- subdevice 1 - 8 channels of 16-bit AO
- subdevice 2 - first 24 channels of the 48 channel of DIO
- (with edge-triggered interrupt support)
- subdevice 3 - last 24 channels of the 48 channel DIO
- (no interrupt support for this bank of channels)
-
- Some notes:
-
- Synchronous reads and writes are the only things implemented for AI and AO,
- even though the hardware itself can do streaming acquisition, etc. Anyone
- want to add asynchronous I/O for AI/AO as a feature? Be my guest...
-
- Asynchronous I/O for the DIO subdevices *is* implemented, however! They are
- basically edge-triggered interrupts for any configuration of the first
- 24 DIO-lines.
-
- Also note that this interrupt support is untested.
-
- A few words about edge-detection IRQ support (commands on DIO):
-
- * To use edge-detection IRQ support for the DIO subdevice, pass the IRQ
- of the board to the comedi_config command. The board IRQ is not jumpered
- but rather configured through software, so any IRQ from 1-15 is OK.
-
- * Due to the genericity of the comedi API, you need to create a special
- comedi_command in order to use edge-triggered interrupts for DIO.
-
- * Use comedi_commands with TRIG_NOW. Your callback will be called each
- time an edge is detected on the specified DIO line(s), and the data
- values will be two sample_t's, which should be concatenated to form
- one 32-bit unsigned int. This value is the mask of channels that had
- edges detected from your channel list. Note that the bits positions
- in the mask correspond to positions in your chanlist when you
- specified the command and *not* channel id's!
-
- * To set the polarity of the edge-detection interrupts pass a nonzero value
- for either CR_RANGE or CR_AREF for edge-up polarity, or a zero
- value for both CR_RANGE and CR_AREF if you want edge-down polarity.
-
-Configuration Options:
- [0] - I/O port base address
- [1] - IRQ (optional -- for edge-detect interrupt support only,
- leave out if you don't need this feature)
-*/
+ * Driver: pcmmio
+ * Description: A driver for the PCM-MIO multifunction board
+ * Devices: (Winsystems) PCM-MIO [pcmmio]
+ * Author: Calin Culianu <calin@ajvar.org>
+ * Updated: Wed, May 16 2007 16:21:10 -0500
+ * Status: works
+ *
+ * A driver for the PCM-MIO multifunction board from Winsystems. This
+ * is a PC-104 based I/O board. It contains four subdevices:
+ *
+ * subdevice 0 - 16 channels of 16-bit AI
+ * subdevice 1 - 8 channels of 16-bit AO
+ * subdevice 2 - first 24 channels of the 48 channel DIO
+ * (with edge-triggered interrupt support)
+ * subdevice 3 - last 24 channels of the 48 channel DIO
+ * (no interrupt support for this bank of channels)
+ *
+ * Some notes:
+ *
+ * Synchronous reads and writes are the only things implemented for analog
+ * input and output. The hardware itself can do streaming acquisition, etc.
+ *
+ * Asynchronous I/O for the DIO subdevices *is* implemented, however! They
+ * are basically edge-triggered interrupts for any configuration of the
+ * channels in subdevice 2.
+ *
+ * Also note that this interrupt support is untested.
+ *
+ * A few words about edge-detection IRQ support (commands on DIO):
+ *
+ * To use edge-detection IRQ support for the DIO subdevice, pass the IRQ
+ * of the board to the comedi_config command. The board IRQ is not jumpered
+ * but rather configured through software, so any IRQ from 1-15 is OK.
+ *
+ * Due to the genericity of the comedi API, you need to create a special
+ * comedi_command in order to use edge-triggered interrupts for DIO.
+ *
+ * Use comedi_commands with TRIG_NOW. Your callback will be called each
+ * time an edge is detected on the specified DIO line(s), and the data
+ * values will be two sample_t's, which should be concatenated to form
+ * one 32-bit unsigned int. This value is the mask of channels that had
+ * edges detected from your channel list. Note that the bit positions
+ * in the mask correspond to positions in your chanlist when you
+ * specified the command and *not* channel id's!
+ *
+ * To set the polarity of the edge-detection interrupts pass a nonzero value
+ * for either CR_RANGE or CR_AREF for edge-up polarity, or a zero
+ * value for both CR_RANGE and CR_AREF if you want edge-down polarity.
+ *
+ * Configuration Options:
+ * [0] - I/O port base address
+ * [1] - IRQ (optional -- for edge-detect interrupt support only,
+ * leave out if you don't need this feature)
+ */
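
To make the edge-detection command usage described above concrete, here is a minimal user-space sketch built on comedilib. The device node, subdevice number and trigger sources are assumptions for illustration; pcmmio_cmdtest() remains the authority on what is actually accepted, and each detected edge then delivers a scan of two 16-bit samples that form the 32-bit channel mask described above.

	#include <stdio.h>
	#include <comedilib.h>

	int main(void)
	{
		comedi_t *it = comedi_open("/dev/comedi0");	/* assumed node */
		unsigned int chanlist[2];
		comedi_cmd cmd = { 0 };

		if (!it)
			return 1;

		/* watch DIO lines 0 and 1; a nonzero range requests edge-up polarity */
		chanlist[0] = CR_PACK(0, 1, 0);
		chanlist[1] = CR_PACK(1, 1, 0);

		cmd.subdev = 2;			/* first DIO subdevice (assumed) */
		cmd.start_src = TRIG_NOW;
		cmd.scan_begin_src = TRIG_EXT;	/* scans are driven by the edges */
		cmd.convert_src = TRIG_NOW;
		cmd.scan_end_src = TRIG_COUNT;
		cmd.scan_end_arg = 2;
		cmd.stop_src = TRIG_NONE;	/* run until cancelled */
		cmd.chanlist = chanlist;
		cmd.chanlist_len = 2;

		if (comedi_command_test(it, &cmd) != 0 || comedi_command(it, &cmd) < 0)
			fprintf(stderr, "command refused: %s\n",
				comedi_strerror(comedi_errno()));

		comedi_close(it);
		return 0;
	}
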
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -80,232 +80,211 @@ Configuration Options:
#include "comedi_fc.h"
-/* This stuff is all from pcmuio.c -- it refers to the DIO subdevices only */
-#define CHANS_PER_PORT 8
-#define PORTS_PER_ASIC 6
-#define INTR_PORTS_PER_ASIC 3
-#define MAX_CHANS_PER_SUBDEV 24 /* number of channels per comedi subdevice */
-#define PORTS_PER_SUBDEV (MAX_CHANS_PER_SUBDEV/CHANS_PER_PORT)
-#define CHANS_PER_ASIC (CHANS_PER_PORT*PORTS_PER_ASIC)
-#define INTR_CHANS_PER_ASIC 24
-#define INTR_PORTS_PER_SUBDEV (INTR_CHANS_PER_ASIC/CHANS_PER_PORT)
-#define MAX_DIO_CHANS (PORTS_PER_ASIC*1*CHANS_PER_PORT)
-#define MAX_ASICS (MAX_DIO_CHANS/CHANS_PER_ASIC)
-#define CALC_N_DIO_SUBDEVS(nchans) ((nchans)/MAX_CHANS_PER_SUBDEV + (!!((nchans)%MAX_CHANS_PER_SUBDEV)) /*+ (nchans > INTR_CHANS_PER_ASIC ? 2 : 1)*/)
-/* IO Memory sizes */
-#define ASIC_IOSIZE (0x0B)
-#define PCMMIO48_IOSIZE ASIC_IOSIZE
-
-/* Some offsets - these are all in the 16byte IO memory offset from
- the base address. Note that there is a paging scheme to swap out
- offsets 0x8-0xA using the PAGELOCK register. See the table below.
-
- Register(s) Pages R/W? Description
- --------------------------------------------------------------
- REG_PORTx All R/W Read/Write/Configure IO
- REG_INT_PENDING All ReadOnly Quickly see which INT_IDx has int.
- REG_PAGELOCK All WriteOnly Select a page
- REG_POLx Pg. 1 only WriteOnly Select edge-detection polarity
- REG_ENABx Pg. 2 only WriteOnly Enable/Disable edge-detect. int.
- REG_INT_IDx Pg. 3 only R/W See which ports/bits have ints.
+/*
+ * Register I/O map
*/
-#define REG_PORT0 0x0
-#define REG_PORT1 0x1
-#define REG_PORT2 0x2
-#define REG_PORT3 0x3
-#define REG_PORT4 0x4
-#define REG_PORT5 0x5
-#define REG_INT_PENDING 0x6
-#define REG_PAGELOCK 0x7 /*
- * page selector register, upper 2 bits select
- * a page and bits 0-5 are used to 'lock down'
- * a particular port above to make it readonly.
- */
-#define REG_POL0 0x8
-#define REG_POL1 0x9
-#define REG_POL2 0xA
-#define REG_ENAB0 0x8
-#define REG_ENAB1 0x9
-#define REG_ENAB2 0xA
-#define REG_INT_ID0 0x8
-#define REG_INT_ID1 0x9
-#define REG_INT_ID2 0xA
-
-#define NUM_PAGED_REGS 3
-#define NUM_PAGES 4
-#define FIRST_PAGED_REG 0x8
-#define REG_PAGE_BITOFFSET 6
-#define REG_LOCK_BITOFFSET 0
-#define REG_PAGE_MASK (~((0x1<<REG_PAGE_BITOFFSET)-1))
-#define REG_LOCK_MASK (~(REG_PAGE_MASK))
-#define PAGE_POL 1
-#define PAGE_ENAB 2
-#define PAGE_INT_ID 3
-
-static const struct comedi_lrange ranges_ai = {
- 4, {RANGE(-5., 5.), RANGE(-10., 10.), RANGE(0., 5.), RANGE(0., 10.)}
-};
+#define PCMMIO_AI_LSB_REG 0x00
+#define PCMMIO_AI_MSB_REG 0x01
+#define PCMMIO_AI_CMD_REG 0x02
+#define PCMMIO_AI_CMD_SE (1 << 7)
+#define PCMMIO_AI_CMD_ODD_CHAN (1 << 6)
+#define PCMMIO_AI_CMD_CHAN_SEL(x) (((x) & 0x3) << 4)
+#define PCMMIO_AI_CMD_RANGE(x) (((x) & 0x3) << 2)
+#define PCMMIO_RESOURCE_REG 0x02
+#define PCMMIO_RESOURCE_IRQ(x) (((x) & 0xf) << 0)
+#define PCMMIO_AI_STATUS_REG 0x03
+#define PCMMIO_AI_STATUS_DATA_READY (1 << 7)
+#define PCMMIO_AI_STATUS_DATA_DMA_PEND (1 << 6)
+#define PCMMIO_AI_STATUS_CMD_DMA_PEND (1 << 5)
+#define PCMMIO_AI_STATUS_IRQ_PEND (1 << 4)
+#define PCMMIO_AI_STATUS_DATA_DRQ_ENA (1 << 2)
+#define PCMMIO_AI_STATUS_REG_SEL (1 << 3)
+#define PCMMIO_AI_STATUS_CMD_DRQ_ENA (1 << 1)
+#define PCMMIO_AI_STATUS_IRQ_ENA (1 << 0)
+#define PCMMIO_AI_RES_ENA_REG 0x03
+#define PCMMIO_AI_RES_ENA_CMD_REG_ACCESS (0 << 3)
+#define PCMMIO_AI_RES_ENA_AI_RES_ACCESS (1 << 3)
+#define PCMMIO_AI_RES_ENA_DIO_RES_ACCESS (1 << 4)
+#define PCMMIO_AI_2ND_ADC_OFFSET 0x04
+
+#define PCMMIO_AO_LSB_REG 0x08
+#define PCMMIO_AO_LSB_SPAN(x) (((x) & 0xf) << 0)
+#define PCMMIO_AO_MSB_REG 0x09
+#define PCMMIO_AO_CMD_REG 0x0a
+#define PCMMIO_AO_CMD_WR_SPAN (0x2 << 4)
+#define PCMMIO_AO_CMD_WR_CODE (0x3 << 4)
+#define PCMMIO_AO_CMD_UPDATE (0x4 << 4)
+#define PCMMIO_AO_CMD_UPDATE_ALL (0x5 << 4)
+#define PCMMIO_AO_CMD_WR_SPAN_UPDATE (0x6 << 4)
+#define PCMMIO_AO_CMD_WR_CODE_UPDATE (0x7 << 4)
+#define PCMMIO_AO_CMD_WR_SPAN_UPDATE_ALL (0x8 << 4)
+#define PCMMIO_AO_CMD_WR_CODE_UPDATE_ALL (0x9 << 4)
+#define PCMMIO_AO_CMD_RD_B1_SPAN (0xa << 4)
+#define PCMMIO_AO_CMD_RD_B1_CODE (0xb << 4)
+#define PCMMIO_AO_CMD_RD_B2_SPAN (0xc << 4)
+#define PCMMIO_AO_CMD_RD_B2_CODE (0xd << 4)
+#define PCMMIO_AO_CMD_NOP (0xf << 4)
+#define PCMMIO_AO_CMD_CHAN_SEL(x) (((x) & 0x03) << 1)
+#define PCMMIO_AO_CMD_CHAN_SEL_ALL (0x0f << 0)
+#define PCMMIO_AO_STATUS_REG 0x0b
+#define PCMMIO_AO_STATUS_DATA_READY (1 << 7)
+#define PCMMIO_AO_STATUS_DATA_DMA_PEND (1 << 6)
+#define PCMMIO_AO_STATUS_CMD_DMA_PEND (1 << 5)
+#define PCMMIO_AO_STATUS_IRQ_PEND (1 << 4)
+#define PCMMIO_AO_STATUS_DATA_DRQ_ENA (1 << 2)
+#define PCMMIO_AO_STATUS_REG_SEL (1 << 3)
+#define PCMMIO_AO_STATUS_CMD_DRQ_ENA (1 << 1)
+#define PCMMIO_AO_STATUS_IRQ_ENA (1 << 0)
+#define PCMMIO_AO_RESOURCE_ENA_REG 0x0b
+#define PCMMIO_AO_2ND_DAC_OFFSET 0x04
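
As a sketch of how these ADC command bits are meant to be combined (an illustration based on the macro definitions above, not code quoted from this patch; channels 8-15 are assumed to live on the second converter at PCMMIO_AI_2ND_ADC_OFFSET):

	static void pcmmio_ai_start_conv_sketch(unsigned long iobase,
						unsigned int chan, unsigned int range)
	{
		unsigned char cmd = PCMMIO_AI_CMD_SE |		/* single-ended */
				    PCMMIO_AI_CMD_CHAN_SEL((chan % 8) >> 1) |
				    PCMMIO_AI_CMD_RANGE(range);

		if (chan % 2)
			cmd |= PCMMIO_AI_CMD_ODD_CHAN;
		if (chan > 7)
			iobase += PCMMIO_AI_2ND_ADC_OFFSET;

		/* shift the command in; the previous conversion's data shifts out */
		outb(cmd, iobase + PCMMIO_AI_CMD_REG);
		/* poll PCMMIO_AI_STATUS_DATA_READY, then read the LSB/MSB registers */
	}
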
-static const struct comedi_lrange ranges_ao = {
- 6, {RANGE(0., 5.), RANGE(0., 10.), RANGE(-5., 5.), RANGE(-10., 10.),
- RANGE(-2.5, 2.5), RANGE(-2.5, 7.5)}
+/*
+ * WinSystems WS16C48
+ *
+ * Offset Page 0 Page 1 Page 2 Page 3
+ * ------ ----------- ----------- ----------- -----------
+ * 0x10 Port 0 I/O Port 0 I/O Port 0 I/O Port 0 I/O
+ * 0x11 Port 1 I/O Port 1 I/O Port 1 I/O Port 1 I/O
+ * 0x12 Port 2 I/O Port 2 I/O Port 2 I/O Port 2 I/O
+ * 0x13 Port 3 I/O Port 3 I/O Port 3 I/O Port 3 I/O
+ * 0x14 Port 4 I/O Port 4 I/O Port 4 I/O Port 4 I/O
+ * 0x15 Port 5 I/O Port 5 I/O Port 5 I/O Port 5 I/O
+ * 0x16 INT_PENDING INT_PENDING INT_PENDING INT_PENDING
+ * 0x17 Page/Lock Page/Lock Page/Lock Page/Lock
+ * 0x18 N/A POL_0 ENAB_0 INT_ID0
+ * 0x19 N/A POL_1 ENAB_1 INT_ID1
+ * 0x1a N/A POL_2 ENAB_2 INT_ID2
+ */
+#define PCMMIO_PORT_REG(x) (0x10 + (x))
+#define PCMMIO_INT_PENDING_REG 0x16
+#define PCMMIO_PAGE_LOCK_REG 0x17
+#define PCMMIO_LOCK_PORT(x) ((1 << (x)) & 0x3f)
+#define PCMMIO_PAGE(x) (((x) & 0x3) << 6)
+#define PCMMIO_PAGE_MASK PCMMIO_PAGE(3)
+#define PCMMIO_PAGE_POL 1
+#define PCMMIO_PAGE_ENAB 2
+#define PCMMIO_PAGE_INT_ID 3
+#define PCMMIO_PAGE_REG(x) (0x18 + (x))
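
For clarity, the byte written to PCMMIO_PAGE_LOCK_REG carries the page number in bits 7-6 and per-port write-lock bits in bits 5-0. A sketch of selecting the ENAB page while write-protecting port 0 (illustrative only, assuming the driver's usual includes):

	static void pcmmio_select_enab_page_sketch(unsigned long iobase)
	{
		unsigned char pagelock = PCMMIO_PAGE(PCMMIO_PAGE_ENAB) |
					 PCMMIO_LOCK_PORT(0);

		outb(pagelock, iobase + PCMMIO_PAGE_LOCK_REG);
		/* ENAB_0..ENAB_2 are now accessible at PCMMIO_PAGE_REG(0..2) */
	}
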
+
+static const struct comedi_lrange pcmmio_ai_ranges = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(10)
+ }
};
-/* this structure is for data unique to this subdevice. */
-struct pcmmio_subdev_private {
-
- union {
- /* for DIO: mapping of halfwords (bytes)
- in port/chanarray to iobase */
- unsigned long iobases[PORTS_PER_SUBDEV];
-
- /* for AI/AO */
- unsigned long iobase;
- };
- union {
- struct {
-
- /* The below is only used for intr subdevices */
- struct {
- /*
- * if non-negative, this subdev has an
- * interrupt asic
- */
- int asic;
- /*
- * if nonnegative, the first channel id for
- * interrupts.
- */
- int first_chan;
- /*
- * the number of asic channels in this subdev
- * that have interrutps
- */
- int num_asic_chans;
- /*
- * if nonnegative, the first channel id with
- * respect to the asic that has interrupts
- */
- int asic_chan;
- /*
- * subdev-relative channel mask for channels
- * we are interested in
- */
- int enabled_mask;
- int active;
- int stop_count;
- int continuous;
- spinlock_t spinlock;
- } intr;
- } dio;
- struct {
- /* the last unsigned int data written */
- unsigned int shadow_samples[8];
- } ao;
- };
+static const struct comedi_lrange pcmmio_ao_ranges = {
+ 6, {
+ UNI_RANGE(5),
+ UNI_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(10),
+ BIP_RANGE(2.5),
+ RANGE(-2.5, 7.5)
+ }
};
-/*
- * this structure is for data unique to this hardware driver. If
- * several hardware drivers keep similar information in this structure,
- * feel free to suggest moving the variable to the struct comedi_device struct.
- */
struct pcmmio_private {
- /* stuff for DIO */
- struct {
- unsigned char pagelock; /* current page and lock */
- /* shadow of POLx registers */
- unsigned char pol[NUM_PAGED_REGS];
- /* shadow of ENABx registers */
- unsigned char enab[NUM_PAGED_REGS];
- int num;
- unsigned long iobase;
- unsigned int irq;
- spinlock_t spinlock;
- } asics[MAX_ASICS];
- struct pcmmio_subdev_private *sprivs;
+ spinlock_t pagelock; /* protects the page registers */
+ spinlock_t spinlock; /* protects the member variables */
+ unsigned int enabled_mask;
+ unsigned int stop_count;
+ unsigned int active:1;
+ unsigned int continuous:1;
+
+ unsigned int ao_readback[8];
};
-#define subpriv ((struct pcmmio_subdev_private *)s->private)
+static void pcmmio_dio_write(struct comedi_device *dev, unsigned int val,
+ int page, int port)
+{
+ struct pcmmio_private *devpriv = dev->private;
+ unsigned long iobase = dev->iobase;
+ unsigned long flags;
+
+ spin_lock_irqsave(&devpriv->pagelock, flags);
+ if (page == 0) {
+ /* Port registers are valid for any page */
+ outb(val & 0xff, iobase + PCMMIO_PORT_REG(port + 0));
+ outb((val >> 8) & 0xff, iobase + PCMMIO_PORT_REG(port + 1));
+ outb((val >> 16) & 0xff, iobase + PCMMIO_PORT_REG(port + 2));
+ } else {
+ outb(PCMMIO_PAGE(page), iobase + PCMMIO_PAGE_LOCK_REG);
+ outb(val & 0xff, iobase + PCMMIO_PAGE_REG(0));
+ outb((val >> 8) & 0xff, iobase + PCMMIO_PAGE_REG(1));
+ outb((val >> 16) & 0xff, iobase + PCMMIO_PAGE_REG(2));
+ }
+ spin_unlock_irqrestore(&devpriv->pagelock, flags);
+}
+
+static unsigned int pcmmio_dio_read(struct comedi_device *dev,
+ int page, int port)
+{
+ struct pcmmio_private *devpriv = dev->private;
+ unsigned long iobase = dev->iobase;
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(&devpriv->pagelock, flags);
+ if (page == 0) {
+ /* Port registers are valid for any page */
+ val = inb(iobase + PCMMIO_PORT_REG(port + 0));
+ val |= (inb(iobase + PCMMIO_PORT_REG(port + 1)) << 8);
+ val |= (inb(iobase + PCMMIO_PORT_REG(port + 2)) << 16);
+ } else {
+ outb(PCMMIO_PAGE(page), iobase + PCMMIO_PAGE_LOCK_REG);
+ val = inb(iobase + PCMMIO_PAGE_REG(0));
+ val |= (inb(iobase + PCMMIO_PAGE_REG(1)) << 8);
+ val |= (inb(iobase + PCMMIO_PAGE_REG(2)) << 16);
+ }
+ spin_unlock_irqrestore(&devpriv->pagelock, flags);
+
+ return val;
+}
-/* DIO devices are slightly special. Although it is possible to
- * implement the insn_read/insn_write interface, it is much more
- * useful to applications if you implement the insn_bits interface.
- * This allows packed reading/writing of the DIO channels. The
- * comedi core can convert between insn_bits and insn_read/write */
+/*
+ * Each channel can be individually programmed for input or output.
+ * Writing a '0' to a channel causes the corresponding output pin
+ * to go to a high-z state (pulled high by an external 10K resistor).
+ * This allows it to be used as an input. When used in the input mode,
+ * a read reflects the inverted state of the I/O pin, such that a
+ * high on the pin will read as a '0' in the register. Writing a '1'
+ * to a bit position causes the pin to sink current (up to 12mA),
+ * effectively pulling it low.
+ */
static int pcmmio_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int byte_no;
-
- /* NOTE:
- reading a 0 means this channel was high
- writine a 0 sets the channel high
- reading a 1 means this channel was low
- writing a 1 means set this channel low
-
- Therefore everything is always inverted. */
-
- /* The insn data is a mask in data[0] and the new data
- * in data[1], each channel cooresponding to a bit. */
-
-#ifdef DAMMIT_ITS_BROKEN
- /* DEBUG */
- printk(KERN_DEBUG "write mask: %08x data: %08x\n", data[0], data[1]);
-#endif
-
- s->state = 0;
-
- for (byte_no = 0; byte_no < s->n_chan / CHANS_PER_PORT; ++byte_no) {
- /* address of 8-bit port */
- unsigned long ioaddr = subpriv->iobases[byte_no],
- /* bit offset of port in 32-bit doubleword */
- offset = byte_no * 8;
- /* this 8-bit port's data */
- unsigned char byte = 0,
- /* The write mask for this port (if any) */
- write_mask_byte = (data[0] >> offset) & 0xff,
- /* The data byte for this port */
- data_byte = (data[1] >> offset) & 0xff;
-
- byte = inb(ioaddr); /* read all 8-bits for this port */
-
-#ifdef DAMMIT_ITS_BROKEN
- /* DEBUG */
- printk
- (KERN_DEBUG "byte %d wmb %02x db %02x offset %02d io %04x,"
- " data_in %02x ", byte_no, (unsigned)write_mask_byte,
- (unsigned)data_byte, offset, ioaddr, (unsigned)byte);
-#endif
-
- if (write_mask_byte) {
- /*
- * this byte has some write_bits
- * -- so set the output lines
- */
- /* clear bits for write mask */
- byte &= ~write_mask_byte;
- /* set to inverted data_byte */
- byte |= ~data_byte & write_mask_byte;
- /* Write out the new digital output state */
- outb(byte, ioaddr);
- }
-#ifdef DAMMIT_ITS_BROKEN
- /* DEBUG */
- printk(KERN_DEBUG "data_out_byte %02x\n", (unsigned)byte);
-#endif
- /* save the digital input lines for this byte.. */
- s->state |= ((unsigned int)byte) << offset;
+ /* subdevice 2 uses ports 0-2, subdevice 3 uses ports 3-5 */
+ int port = s->index == 2 ? 0 : 3;
+ unsigned int chanmask = (1 << s->n_chan) - 1;
+ unsigned int mask;
+ unsigned int val;
+
+ mask = comedi_dio_update_state(s, data);
+ if (mask) {
+ /*
+ * Outputs are inverted, invert the state and
+ * update the channels.
+ *
+ * The s->io_bits mask makes sure the input channels
+ * are '0' so that the output pins stay in a
+ * high-z state.
+ */
+ val = ~s->state & chanmask;
+ val &= s->io_bits;
+ pcmmio_dio_write(dev, val, 0, port);
}
- /* now return the DIO lines to data[1] - note they came inverted! */
- data[1] = ~s->state;
+ /* get inverted state of the channels from the port */
+ val = pcmmio_dio_read(dev, 0, port);
-#ifdef DAMMIT_ITS_BROKEN
- /* DEBUG */
- printk(KERN_DEBUG "s->state %08x data_out %08x\n", s->state, data[1]);
-#endif
+ /* return the true state of the channels */
+ data[1] = ~val & chanmask;
return insn->n;
}
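
From user space the hardware inversion handled above is invisible: writing '1' through comedi reads back as a high pin. A minimal comedilib usage sketch (device node and subdevice number are assumptions):

	#include <comedilib.h>

	static int pcmmio_dio_usage_sketch(void)
	{
		comedi_t *it = comedi_open("/dev/comedi0");	/* assumed node */
		unsigned int bit;

		if (!it)
			return -1;

		comedi_dio_config(it, 2, 0, COMEDI_OUTPUT);
		comedi_dio_write(it, 2, 0, 1);	/* pin goes high-z, pulled high */

		comedi_dio_config(it, 2, 1, COMEDI_INPUT);
		comedi_dio_read(it, 2, 1, &bit);	/* bit == true pin state */

		comedi_close(it);
		return bit;
	}
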
@@ -315,376 +294,172 @@ static int pcmmio_dio_insn_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int chan = CR_CHAN(insn->chanspec);
- int byte_no = chan / 8;
- int bit_no = chan % 8;
+ /* subdevice 2 uses ports 0-2, subdevice 3 uses ports 3-5 */
+ int port = s->index == 2 ? 0 : 3;
int ret;
ret = comedi_dio_insn_config(dev, s, insn, data, 0);
if (ret)
return ret;
- if (data[0] == INSN_CONFIG_DIO_INPUT) {
- unsigned long ioaddr = subpriv->iobases[byte_no];
- unsigned char val;
-
- val = inb(ioaddr);
- val &= ~(1 << bit_no);
- outb(val, ioaddr);
- }
+ if (data[0] == INSN_CONFIG_DIO_INPUT)
+ pcmmio_dio_write(dev, s->io_bits, 0, port);
return insn->n;
}
-static void switch_page(struct comedi_device *dev, int asic, int page)
+static void pcmmio_reset(struct comedi_device *dev)
{
- struct pcmmio_private *devpriv = dev->private;
-
- if (asic < 0 || asic >= 1)
- return; /* paranoia */
- if (page < 0 || page >= NUM_PAGES)
- return; /* more paranoia */
-
- devpriv->asics[asic].pagelock &= ~REG_PAGE_MASK;
- devpriv->asics[asic].pagelock |= page << REG_PAGE_BITOFFSET;
-
- /* now write out the shadow register */
- outb(devpriv->asics[asic].pagelock,
- devpriv->asics[asic].iobase + REG_PAGELOCK);
+ /* Clear all the DIO port bits */
+ pcmmio_dio_write(dev, 0, 0, 0);
+ pcmmio_dio_write(dev, 0, 0, 3);
+
+ /* Clear all the paged registers */
+ pcmmio_dio_write(dev, 0, PCMMIO_PAGE_POL, 0);
+ pcmmio_dio_write(dev, 0, PCMMIO_PAGE_ENAB, 0);
+ pcmmio_dio_write(dev, 0, PCMMIO_PAGE_INT_ID, 0);
}
-static void init_asics(struct comedi_device *dev)
-{ /* sets up an
- ASIC chip to defaults */
+/* devpriv->spinlock is already locked */
+static void pcmmio_stop_intr(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
struct pcmmio_private *devpriv = dev->private;
- int asic;
-
- for (asic = 0; asic < 1; ++asic) {
- int port, page;
- unsigned long baseaddr = devpriv->asics[asic].iobase;
-
- switch_page(dev, asic, 0); /* switch back to page 0 */
-
- /* first, clear all the DIO port bits */
- for (port = 0; port < PORTS_PER_ASIC; ++port)
- outb(0, baseaddr + REG_PORT0 + port);
-
- /* Next, clear all the paged registers for each page */
- for (page = 1; page < NUM_PAGES; ++page) {
- int reg;
- /* now clear all the paged registers */
- switch_page(dev, asic, page);
- for (reg = FIRST_PAGED_REG;
- reg < FIRST_PAGED_REG + NUM_PAGED_REGS; ++reg)
- outb(0, baseaddr + reg);
- }
- /* DEBUG set rising edge interrupts on port0 of both asics */
- /*switch_page(dev, asic, PAGE_POL);
- outb(0xff, baseaddr + REG_POL0);
- switch_page(dev, asic, PAGE_ENAB);
- outb(0xff, baseaddr + REG_ENAB0); */
- /* END DEBUG */
+ devpriv->enabled_mask = 0;
+ devpriv->active = 0;
+ s->async->inttrig = NULL;
- /* switch back to default page 0 */
- switch_page(dev, asic, 0);
- }
+ /* disable all dio interrupts */
+ pcmmio_dio_write(dev, 0, PCMMIO_PAGE_ENAB, 0);
}
-#ifdef notused
-static void lock_port(struct comedi_device *dev, int asic, int port)
+static void pcmmio_handle_dio_intr(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int triggered)
{
struct pcmmio_private *devpriv = dev->private;
+ unsigned int oldevents = s->async->events;
+ unsigned int len = s->async->cmd.chanlist_len;
+ unsigned int val = 0;
+ unsigned long flags;
+ int i;
- if (asic < 0 || asic >= 1)
- return; /* paranoia */
- if (port < 0 || port >= PORTS_PER_ASIC)
- return; /* more paranoia */
+ spin_lock_irqsave(&devpriv->spinlock, flags);
- devpriv->asics[asic].pagelock |= 0x1 << port;
- /* now write out the shadow register */
- outb(devpriv->asics[asic].pagelock,
- devpriv->asics[asic].iobase + REG_PAGELOCK);
- return;
-}
+ if (!devpriv->active)
+ goto done;
-static void unlock_port(struct comedi_device *dev, int asic, int port)
-{
- struct pcmmio_private *devpriv = dev->private;
+ if (!(triggered & devpriv->enabled_mask))
+ goto done;
- if (asic < 0 || asic >= 1)
- return; /* paranoia */
- if (port < 0 || port >= PORTS_PER_ASIC)
- return; /* more paranoia */
- devpriv->asics[asic].pagelock &= ~(0x1 << port) | REG_LOCK_MASK;
- /* now write out the shadow register */
- outb(devpriv->asics[asic].pagelock,
- devpriv->asics[asic].iobase + REG_PAGELOCK);
-}
-#endif /* notused */
+ for (i = 0; i < len; i++) {
+ unsigned int chan = CR_CHAN(s->async->cmd.chanlist[i]);
-static void pcmmio_stop_intr(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcmmio_private *devpriv = dev->private;
- int nports, firstport, asic, port;
+ if (triggered & (1 << chan))
+ val |= (1 << i);
+ }
- asic = subpriv->dio.intr.asic;
- if (asic < 0)
- return; /* not an interrupt subdev */
+ /* Write the scan to the buffer. */
+ if (comedi_buf_put(s->async, val) &&
+ comedi_buf_put(s->async, val >> 16)) {
+ s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
+ } else {
+ /* Overflow! Stop acquisition!! */
+ /* TODO: STOP_ACQUISITION_CALL_HERE!! */
+ pcmmio_stop_intr(dev, s);
+ }
- subpriv->dio.intr.enabled_mask = 0;
- subpriv->dio.intr.active = 0;
- s->async->inttrig = NULL;
- nports = subpriv->dio.intr.num_asic_chans / CHANS_PER_PORT;
- firstport = subpriv->dio.intr.asic_chan / CHANS_PER_PORT;
- switch_page(dev, asic, PAGE_ENAB);
- for (port = firstport; port < firstport + nports; ++port) {
- /* disable all intrs for this subdev.. */
- outb(0, devpriv->asics[asic].iobase + REG_ENAB0 + port);
+ /* Check for end of acquisition. */
+ if (!devpriv->continuous) {
+ /* stop_src == TRIG_COUNT */
+ if (devpriv->stop_count > 0) {
+ devpriv->stop_count--;
+ if (devpriv->stop_count == 0) {
+ s->async->events |= COMEDI_CB_EOA;
+ /* TODO: STOP_ACQUISITION_CALL_HERE!! */
+ pcmmio_stop_intr(dev, s);
+ }
+ }
}
+
+done:
+ spin_unlock_irqrestore(&devpriv->spinlock, flags);
+
+ if (oldevents != s->async->events)
+ comedi_event(dev, s);
}
static irqreturn_t interrupt_pcmmio(int irq, void *d)
{
- int asic, got1 = 0;
- struct comedi_device *dev = (struct comedi_device *)d;
- struct pcmmio_private *devpriv = dev->private;
- int i;
+ struct comedi_device *dev = d;
+ struct comedi_subdevice *s = dev->read_subdev;
+ unsigned int triggered;
+ unsigned char int_pend;
- for (asic = 0; asic < MAX_ASICS; ++asic) {
- if (irq == devpriv->asics[asic].irq) {
- unsigned long flags;
- unsigned triggered = 0;
- unsigned long iobase = devpriv->asics[asic].iobase;
- /* it is an interrupt for ASIC #asic */
- unsigned char int_pend;
-
- spin_lock_irqsave(&devpriv->asics[asic].spinlock,
- flags);
-
- int_pend = inb(iobase + REG_INT_PENDING) & 0x07;
-
- if (int_pend) {
- int port;
- for (port = 0; port < INTR_PORTS_PER_ASIC;
- ++port) {
- if (int_pend & (0x1 << port)) {
- unsigned char
- io_lines_with_edges = 0;
- switch_page(dev, asic,
- PAGE_INT_ID);
- io_lines_with_edges =
- inb(iobase +
- REG_INT_ID0 + port);
-
- if (io_lines_with_edges)
- /*
- * clear pending
- * interrupt
- */
- outb(0, iobase +
- REG_INT_ID0 +
- port);
-
- triggered |=
- io_lines_with_edges <<
- port * 8;
- }
- }
-
- ++got1;
- }
+ /* are there any interrupts pending */
+ int_pend = inb(dev->iobase + PCMMIO_INT_PENDING_REG) & 0x07;
+ if (!int_pend)
+ return IRQ_NONE;
- spin_unlock_irqrestore(&devpriv->asics[asic].spinlock,
- flags);
-
- if (triggered) {
- struct comedi_subdevice *s;
- /*
- * TODO here: dispatch io lines to subdevs
- * with commands..
- */
- printk
- (KERN_DEBUG "got edge detect interrupt %d asic %d which_chans: %06x\n",
- irq, asic, triggered);
- for (i = 2; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- /*
- * this is an interrupt subdev,
- * and it matches this asic!
- */
- if (subpriv->dio.intr.asic == asic) {
- unsigned long flags;
- unsigned oldevents;
-
- spin_lock_irqsave(&subpriv->dio.
- intr.spinlock,
- flags);
-
- oldevents = s->async->events;
-
- if (subpriv->dio.intr.active) {
- unsigned mytrig =
- ((triggered >>
- subpriv->dio.intr.asic_chan)
- &
- ((0x1 << subpriv->
- dio.intr.
- num_asic_chans) -
- 1)) << subpriv->
- dio.intr.first_chan;
- if (mytrig &
- subpriv->dio.
- intr.enabled_mask) {
- unsigned int val
- = 0;
- unsigned int n,
- ch, len;
-
- len =
- s->
- async->cmd.chanlist_len;
- for (n = 0;
- n < len;
- n++) {
- ch = CR_CHAN(s->async->cmd.chanlist[n]);
- if (mytrig & (1U << ch))
- val |= (1U << n);
- }
- /* Write the scan to the buffer. */
- if (comedi_buf_put(s->async, val)
- &&
- comedi_buf_put
- (s->async,
- val >> 16)) {
- s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
- } else {
- /* Overflow! Stop acquisition!! */
- /* TODO: STOP_ACQUISITION_CALL_HERE!! */
- pcmmio_stop_intr
- (dev,
- s);
- }
-
- /* Check for end of acquisition. */
- if (!subpriv->dio.intr.continuous) {
- /* stop_src == TRIG_COUNT */
- if (subpriv->dio.intr.stop_count > 0) {
- subpriv->dio.intr.stop_count--;
- if (subpriv->dio.intr.stop_count == 0) {
- s->async->events |= COMEDI_CB_EOA;
- /* TODO: STOP_ACQUISITION_CALL_HERE!! */
- pcmmio_stop_intr
- (dev,
- s);
- }
- }
- }
- }
- }
-
- spin_unlock_irqrestore
- (&subpriv->dio.intr.
- spinlock, flags);
-
- if (oldevents !=
- s->async->events) {
- comedi_event(dev, s);
- }
-
- }
-
- }
- }
+ /* get, and clear, the pending interrupts */
+ triggered = pcmmio_dio_read(dev, PCMMIO_PAGE_INT_ID, 0);
+ pcmmio_dio_write(dev, 0, PCMMIO_PAGE_INT_ID, 0);
+
+ pcmmio_handle_dio_intr(dev, s, triggered);
- }
- }
- if (!got1)
- return IRQ_NONE; /* interrupt from other source */
return IRQ_HANDLED;
}
+/* devpriv->spinlock is already locked */
static int pcmmio_start_intr(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pcmmio_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int bits = 0;
+ unsigned int pol_bits = 0;
+ int i;
- if (!subpriv->dio.intr.continuous && subpriv->dio.intr.stop_count == 0) {
+ if (!devpriv->continuous && devpriv->stop_count == 0) {
/* An empty acquisition! */
s->async->events |= COMEDI_CB_EOA;
- subpriv->dio.intr.active = 0;
+ devpriv->active = 0;
return 1;
- } else {
- unsigned bits = 0, pol_bits = 0, n;
- int nports, firstport, asic, port;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- asic = subpriv->dio.intr.asic;
- if (asic < 0)
- return 1; /* not an interrupt
- subdev */
- subpriv->dio.intr.enabled_mask = 0;
- subpriv->dio.intr.active = 1;
- nports = subpriv->dio.intr.num_asic_chans / CHANS_PER_PORT;
- firstport = subpriv->dio.intr.asic_chan / CHANS_PER_PORT;
- if (cmd->chanlist) {
- for (n = 0; n < cmd->chanlist_len; n++) {
- bits |= (1U << CR_CHAN(cmd->chanlist[n]));
- pol_bits |= (CR_AREF(cmd->chanlist[n])
- || CR_RANGE(cmd->
- chanlist[n]) ? 1U : 0U)
- << CR_CHAN(cmd->chanlist[n]);
- }
- }
- bits &= ((0x1 << subpriv->dio.intr.num_asic_chans) -
- 1) << subpriv->dio.intr.first_chan;
- subpriv->dio.intr.enabled_mask = bits;
-
- {
- /*
- * the below code configures the board
- * to use a specific IRQ from 0-15.
- */
- unsigned char b;
- /*
- * set resource enable register
- * to enable IRQ operation
- */
- outb(1 << 4, dev->iobase + 3);
- /* set bits 0-3 of b to the irq number from 0-15 */
- b = dev->irq & ((1 << 4) - 1);
- outb(b, dev->iobase + 2);
- /* done, we told the board what irq to use */
- }
+ }
- switch_page(dev, asic, PAGE_ENAB);
- for (port = firstport; port < firstport + nports; ++port) {
- unsigned enab =
- bits >> (subpriv->dio.intr.first_chan + (port -
- firstport)
- * 8) & 0xff, pol =
- pol_bits >> (subpriv->dio.intr.first_chan +
- (port - firstport) * 8) & 0xff;
- /* set enab intrs for this subdev.. */
- outb(enab,
- devpriv->asics[asic].iobase + REG_ENAB0 + port);
- switch_page(dev, asic, PAGE_POL);
- outb(pol,
- devpriv->asics[asic].iobase + REG_ENAB0 + port);
+ devpriv->enabled_mask = 0;
+ devpriv->active = 1;
+ if (cmd->chanlist) {
+ for (i = 0; i < cmd->chanlist_len; i++) {
+ unsigned int chanspec = cmd->chanlist[i];
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int aref = CR_AREF(chanspec);
+
+ bits |= (1 << chan);
+ pol_bits |= (((aref || range) ? 1 : 0) << chan);
}
}
+ bits &= ((1 << s->n_chan) - 1);
+ devpriv->enabled_mask = bits;
+
+ /* set polarity and enable interrupts */
+ pcmmio_dio_write(dev, pol_bits, PCMMIO_PAGE_POL, 0);
+ pcmmio_dio_write(dev, bits, PCMMIO_PAGE_ENAB, 0);
+
return 0;
}
static int pcmmio_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct pcmmio_private *devpriv = dev->private;
unsigned long flags;
- spin_lock_irqsave(&subpriv->dio.intr.spinlock, flags);
- if (subpriv->dio.intr.active)
+ spin_lock_irqsave(&devpriv->spinlock, flags);
+ if (devpriv->active)
pcmmio_stop_intr(dev, s);
- spin_unlock_irqrestore(&subpriv->dio.intr.spinlock, flags);
+ spin_unlock_irqrestore(&devpriv->spinlock, flags);
return 0;
}
@@ -696,17 +471,18 @@ static int
pcmmio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int trignum)
{
+ struct pcmmio_private *devpriv = dev->private;
unsigned long flags;
int event = 0;
if (trignum != 0)
return -EINVAL;
- spin_lock_irqsave(&subpriv->dio.intr.spinlock, flags);
+ spin_lock_irqsave(&devpriv->spinlock, flags);
s->async->inttrig = NULL;
- if (subpriv->dio.intr.active)
+ if (devpriv->active)
event = pcmmio_start_intr(dev, s);
- spin_unlock_irqrestore(&subpriv->dio.intr.spinlock, flags);
+ spin_unlock_irqrestore(&devpriv->spinlock, flags);
if (event)
comedi_event(dev, s);
@@ -719,23 +495,24 @@ pcmmio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s,
*/
static int pcmmio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct pcmmio_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned long flags;
int event = 0;
- spin_lock_irqsave(&subpriv->dio.intr.spinlock, flags);
- subpriv->dio.intr.active = 1;
+ spin_lock_irqsave(&devpriv->spinlock, flags);
+ devpriv->active = 1;
/* Set up end of acquisition. */
switch (cmd->stop_src) {
case TRIG_COUNT:
- subpriv->dio.intr.continuous = 0;
- subpriv->dio.intr.stop_count = cmd->stop_arg;
+ devpriv->continuous = 0;
+ devpriv->stop_count = cmd->stop_arg;
break;
default:
/* TRIG_NONE */
- subpriv->dio.intr.continuous = 1;
- subpriv->dio.intr.stop_count = 0;
+ devpriv->continuous = 1;
+ devpriv->stop_count = 0;
break;
}
@@ -749,7 +526,7 @@ static int pcmmio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
event = pcmmio_start_intr(dev, s);
break;
}
- spin_unlock_irqrestore(&subpriv->dio.intr.spinlock, flags);
+ spin_unlock_irqrestore(&devpriv->spinlock, flags);
if (event)
comedi_event(dev, s);
@@ -812,188 +589,171 @@ static int pcmmio_cmdtest(struct comedi_device *dev,
return 0;
}
-static int adc_wait_ready(unsigned long iobase)
+static int pcmmio_ai_wait_for_eoc(unsigned long iobase, unsigned int timeout)
{
- unsigned long retry = 100000;
- while (retry--)
- if (inb(iobase + 3) & 0x80)
+ unsigned char status;
+
+ while (timeout--) {
+ status = inb(iobase + PCMMIO_AI_STATUS_REG);
+ if (status & PCMMIO_AI_STATUS_DATA_READY)
return 0;
- return 1;
+ }
+ return -ETIME;
}
-/* All this is for AI and AO */
-static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int pcmmio_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int n;
- unsigned long iobase = subpriv->iobase;
+ unsigned long iobase = dev->iobase;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int aref = CR_AREF(insn->chanspec);
+ unsigned char cmd = 0;
+ unsigned int val;
+ int ret;
+ int i;
/*
- 1. write the CMD byte (to BASE+2)
- 2. read junk lo byte (BASE+0)
- 3. read junk hi byte (BASE+1)
- 4. (mux settled so) write CMD byte again (BASE+2)
- 5. read valid lo byte(BASE+0)
- 6. read valid hi byte(BASE+1)
-
- Additionally note that the BASE += 4 if the channel >= 8
+ * The PCM-MIO uses two Linear Tech LTC1859CG 8-channel A/D converters.
+ * The devices use a full duplex serial interface which transmits and
+ * receives data simultaneously. An 8-bit command is shifted into the
+ * ADC interface to configure it for the next conversion. At the same
+ * time, the data from the previous conversion is shifted out of the
+ * device. Consequently, the conversion result is delayed by one
+ * conversion from the command word.
+ *
+	 * Set up the cmd for the conversions, then do a dummy conversion to
+	 * flush the junk data. Then do each conversion requested by the
+	 * comedi_insn. Note that the last conversion will leave junk data
+	 * in the ADC, which will get flushed on the next comedi_insn.
*/
- /* convert n samples */
- for (n = 0; n < insn->n; n++) {
- unsigned chan = CR_CHAN(insn->chanspec), range =
- CR_RANGE(insn->chanspec), aref = CR_AREF(insn->chanspec);
- unsigned char command_byte = 0;
- unsigned iooffset = 0;
- unsigned short sample, adc_adjust = 0;
-
- if (chan > 7)
- chan -= 8, iooffset = 4; /*
- * use the second dword
- * for channels > 7
- */
-
- if (aref != AREF_DIFF) {
- aref = AREF_GROUND;
- command_byte |= 1 << 7; /*
- * set bit 7 to indicate
- * single-ended
- */
- }
- if (range < 2)
- adc_adjust = 0x8000; /*
- * bipolar ranges
- * (-5,5 .. -10,10 need to be
- * adjusted -- that is.. they
- * need to wrap around by
- * adding 0x8000
- */
-
- if (chan % 2) {
- command_byte |= 1 << 6; /*
- * odd-numbered channels
- * have bit 6 set
- */
- }
-
- /* select the channel, bits 4-5 == chan/2 */
- command_byte |= ((chan / 2) & 0x3) << 4;
+ if (chan > 7) {
+ chan -= 8;
+ iobase += PCMMIO_AI_2ND_ADC_OFFSET;
+ }
- /* set the range, bits 2-3 */
- command_byte |= (range & 0x3) << 2;
+ if (aref == AREF_GROUND)
+ cmd |= PCMMIO_AI_CMD_SE;
+ if (chan % 2)
+ cmd |= PCMMIO_AI_CMD_ODD_CHAN;
+ cmd |= PCMMIO_AI_CMD_CHAN_SEL(chan / 2);
+ cmd |= PCMMIO_AI_CMD_RANGE(range);
- /* need to do this twice to make sure mux settled */
- /* chan/range/aref select */
- outb(command_byte, iobase + iooffset + 2);
+ outb(cmd, iobase + PCMMIO_AI_CMD_REG);
+ ret = pcmmio_ai_wait_for_eoc(iobase, 100000);
+ if (ret)
+ return ret;
- /* wait for the adc to say it finised the conversion */
- adc_wait_ready(iobase + iooffset);
+ val = inb(iobase + PCMMIO_AI_LSB_REG);
+ val |= inb(iobase + PCMMIO_AI_MSB_REG) << 8;
- /* select the chan/range/aref AGAIN */
- outb(command_byte, iobase + iooffset + 2);
+ for (i = 0; i < insn->n; i++) {
+ outb(cmd, iobase + PCMMIO_AI_CMD_REG);
+ ret = pcmmio_ai_wait_for_eoc(iobase, 100000);
+ if (ret)
+ return ret;
- adc_wait_ready(iobase + iooffset);
+ val = inb(iobase + PCMMIO_AI_LSB_REG);
+ val |= inb(iobase + PCMMIO_AI_MSB_REG) << 8;
- /* read data lo byte */
- sample = inb(iobase + iooffset + 0);
+ /* bipolar data is two's complement */
+ if (comedi_range_is_bipolar(s, range))
+ val = comedi_offset_munge(s, val);
- /* read data hi byte */
- sample |= inb(iobase + iooffset + 1) << 8;
- sample += adc_adjust; /* adjustment .. munge data */
- data[n] = sample;
+ data[i] = val;
}
- /* return the number of samples read/written */
- return n;
+
+ return insn->n;
}
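Because the LTC1859 returns the result of the previous conversion while the current command is being shifted in, pcmmio_ai_insn_read() issues one throwaway command before the sampling loop. A small self-contained model of that one-behind pipeline (adc_transfer() stands in for the outb/inb command-and-read sequence; it is not driver code):

#include <stdio.h>

static unsigned short adc_pipeline;	/* result latched by the last command */

/* each transfer returns the previous result and latches a new one */
static unsigned short adc_transfer(unsigned char cmd)
{
	unsigned short previous = adc_pipeline;

	adc_pipeline = 0x1000 | cmd;	/* fake conversion for this command */
	return previous;
}

int main(void)
{
	unsigned char cmd = 0x84;	/* some channel/range selection */
	unsigned short data[3];
	int i;

	adc_transfer(cmd);		/* dummy cycle flushes the stale result */
	for (i = 0; i < 3; i++)
		data[i] = adc_transfer(cmd);

	for (i = 0; i < 3; i++)
		printf("sample %d = 0x%04x\n", i, data[i]);
	return 0;
}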
-static int ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int pcmmio_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int n;
- for (n = 0; n < insn->n; n++) {
- unsigned chan = CR_CHAN(insn->chanspec);
- if (chan < s->n_chan)
- data[n] = subpriv->ao.shadow_samples[chan];
- }
- return n;
+ struct pcmmio_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
+
+ return insn->n;
}
-static int wait_dac_ready(unsigned long iobase)
+static int pcmmio_ao_wait_for_eoc(unsigned long iobase, unsigned int timeout)
{
- unsigned long retry = 100000L;
+ unsigned char status;
- /* This may seem like an absurd way to handle waiting and violates the
- "no busy waiting" policy. The fact is that the hardware is
- normally so fast that we usually only need one time through the loop
- anyway. The longer timeout is for rare occasions and for detecting
- non-existent hardware. */
-
- while (retry--) {
- if (inb(iobase + 3) & 0x80)
+ while (timeout--) {
+ status = inb(iobase + PCMMIO_AO_STATUS_REG);
+ if (status & PCMMIO_AO_STATUS_DATA_READY)
return 0;
-
}
- return 1;
+ return -ETIME;
}
-static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int pcmmio_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int n;
- unsigned iobase = subpriv->iobase, iooffset = 0;
-
- for (n = 0; n < insn->n; n++) {
- unsigned chan = CR_CHAN(insn->chanspec), range =
- CR_RANGE(insn->chanspec);
- if (chan < s->n_chan) {
- unsigned char command_byte = 0, range_byte =
- range & ((1 << 4) - 1);
- if (chan >= 4)
- chan -= 4, iooffset += 4;
- /* set the range.. */
- outb(range_byte, iobase + iooffset + 0);
- outb(0, iobase + iooffset + 1);
-
- /* tell it to begin */
- command_byte = (chan << 1) | 0x60;
- outb(command_byte, iobase + iooffset + 2);
-
- wait_dac_ready(iobase + iooffset);
-
- /* low order byte */
- outb(data[n] & 0xff, iobase + iooffset + 0);
-
- /* high order byte */
- outb((data[n] >> 8) & 0xff, iobase + iooffset + 1);
-
- /*
- * set bit 4 of command byte to indicate
- * data is loaded and trigger conversion
- */
- command_byte = 0x70 | (chan << 1);
- /* trigger converion */
- outb(command_byte, iobase + iooffset + 2);
-
- wait_dac_ready(iobase + iooffset);
-
- /* save to shadow register for ao_rinsn */
- subpriv->ao.shadow_samples[chan] = data[n];
- }
+ struct pcmmio_private *devpriv = dev->private;
+ unsigned long iobase = dev->iobase;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int val = devpriv->ao_readback[chan];
+ unsigned char cmd = 0;
+ int ret;
+ int i;
+
+ /*
+ * The PCM-MIO has two Linear Tech LTC2704 DAC devices. Each device
+ * is a 4-channel converter with software-selectable output range.
+ */
+
+ if (chan > 3) {
+ cmd |= PCMMIO_AO_CMD_CHAN_SEL(chan - 4);
+ iobase += PCMMIO_AO_2ND_DAC_OFFSET;
+ } else {
+ cmd |= PCMMIO_AO_CMD_CHAN_SEL(chan);
}
- return n;
+
+ /* set the range for the channel */
+ outb(PCMMIO_AO_LSB_SPAN(range), iobase + PCMMIO_AO_LSB_REG);
+ outb(0, iobase + PCMMIO_AO_MSB_REG);
+ outb(cmd | PCMMIO_AO_CMD_WR_SPAN_UPDATE, iobase + PCMMIO_AO_CMD_REG);
+ ret = pcmmio_ao_wait_for_eoc(iobase, 100000);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
+
+ /* write the data to the channel */
+ outb(val & 0xff, iobase + PCMMIO_AO_LSB_REG);
+ outb((val >> 8) & 0xff, iobase + PCMMIO_AO_MSB_REG);
+ outb(cmd | PCMMIO_AO_CMD_WR_CODE_UPDATE,
+ iobase + PCMMIO_AO_CMD_REG);
+ ret = pcmmio_ao_wait_for_eoc(iobase, 100000);
+ if (ret)
+ return ret;
+
+ devpriv->ao_readback[chan] = val;
+ }
+
+ return insn->n;
}
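Both converters move 16-bit samples through a pair of 8-bit registers: the AI path reassembles LSB and MSB reads, and the AO path above splits each value before issuing the WR_CODE_UPDATE command. The round trip is plain shift-and-mask arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int val = 0xbeef;
	unsigned char lsb = val & 0xff;		/* written to the LSB register */
	unsigned char msb = (val >> 8) & 0xff;	/* written to the MSB register */
	unsigned int readback = lsb | (msb << 8);

	/* prints: split lsb=0xef msb=0xbe, reassembled 0xbeef */
	printf("split lsb=0x%02x msb=0x%02x, reassembled 0x%04x\n",
	       lsb, msb, readback);
	return 0;
}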
static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct pcmmio_private *devpriv;
struct comedi_subdevice *s;
- int sdev_no, chans_left, n_dio_subdevs, n_subdevs, port, asic,
- thisasic_chanct = 0;
- unsigned int irq[MAX_ASICS];
int ret;
- irq[0] = it->options[1];
-
ret = comedi_request_region(dev, it->options[0], 32);
if (ret)
return ret;
@@ -1002,177 +762,99 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!devpriv)
return -ENOMEM;
- for (asic = 0; asic < MAX_ASICS; ++asic) {
- devpriv->asics[asic].num = asic;
- devpriv->asics[asic].iobase =
- dev->iobase + 16 + asic * ASIC_IOSIZE;
- /*
- * this gets actually set at the end of this function when we
- * request_irqs
- */
- devpriv->asics[asic].irq = 0;
- spin_lock_init(&devpriv->asics[asic].spinlock);
- }
+ spin_lock_init(&devpriv->pagelock);
+ spin_lock_init(&devpriv->spinlock);
- chans_left = CHANS_PER_ASIC * 1;
- n_dio_subdevs = CALC_N_DIO_SUBDEVS(chans_left);
- n_subdevs = n_dio_subdevs + 2;
- devpriv->sprivs =
- kcalloc(n_subdevs, sizeof(struct pcmmio_subdev_private),
- GFP_KERNEL);
- if (!devpriv->sprivs) {
- printk(KERN_ERR "comedi%d: cannot allocate subdevice private data structures\n",
- dev->minor);
- return -ENOMEM;
+ pcmmio_reset(dev);
+
+ if (it->options[1]) {
+ ret = request_irq(it->options[1], interrupt_pcmmio, 0,
+ dev->board_name, dev);
+ if (ret == 0) {
+ dev->irq = it->options[1];
+
+ /* configure the interrupt routing on the board */
+ outb(PCMMIO_AI_RES_ENA_DIO_RES_ACCESS,
+ dev->iobase + PCMMIO_AI_RES_ENA_REG);
+ outb(PCMMIO_RESOURCE_IRQ(dev->irq),
+ dev->iobase + PCMMIO_RESOURCE_REG);
+ }
}
- ret = comedi_alloc_subdevices(dev, n_subdevs);
+ ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
- /* First, AI */
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- s->private = &devpriv->sprivs[0];
- s->maxdata = 0xffff;
- s->range_table = &ranges_ai;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
- s->type = COMEDI_SUBD_AI;
- s->n_chan = 16;
- s->len_chanlist = s->n_chan;
- s->insn_read = ai_rinsn;
- subpriv->iobase = dev->iobase + 0;
- /* initialize the resource enable register by clearing it */
- outb(0, subpriv->iobase + 3);
- outb(0, subpriv->iobase + 4 + 3);
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
+ s->n_chan = 16;
+ s->maxdata = 0xffff;
+ s->range_table = &pcmmio_ai_ranges;
+ s->insn_read = pcmmio_ai_insn_read;
- /* Next, AO */
- s = &dev->subdevices[1];
- s->private = &devpriv->sprivs[1];
- s->maxdata = 0xffff;
- s->range_table = &ranges_ao;
- s->subdev_flags = SDF_READABLE;
- s->type = COMEDI_SUBD_AO;
- s->n_chan = 8;
- s->len_chanlist = s->n_chan;
- s->insn_read = ao_rinsn;
- s->insn_write = ao_winsn;
- subpriv->iobase = dev->iobase + 8;
/* initialize the resource enable register by clearing it */
- outb(0, subpriv->iobase + 3);
- outb(0, subpriv->iobase + 4 + 3);
-
- port = 0;
- asic = 0;
- for (sdev_no = 2; sdev_no < dev->n_subdevices; ++sdev_no) {
- int byte_no;
-
- s = &dev->subdevices[sdev_no];
- s->private = &devpriv->sprivs[sdev_no];
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->type = COMEDI_SUBD_DIO;
- s->insn_bits = pcmmio_dio_insn_bits;
- s->insn_config = pcmmio_dio_insn_config;
- s->n_chan = min(chans_left, MAX_CHANS_PER_SUBDEV);
- subpriv->dio.intr.asic = -1;
- subpriv->dio.intr.first_chan = -1;
- subpriv->dio.intr.asic_chan = -1;
- subpriv->dio.intr.num_asic_chans = -1;
- subpriv->dio.intr.active = 0;
- s->len_chanlist = 1;
-
- /* save the ioport address for each 'port' of 8 channels in the
- subdevice */
- for (byte_no = 0; byte_no < PORTS_PER_SUBDEV; ++byte_no, ++port) {
- if (port >= PORTS_PER_ASIC) {
- port = 0;
- ++asic;
- thisasic_chanct = 0;
- }
- subpriv->iobases[byte_no] =
- devpriv->asics[asic].iobase + port;
-
- if (thisasic_chanct <
- CHANS_PER_PORT * INTR_PORTS_PER_ASIC
- && subpriv->dio.intr.asic < 0) {
- /*
- * this is an interrupt subdevice,
- * so setup the struct
- */
- subpriv->dio.intr.asic = asic;
- subpriv->dio.intr.active = 0;
- subpriv->dio.intr.stop_count = 0;
- subpriv->dio.intr.first_chan = byte_no * 8;
- subpriv->dio.intr.asic_chan = thisasic_chanct;
- subpriv->dio.intr.num_asic_chans =
- s->n_chan - subpriv->dio.intr.first_chan;
- s->cancel = pcmmio_cancel;
- s->do_cmd = pcmmio_cmd;
- s->do_cmdtest = pcmmio_cmdtest;
- s->len_chanlist =
- subpriv->dio.intr.num_asic_chans;
- }
- thisasic_chanct += CHANS_PER_PORT;
- }
- spin_lock_init(&subpriv->dio.intr.spinlock);
-
- chans_left -= s->n_chan;
+ outb(PCMMIO_AI_RES_ENA_CMD_REG_ACCESS,
+ dev->iobase + PCMMIO_AI_RES_ENA_REG);
+ outb(PCMMIO_AI_RES_ENA_CMD_REG_ACCESS,
+ dev->iobase + PCMMIO_AI_RES_ENA_REG + PCMMIO_AI_2ND_ADC_OFFSET);
- if (!chans_left) {
- /*
- * reset the asic to our first asic,
- * to do intr subdevs
- */
- asic = 0;
- port = 0;
- }
+ /* Analog Output subdevice */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 8;
+ s->maxdata = 0xffff;
+ s->range_table = &pcmmio_ao_ranges;
+ s->insn_read = pcmmio_ao_insn_read;
+ s->insn_write = pcmmio_ao_insn_write;
+ /* initialize the resource enable register by clearing it */
+ outb(0, dev->iobase + PCMMIO_AO_RESOURCE_ENA_REG);
+ outb(0, dev->iobase + PCMMIO_AO_2ND_DAC_OFFSET +
+ PCMMIO_AO_RESOURCE_ENA_REG);
+
+ /* Digital I/O subdevice with interrupt support */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 24;
+ s->maxdata = 1;
+ s->len_chanlist = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcmmio_dio_insn_bits;
+ s->insn_config = pcmmio_dio_insn_config;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->cancel = pcmmio_cancel;
+ s->do_cmd = pcmmio_cmd;
+ s->do_cmdtest = pcmmio_cmdtest;
}
- init_asics(dev); /* clear out all the registers, basically */
-
- for (asic = 0; irq[0] && asic < MAX_ASICS; ++asic) {
- if (irq[asic]
- && request_irq(irq[asic], interrupt_pcmmio,
- IRQF_SHARED, dev->board_name, dev)) {
- int i;
- /* unroll the allocated irqs.. */
- for (i = asic - 1; i >= 0; --i) {
- free_irq(irq[i], dev);
- devpriv->asics[i].irq = irq[i] = 0;
- }
- irq[asic] = 0;
- }
- devpriv->asics[asic].irq = irq[asic];
- }
+ /* Digital I/O subdevice */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 24;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcmmio_dio_insn_bits;
+ s->insn_config = pcmmio_dio_insn_config;
- return 1;
-}
-
-static void pcmmio_detach(struct comedi_device *dev)
-{
- struct pcmmio_private *devpriv = dev->private;
- int i;
-
- if (devpriv) {
- for (i = 0; i < MAX_ASICS; ++i) {
- if (devpriv->asics[i].irq)
- free_irq(devpriv->asics[i].irq, dev);
- }
- kfree(devpriv->sprivs);
- }
- comedi_legacy_detach(dev);
+ return 0;
}
static struct comedi_driver pcmmio_driver = {
.driver_name = "pcmmio",
.module = THIS_MODULE,
.attach = pcmmio_attach,
- .detach = pcmmio_detach,
+ .detach = comedi_legacy_detach,
};
module_comedi_driver(pcmmio_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for Winsystems PCM-MIO PC/104 board");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index 954fa96..a8f390f 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -75,7 +75,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include "../comedidev.h"
@@ -127,36 +126,53 @@ static const struct pcmuio_board pcmuio_boards[] = {
},
};
-struct pcmuio_subdev_private {
- /* The below is only used for intr subdevices */
- struct {
- /* if non-negative, this subdev has an interrupt asic */
- int asic;
- /*
- * subdev-relative channel mask for channels
- * we are interested in
- */
- int enabled_mask;
- int active;
- int stop_count;
- int continuous;
- spinlock_t spinlock;
- } intr;
+struct pcmuio_asic {
+ spinlock_t pagelock; /* protects the page registers */
+ spinlock_t spinlock; /* protects member variables */
+ unsigned int enabled_mask;
+ unsigned int stop_count;
+ unsigned int active:1;
+ unsigned int continuous:1;
};
struct pcmuio_private {
- struct {
- unsigned int irq;
- spinlock_t spinlock;
- } asics[PCMUIO_MAX_ASICS];
- struct pcmuio_subdev_private *sprivs;
+ struct pcmuio_asic asics[PCMUIO_MAX_ASICS];
+ unsigned int irq2;
};
+static inline unsigned long pcmuio_asic_iobase(struct comedi_device *dev,
+ int asic)
+{
+ return dev->iobase + (asic * PCMUIO_ASIC_IOSIZE);
+}
+
+static inline int pcmuio_subdevice_to_asic(struct comedi_subdevice *s)
+{
+ /*
+	 * subdevices 0 and 1 are handled by the first asic
+	 * subdevices 2 and 3 are handled by the second asic
+ */
+ return s->index / 2;
+}
+
+static inline int pcmuio_subdevice_to_port(struct comedi_subdevice *s)
+{
+ /*
+	 * subdevices 0 and 2 use port registers 0-2
+	 * subdevices 1 and 3 use port registers 3-5
+ */
+ return (s->index % 2) ? 3 : 0;
+}
+
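The pcmuio_subdevice_to_asic() and pcmuio_subdevice_to_port() helpers above replace the per-subdevice private data: the subdevice index alone now determines which ASIC and which bank of port registers to use. A few lines are enough to show the resulting mapping:

#include <stdio.h>

int main(void)
{
	int index;

	/* prints asic 0, 0, 1, 1 and first port 0, 3, 0, 3 */
	for (index = 0; index < 4; index++)
		printf("subdevice %d -> asic %d, first port %d\n",
		       index, index / 2, (index % 2) ? 3 : 0);
	return 0;
}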
static void pcmuio_write(struct comedi_device *dev, unsigned int val,
int asic, int page, int port)
{
- unsigned long iobase = dev->iobase + (asic * PCMUIO_ASIC_IOSIZE);
+ struct pcmuio_private *devpriv = dev->private;
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
+ unsigned long iobase = pcmuio_asic_iobase(dev, asic);
+ unsigned long flags;
+ spin_lock_irqsave(&chip->pagelock, flags);
if (page == 0) {
/* Port registers are valid for any page */
outb(val & 0xff, iobase + PCMUIO_PORT_REG(port + 0));
@@ -168,14 +184,19 @@ static void pcmuio_write(struct comedi_device *dev, unsigned int val,
outb((val >> 8) & 0xff, iobase + PCMUIO_PAGE_REG(1));
outb((val >> 16) & 0xff, iobase + PCMUIO_PAGE_REG(2));
}
+ spin_unlock_irqrestore(&chip->pagelock, flags);
}
static unsigned int pcmuio_read(struct comedi_device *dev,
int asic, int page, int port)
{
- unsigned long iobase = dev->iobase + (asic * PCMUIO_ASIC_IOSIZE);
+ struct pcmuio_private *devpriv = dev->private;
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
+ unsigned long iobase = pcmuio_asic_iobase(dev, asic);
+ unsigned long flags;
unsigned int val;
+ spin_lock_irqsave(&chip->pagelock, flags);
if (page == 0) {
/* Port registers are valid for any page */
val = inb(iobase + PCMUIO_PORT_REG(port + 0));
@@ -187,6 +208,7 @@ static unsigned int pcmuio_read(struct comedi_device *dev,
val |= (inb(iobase + PCMUIO_PAGE_REG(1)) << 8);
val |= (inb(iobase + PCMUIO_PAGE_REG(2)) << 16);
}
+ spin_unlock_irqrestore(&chip->pagelock, flags);
return val;
}
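pcmuio_read() and pcmuio_write() select the active register page and then access the banked registers behind it, so the whole sequence has to be atomic with respect to other callers; that is what the new pagelock provides. A self-contained model of the pattern using a plain pthread mutex instead of the driver's spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pagelock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char page_reg;		/* simulated page-select register */
static unsigned char regs[4][3];	/* simulated banked registers */

/* write a 24-bit value to one page as a single atomic sequence */
static void paged_write(unsigned int val, int page)
{
	pthread_mutex_lock(&pagelock);
	page_reg = page;			/* select the page ... */
	regs[page][0] = val & 0xff;		/* ... then access its registers */
	regs[page][1] = (val >> 8) & 0xff;
	regs[page][2] = (val >> 16) & 0xff;
	pthread_mutex_unlock(&pagelock);
}

int main(void)
{
	paged_write(0x123456, 2);

	/* prints: page 2: 56 34 12 */
	printf("page %d: %02x %02x %02x\n", page_reg,
	       regs[2][0], regs[2][1], regs[2][2]);
	return 0;
}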
@@ -203,30 +225,35 @@ static unsigned int pcmuio_read(struct comedi_device *dev,
*/
static int pcmuio_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned int mask = data[0] & s->io_bits; /* outputs only */
- unsigned int bits = data[1];
- int asic = s->index / 2;
- int port = (s->index % 2) ? 3 : 0;
+ int asic = pcmuio_subdevice_to_asic(s);
+ int port = pcmuio_subdevice_to_port(s);
+ unsigned int chanmask = (1 << s->n_chan) - 1;
+ unsigned int mask;
unsigned int val;
- /* get inverted state of the channels from the port */
- val = pcmuio_read(dev, asic, 0, port);
-
- /* get the true state of the channels */
- s->state = val ^ ((0x1 << s->n_chan) - 1);
-
+ mask = comedi_dio_update_state(s, data);
if (mask) {
- s->state &= ~mask;
- s->state |= (mask & bits);
-
- /* invert the state and update the channels */
- val = s->state ^ ((0x1 << s->n_chan) - 1);
+ /*
+		 * Outputs are inverted, so invert the state and
+		 * update the channels.
+		 *
+		 * The s->io_bits mask makes sure the input channels
+		 * are '0' so that the output pins stay in a
+		 * high-Z state.
+ */
+ val = ~s->state & chanmask;
+ val &= s->io_bits;
pcmuio_write(dev, val, asic, 0, port);
}
- data[1] = s->state;
+ /* get inverted state of the channels from the port */
+ val = pcmuio_read(dev, asic, 0, port);
+
+ /* return the true state of the channels */
+ data[1] = ~val & chanmask;
return insn->n;
}
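The reworked pcmuio_dio_insn_bits() leans on two conventions: the port hardware is active low, so the state is inverted on every write and read, and the written value is masked with s->io_bits so only channels configured as outputs are driven. A compact standalone model of that inversion and masking (not driver code):

#include <stdio.h>

#define CHANMASK	0xffffff		/* 24 channels per subdevice */

static unsigned int port_regs;			/* simulated (inverted) port state */

/* write the desired state; io_bits has a 1 for every output channel */
static void dio_write(unsigned int state, unsigned int io_bits)
{
	unsigned int val = ~state & CHANMASK;	/* hardware is active low */

	val &= io_bits;				/* only drive output channels */
	port_regs = val;
}

/* read back the true (non-inverted) channel state */
static unsigned int dio_read(void)
{
	return ~port_regs & CHANMASK;
}

int main(void)
{
	/* channels 0-7 are outputs; drive channels 0 and 3 high */
	dio_write(0x000009, 0x0000ff);

	/* prints 0xffff09: outputs 0 and 3 high, undriven bits read as 1 here */
	printf("true state = 0x%06x\n", dio_read());
	return 0;
}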
@@ -236,8 +263,8 @@ static int pcmuio_dio_insn_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- int asic = s->index / 2;
- int port = (s->index % 2) ? 3 : 0;
+ int asic = pcmuio_subdevice_to_asic(s);
+ int port = pcmuio_subdevice_to_port(s);
int ret;
ret = comedi_dio_insn_config(dev, s, insn, data, 0);
@@ -267,18 +294,16 @@ static void pcmuio_reset(struct comedi_device *dev)
}
}
+/* chip->spinlock is already locked */
static void pcmuio_stop_intr(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct pcmuio_subdev_private *subpriv = s->private;
- int asic;
-
- asic = subpriv->intr.asic;
- if (asic < 0)
- return; /* not an interrupt subdev */
+ struct pcmuio_private *devpriv = dev->private;
+ int asic = pcmuio_subdevice_to_asic(s);
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
- subpriv->intr.enabled_mask = 0;
- subpriv->intr.active = 0;
+ chip->enabled_mask = 0;
+ chip->active = 0;
s->async->inttrig = NULL;
/* disable all intrs for this subdev.. */
@@ -289,29 +314,27 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned triggered)
{
- struct pcmuio_subdev_private *subpriv = s->private;
+ struct pcmuio_private *devpriv = dev->private;
+ int asic = pcmuio_subdevice_to_asic(s);
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
unsigned int len = s->async->cmd.chanlist_len;
unsigned oldevents = s->async->events;
unsigned int val = 0;
unsigned long flags;
- unsigned mytrig;
unsigned int i;
- spin_lock_irqsave(&subpriv->intr.spinlock, flags);
+ spin_lock_irqsave(&chip->spinlock, flags);
- if (!subpriv->intr.active)
+ if (!chip->active)
goto done;
- mytrig = triggered;
- mytrig &= ((0x1 << s->n_chan) - 1);
-
- if (!(mytrig & subpriv->intr.enabled_mask))
+ if (!(triggered & chip->enabled_mask))
goto done;
for (i = 0; i < len; i++) {
unsigned int chan = CR_CHAN(s->async->cmd.chanlist[i]);
- if (mytrig & (1U << chan))
- val |= (1U << i);
+ if (triggered & (1 << chan))
+ val |= (1 << i);
}
/* Write the scan to the buffer. */
@@ -325,11 +348,11 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
}
/* Check for end of acquisition. */
- if (!subpriv->intr.continuous) {
+ if (!chip->continuous) {
/* stop_src == TRIG_COUNT */
- if (subpriv->intr.stop_count > 0) {
- subpriv->intr.stop_count--;
- if (subpriv->intr.stop_count == 0) {
+ if (chip->stop_count > 0) {
+ chip->stop_count--;
+ if (chip->stop_count == 0) {
s->async->events |= COMEDI_CB_EOA;
/* TODO: STOP_ACQUISITION_CALL_HERE!! */
pcmuio_stop_intr(dev, s);
@@ -338,7 +361,7 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
}
done:
- spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
if (oldevents != s->async->events)
comedi_event(dev, s);
@@ -346,114 +369,93 @@ done:
static int pcmuio_handle_asic_interrupt(struct comedi_device *dev, int asic)
{
- struct pcmuio_private *devpriv = dev->private;
- struct pcmuio_subdev_private *subpriv;
- unsigned long iobase = dev->iobase + (asic * PCMUIO_ASIC_IOSIZE);
- unsigned int triggered = 0;
- int got1 = 0;
- unsigned long flags;
- unsigned char int_pend;
- int i;
+	/* there could be two asics, so we can't use dev->read_subdev */
+ struct comedi_subdevice *s = &dev->subdevices[asic * 2];
+ unsigned long iobase = pcmuio_asic_iobase(dev, asic);
+ unsigned int val;
- spin_lock_irqsave(&devpriv->asics[asic].spinlock, flags);
+ /* are there any interrupts pending */
+ val = inb(iobase + PCMUIO_INT_PENDING_REG) & 0x07;
+ if (!val)
+ return 0;
- int_pend = inb(iobase + PCMUIO_INT_PENDING_REG) & 0x07;
- if (int_pend) {
- triggered = pcmuio_read(dev, asic, PCMUIO_PAGE_INT_ID, 0);
- pcmuio_write(dev, 0, asic, PCMUIO_PAGE_INT_ID, 0);
+ /* get, and clear, the pending interrupts */
+ val = pcmuio_read(dev, asic, PCMUIO_PAGE_INT_ID, 0);
+ pcmuio_write(dev, 0, asic, PCMUIO_PAGE_INT_ID, 0);
- ++got1;
- }
+ /* handle the pending interrupts */
+ pcmuio_handle_intr_subdev(dev, s, val);
- spin_unlock_irqrestore(&devpriv->asics[asic].spinlock, flags);
-
- if (triggered) {
- struct comedi_subdevice *s;
- /* TODO here: dispatch io lines to subdevs with commands.. */
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- subpriv = s->private;
- if (subpriv->intr.asic == asic) {
- /*
- * This is an interrupt subdev, and it
- * matches this asic!
- */
- pcmuio_handle_intr_subdev(dev, s,
- triggered);
- }
- }
- }
- return got1;
+ return 1;
}
static irqreturn_t pcmuio_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcmuio_private *devpriv = dev->private;
- int got1 = 0;
- int asic;
+ int handled = 0;
- for (asic = 0; asic < PCMUIO_MAX_ASICS; ++asic) {
- if (irq == devpriv->asics[asic].irq) {
- /* it is an interrupt for ASIC #asic */
- if (pcmuio_handle_asic_interrupt(dev, asic))
- got1++;
- }
- }
- if (!got1)
- return IRQ_NONE; /* interrupt from other source */
- return IRQ_HANDLED;
+ if (irq == dev->irq)
+ handled += pcmuio_handle_asic_interrupt(dev, 0);
+ if (irq == devpriv->irq2)
+ handled += pcmuio_handle_asic_interrupt(dev, 1);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
}
+/* chip->spinlock is already locked */
static int pcmuio_start_intr(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct pcmuio_subdev_private *subpriv = s->private;
+ struct pcmuio_private *devpriv = dev->private;
+ int asic = pcmuio_subdevice_to_asic(s);
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int bits = 0;
+ unsigned int pol_bits = 0;
+ int i;
- if (!subpriv->intr.continuous && subpriv->intr.stop_count == 0) {
+ if (!chip->continuous && chip->stop_count == 0) {
/* An empty acquisition! */
s->async->events |= COMEDI_CB_EOA;
- subpriv->intr.active = 0;
+ chip->active = 0;
return 1;
- } else {
- unsigned bits = 0, pol_bits = 0, n;
- int asic;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- asic = subpriv->intr.asic;
- if (asic < 0)
- return 1; /* not an interrupt
- subdev */
- subpriv->intr.enabled_mask = 0;
- subpriv->intr.active = 1;
- if (cmd->chanlist) {
- for (n = 0; n < cmd->chanlist_len; n++) {
- bits |= (1U << CR_CHAN(cmd->chanlist[n]));
- pol_bits |= (CR_AREF(cmd->chanlist[n])
- || CR_RANGE(cmd->
- chanlist[n]) ? 1U : 0U)
- << CR_CHAN(cmd->chanlist[n]);
- }
- }
- bits &= ((0x1 << s->n_chan) - 1);
- subpriv->intr.enabled_mask = bits;
+ }
- /* set pol and enab intrs for this subdev.. */
- pcmuio_write(dev, pol_bits, asic, PCMUIO_PAGE_POL, 0);
- pcmuio_write(dev, bits, asic, PCMUIO_PAGE_ENAB, 0);
+ chip->enabled_mask = 0;
+ chip->active = 1;
+ if (cmd->chanlist) {
+ for (i = 0; i < cmd->chanlist_len; i++) {
+ unsigned int chanspec = cmd->chanlist[i];
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int aref = CR_AREF(chanspec);
+
+ bits |= (1 << chan);
+ pol_bits |= ((aref || range) ? 1 : 0) << chan;
+ }
}
+ bits &= ((1 << s->n_chan) - 1);
+ chip->enabled_mask = bits;
+
+	/* set polarity and enable interrupts for this subdev */
+ pcmuio_write(dev, pol_bits, asic, PCMUIO_PAGE_POL, 0);
+ pcmuio_write(dev, bits, asic, PCMUIO_PAGE_ENAB, 0);
+
return 0;
}
static int pcmuio_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct pcmuio_subdev_private *subpriv = s->private;
+ struct pcmuio_private *devpriv = dev->private;
+ int asic = pcmuio_subdevice_to_asic(s);
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
unsigned long flags;
- spin_lock_irqsave(&subpriv->intr.spinlock, flags);
- if (subpriv->intr.active)
+ spin_lock_irqsave(&chip->spinlock, flags);
+ if (chip->active)
pcmuio_stop_intr(dev, s);
- spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
return 0;
}
@@ -465,19 +467,21 @@ static int
pcmuio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int trignum)
{
- struct pcmuio_subdev_private *subpriv = s->private;
+ struct pcmuio_private *devpriv = dev->private;
+ int asic = pcmuio_subdevice_to_asic(s);
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
unsigned long flags;
int event = 0;
if (trignum != 0)
return -EINVAL;
- spin_lock_irqsave(&subpriv->intr.spinlock, flags);
+ spin_lock_irqsave(&chip->spinlock, flags);
s->async->inttrig = NULL;
- if (subpriv->intr.active)
+ if (chip->active)
event = pcmuio_start_intr(dev, s);
- spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
if (event)
comedi_event(dev, s);
@@ -490,24 +494,26 @@ pcmuio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s,
*/
static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct pcmuio_subdev_private *subpriv = s->private;
+ struct pcmuio_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
+ int asic = pcmuio_subdevice_to_asic(s);
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
unsigned long flags;
int event = 0;
- spin_lock_irqsave(&subpriv->intr.spinlock, flags);
- subpriv->intr.active = 1;
+ spin_lock_irqsave(&chip->spinlock, flags);
+ chip->active = 1;
/* Set up end of acquisition. */
switch (cmd->stop_src) {
case TRIG_COUNT:
- subpriv->intr.continuous = 0;
- subpriv->intr.stop_count = cmd->stop_arg;
+ chip->continuous = 0;
+ chip->stop_count = cmd->stop_arg;
break;
default:
/* TRIG_NONE */
- subpriv->intr.continuous = 1;
- subpriv->intr.stop_count = 0;
+ chip->continuous = 1;
+ chip->stop_count = 0;
break;
}
@@ -521,7 +527,7 @@ static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
event = pcmuio_start_intr(dev, s);
break;
}
- spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
if (event)
comedi_event(dev, s);
@@ -589,13 +595,8 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
const struct pcmuio_board *board = comedi_board(dev);
struct comedi_subdevice *s;
struct pcmuio_private *devpriv;
- struct pcmuio_subdev_private *subpriv;
- int sdev_no, n_subdevs, asic;
- unsigned int irq[PCMUIO_MAX_ASICS];
int ret;
-
- irq[0] = it->options[1];
- irq[1] = it->options[2];
+ int i;
ret = comedi_request_region(dev, it->options[0],
board->num_asics * PCMUIO_ASIC_IOSIZE);
@@ -606,62 +607,60 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!devpriv)
return -ENOMEM;
- for (asic = 0; asic < PCMUIO_MAX_ASICS; ++asic)
- spin_lock_init(&devpriv->asics[asic].spinlock);
+ for (i = 0; i < PCMUIO_MAX_ASICS; ++i) {
+ struct pcmuio_asic *chip = &devpriv->asics[i];
- n_subdevs = board->num_asics * 2;
- devpriv->sprivs = kcalloc(n_subdevs, sizeof(*subpriv), GFP_KERNEL);
- if (!devpriv->sprivs)
- return -ENOMEM;
+ spin_lock_init(&chip->pagelock);
+ spin_lock_init(&chip->spinlock);
+ }
- ret = comedi_alloc_subdevices(dev, n_subdevs);
- if (ret)
- return ret;
+ pcmuio_reset(dev);
- for (sdev_no = 0; sdev_no < (int)dev->n_subdevices; ++sdev_no) {
- s = &dev->subdevices[sdev_no];
- subpriv = &devpriv->sprivs[sdev_no];
- s->private = subpriv;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->type = COMEDI_SUBD_DIO;
- s->insn_bits = pcmuio_dio_insn_bits;
- s->insn_config = pcmuio_dio_insn_config;
- s->n_chan = 24;
-
- /* subdevices 0 and 2 suppport interrupts */
- if ((sdev_no % 2) == 0) {
- /* setup the interrupt subdevice */
- subpriv->intr.asic = sdev_no / 2;
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->cancel = pcmuio_cancel;
- s->do_cmd = pcmuio_cmd;
- s->do_cmdtest = pcmuio_cmdtest;
- s->len_chanlist = s->n_chan;
- } else {
- subpriv->intr.asic = -1;
- s->len_chanlist = 1;
+ if (it->options[1]) {
+ /* request the irq for the 1st asic */
+ ret = request_irq(it->options[1], pcmuio_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
+ }
+
+ if (board->num_asics == 2) {
+ if (it->options[2] == dev->irq) {
+ /* the same irq (or none) is used by both asics */
+ devpriv->irq2 = it->options[2];
+ } else if (it->options[2]) {
+ /* request the irq for the 2nd asic */
+ ret = request_irq(it->options[2], pcmuio_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ devpriv->irq2 = it->options[2];
}
- spin_lock_init(&subpriv->intr.spinlock);
}
- pcmuio_reset(dev);
+ ret = comedi_alloc_subdevices(dev, board->num_asics * 2);
+ if (ret)
+ return ret;
- for (asic = 0; irq[0] && asic < PCMUIO_MAX_ASICS; ++asic) {
- if (irq[asic]
- && request_irq(irq[asic], pcmuio_interrupt,
- IRQF_SHARED, board->name, dev)) {
- int i;
- /* unroll the allocated irqs.. */
- for (i = asic - 1; i >= 0; --i) {
- free_irq(irq[i], dev);
- devpriv->asics[i].irq = irq[i] = 0;
- }
- irq[asic] = 0;
+ for (i = 0; i < dev->n_subdevices; ++i) {
+ s = &dev->subdevices[i];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 24;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcmuio_dio_insn_bits;
+ s->insn_config = pcmuio_dio_insn_config;
+
+		/* subdevices 0 and 2 can support interrupts */
+ if ((i == 0 && dev->irq) || (i == 2 && devpriv->irq2)) {
+ /* setup the interrupt subdevice */
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->cancel = pcmuio_cancel;
+ s->do_cmd = pcmuio_cmd;
+ s->do_cmdtest = pcmuio_cmdtest;
}
- devpriv->asics[asic].irq = irq[asic];
}
return 0;
@@ -670,14 +669,13 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static void pcmuio_detach(struct comedi_device *dev)
{
struct pcmuio_private *devpriv = dev->private;
- int i;
if (devpriv) {
- for (i = 0; i < PCMUIO_MAX_ASICS; ++i) {
- if (devpriv->asics[i].irq)
- free_irq(devpriv->asics[i].irq, dev);
- }
- kfree(devpriv->sprivs);
+ pcmuio_reset(dev);
+
+		/* free the 2nd irq if used; the core will free the 1st one */
+ if (devpriv->irq2 && devpriv->irq2 != dev->irq)
+ free_irq(devpriv->irq2, dev);
}
comedi_legacy_detach(dev);
}
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
index 0d254a1..55e3c2e 100644
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ b/drivers/staging/comedi/drivers/plx9080.h
@@ -402,12 +402,9 @@ static inline int plx9080_abort_dma(void __iomem *iobase, unsigned int channel)
udelay(1);
dma_status = readb(dma_cs_addr);
}
- if (i == timeout) {
- printk
- ("plx9080: cancel() timed out waiting for dma %i done clear\n",
- channel);
+ if (i == timeout)
return -ETIMEDOUT;
- }
+
/* disable and abort channel */
writeb(PLX_DMA_ABORT_BIT, dma_cs_addr);
/* wait for dma done bit */
@@ -416,12 +413,8 @@ static inline int plx9080_abort_dma(void __iomem *iobase, unsigned int channel)
udelay(1);
dma_status = readb(dma_cs_addr);
}
- if (i == timeout) {
- printk
- ("plx9080: cancel() timed out waiting for dma %i done set\n",
- channel);
+ if (i == timeout)
return -ETIMEDOUT;
- }
return 0;
}
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 44c8712..0f026af 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -495,8 +495,6 @@ static unsigned short rtd_convert_chan_gain(struct comedi_device *dev,
case AREF_OTHER: /* ??? */
break;
}
- /*printk ("chan=%d r=%d a=%d -> 0x%x\n",
- chan, range, aref, r); */
return r;
}
@@ -606,7 +604,6 @@ static int rtd_ai_rinsn(struct comedi_device *dev,
/* read data */
d = readw(devpriv->las1 + LAS1_ADC_FIFO);
- /*printk ("rtd520: Got 0x%x after %d usec\n", d, ii+1); */
d = d >> 3; /* low 3 bits are marker lines */
if (test_bit(0, devpriv->chan_is_bipolar))
/* convert to comedi unsigned data */
@@ -692,7 +689,7 @@ static int ai_read_dregs(struct comedi_device *dev, struct comedi_subdevice *s)
static irqreturn_t rtd_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
struct rtd_private *devpriv = dev->private;
u32 overrun;
u16 status;
@@ -1427,7 +1424,7 @@ static int rtd520_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &rtd520_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(rtd520_pci_table) = {
+static const struct pci_device_id rtd520_pci_table[] = {
{ PCI_VDEVICE(RTD, 0x7520), BOARD_DM7520 },
{ PCI_VDEVICE(RTD, 0x4520), BOARD_PCI4520 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index b486099..19da1db 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -199,7 +199,7 @@ static bool s626_mc_test(struct comedi_device *dev,
static const struct comedi_lrange s626_range_table = {
2, {
BIP_RANGE(5),
- BIP_RANGE(10),
+ BIP_RANGE(10)
}
};
@@ -1614,12 +1614,13 @@ static irqreturn_t s626_irq_handler(int irq, void *d)
static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
{
struct s626_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_cmd *cmd = &s->async->cmd;
uint32_t *rps;
uint32_t jmp_adrs;
uint16_t i;
uint16_t n;
uint32_t local_ppl;
- struct comedi_cmd *cmd = &dev->subdevices->async->cmd;
/* Stop RPS program in case it is currently running */
s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
@@ -2079,12 +2080,6 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (cmd == NULL)
return -EINVAL;
- if (dev->irq == 0) {
- comedi_error(dev,
- "s626_ai_cmd: cannot run command without an irq");
- return -EIO;
- }
-
s626_ai_load_polllist(ppl, cmd);
devpriv->ai_cmd_running = 1;
devpriv->ai_convert_count = 0;
@@ -2645,6 +2640,7 @@ static void s626_initialize(struct comedi_device *dev)
* a defined state after a PCI reset.
*/
{
+ struct comedi_subdevice *s = dev->read_subdev;
uint8_t poll_list;
uint16_t adc_data;
uint16_t start_val;
@@ -2656,7 +2652,7 @@ static void s626_initialize(struct comedi_device *dev)
s626_reset_adc(dev, &poll_list);
/* Get initial ADC value */
- s626_ai_rinsn(dev, dev->subdevices, NULL, data);
+ s626_ai_rinsn(dev, s, NULL, data);
start_val = data[0];
/*
@@ -2670,7 +2666,7 @@ static void s626_initialize(struct comedi_device *dev)
* being unusually quiet or at the rail.
*/
for (index = 0; index < 500; index++) {
- s626_ai_rinsn(dev, dev->subdevices, NULL, data);
+ s626_ai_rinsn(dev, s, NULL, data);
adc_data = data[0];
if (adc_data != start_val)
break;
@@ -2833,7 +2829,7 @@ static int s626_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[0];
/* analog input subdevice */
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_DIFF;
s->n_chan = S626_ADC_CHANNELS;
s->maxdata = 0x3fff;
s->range_table = &s626_range_table;
@@ -2841,6 +2837,7 @@ static int s626_auto_attach(struct comedi_device *dev,
s->insn_read = s626_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
s->do_cmd = s626_ai_cmd;
s->do_cmdtest = s626_ai_cmdtest;
s->cancel = s626_ai_cancel;
@@ -2965,7 +2962,7 @@ static int s626_pci_probe(struct pci_dev *dev,
* also subvendor:subdevice ids, because otherwise it will conflict with
* Philips SAA7146 media/dvb based cards.
*/
-static DEFINE_PCI_DEVICE_TABLE(s626_pci_table) = {
+static const struct pci_device_id s626_pci_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA7146,
0x6000, 0x0272) },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/skel.c b/drivers/staging/comedi/drivers/skel.c
index daee2f4..77e2059 100644
--- a/drivers/staging/comedi/drivers/skel.c
+++ b/drivers/staging/comedi/drivers/skel.c
@@ -698,7 +698,7 @@ static int skel_pci_probe(struct pci_dev *dev,
* This is used by modprobe to translate PCI IDs to drivers.
* Should only be used for PCI and ISA-PnP devices
*/
-static DEFINE_PCI_DEVICE_TABLE(skel_pci_table) = {
+static const struct pci_device_id skel_pci_table[] = {
{ PCI_VDEVICE(SKEL, 0x0100), BOARD_SKEL100 },
{ PCI_VDEVICE(SKEL, 0x0200), BOARD_SKEL200 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/unioxx5.c b/drivers/staging/comedi/drivers/unioxx5.c
index 93eec2f..adf7cb7 100644
--- a/drivers/staging/comedi/drivers/unioxx5.c
+++ b/drivers/staging/comedi/drivers/unioxx5.c
@@ -38,7 +38,6 @@ Devices: [Fastwel] UNIOxx-5 (unioxx5),
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/delay.h>
@@ -91,12 +90,14 @@ static int __unioxx5_define_chan_offset(int chan_num)
}
#if 0 /* not used? */
-static void __unioxx5_digital_config(struct unioxx5_subd_priv *usp, int mode)
+static void __unioxx5_digital_config(struct comedi_subdevice *s, int mode)
{
+ struct unioxx5_subd_priv *usp = s->private;
+ struct device *csdev = s->device->class_dev;
int i, mask;
mask = (mode == ALL_2_OUTPUT) ? 0xFF : 0x00;
- printk("COMEDI: mode = %d\n", mask);
+ dev_dbg(csdev, "mode = %d\n", mask);
outb(1, usp->usp_iobase + 0);
@@ -135,15 +136,18 @@ static void __unioxx5_analog_config(struct unioxx5_subd_priv *usp, int channel)
usp->usp_prev_cn_val[channel_offset - 1] = conf;
}
-static int __unioxx5_digital_read(struct unioxx5_subd_priv *usp,
+static int __unioxx5_digital_read(struct comedi_subdevice *s,
unsigned int *data, int channel, int minor)
{
+ struct unioxx5_subd_priv *usp = s->private;
+ struct device *csdev = s->device->class_dev;
int channel_offset, mask = 1 << (channel & 0x07);
channel_offset = __unioxx5_define_chan_offset(channel);
if (channel_offset < 0) {
- pr_err("comedi%d: undefined channel %d. channel range is 0 .. 23\n",
- minor, channel);
+ dev_err(csdev,
+ "undefined channel %d. channel range is 0 .. 23\n",
+ channel);
return 0;
}
@@ -157,9 +161,11 @@ static int __unioxx5_digital_read(struct unioxx5_subd_priv *usp,
return 1;
}
-static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp,
+static int __unioxx5_analog_read(struct comedi_subdevice *s,
unsigned int *data, int channel, int minor)
{
+ struct unioxx5_subd_priv *usp = s->private;
+ struct device *csdev = s->device->class_dev;
int module_no, read_ch;
char control;
@@ -168,8 +174,9 @@ static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp,
/* defining if given module can work on input */
if (usp->usp_module_type[module_no] & MODULE_OUTPUT_MASK) {
- pr_err("comedi%d: module in position %d with id 0x%02x is for output only",
- minor, module_no, usp->usp_module_type[module_no]);
+ dev_err(csdev,
+ "module in position %d with id 0x%02x is for output only",
+ module_no, usp->usp_module_type[module_no]);
return 0;
}
@@ -185,7 +192,7 @@ static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp,
/* if four bytes readding error occurs - return 0(false) */
if ((control & Rx4CA_ERR_MASK)) {
- printk("COMEDI: 4 bytes error\n");
+ dev_err(csdev, "4 bytes error\n");
return 0;
}
@@ -197,16 +204,19 @@ static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp,
return 1;
}
-static int __unioxx5_digital_write(struct unioxx5_subd_priv *usp,
+static int __unioxx5_digital_write(struct comedi_subdevice *s,
unsigned int *data, int channel, int minor)
{
+ struct unioxx5_subd_priv *usp = s->private;
+ struct device *csdev = s->device->class_dev;
int channel_offset, val;
int mask = 1 << (channel & 0x07);
channel_offset = __unioxx5_define_chan_offset(channel);
if (channel_offset < 0) {
- pr_err("comedi%d: undefined channel %d. channel range is 0 .. 23\n",
- minor, channel);
+ dev_err(csdev,
+ "undefined channel %d. channel range is 0 .. 23\n",
+ channel);
return 0;
}
@@ -225,9 +235,11 @@ static int __unioxx5_digital_write(struct unioxx5_subd_priv *usp,
return 1;
}
-static int __unioxx5_analog_write(struct unioxx5_subd_priv *usp,
+static int __unioxx5_analog_write(struct comedi_subdevice *s,
unsigned int *data, int channel, int minor)
{
+ struct unioxx5_subd_priv *usp = s->private;
+ struct device *csdev = s->device->class_dev;
int module, i;
module = channel / 2; /* definig module number(0 .. 11) */
@@ -235,8 +247,9 @@ static int __unioxx5_analog_write(struct unioxx5_subd_priv *usp,
/* defining if given module can work on output */
if (!(usp->usp_module_type[module] & MODULE_OUTPUT_MASK)) {
- pr_err("comedi%d: module in position %d with id 0x%0x is for input only!\n",
- minor, module, usp->usp_module_type[module]);
+ dev_err(csdev,
+ "module in position %d with id 0x%0x is for input only!\n",
+ module, usp->usp_module_type[module]);
return 0;
}
@@ -273,10 +286,10 @@ static int unioxx5_subdev_read(struct comedi_device *dev,
type = usp->usp_module_type[channel / 2];
if (type == MODULE_DIGITAL) {
- if (!__unioxx5_digital_read(usp, data, channel, dev->minor))
+ if (!__unioxx5_digital_read(subdev, data, channel, dev->minor))
return -1;
} else {
- if (!__unioxx5_analog_read(usp, data, channel, dev->minor))
+ if (!__unioxx5_analog_read(subdev, data, channel, dev->minor))
return -1;
}
@@ -295,10 +308,10 @@ static int unioxx5_subdev_write(struct comedi_device *dev,
type = usp->usp_module_type[channel / 2];
if (type == MODULE_DIGITAL) {
- if (!__unioxx5_digital_write(usp, data, channel, dev->minor))
+ if (!__unioxx5_digital_write(subdev, data, channel, dev->minor))
return -1;
} else {
- if (!__unioxx5_analog_write(usp, data, channel, dev->minor))
+ if (!__unioxx5_analog_write(subdev, data, channel, dev->minor))
return -1;
}
@@ -318,16 +331,15 @@ static int unioxx5_insn_config(struct comedi_device *dev,
if (type != MODULE_DIGITAL) {
dev_err(dev->class_dev,
- "comedi%d: channel configuration accessible only for digital modules\n",
- dev->minor);
+ "channel configuration accessible only for digital modules\n");
return -1;
}
channel_offset = __unioxx5_define_chan_offset(channel);
if (channel_offset < 0) {
dev_err(dev->class_dev,
- "comedi%d: undefined channel %d. channel range is 0 .. 23\n",
- dev->minor, channel);
+ "undefined channel %d. channel range is 0 .. 23\n",
+ channel);
return -1;
}
@@ -342,8 +354,7 @@ static int unioxx5_insn_config(struct comedi_device *dev,
flags |= mask;
break;
default:
- dev_err(dev->class_dev,
- "comedi%d: unknown flag\n", dev->minor);
+ dev_err(dev->class_dev, "unknown flag\n");
return -1;
}
@@ -435,9 +446,10 @@ static int unioxx5_attach(struct comedi_device *dev,
dev->iobase = iobase;
iobase += UNIOXX5_SUBDEV_BASE;
+ n_subd = 0;
- /* defining number of subdevices and getting they types (it must be 'g01') */
- for (i = n_subd = 0, ba = iobase; i < 4; i++, ba += UNIOXX5_SUBDEV_ODDS) {
+ /* getting number of subdevices with types 'g01' */
+ for (i = 0, ba = iobase; i < 4; i++, ba += UNIOXX5_SUBDEV_ODDS) {
id = inb(ba + 0xE);
num = inb(ba + 0xF);
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index da1d501..71db683 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -80,7 +80,6 @@ sampling rate. If you sample two channels you get 4kHz and so on.
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/usb.h>
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 9707dd1..5f85c55 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -37,7 +37,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/usb.h>
@@ -138,7 +137,10 @@
* comedi constants
*/
static const struct comedi_lrange range_usbduxfast_ai_range = {
- 2, {BIP_RANGE(0.75), BIP_RANGE(0.5)}
+ 2, {
+ BIP_RANGE(0.75),
+ BIP_RANGE(0.5)
+ }
};
/*
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index a5363de..3beeb12 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -43,7 +43,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/usb.h>
@@ -1656,11 +1655,13 @@ static int usbduxsigma_auto_attach(struct comedi_device *dev,
}
offset = usbduxsigma_getstatusinfo(dev, 0);
- if (offset < 0)
+ if (offset < 0) {
dev_err(dev->class_dev,
- "Communication to USBDUXSIGMA failed! Check firmware and cabling\n");
+ "Communication to USBDUXSIGMA failed! Check firmware and cabling.\n");
+ return offset;
+ }
- dev_info(dev->class_dev, "attached, ADC_zero = %x\n", offset);
+ dev_info(dev->class_dev, "ADC_zero = %x\n", offset);
return 0;
}
diff --git a/drivers/staging/comedi/kcomedilib/Makefile b/drivers/staging/comedi/kcomedilib/Makefile
index 18ee99b..3aff8ed 100644
--- a/drivers/staging/comedi/kcomedilib/Makefile
+++ b/drivers/staging/comedi/kcomedilib/Makefile
@@ -1,3 +1,5 @@
+ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
+
obj-$(CONFIG_COMEDI_KCOMEDILIB) += kcomedilib.o
kcomedilib-objs := kcomedilib_main.o
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index cd60677..7dc5a18 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -35,7 +35,7 @@ MODULE_LICENSE("GPL");
struct comedi_device *comedi_open(const char *filename)
{
- struct comedi_device *dev;
+ struct comedi_device *dev, *retval = NULL;
unsigned int minor;
if (strncmp(filename, "/dev/comedi", 11) != 0)
@@ -46,24 +46,27 @@ struct comedi_device *comedi_open(const char *filename)
if (minor >= COMEDI_NUM_BOARD_MINORS)
return NULL;
- dev = comedi_dev_from_minor(minor);
-
- if (!dev || !dev->attached)
+ dev = comedi_dev_get_from_minor(minor);
+ if (!dev)
return NULL;
- if (!try_module_get(dev->driver->module))
- return NULL;
+ down_read(&dev->attach_lock);
+ if (dev->attached)
+ retval = dev;
+ else
+ retval = NULL;
+ up_read(&dev->attach_lock);
+
+ if (retval == NULL)
+ comedi_dev_put(dev);
- return dev;
+ return retval;
}
EXPORT_SYMBOL_GPL(comedi_open);
-int comedi_close(struct comedi_device *d)
+int comedi_close(struct comedi_device *dev)
{
- struct comedi_device *dev = (struct comedi_device *)d;
-
- module_put(dev->driver->module);
-
+ comedi_dev_put(dev);
return 0;
}
EXPORT_SYMBOL_GPL(comedi_close);
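comedi_open() now pairs a counted device reference (comedi_dev_get_from_minor()) with a check of the attached flag under the attach_lock read lock, and drops the reference again if the check fails; comedi_close() simply puts the reference. A self-contained model of that acquire, validate, release-on-failure pattern (plain C with pthreads, not kernel code):

#include <pthread.h>
#include <stdio.h>

struct device {
	int refcount;				/* stands in for the kernel refcount */
	int attached;
	pthread_rwlock_t attach_lock;
};

static struct device dev0 = { 0, 1, PTHREAD_RWLOCK_INITIALIZER };

static struct device *dev_get(struct device *dev)
{
	dev->refcount++;
	return dev;
}

static void dev_put(struct device *dev)
{
	dev->refcount--;
}

/* take a reference, validate under the lock, undo the reference on failure */
static struct device *device_open(struct device *candidate)
{
	struct device *dev = dev_get(candidate);
	struct device *retval = NULL;

	pthread_rwlock_rdlock(&dev->attach_lock);
	if (dev->attached)
		retval = dev;
	pthread_rwlock_unlock(&dev->attach_lock);

	if (!retval)
		dev_put(dev);
	return retval;
}

int main(void)
{
	struct device *dev = device_open(&dev0);

	printf("open %s, refcount %d\n", dev ? "ok" : "failed", dev0.refcount);
	if (dev)
		dev_put(dev);			/* the comedi_close() step */
	return 0;
}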
@@ -73,7 +76,14 @@ static int comedi_do_insn(struct comedi_device *dev,
unsigned int *data)
{
struct comedi_subdevice *s;
- int ret = 0;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ if (!dev->attached) {
+ ret = -EINVAL;
+ goto error;
+ }
/* a subdevice instruction */
if (insn->subdev >= dev->n_subdevices) {
@@ -120,6 +130,7 @@ static int comedi_do_insn(struct comedi_device *dev,
s->busy = NULL;
error:
+ mutex_unlock(&dev->mutex);
return ret;
}
@@ -169,9 +180,6 @@ int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
unsigned int shift;
int ret;
- if (subdev >= dev->n_subdevices)
- return -EINVAL;
-
base_channel = CR_CHAN(base_channel);
n_chan = comedi_get_n_channels(dev, subdev);
if (base_channel >= n_chan)
@@ -211,23 +219,33 @@ int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
unsigned int subd)
{
struct comedi_subdevice *s;
-
- if (subd > dev->n_subdevices)
- return -ENODEV;
-
- for (; subd < dev->n_subdevices; subd++) {
- s = &dev->subdevices[subd];
- if (s->type == type)
- return subd;
- }
- return -1;
+ int ret = -ENODEV;
+
+ down_read(&dev->attach_lock);
+ if (dev->attached)
+ for (; subd < dev->n_subdevices; subd++) {
+ s = &dev->subdevices[subd];
+ if (s->type == type) {
+ ret = subd;
+ break;
+ }
+ }
+ up_read(&dev->attach_lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(comedi_find_subdevice_by_type);
int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice)
{
- struct comedi_subdevice *s = &dev->subdevices[subdevice];
+ int n;
+
+ down_read(&dev->attach_lock);
+ if (!dev->attached || subdevice >= dev->n_subdevices)
+ n = 0;
+ else
+ n = dev->subdevices[subdevice].n_chan;
+ up_read(&dev->attach_lock);
- return s->n_chan;
+ return n;
}
EXPORT_SYMBOL_GPL(comedi_get_n_channels);
diff --git a/drivers/staging/comedi/proc.c b/drivers/staging/comedi/proc.c
index ade0003..da6bc58 100644
--- a/drivers/staging/comedi/proc.c
+++ b/drivers/staging/comedi/proc.c
@@ -41,16 +41,20 @@ static int comedi_read(struct seq_file *m, void *v)
"driver_name, board_name, n_subdevices");
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
- struct comedi_device *dev = comedi_dev_from_minor(i);
+ struct comedi_device *dev = comedi_dev_get_from_minor(i);
+
if (!dev)
continue;
+ down_read(&dev->attach_lock);
if (dev->attached) {
devices_q = 1;
seq_printf(m, "%2d: %-20s %-20s %4d\n",
i, dev->driver->driver_name,
dev->board_name, dev->n_subdevices);
}
+ up_read(&dev->attach_lock);
+ comedi_dev_put(dev);
}
if (!devices_q)
seq_puts(m, "no devices\n");
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
index 8fde554..46b3da6 100644
--- a/drivers/staging/comedi/range.c
+++ b/drivers/staging/comedi/range.c
@@ -83,8 +83,10 @@ int do_rangeinfo_ioctl(struct comedi_device *dev,
}
if (RANGE_LENGTH(it.range_type) != lr->length) {
- DPRINTK("wrong length %d should be %d (0x%08x)\n",
- RANGE_LENGTH(it.range_type), lr->length, it.range_type);
+ dev_dbg(dev->class_dev,
+ "wrong length %d should be %d (0x%08x)\n",
+ RANGE_LENGTH(it.range_type),
+ lr->length, it.range_type);
return -EINVAL;
}
@@ -123,7 +125,8 @@ static int aref_invalid(struct comedi_subdevice *s, unsigned int chanspec)
default:
break;
}
- DPRINTK("subdevice does not support aref %i", aref);
+ dev_dbg(s->device->class_dev, "subdevice does not support aref %i",
+ aref);
return 1;
}
diff --git a/drivers/staging/crystalhd/bc_dts_glob_lnx.h b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
index 981708f..92b0cff 100644
--- a/drivers/staging/crystalhd/bc_dts_glob_lnx.h
+++ b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
@@ -229,7 +229,7 @@ enum BC_DRV_CMD {
DRV_CMD_REG_RD, /* Read Device Register */
DRV_CMD_REG_WR, /* Write Device Register */
DRV_CMD_FPGA_RD, /* Read FPGA Register */
- DRV_CMD_FPGA_WR, /* Wrtie FPGA Reister */
+ DRV_CMD_FPGA_WR, /* Write FPGA Register */
DRV_CMD_MEM_RD, /* Read Device Memory */
DRV_CMD_MEM_WR, /* Write Device Memory */
DRV_CMD_RD_PCI_CFG, /* Read PCI Config Space */
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.c b/drivers/staging/crystalhd/crystalhd_cmds.c
index 07a2f24..642f438 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.c
+++ b/drivers/staging/crystalhd/crystalhd_cmds.c
@@ -798,7 +798,7 @@ static const struct crystalhd_cmd_tbl g_crystalhd_cproc_tbl[] = {
*
* Current gstreamer frame work does not provide any power management
* related notification to user mode decoder plug-in. As a work-around
- * we pass on the power mangement notification to our plug-in by completing
+ * we pass on the power management notification to our plug-in by completing
* all outstanding requests with BC_STS_IO_USER_ABORT return code.
*/
enum BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx,
@@ -1059,7 +1059,7 @@ bool crystalhd_cmd_interrupt(struct crystalhd_cmd *ctx)
{
if (!ctx) {
BCMLOG_ERR("Invalid arg..\n");
- return 0;
+ return false;
}
return crystalhd_hw_interrupt(ctx->adp, &ctx->hw_ctx);
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.h b/drivers/staging/crystalhd/crystalhd_cmds.h
index 377cd9d..b5bf59d 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.h
+++ b/drivers/staging/crystalhd/crystalhd_cmds.h
@@ -29,7 +29,7 @@
/*
* NOTE:: This is the main interface file between the Linux layer
- * and the harware layer. This file will use the definitions
+ * and the hardware layer. This file will use the definitions
* from _dts_glob and dts_defs etc.. which are defined for
* windows.
*/
diff --git a/drivers/staging/crystalhd/crystalhd_fw_if.h b/drivers/staging/crystalhd/crystalhd_fw_if.h
index 4b363a5..05615e2 100644
--- a/drivers/staging/crystalhd/crystalhd_fw_if.h
+++ b/drivers/staging/crystalhd/crystalhd_fw_if.h
@@ -115,7 +115,7 @@ struct fgt_sei {
unsigned char model_id; /* Model id. */
/* +unused SE based on Thomson spec */
- unsigned char color_desc_flag; /* Separate color descrition flag. */
+ unsigned char color_desc_flag; /* Separate color description flag. */
unsigned char bit_depth_luma; /* Bit depth luma minus 8. */
unsigned char bit_depth_chroma; /* Bit depth chroma minus 8. */
unsigned char full_range_flag; /* Full range flag. */
diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
index 043bd49..8d0680d 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.c
+++ b/drivers/staging/crystalhd/crystalhd_hw.c
@@ -398,7 +398,7 @@ static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
* Call back from TX - IOQ deletion.
*
* This routine will release the TX DMA rings allocated
- * druing setup_dma rings interface.
+ * during setup_dma rings interface.
*
* Memory is allocated per DMA ring basis. This is just
* a place holder to be able to create the dio queues.
diff --git a/drivers/staging/crystalhd/crystalhd_hw.h b/drivers/staging/crystalhd/crystalhd_hw.h
index 3780944..d5cb68d 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.h
+++ b/drivers/staging/crystalhd/crystalhd_hw.h
@@ -46,7 +46,7 @@
#define Cpu2HstMbx1 0x00100F04
#define MbxStat1 0x00100F08
#define Stream2Host_Intr_Sts 0x00100F24
-#define C011_RET_SUCCESS 0x0 /* Reutrn status of firmware command. */
+#define C011_RET_SUCCESS 0x0 /* Return status of firmware command. */
/* TS input status register */
#define TS_StreamAFIFOStatus 0x0010044C
@@ -141,7 +141,7 @@ union link_misc_perst_deco_ctrl {
uint32_t reserved0:3; /* Reserved.No Effect*/
uint32_t stop_bcm_7412_clk:1; /* 1 ->Stops branch of
27MHz clk used to clk BCM7412*/
- uint32_t reserved1:27; /* Reseved. No Effect*/
+ uint32_t reserved1:27; /* Reserved. No Effect*/
};
uint32_t whole_reg;
@@ -176,7 +176,7 @@ union link_misc_perst_decoder_ctrl {
uint32_t res0:3; /* Reserved.No Effect*/
uint32_t stop_7412_clk:1; /* 1 ->Stops branch of 27MHz
clk used to clk BCM7412*/
- uint32_t res1:27; /* Reseved. No Effect */
+ uint32_t res1:27; /* Reserved. No Effect */
};
uint32_t whole_reg;
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index 190b9b9..99eefd0 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -703,7 +703,7 @@ static int chd_dec_pci_resume(struct pci_dev *pdev)
}
#endif
-static DEFINE_PCI_DEVICE_TABLE(chd_dec_pci_id_table) = {
+static const struct pci_device_id chd_dec_pci_id_table[] = {
{ PCI_VDEVICE(BROADCOM, 0x1612), 8 },
{ 0, },
};
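
DEFINE_PCI_DEVICE_TABLE() was being phased out of the kernel at this point; writing the table as a plain const struct pci_device_id array says the same thing without the macro. A sketch of how such a table is normally declared and hooked up (the MODULE_DEVICE_TABLE line and the driver struct are the usual companions and are shown only for context; the names marked "assumed" are not taken from this hunk):

	static const struct pci_device_id chd_dec_pci_id_table[] = {
		{ PCI_VDEVICE(BROADCOM, 0x1612), 8 },	/* .driver_data = 8 */
		{ 0, },					/* terminator */
	};
	MODULE_DEVICE_TABLE(pci, chd_dec_pci_id_table);

	static struct pci_driver chd_dec_pci_driver = {
		.name		= "crystalhd",		/* assumed */
		.id_table	= chd_dec_pci_id_table,
		.probe		= chd_dec_pci_probe,	/* assumed */
		.remove		= chd_dec_pci_remove,	/* assumed */
	};
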
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.h b/drivers/staging/crystalhd/crystalhd_lnx.h
index bac572a..816e1cd 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.h
+++ b/drivers/staging/crystalhd/crystalhd_lnx.h
@@ -37,7 +37,6 @@
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
@@ -53,7 +52,7 @@
/* OS specific PCI information structure and adapter information. */
struct crystalhd_adp {
- /* Hardware borad/PCI specifics */
+ /* Hardware board/PCI specifics */
char name[32];
struct pci_dev *pdev;
diff --git a/drivers/staging/crystalhd/crystalhd_misc.c b/drivers/staging/crystalhd/crystalhd_misc.c
index 51f6980..c3d0244 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.c
+++ b/drivers/staging/crystalhd/crystalhd_misc.c
@@ -389,7 +389,7 @@ void *bc_kern_dma_alloc(struct crystalhd_adp *adp, uint32_t sz,
void *temp = NULL;
if (!adp || !sz || !phy_addr) {
- BCMLOG_ERR("Invalide Arg..\n");
+ BCMLOG_ERR("Invalid Arg..\n");
return temp;
}
@@ -415,7 +415,7 @@ void bc_kern_dma_free(struct crystalhd_adp *adp, uint32_t sz, void *ka,
dma_addr_t phy_addr)
{
if (!adp || !ka || !sz || !phy_addr) {
- BCMLOG_ERR("Invalide Arg..\n");
+ BCMLOG_ERR("Invalid Arg..\n");
return;
}
diff --git a/drivers/staging/crystalhd/crystalhd_misc.h b/drivers/staging/crystalhd/crystalhd_misc.h
index aa736c8..77ab72a 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.h
+++ b/drivers/staging/crystalhd/crystalhd_misc.h
@@ -206,7 +206,7 @@ extern void crystalhd_show_buffer(uint32_t off, uint8_t *buff,
enum _chd_log_levels {
BCMLOG_ERROR = 0x80000000, /* Don't disable this option */
BCMLOG_DATA = 0x40000000, /* Data, enable by default */
- BCMLOG_SPINLOCK = 0x20000000, /* Spcial case for Spin locks*/
+ BCMLOG_SPINLOCK = 0x20000000, /* Special case for Spin locks*/
/* Following are allowed only in debug mode */
BCMLOG_INFO = 0x00000001, /* Generic informational */
diff --git a/drivers/staging/cxt1e1/comet.c b/drivers/staging/cxt1e1/comet.c
index 46a0d92..c4c8c0f 100644
--- a/drivers/staging/cxt1e1/comet.c
+++ b/drivers/staging/cxt1e1/comet.c
@@ -28,9 +28,9 @@ extern int cxt1e1_log_level;
#define COMET_NUM_UNITS 5 /* Number of points per entry in table */
/* forward references */
-static void SetPwrLevel(comet_t *comet);
-static void WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table);
-static void WrtXmtWaveformTbl(ci_t *ci, comet_t *comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]);
+static void SetPwrLevel(struct s_comet_reg *comet);
+static void WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet, u_int32_t *table);
+static void WrtXmtWaveformTbl(ci_t *ci, struct s_comet_reg *comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]);
void *TWV_table[12] = {
@@ -58,7 +58,7 @@ lbo_tbl_lkup(int t1, int lbo) {
return lbo - 1;
}
-void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
+void init_comet(void *ci, struct s_comet_reg *comet, u_int32_t port_mode, int clockmaster,
u_int8_t moreParams)
{
u_int8_t isT1mode;
@@ -159,8 +159,7 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
/* 60: t1 ALMI cfg */
/* Configure Line Coding */
- switch (port_mode)
- {
+ switch (port_mode) {
/* 1 - T1 B8ZS */
case CFG_FRAME_SF:
pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0);
@@ -286,8 +285,7 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
/* 0x30: "BRIF cfg"; 0x20 is 'CMODE', 0x03 is (bit) rate */
/* note "rate bits can only be set once after reset" */
- if (clockmaster)
- {
+ if (clockmaster) {
/* CMODE == clockMode, 0=clock master (so all 3 others should be slave) */
/* rate = 1.544 Mb/s */
if (isT1mode)
@@ -302,16 +300,17 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
/* Master Mode i.e.FPMODE=0 (@0x20) */
pci_write_32((u_int32_t *) &comet->brif_fpcfg, 0x00);
- if ((moreParams & CFG_CLK_PORT_MASK) == CFG_CLK_PORT_INTERNAL)
- {
+ if ((moreParams & CFG_CLK_PORT_MASK) == CFG_CLK_PORT_INTERNAL) {
if (cxt1e1_log_level >= LOG_SBEBUG12)
- pr_info(">> %s: clockmaster internal clock\n", __func__);
+ pr_info(">> %s: clockmaster internal clock\n",
+ __func__);
/* internal oscillator */
pci_write_32((u_int32_t *) &comet->tx_time, 0x0d);
} else {
/* external clock source */
if (cxt1e1_log_level >= LOG_SBEBUG12)
- pr_info(">> %s: clockmaster external clock\n", __func__);
+ pr_info(">> %s: clockmaster external clock\n",
+ __func__);
/* loop timing(external) */
pci_write_32((u_int32_t *) &comet->tx_time, 0x09);
}
@@ -399,7 +398,7 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
** Returns: Nothing
*/
static void
-WrtXmtWaveform(ci_t *ci, comet_t *comet, u_int32_t sample, u_int32_t unit, u_int8_t data)
+WrtXmtWaveform(ci_t *ci, struct s_comet_reg *comet, u_int32_t sample, u_int32_t unit, u_int8_t data)
{
u_int8_t WaveformAddr;
@@ -417,19 +416,20 @@ WrtXmtWaveform(ci_t *ci, comet_t *comet, u_int32_t sample, u_int32_t unit, u_int
** Returns: Nothing
*/
static void
-WrtXmtWaveformTbl(ci_t *ci, comet_t *comet,
+WrtXmtWaveformTbl(ci_t *ci, struct s_comet_reg *comet,
u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS])
{
u_int32_t sample, unit;
- for (sample = 0; sample < COMET_NUM_SAMPLES; sample++)
- {
+ for (sample = 0; sample < COMET_NUM_SAMPLES; sample++) {
for (unit = 0; unit < COMET_NUM_UNITS; unit++)
- WrtXmtWaveform(ci, comet, sample, unit, table[sample][unit]);
+ WrtXmtWaveform(ci, comet, sample, unit,
+ table[sample][unit]);
}
/* Enable transmitter and set output amplitude */
- pci_write_32((u_int32_t *) &comet->xlpg_cfg, table[COMET_NUM_SAMPLES][0]);
+ pci_write_32((u_int32_t *) &comet->xlpg_cfg,
+ table[COMET_NUM_SAMPLES][0]);
}
@@ -444,7 +444,7 @@ WrtXmtWaveformTbl(ci_t *ci, comet_t *comet,
*/
static void
-WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
+WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet, u_int32_t *table)
{
u_int32_t ramaddr;
volatile u_int32_t value;
@@ -457,7 +457,8 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
/* for write order preservation when Optimizing driver */
pci_flush_write(ci);
/* write the addr, initiate a read */
- pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr, (u_int8_t) ramaddr);
+ pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr,
+ (u_int8_t) ramaddr);
/* for write order preservation when Optimizing driver */
pci_flush_write(ci);
/*
@@ -470,9 +471,12 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
}
value = *table++;
- pci_write_32((u_int32_t *) &comet->rlps_idata3, (u_int8_t) (value >> 24));
- pci_write_32((u_int32_t *) &comet->rlps_idata2, (u_int8_t) (value >> 16));
- pci_write_32((u_int32_t *) &comet->rlps_idata1, (u_int8_t) (value >> 8));
+ pci_write_32((u_int32_t *) &comet->rlps_idata3,
+ (u_int8_t) (value >> 24));
+ pci_write_32((u_int32_t *) &comet->rlps_idata2,
+ (u_int8_t) (value >> 16));
+ pci_write_32((u_int32_t *) &comet->rlps_idata1,
+ (u_int8_t) (value >> 8));
pci_write_32((u_int32_t *) &comet->rlps_idata0, (u_int8_t) value);
/* for write order preservation when Optimizing driver */
pci_flush_write(ci);
@@ -484,7 +488,8 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
/* for write order preservation when optimizing driver */
pci_flush_write(ci);
/* write the addr, initiate a read */
- pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr, (u_int8_t) ramaddr);
+ pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr,
+ (u_int8_t) ramaddr);
/* for write order preservation when optimizing driver */
pci_flush_write(ci);
@@ -508,7 +513,7 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
*/
static void
-SetPwrLevel(comet_t *comet)
+SetPwrLevel(struct s_comet_reg *comet)
{
volatile u_int32_t temp;
@@ -550,12 +555,11 @@ SetPwrLevel(comet_t *comet)
*/
#if 0
static void
-SetCometOps(comet_t *comet)
+SetCometOps(struct s_comet_reg *comet)
{
volatile u_int8_t rd_value;
- if (comet == mConfig.C4Func1Base + (COMET0_OFFSET >> 2))
- {
+ if (comet == mConfig.C4Func1Base + (COMET0_OFFSET >> 2)) {
/* read the BRIF Configuration */
rd_value = (u_int8_t) pci_read_32((u_int32_t *) &comet->brif_cfg);
rd_value &= ~0x20;
diff --git a/drivers/staging/cxt1e1/comet.h b/drivers/staging/cxt1e1/comet.h
index 03b9bb7..d5d286e 100644
--- a/drivers/staging/cxt1e1/comet.h
+++ b/drivers/staging/cxt1e1/comet.h
@@ -25,304 +25,313 @@
#define VINT32 volatile u_int32_t
-struct s_comet_reg
-{
- VINT32 gbl_cfg; /* 00 Global Cfg */
- VINT32 clkmon; /* 01 Clk Monitor */
- VINT32 rx_opt; /* 02 RX Options */
- VINT32 rx_line_cfg; /* 03 RX Line Interface Cfg */
- VINT32 tx_line_cfg; /* 04 TX Line Interface Cfg */
- VINT32 tx_frpass; /* 05 TX Framing & Bypass Options */
- VINT32 tx_time; /* 06 TX Timing Options */
- VINT32 intr_1; /* 07 Intr Source #1 */
- VINT32 intr_2; /* 08 Intr Source #2 */
- VINT32 intr_3; /* 09 Intr Source #3 */
- VINT32 mdiag; /* 0A Master Diagnostics */
- VINT32 mtest; /* 0B Master Test */
- VINT32 adiag; /* 0C Analog Diagnostics */
- VINT32 rev_id; /* 0D Rev/Chip Id/Global PMON Update */
+struct s_comet_reg {
+ VINT32 gbl_cfg; /* 00 Global Cfg */
+ VINT32 clkmon; /* 01 Clk Monitor */
+ VINT32 rx_opt; /* 02 RX Options */
+ VINT32 rx_line_cfg; /* 03 RX Line Interface Cfg */
+ VINT32 tx_line_cfg; /* 04 TX Line Interface Cfg */
+ VINT32 tx_frpass; /* 05 TX Framing & Bypass Options */
+ VINT32 tx_time; /* 06 TX Timing Options */
+ VINT32 intr_1; /* 07 Intr Source #1 */
+ VINT32 intr_2; /* 08 Intr Source #2 */
+ VINT32 intr_3; /* 09 Intr Source #3 */
+ VINT32 mdiag; /* 0A Master Diagnostics */
+ VINT32 mtest; /* 0B Master Test */
+ VINT32 adiag; /* 0C Analog Diagnostics */
+ VINT32 rev_id; /* 0D Rev/Chip Id/Global PMON Update */
#define pmon rev_id
- VINT32 reset; /* 0E Reset */
- VINT32 prgd_phctl; /* 0F PRGD Positioning/Ctl & HDLC Ctl */
- VINT32 cdrc_cfg; /* 10 CDRC Cfg */
- VINT32 cdrc_ien; /* 11 CDRC Intr Enable */
- VINT32 cdrc_ists; /* 12 CDRC Intr Sts */
- VINT32 cdrc_alos; /* 13 CDRC Alternate Loss of Signal */
-
- VINT32 rjat_ists; /* 14 RJAT Intr Sts */
- VINT32 rjat_n1clk; /* 15 RJAT Reference Clk Divisor (N1) Ctl */
- VINT32 rjat_n2clk; /* 16 RJAT Output Clk Divisor (N2) Ctl */
- VINT32 rjat_cfg; /* 17 RJAT Cfg */
-
- VINT32 tjat_ists; /* 18 TJAT Intr Sts */
- VINT32 tjat_n1clk; /* 19 TJAT Reference Clk Divisor (N1) Ctl */
- VINT32 tjat_n2clk; /* 1A TJAT Output Clk Divisor (N2) Ctl */
- VINT32 tjat_cfg; /* 1B TJAT Cfg */
-
- VINT32 rx_elst_cfg; /* 1C RX-ELST Cfg */
- VINT32 rx_elst_ists; /* 1D RX-ELST Intr Sts */
- VINT32 rx_elst_idle; /* 1E RX-ELST Idle Code */
- VINT32 _rx_elst_res1f; /* 1F RX-ELST Reserved */
-
- VINT32 tx_elst_cfg; /* 20 TX-ELST Cfg */
- VINT32 tx_elst_ists; /* 21 TX-ELST Intr Sts */
- VINT32 _tx_elst_res22; /* 22 TX-ELST Reserved */
- VINT32 _tx_elst_res23; /* 23 TX-ELST Reserved */
- VINT32 __res24; /* 24 Reserved */
- VINT32 __res25; /* 25 Reserved */
- VINT32 __res26; /* 26 Reserved */
- VINT32 __res27; /* 27 Reserved */
-
- VINT32 rxce1_ctl; /* 28 RXCE RX Data Link 1 Ctl */
- VINT32 rxce1_bits; /* 29 RXCE RX Data Link 1 Bit Select */
- VINT32 rxce2_ctl; /* 2A RXCE RX Data Link 2 Ctl */
- VINT32 rxce2_bits; /* 2B RXCE RX Data Link 2 Bit Select */
- VINT32 rxce3_ctl; /* 2C RXCE RX Data Link 3 Ctl */
- VINT32 rxce3_bits; /* 2D RXCE RX Data Link 3 Bit Select */
- VINT32 _rxce_res2E; /* 2E RXCE Reserved */
- VINT32 _rxce_res2F; /* 2F RXCE Reserved */
-
- VINT32 brif_cfg; /* 30 BRIF RX Backplane Cfg */
- VINT32 brif_fpcfg; /* 31 BRIF RX Backplane Frame Pulse Cfg */
- VINT32 brif_pfcfg; /* 32 BRIF RX Backplane Parity/F-Bit Cfg */
- VINT32 brif_tsoff; /* 33 BRIF RX Backplane Time Slot Offset */
- VINT32 brif_boff; /* 34 BRIF RX Backplane Bit Offset */
- VINT32 _brif_res35; /* 35 BRIF RX Backplane Reserved */
- VINT32 _brif_res36; /* 36 BRIF RX Backplane Reserved */
- VINT32 _brif_res37; /* 37 BRIF RX Backplane Reserved */
-
- VINT32 txci1_ctl; /* 38 TXCI TX Data Link 1 Ctl */
- VINT32 txci1_bits; /* 39 TXCI TX Data Link 2 Bit Select */
- VINT32 txci2_ctl; /* 3A TXCI TX Data Link 1 Ctl */
- VINT32 txci2_bits; /* 3B TXCI TX Data Link 2 Bit Select */
- VINT32 txci3_ctl; /* 3C TXCI TX Data Link 1 Ctl */
- VINT32 txci3_bits; /* 3D TXCI TX Data Link 2 Bit Select */
- VINT32 _txci_res3E; /* 3E TXCI Reserved */
- VINT32 _txci_res3F; /* 3F TXCI Reserved */
-
- VINT32 btif_cfg; /* 40 BTIF TX Backplane Cfg */
- VINT32 btif_fpcfg; /* 41 BTIF TX Backplane Frame Pulse Cfg */
- VINT32 btif_pcfgsts; /* 42 BTIF TX Backplane Parity Cfg & Sts */
- VINT32 btif_tsoff; /* 43 BTIF TX Backplane Time Slot Offset */
- VINT32 btif_boff; /* 44 BTIF TX Backplane Bit Offset */
- VINT32 _btif_res45; /* 45 BTIF TX Backplane Reserved */
- VINT32 _btif_res46; /* 46 BTIF TX Backplane Reserved */
- VINT32 _btif_res47; /* 47 BTIF TX Backplane Reserved */
- VINT32 t1_frmr_cfg; /* 48 T1 FRMR Cfg */
- VINT32 t1_frmr_ien; /* 49 T1 FRMR Intr Enable */
- VINT32 t1_frmr_ists; /* 4A T1 FRMR Intr Sts */
- VINT32 __res_4B; /* 4B Reserved */
- VINT32 ibcd_cfg; /* 4C IBCD Cfg */
- VINT32 ibcd_ies; /* 4D IBCD Intr Enable/Sts */
- VINT32 ibcd_act; /* 4E IBCD Activate Code */
- VINT32 ibcd_deact; /* 4F IBCD Deactivate Code */
-
- VINT32 sigx_cfg; /* 50 SIGX Cfg/Change of Signaling State */
- VINT32 sigx_acc_cos; /* 51 SIGX uP Access Sts/Change of Signaling State */
- VINT32 sigx_iac_cos; /* 52 SIGX Channel Indirect
- * Addr/Ctl/Change of Signaling State */
- VINT32 sigx_idb_cos; /* 53 SIGX Channel Indirect Data
- * Buffer/Change of Signaling State */
-
- VINT32 t1_xbas_cfg; /* 54 T1 XBAS Cfg */
- VINT32 t1_xbas_altx; /* 55 T1 XBAS Alarm TX */
- VINT32 t1_xibc_ctl; /* 56 T1 XIBC Ctl */
- VINT32 t1_xibc_lbcode; /* 57 T1 XIBC Loopback Code */
-
- VINT32 pmon_ies; /* 58 PMON Intr Enable/Sts */
- VINT32 pmon_fberr; /* 59 PMON Framing Bit Err Cnt */
- VINT32 pmon_feb_lsb; /* 5A PMON OFF/COFA/Far End Block Err Cnt (LSB) */
- VINT32 pmon_feb_msb; /* 5B PMON OFF/COFA/Far End Block Err Cnt (MSB) */
- VINT32 pmon_bed_lsb; /* 5C PMON Bit/Err/CRCE Cnt (LSB) */
- VINT32 pmon_bed_msb; /* 5D PMON Bit/Err/CRCE Cnt (MSB) */
- VINT32 pmon_lvc_lsb; /* 5E PMON LVC Cnt (LSB) */
- VINT32 pmon_lvc_msb; /* 5F PMON LVC Cnt (MSB) */
-
- VINT32 t1_almi_cfg; /* 60 T1 ALMI Cfg */
- VINT32 t1_almi_ien; /* 61 T1 ALMI Intr Enable */
- VINT32 t1_almi_ists; /* 62 T1 ALMI Intr Sts */
- VINT32 t1_almi_detsts; /* 63 T1 ALMI Alarm Detection Sts */
-
- VINT32 _t1_pdvd_res64; /* 64 T1 PDVD Reserved */
- VINT32 t1_pdvd_ies; /* 65 T1 PDVD Intr Enable/Sts */
- VINT32 _t1_xboc_res66; /* 66 T1 XBOC Reserved */
- VINT32 t1_xboc_code; /* 67 T1 XBOC Code */
- VINT32 _t1_xpde_res68; /* 68 T1 XPDE Reserved */
- VINT32 t1_xpde_ies; /* 69 T1 XPDE Intr Enable/Sts */
-
- VINT32 t1_rboc_ena; /* 6A T1 RBOC Enable */
- VINT32 t1_rboc_sts; /* 6B T1 RBOC Code Sts */
-
- VINT32 t1_tpsc_cfg; /* 6C TPSC Cfg */
- VINT32 t1_tpsc_sts; /* 6D TPSC uP Access Sts */
- VINT32 t1_tpsc_ciaddr; /* 6E TPSC Channel Indirect
- * Addr/Ctl */
- VINT32 t1_tpsc_cidata; /* 6F TPSC Channel Indirect Data
- * Buffer */
- VINT32 t1_rpsc_cfg; /* 70 RPSC Cfg */
- VINT32 t1_rpsc_sts; /* 71 RPSC uP Access Sts */
- VINT32 t1_rpsc_ciaddr; /* 72 RPSC Channel Indirect
- * Addr/Ctl */
- VINT32 t1_rpsc_cidata; /* 73 RPSC Channel Indirect Data
- * Buffer */
- VINT32 __res74; /* 74 Reserved */
- VINT32 __res75; /* 75 Reserved */
- VINT32 __res76; /* 76 Reserved */
- VINT32 __res77; /* 77 Reserved */
-
- VINT32 t1_aprm_cfg; /* 78 T1 APRM Cfg/Ctl */
- VINT32 t1_aprm_load; /* 79 T1 APRM Manual Load */
- VINT32 t1_aprm_ists; /* 7A T1 APRM Intr Sts */
- VINT32 t1_aprm_1sec_2; /* 7B T1 APRM One Second Content Octet 2 */
- VINT32 t1_aprm_1sec_3; /* 7C T1 APRM One Second Content Octet 3 */
- VINT32 t1_aprm_1sec_4; /* 7D T1 APRM One Second Content Octet 4 */
- VINT32 t1_aprm_1sec_5; /* 7E T1 APRM One Second Content MSB (Octect 5) */
- VINT32 t1_aprm_1sec_6; /* 7F T1 APRM One Second Content MSB (Octect 6) */
-
- VINT32 e1_tran_cfg; /* 80 E1 TRAN Cfg */
- VINT32 e1_tran_txalarm; /* 81 E1 TRAN TX Alarm/Diagnostic Ctl */
- VINT32 e1_tran_intctl; /* 82 E1 TRAN International Ctl */
- VINT32 e1_tran_extrab; /* 83 E1 TRAN Extra Bits Ctl */
- VINT32 e1_tran_ien; /* 84 E1 TRAN Intr Enable */
- VINT32 e1_tran_ists; /* 85 E1 TRAN Intr Sts */
- VINT32 e1_tran_nats; /* 86 E1 TRAN National Bit Codeword
- * Select */
- VINT32 e1_tran_nat; /* 87 E1 TRAN National Bit Codeword */
- VINT32 __res88; /* 88 Reserved */
- VINT32 __res89; /* 89 Reserved */
- VINT32 __res8A; /* 8A Reserved */
- VINT32 __res8B; /* 8B Reserved */
-
- VINT32 _t1_frmr_res8C; /* 8C T1 FRMR Reserved */
- VINT32 _t1_frmr_res8D; /* 8D T1 FRMR Reserved */
- VINT32 __res8E; /* 8E Reserved */
- VINT32 __res8F; /* 8F Reserved */
-
- VINT32 e1_frmr_aopts; /* 90 E1 FRMR Frame Alignment Options */
- VINT32 e1_frmr_mopts; /* 91 E1 FRMR Maintenance Mode Options */
- VINT32 e1_frmr_ien; /* 92 E1 FRMR Framing Sts Intr Enable */
- VINT32 e1_frmr_mien; /* 93 E1 FRMR Maintenance/Alarm Sts Intr Enable */
- VINT32 e1_frmr_ists; /* 94 E1 FRMR Framing Sts Intr Indication */
- VINT32 e1_frmr_mists; /* 95 E1 FRMR Maintenance/Alarm Sts Indication Enable */
- VINT32 e1_frmr_sts; /* 96 E1 FRMR Framing Sts */
- VINT32 e1_frmr_masts; /* 97 E1 FRMR Maintenance/Alarm Sts */
- VINT32 e1_frmr_nat_bits; /* 98 E1 FRMR International/National Bits */
- VINT32 e1_frmr_crc_lsb; /* 99 E1 FRMR CRC Err Cnt - LSB */
- VINT32 e1_frmr_crc_msb; /* 9A E1 FRMR CRC Err Cnt - MSB */
- VINT32 e1_frmr_nat_ien; /* 9B E1 FRMR National Bit Codeword Intr Enables */
- VINT32 e1_frmr_nat_ists; /* 9C E1 FRMR National Bit Codeword Intr/Sts */
- VINT32 e1_frmr_nat; /* 9D E1 FRMR National Bit Codewords */
- VINT32 e1_frmr_fp_ien; /* 9E E1 FRMR Frame Pulse/Alarm Intr Enables */
- VINT32 e1_frmr_fp_ists; /* 9F E1 FRMR Frame Pulse/Alarm Intr/Sts */
-
- VINT32 __resA0; /* A0 Reserved */
- VINT32 __resA1; /* A1 Reserved */
- VINT32 __resA2; /* A2 Reserved */
- VINT32 __resA3; /* A3 Reserved */
- VINT32 __resA4; /* A4 Reserved */
- VINT32 __resA5; /* A5 Reserved */
- VINT32 __resA6; /* A6 Reserved */
- VINT32 __resA7; /* A7 Reserved */
-
- VINT32 tdpr1_cfg; /* A8 TDPR #1 Cfg */
- VINT32 tdpr1_utl; /* A9 TDPR #1 Upper TX Threshold */
- VINT32 tdpr1_ltl; /* AA TDPR #1 Lower TX Threshold */
- VINT32 tdpr1_ien; /* AB TDPR #1 Intr Enable */
- VINT32 tdpr1_ists; /* AC TDPR #1 Intr Sts/UDR Clear */
- VINT32 tdpr1_data; /* AD TDPR #1 TX Data */
- VINT32 __resAE; /* AE Reserved */
- VINT32 __resAF; /* AF Reserved */
- VINT32 tdpr2_cfg; /* B0 TDPR #2 Cfg */
- VINT32 tdpr2_utl; /* B1 TDPR #2 Upper TX Threshold */
- VINT32 tdpr2_ltl; /* B2 TDPR #2 Lower TX Threshold */
- VINT32 tdpr2_ien; /* B3 TDPR #2 Intr Enable */
- VINT32 tdpr2_ists; /* B4 TDPR #2 Intr Sts/UDR Clear */
- VINT32 tdpr2_data; /* B5 TDPR #2 TX Data */
- VINT32 __resB6; /* B6 Reserved */
- VINT32 __resB7; /* B7 Reserved1 */
- VINT32 tdpr3_cfg; /* B8 TDPR #3 Cfg */
- VINT32 tdpr3_utl; /* B9 TDPR #3 Upper TX Threshold */
- VINT32 tdpr3_ltl; /* BA TDPR #3 Lower TX Threshold */
- VINT32 tdpr3_ien; /* BB TDPR #3 Intr Enable */
- VINT32 tdpr3_ists; /* BC TDPR #3 Intr Sts/UDR Clear */
- VINT32 tdpr3_data; /* BD TDPR #3 TX Data */
- VINT32 __resBE; /* BE Reserved */
- VINT32 __resBF; /* BF Reserved */
-
- VINT32 rdlc1_cfg; /* C0 RDLC #1 Cfg */
- VINT32 rdlc1_intctl; /* C1 RDLC #1 Intr Ctl */
- VINT32 rdlc1_sts; /* C2 RDLC #1 Sts */
- VINT32 rdlc1_data; /* C3 RDLC #1 Data */
- VINT32 rdlc1_paddr; /* C4 RDLC #1 Primary Addr Match */
- VINT32 rdlc1_saddr; /* C5 RDLC #1 Secondary Addr Match */
- VINT32 __resC6; /* C6 Reserved */
- VINT32 __resC7; /* C7 Reserved */
- VINT32 rdlc2_cfg; /* C8 RDLC #2 Cfg */
- VINT32 rdlc2_intctl; /* C9 RDLC #2 Intr Ctl */
- VINT32 rdlc2_sts; /* CA RDLC #2 Sts */
- VINT32 rdlc2_data; /* CB RDLC #2 Data */
- VINT32 rdlc2_paddr; /* CC RDLC #2 Primary Addr Match */
- VINT32 rdlc2_saddr; /* CD RDLC #2 Secondary Addr Match */
- VINT32 __resCE; /* CE Reserved */
- VINT32 __resCF; /* CF Reserved */
- VINT32 rdlc3_cfg; /* D0 RDLC #3 Cfg */
- VINT32 rdlc3_intctl; /* D1 RDLC #3 Intr Ctl */
- VINT32 rdlc3_sts; /* D2 RDLC #3 Sts */
- VINT32 rdlc3_data; /* D3 RDLC #3 Data */
- VINT32 rdlc3_paddr; /* D4 RDLC #3 Primary Addr Match */
- VINT32 rdlc3_saddr; /* D5 RDLC #3 Secondary Addr Match */
-
- VINT32 csu_cfg; /* D6 CSU Cfg */
- VINT32 _csu_resD7; /* D7 CSU Reserved */
-
- VINT32 rlps_idata3; /* D8 RLPS Indirect Data, 24-31 */
- VINT32 rlps_idata2; /* D9 RLPS Indirect Data, 16-23 */
- VINT32 rlps_idata1; /* DA RLPS Indirect Data, 8-15 */
- VINT32 rlps_idata0; /* DB RLPS Indirect Data, 0-7 */
- VINT32 rlps_eqvr; /* DC RLPS Equalizer Voltage Reference
- * (E1 missing) */
- VINT32 _rlps_resDD; /* DD RLPS Reserved */
- VINT32 _rlps_resDE; /* DE RLPS Reserved */
- VINT32 _rlps_resDF; /* DF RLPS Reserved */
-
- VINT32 prgd_ctl; /* E0 PRGD Ctl */
- VINT32 prgd_ies; /* E1 PRGD Intr Enable/Sts */
- VINT32 prgd_shift_len; /* E2 PRGD Shift Length */
- VINT32 prgd_tap; /* E3 PRGD Tap */
- VINT32 prgd_errin; /* E4 PRGD Err Insertion */
- VINT32 _prgd_resE5; /* E5 PRGD Reserved */
- VINT32 _prgd_resE6; /* E6 PRGD Reserved */
- VINT32 _prgd_resE7; /* E7 PRGD Reserved */
- VINT32 prgd_patin1; /* E8 PRGD Pattern Insertion #1 */
- VINT32 prgd_patin2; /* E9 PRGD Pattern Insertion #2 */
- VINT32 prgd_patin3; /* EA PRGD Pattern Insertion #3 */
- VINT32 prgd_patin4; /* EB PRGD Pattern Insertion #4 */
- VINT32 prgd_patdet1; /* EC PRGD Pattern Detector #1 */
- VINT32 prgd_patdet2; /* ED PRGD Pattern Detector #2 */
- VINT32 prgd_patdet3; /* EE PRGD Pattern Detector #3 */
- VINT32 prgd_patdet4; /* EF PRGD Pattern Detector #4 */
-
- VINT32 xlpg_cfg; /* F0 XLPG Line Driver Cfg */
- VINT32 xlpg_ctlsts; /* F1 XLPG Ctl/Sts */
- VINT32 xlpg_pwave_addr; /* F2 XLPG Pulse Waveform Storage Write Addr */
- VINT32 xlpg_pwave_data; /* F3 XLPG Pulse Waveform Storage Data */
- VINT32 xlpg_atest_pctl; /* F4 XLPG Analog Test Positive Ctl */
- VINT32 xlpg_atest_nctl; /* F5 XLPG Analog Test Negative Ctl */
- VINT32 xlpg_fdata_sel; /* F6 XLPG Fuse Data Select */
- VINT32 _xlpg_resF7; /* F7 XLPG Reserved */
-
- VINT32 rlps_cfgsts; /* F8 RLPS Cfg & Sts */
- VINT32 rlps_alos_thresh; /* F9 RLPS ALOS Detection/Clearance Threshold */
- VINT32 rlps_alos_dper; /* FA RLPS ALOS Detection Period */
- VINT32 rlps_alos_cper; /* FB RLPS ALOS Clearance Period */
- VINT32 rlps_eq_iaddr; /* FC RLPS Equalization Indirect Addr */
- VINT32 rlps_eq_rwsel; /* FD RLPS Equalization Read/WriteB Select */
- VINT32 rlps_eq_ctlsts; /* FE RLPS Equalizer Loop Sts & Ctl */
- VINT32 rlps_eq_cfg; /* FF RLPS Equalizer Cfg */
+ VINT32 reset; /* 0E Reset */
+ VINT32 prgd_phctl; /* 0F PRGD Positioning/Ctl & HDLC Ctl */
+ VINT32 cdrc_cfg; /* 10 CDRC Cfg */
+ VINT32 cdrc_ien; /* 11 CDRC Intr Enable */
+ VINT32 cdrc_ists; /* 12 CDRC Intr Sts */
+ VINT32 cdrc_alos; /* 13 CDRC Alternate Loss of Signal */
+
+ VINT32 rjat_ists; /* 14 RJAT Intr Sts */
+ VINT32 rjat_n1clk; /* 15 RJAT Reference Clk Divisor (N1) Ctl */
+ VINT32 rjat_n2clk; /* 16 RJAT Output Clk Divisor (N2) Ctl */
+ VINT32 rjat_cfg; /* 17 RJAT Cfg */
+
+ VINT32 tjat_ists; /* 18 TJAT Intr Sts */
+ VINT32 tjat_n1clk; /* 19 TJAT Reference Clk Divisor (N1) Ctl */
+ VINT32 tjat_n2clk; /* 1A TJAT Output Clk Divisor (N2) Ctl */
+ VINT32 tjat_cfg; /* 1B TJAT Cfg */
+
+ VINT32 rx_elst_cfg; /* 1C RX-ELST Cfg */
+ VINT32 rx_elst_ists; /* 1D RX-ELST Intr Sts */
+ VINT32 rx_elst_idle; /* 1E RX-ELST Idle Code */
+ VINT32 _rx_elst_res1f; /* 1F RX-ELST Reserved */
+
+ VINT32 tx_elst_cfg; /* 20 TX-ELST Cfg */
+ VINT32 tx_elst_ists; /* 21 TX-ELST Intr Sts */
+ VINT32 _tx_elst_res22; /* 22 TX-ELST Reserved */
+ VINT32 _tx_elst_res23; /* 23 TX-ELST Reserved */
+ VINT32 __res24; /* 24 Reserved */
+ VINT32 __res25; /* 25 Reserved */
+ VINT32 __res26; /* 26 Reserved */
+ VINT32 __res27; /* 27 Reserved */
+
+ VINT32 rxce1_ctl; /* 28 RXCE RX Data Link 1 Ctl */
+ VINT32 rxce1_bits; /* 29 RXCE RX Data Link 1 Bit Select */
+ VINT32 rxce2_ctl; /* 2A RXCE RX Data Link 2 Ctl */
+ VINT32 rxce2_bits; /* 2B RXCE RX Data Link 2 Bit Select */
+ VINT32 rxce3_ctl; /* 2C RXCE RX Data Link 3 Ctl */
+ VINT32 rxce3_bits; /* 2D RXCE RX Data Link 3 Bit Select */
+ VINT32 _rxce_res2E; /* 2E RXCE Reserved */
+ VINT32 _rxce_res2F; /* 2F RXCE Reserved */
+
+ VINT32 brif_cfg; /* 30 BRIF RX Backplane Cfg */
+ VINT32 brif_fpcfg; /* 31 BRIF RX Backplane Frame Pulse Cfg */
+ VINT32 brif_pfcfg; /* 32 BRIF RX Backplane Parity/F-Bit Cfg */
+ VINT32 brif_tsoff; /* 33 BRIF RX Backplane Time Slot Offset */
+ VINT32 brif_boff; /* 34 BRIF RX Backplane Bit Offset */
+ VINT32 _brif_res35; /* 35 BRIF RX Backplane Reserved */
+ VINT32 _brif_res36; /* 36 BRIF RX Backplane Reserved */
+ VINT32 _brif_res37; /* 37 BRIF RX Backplane Reserved */
+
+ VINT32 txci1_ctl; /* 38 TXCI TX Data Link 1 Ctl */
+ VINT32 txci1_bits; /* 39 TXCI TX Data Link 2 Bit Select */
+ VINT32 txci2_ctl; /* 3A TXCI TX Data Link 1 Ctl */
+ VINT32 txci2_bits; /* 3B TXCI TX Data Link 2 Bit Select */
+ VINT32 txci3_ctl; /* 3C TXCI TX Data Link 1 Ctl */
+ VINT32 txci3_bits; /* 3D TXCI TX Data Link 2 Bit Select */
+ VINT32 _txci_res3E; /* 3E TXCI Reserved */
+ VINT32 _txci_res3F; /* 3F TXCI Reserved */
+
+ VINT32 btif_cfg; /* 40 BTIF TX Backplane Cfg */
+ VINT32 btif_fpcfg; /* 41 BTIF TX Backplane Frame Pulse Cfg */
+ VINT32 btif_pcfgsts; /* 42 BTIF TX Backplane Parity Cfg & Sts */
+ VINT32 btif_tsoff; /* 43 BTIF TX Backplane Time Slot Offset */
+ VINT32 btif_boff; /* 44 BTIF TX Backplane Bit Offset */
+ VINT32 _btif_res45; /* 45 BTIF TX Backplane Reserved */
+ VINT32 _btif_res46; /* 46 BTIF TX Backplane Reserved */
+ VINT32 _btif_res47; /* 47 BTIF TX Backplane Reserved */
+ VINT32 t1_frmr_cfg; /* 48 T1 FRMR Cfg */
+ VINT32 t1_frmr_ien; /* 49 T1 FRMR Intr Enable */
+ VINT32 t1_frmr_ists; /* 4A T1 FRMR Intr Sts */
+ VINT32 __res_4B; /* 4B Reserved */
+ VINT32 ibcd_cfg; /* 4C IBCD Cfg */
+ VINT32 ibcd_ies; /* 4D IBCD Intr Enable/Sts */
+ VINT32 ibcd_act; /* 4E IBCD Activate Code */
+ VINT32 ibcd_deact; /* 4F IBCD Deactivate Code */
+
+ VINT32 sigx_cfg; /* 50 SIGX Cfg/Change of Signaling State */
+ VINT32 sigx_acc_cos; /* 51 SIGX
+ * uP Access Sts/Change of Signaling State */
+ VINT32 sigx_iac_cos; /* 52 SIGX Channel Indirect
+ * Addr/Ctl/Change of Signaling State */
+ VINT32 sigx_idb_cos; /* 53 SIGX Channel Indirect Data
+ * Buffer/Change of Signaling State */
+
+ VINT32 t1_xbas_cfg; /* 54 T1 XBAS Cfg */
+ VINT32 t1_xbas_altx; /* 55 T1 XBAS Alarm TX */
+ VINT32 t1_xibc_ctl; /* 56 T1 XIBC Ctl */
+ VINT32 t1_xibc_lbcode; /* 57 T1 XIBC Loopback Code */
+
+ VINT32 pmon_ies; /* 58 PMON Intr Enable/Sts */
+ VINT32 pmon_fberr; /* 59 PMON Framing Bit Err Cnt */
+ VINT32 pmon_feb_lsb; /* 5A PMON
+ * OFF/COFA/Far End Block Err Cnt (LSB) */
+ VINT32 pmon_feb_msb; /* 5B PMON
+ * OFF/COFA/Far End Block Err Cnt (MSB) */
+ VINT32 pmon_bed_lsb; /* 5C PMON Bit/Err/CRCE Cnt (LSB) */
+ VINT32 pmon_bed_msb; /* 5D PMON Bit/Err/CRCE Cnt (MSB) */
+ VINT32 pmon_lvc_lsb; /* 5E PMON LVC Cnt (LSB) */
+ VINT32 pmon_lvc_msb; /* 5F PMON LVC Cnt (MSB) */
+
+ VINT32 t1_almi_cfg; /* 60 T1 ALMI Cfg */
+ VINT32 t1_almi_ien; /* 61 T1 ALMI Intr Enable */
+ VINT32 t1_almi_ists; /* 62 T1 ALMI Intr Sts */
+ VINT32 t1_almi_detsts; /* 63 T1 ALMI Alarm Detection Sts */
+
+ VINT32 _t1_pdvd_res64; /* 64 T1 PDVD Reserved */
+ VINT32 t1_pdvd_ies; /* 65 T1 PDVD Intr Enable/Sts */
+ VINT32 _t1_xboc_res66; /* 66 T1 XBOC Reserved */
+ VINT32 t1_xboc_code; /* 67 T1 XBOC Code */
+ VINT32 _t1_xpde_res68; /* 68 T1 XPDE Reserved */
+ VINT32 t1_xpde_ies; /* 69 T1 XPDE Intr Enable/Sts */
+
+ VINT32 t1_rboc_ena; /* 6A T1 RBOC Enable */
+ VINT32 t1_rboc_sts; /* 6B T1 RBOC Code Sts */
+
+ VINT32 t1_tpsc_cfg; /* 6C TPSC Cfg */
+ VINT32 t1_tpsc_sts; /* 6D TPSC uP Access Sts */
+ VINT32 t1_tpsc_ciaddr; /* 6E TPSC Channel Indirect
+ * Addr/Ctl */
+ VINT32 t1_tpsc_cidata; /* 6F TPSC Channel Indirect Data
+ * Buffer */
+ VINT32 t1_rpsc_cfg; /* 70 RPSC Cfg */
+ VINT32 t1_rpsc_sts; /* 71 RPSC uP Access Sts */
+ VINT32 t1_rpsc_ciaddr; /* 72 RPSC Channel Indirect
+ * Addr/Ctl */
+ VINT32 t1_rpsc_cidata; /* 73 RPSC Channel Indirect Data
+ * Buffer */
+ VINT32 __res74; /* 74 Reserved */
+ VINT32 __res75; /* 75 Reserved */
+ VINT32 __res76; /* 76 Reserved */
+ VINT32 __res77; /* 77 Reserved */
+
+ VINT32 t1_aprm_cfg; /* 78 T1 APRM Cfg/Ctl */
+ VINT32 t1_aprm_load; /* 79 T1 APRM Manual Load */
+ VINT32 t1_aprm_ists; /* 7A T1 APRM Intr Sts */
+ VINT32 t1_aprm_1sec_2; /* 7B T1 APRM One Second Content Octet 2 */
+ VINT32 t1_aprm_1sec_3; /* 7C T1 APRM One Second Content Octet 3 */
+ VINT32 t1_aprm_1sec_4; /* 7D T1 APRM One Second Content Octet 4 */
+ VINT32 t1_aprm_1sec_5; /* 7E T1 APRM
+ * One Second Content MSB (Octect 5) */
+ VINT32 t1_aprm_1sec_6; /* 7F T1 APRM
+ * One Second Content MSB (Octect 6) */
+
+ VINT32 e1_tran_cfg; /* 80 E1 TRAN Cfg */
+ VINT32 e1_tran_txalarm; /* 81 E1 TRAN TX Alarm/Diagnostic Ctl */
+ VINT32 e1_tran_intctl; /* 82 E1 TRAN International Ctl */
+ VINT32 e1_tran_extrab; /* 83 E1 TRAN Extra Bits Ctl */
+ VINT32 e1_tran_ien; /* 84 E1 TRAN Intr Enable */
+ VINT32 e1_tran_ists; /* 85 E1 TRAN Intr Sts */
+ VINT32 e1_tran_nats; /* 86 E1 TRAN National Bit Codeword
+ * Select */
+ VINT32 e1_tran_nat; /* 87 E1 TRAN National Bit Codeword */
+ VINT32 __res88; /* 88 Reserved */
+ VINT32 __res89; /* 89 Reserved */
+ VINT32 __res8A; /* 8A Reserved */
+ VINT32 __res8B; /* 8B Reserved */
+
+ VINT32 _t1_frmr_res8C; /* 8C T1 FRMR Reserved */
+ VINT32 _t1_frmr_res8D; /* 8D T1 FRMR Reserved */
+ VINT32 __res8E; /* 8E Reserved */
+ VINT32 __res8F; /* 8F Reserved */
+
+ VINT32 e1_frmr_aopts; /* 90 E1 FRMR Frame Alignment Options */
+ VINT32 e1_frmr_mopts; /* 91 E1 FRMR Maintenance Mode Options */
+ VINT32 e1_frmr_ien; /* 92 E1 FRMR Framing Sts Intr Enable */
+ VINT32 e1_frmr_mien; /* 93 E1 FRMR
+ * Maintenance/Alarm Sts Intr Enable */
+ VINT32 e1_frmr_ists; /* 94 E1 FRMR Framing Sts Intr Indication */
+ VINT32 e1_frmr_mists; /* 95 E1 FRMR
+ * Maintenance/Alarm Sts Indication Enable */
+ VINT32 e1_frmr_sts; /* 96 E1 FRMR Framing Sts */
+ VINT32 e1_frmr_masts; /* 97 E1 FRMR Maintenance/Alarm Sts */
+ VINT32 e1_frmr_nat_bits; /* 98 E1 FRMR International/National Bits */
+ VINT32 e1_frmr_crc_lsb; /* 99 E1 FRMR CRC Err Cnt - LSB */
+ VINT32 e1_frmr_crc_msb; /* 9A E1 FRMR CRC Err Cnt - MSB */
+ VINT32 e1_frmr_nat_ien; /* 9B E1 FRMR
+ * National Bit Codeword Intr Enables */
+ VINT32 e1_frmr_nat_ists; /* 9C E1 FRMR
+ * National Bit Codeword Intr/Sts */
+ VINT32 e1_frmr_nat; /* 9D E1 FRMR National Bit Codewords */
+ VINT32 e1_frmr_fp_ien; /* 9E E1 FRMR
+ * Frame Pulse/Alarm Intr Enables */
+ VINT32 e1_frmr_fp_ists; /* 9F E1 FRMR Frame Pulse/Alarm Intr/Sts */
+
+ VINT32 __resA0; /* A0 Reserved */
+ VINT32 __resA1; /* A1 Reserved */
+ VINT32 __resA2; /* A2 Reserved */
+ VINT32 __resA3; /* A3 Reserved */
+ VINT32 __resA4; /* A4 Reserved */
+ VINT32 __resA5; /* A5 Reserved */
+ VINT32 __resA6; /* A6 Reserved */
+ VINT32 __resA7; /* A7 Reserved */
+
+ VINT32 tdpr1_cfg; /* A8 TDPR #1 Cfg */
+ VINT32 tdpr1_utl; /* A9 TDPR #1 Upper TX Threshold */
+ VINT32 tdpr1_ltl; /* AA TDPR #1 Lower TX Threshold */
+ VINT32 tdpr1_ien; /* AB TDPR #1 Intr Enable */
+ VINT32 tdpr1_ists; /* AC TDPR #1 Intr Sts/UDR Clear */
+ VINT32 tdpr1_data; /* AD TDPR #1 TX Data */
+ VINT32 __resAE; /* AE Reserved */
+ VINT32 __resAF; /* AF Reserved */
+ VINT32 tdpr2_cfg; /* B0 TDPR #2 Cfg */
+ VINT32 tdpr2_utl; /* B1 TDPR #2 Upper TX Threshold */
+ VINT32 tdpr2_ltl; /* B2 TDPR #2 Lower TX Threshold */
+ VINT32 tdpr2_ien; /* B3 TDPR #2 Intr Enable */
+ VINT32 tdpr2_ists; /* B4 TDPR #2 Intr Sts/UDR Clear */
+ VINT32 tdpr2_data; /* B5 TDPR #2 TX Data */
+ VINT32 __resB6; /* B6 Reserved */
+ VINT32 __resB7; /* B7 Reserved1 */
+ VINT32 tdpr3_cfg; /* B8 TDPR #3 Cfg */
+ VINT32 tdpr3_utl; /* B9 TDPR #3 Upper TX Threshold */
+ VINT32 tdpr3_ltl; /* BA TDPR #3 Lower TX Threshold */
+ VINT32 tdpr3_ien; /* BB TDPR #3 Intr Enable */
+ VINT32 tdpr3_ists; /* BC TDPR #3 Intr Sts/UDR Clear */
+ VINT32 tdpr3_data; /* BD TDPR #3 TX Data */
+ VINT32 __resBE; /* BE Reserved */
+ VINT32 __resBF; /* BF Reserved */
+
+ VINT32 rdlc1_cfg; /* C0 RDLC #1 Cfg */
+ VINT32 rdlc1_intctl; /* C1 RDLC #1 Intr Ctl */
+ VINT32 rdlc1_sts; /* C2 RDLC #1 Sts */
+ VINT32 rdlc1_data; /* C3 RDLC #1 Data */
+ VINT32 rdlc1_paddr; /* C4 RDLC #1 Primary Addr Match */
+ VINT32 rdlc1_saddr; /* C5 RDLC #1 Secondary Addr Match */
+ VINT32 __resC6; /* C6 Reserved */
+ VINT32 __resC7; /* C7 Reserved */
+ VINT32 rdlc2_cfg; /* C8 RDLC #2 Cfg */
+ VINT32 rdlc2_intctl; /* C9 RDLC #2 Intr Ctl */
+ VINT32 rdlc2_sts; /* CA RDLC #2 Sts */
+ VINT32 rdlc2_data; /* CB RDLC #2 Data */
+ VINT32 rdlc2_paddr; /* CC RDLC #2 Primary Addr Match */
+ VINT32 rdlc2_saddr; /* CD RDLC #2 Secondary Addr Match */
+ VINT32 __resCE; /* CE Reserved */
+ VINT32 __resCF; /* CF Reserved */
+ VINT32 rdlc3_cfg; /* D0 RDLC #3 Cfg */
+ VINT32 rdlc3_intctl; /* D1 RDLC #3 Intr Ctl */
+ VINT32 rdlc3_sts; /* D2 RDLC #3 Sts */
+ VINT32 rdlc3_data; /* D3 RDLC #3 Data */
+ VINT32 rdlc3_paddr; /* D4 RDLC #3 Primary Addr Match */
+ VINT32 rdlc3_saddr; /* D5 RDLC #3 Secondary Addr Match */
+
+ VINT32 csu_cfg; /* D6 CSU Cfg */
+ VINT32 _csu_resD7; /* D7 CSU Reserved */
+
+ VINT32 rlps_idata3; /* D8 RLPS Indirect Data, 24-31 */
+ VINT32 rlps_idata2; /* D9 RLPS Indirect Data, 16-23 */
+ VINT32 rlps_idata1; /* DA RLPS Indirect Data, 8-15 */
+ VINT32 rlps_idata0; /* DB RLPS Indirect Data, 0-7 */
+ VINT32 rlps_eqvr; /* DC RLPS Equalizer Voltage Reference
+ * (E1 missing) */
+ VINT32 _rlps_resDD; /* DD RLPS Reserved */
+ VINT32 _rlps_resDE; /* DE RLPS Reserved */
+ VINT32 _rlps_resDF; /* DF RLPS Reserved */
+
+ VINT32 prgd_ctl; /* E0 PRGD Ctl */
+ VINT32 prgd_ies; /* E1 PRGD Intr Enable/Sts */
+ VINT32 prgd_shift_len; /* E2 PRGD Shift Length */
+ VINT32 prgd_tap; /* E3 PRGD Tap */
+ VINT32 prgd_errin; /* E4 PRGD Err Insertion */
+ VINT32 _prgd_resE5; /* E5 PRGD Reserved */
+ VINT32 _prgd_resE6; /* E6 PRGD Reserved */
+ VINT32 _prgd_resE7; /* E7 PRGD Reserved */
+ VINT32 prgd_patin1; /* E8 PRGD Pattern Insertion #1 */
+ VINT32 prgd_patin2; /* E9 PRGD Pattern Insertion #2 */
+ VINT32 prgd_patin3; /* EA PRGD Pattern Insertion #3 */
+ VINT32 prgd_patin4; /* EB PRGD Pattern Insertion #4 */
+ VINT32 prgd_patdet1; /* EC PRGD Pattern Detector #1 */
+ VINT32 prgd_patdet2; /* ED PRGD Pattern Detector #2 */
+ VINT32 prgd_patdet3; /* EE PRGD Pattern Detector #3 */
+ VINT32 prgd_patdet4; /* EF PRGD Pattern Detector #4 */
+
+ VINT32 xlpg_cfg; /* F0 XLPG Line Driver Cfg */
+ VINT32 xlpg_ctlsts; /* F1 XLPG Ctl/Sts */
+ VINT32 xlpg_pwave_addr; /* F2 XLPG
+ * Pulse Waveform Storage Write Addr */
+ VINT32 xlpg_pwave_data; /* F3 XLPG Pulse Waveform Storage Data */
+ VINT32 xlpg_atest_pctl; /* F4 XLPG Analog Test Positive Ctl */
+ VINT32 xlpg_atest_nctl; /* F5 XLPG Analog Test Negative Ctl */
+ VINT32 xlpg_fdata_sel; /* F6 XLPG Fuse Data Select */
+ VINT32 _xlpg_resF7; /* F7 XLPG Reserved */
+
+ VINT32 rlps_cfgsts; /* F8 RLPS Cfg & Sts */
+ VINT32 rlps_alos_thresh; /* F9 RLPS
+ * ALOS Detection/Clearance Threshold */
+ VINT32 rlps_alos_dper; /* FA RLPS ALOS Detection Period */
+ VINT32 rlps_alos_cper; /* FB RLPS ALOS Clearance Period */
+ VINT32 rlps_eq_iaddr; /* FC RLPS Equalization Indirect Addr */
+ VINT32 rlps_eq_rwsel; /* FD RLPS Equalization Read/WriteB Select */
+ VINT32 rlps_eq_ctlsts; /* FE RLPS Equalizer Loop Sts & Ctl */
+ VINT32 rlps_eq_cfg; /* FF RLPS Equalizer Cfg */
};
-typedef struct s_comet_reg comet_t;
-
/* 00AH: MDIAG Register bit definitions */
#define COMET_MDIAG_ID5 0x40
#define COMET_MDIAG_LBMASK 0x3F
@@ -338,7 +347,7 @@ typedef struct s_comet_reg comet_t;
#ifdef __KERNEL__
extern void
-init_comet(void *, comet_t *, u_int32_t, int, u_int8_t);
+init_comet(void *, struct s_comet_reg *, u_int32_t, int, u_int8_t);
#endif
#endif /* _INC_COMET_H_ */
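
Most of the cxt1e1 churn above and below is one mechanical substitution: the comet_t typedef is dropped and every user names struct s_comet_reg directly, matching the kernel style preference for visible struct tags over typedef aliases. In miniature, using the prototype from this header:

	/* before: an alias hid the struct tag */
	typedef struct s_comet_reg comet_t;
	extern void init_comet(void *, comet_t *, u_int32_t, int, u_int8_t);

	/* after: the tag is spelled out, no alias needed */
	extern void init_comet(void *, struct s_comet_reg *, u_int32_t, int,
			       u_int8_t);
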
diff --git a/drivers/staging/cxt1e1/functions.c b/drivers/staging/cxt1e1/functions.c
index d021b31..95218e28 100644
--- a/drivers/staging/cxt1e1/functions.c
+++ b/drivers/staging/cxt1e1/functions.c
@@ -274,7 +274,7 @@ VMETRO_TRACE (void *x)
void
VMETRO_TRIGGER (ci_t *ci, int x)
{
- comet_t *comet;
+ struct s_comet_reg *comet;
volatile u_int32_t data;
comet = ci->port[0].cometbase; /* default to COMET # 0 */
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
index 0ba8c3a..7a3a30c 100644
--- a/drivers/staging/cxt1e1/musycc.c
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -1,5 +1,5 @@
-unsigned int max_intcnt = 0;
-unsigned int max_bh = 0;
+static unsigned int max_intcnt = 0;
+static unsigned int max_bh = 0;
/*-----------------------------------------------------------------------------
* musycc.c -
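
The musycc.c hunk only changes linkage: the two counters are used by this file alone, so static keeps them out of the kernel's global symbol namespace. A minimal equivalent (checkpatch would also flag the explicit '= 0' initialisers as redundant for static storage, although this patch keeps them):

	static unsigned int max_intcnt;		/* zero-initialised by definition */
	static unsigned int max_bh;
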
diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c
index 4028ea1..a9d9575 100644
--- a/drivers/staging/cxt1e1/pmcc4_drv.c
+++ b/drivers/staging/cxt1e1/pmcc4_drv.c
@@ -194,7 +194,7 @@ checkPorts (ci_t *ci)
* alarms conflicts with NCOMM's interrupt servicing implementation.
*/
- comet_t *comet;
+ struct s_comet_reg *comet;
volatile u_int32_t value;
u_int32_t copyVal, LEDval;
@@ -507,7 +507,7 @@ c4_cleanup (void)
int
c4_get_portcfg (ci_t *ci)
{
- comet_t *comet;
+ struct s_comet_reg *comet;
int portnum, mask;
u_int32_t wdata, rdata;
@@ -561,7 +561,7 @@ c4_init (ci_t *ci, u_char *func0, u_char *func1)
for (portnum = 0; portnum < MUSYCC_NPORTS; portnum++)
{
pi = &ci->port[portnum];
- pi->cometbase = (comet_t *) ((u_int32_t *) (func1 + COMET_OFFSET (portnum)));
+ pi->cometbase = (struct s_comet_reg *) ((u_int32_t *) (func1 + COMET_OFFSET (portnum)));
pi->reg = (struct musycc_globalr *) ((u_char *) ci->reg + (portnum * 0x800));
pi->portnum = portnum;
pi->p.portnum = portnum;
@@ -693,7 +693,7 @@ c4_init2 (ci_t *ci)
int
c4_loop_port (ci_t *ci, int portnum, u_int8_t cmd)
{
- comet_t *comet;
+ struct s_comet_reg *comet;
volatile u_int32_t loopValue;
comet = ci->port[portnum].cometbase;
@@ -752,7 +752,7 @@ c4_loop_port (ci_t *ci, int portnum, u_int8_t cmd)
status_t
c4_frame_rw (ci_t *ci, struct sbecom_port_param *pp)
{
- comet_t *comet;
+ struct s_comet_reg *comet;
volatile u_int32_t data;
if (pp->portnum >= ci->max_port)/* sanity check */
diff --git a/drivers/staging/cxt1e1/pmcc4_private.h b/drivers/staging/cxt1e1/pmcc4_private.h
index b2b6e37..7edbd4e 100644
--- a/drivers/staging/cxt1e1/pmcc4_private.h
+++ b/drivers/staging/cxt1e1/pmcc4_private.h
@@ -133,7 +133,7 @@ struct c4_port_info
void *regram_saved; /* Original malloc value may have non-2KB
* boundary. Need to save for use when
* freeing. */
- comet_t *cometbase;
+ struct s_comet_reg *cometbase;
struct sbe_card_info *up;
/*
diff --git a/drivers/staging/cxt1e1/sbeid.c b/drivers/staging/cxt1e1/sbeid.c
index 6ec51bc..97c5c6e 100644
--- a/drivers/staging/cxt1e1/sbeid.c
+++ b/drivers/staging/cxt1e1/sbeid.c
@@ -20,190 +20,185 @@
#include "sbe_bid.h"
char *
-sbeid_get_bdname (ci_t *ci)
+sbeid_get_bdname(ci_t *ci)
{
- char *np = NULL;
+ char *np = NULL;
- switch (ci->brd_id)
- {
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1):
- np = "wanPTMC-256T3 <E1>";
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1):
- np = "wanPTMC-256T3 <T1>";
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L):
- np = "wanPMC-C4T1E1";
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L):
- np = "wanPMC-C2T1E1";
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L):
- np = "wanPMC-C1T1E1";
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1):
- np = "wanPCI-C4T1E1";
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1):
- np = "wanPCI-C2T1E1";
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1):
- np = "wanPCI-C1T1E1";
- break;
- default:
- /*** np = "<unknown>"; ***/
- np = "wanPCI-CxT1E1";
- break;
- }
+ switch (ci->brd_id) {
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1):
+ np = "wanPTMC-256T3 <E1>";
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1):
+ np = "wanPTMC-256T3 <T1>";
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L):
+ np = "wanPMC-C4T1E1";
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L):
+ np = "wanPMC-C2T1E1";
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L):
+ np = "wanPMC-C1T1E1";
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1):
+ np = "wanPCI-C4T1E1";
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1):
+ np = "wanPCI-C2T1E1";
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1):
+ np = "wanPCI-C1T1E1";
+ break;
+ default:
+ /*** np = "<unknown>"; ***/
+ np = "wanPCI-CxT1E1";
+ break;
+ }
- return np;
+ return np;
}
/* given the presetting of brd_id, set the corresponding hdw_id */
void
-sbeid_set_hdwbid (ci_t *ci)
+sbeid_set_hdwbid(ci_t *ci)
{
- /*
- * set SBE's unique hardware identification (for legacy boards might not
- * have this register implemented)
- */
+ /*
+ * set SBE's unique hardware identification (for legacy boards might not
+ * have this register implemented)
+ */
- switch (ci->brd_id)
- {
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1):
- ci->hdw_bid = SBE_BID_256T3_E1; /* 0x46 - SBE wanPTMC-256T3 (E1
- * Version) */
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1):
- ci->hdw_bid = SBE_BID_256T3_T1; /* 0x42 - SBE wanPTMC-256T3 (T1
- * Version) */
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L):
- /*
- * This Board ID is a generic identification. Use the found number
- * of ports to further define this hardware.
- */
- switch (ci->max_port)
- {
- default: /* shouldn't need a default, but have one
- * anyway */
- case 4:
- ci->hdw_bid = SBE_BID_PMC_C4T1E1; /* 0xC4 - SBE wanPMC-C4T1E1 */
- break;
- case 2:
- ci->hdw_bid = SBE_BID_PMC_C2T1E1; /* 0xC2 - SBE wanPMC-C2T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1);
- break;
- case 1:
- ci->hdw_bid = SBE_BID_PMC_C1T1E1; /* 0xC1 - SBE wanPMC-C1T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1);
- break;
- }
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L):
- ci->hdw_bid = SBE_BID_PMC_C2T1E1; /* 0xC2 - SBE wanPMC-C2T1E1 */
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L):
- ci->hdw_bid = SBE_BID_PMC_C1T1E1; /* 0xC1 - SBE wanPMC-C1T1E1 */
- break;
+ switch (ci->brd_id) {
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1):
+ ci->hdw_bid = SBE_BID_256T3_E1; /* 0x46 - SBE wanPTMC-256T3 (E1
+ * Version) */
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1):
+ ci->hdw_bid = SBE_BID_256T3_T1; /* 0x42 - SBE wanPTMC-256T3 (T1
+ * Version) */
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L):
+ /*
+ * This Board ID is a generic identification. Use the found number
+ * of ports to further define this hardware.
+ */
+ switch (ci->max_port) {
+ default: /* shouldn't need a default, but have one
+ * anyway */
+ case 4:
+ ci->hdw_bid = SBE_BID_PMC_C4T1E1; /* 0xC4 - SBE wanPMC-C4T1E1 */
+ break;
+ case 2:
+ ci->hdw_bid = SBE_BID_PMC_C2T1E1; /* 0xC2 - SBE wanPMC-C2T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1);
+ break;
+ case 1:
+ ci->hdw_bid = SBE_BID_PMC_C1T1E1; /* 0xC1 - SBE wanPMC-C1T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1);
+ break;
+ }
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L):
+ ci->hdw_bid = SBE_BID_PMC_C2T1E1; /* 0xC2 - SBE wanPMC-C2T1E1 */
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L):
+ ci->hdw_bid = SBE_BID_PMC_C1T1E1; /* 0xC1 - SBE wanPMC-C1T1E1 */
+ break;
#ifdef SBE_PMCC4_ENABLE
- /*
- * This case is entered as a result of the inability to obtain the
- * <bid> from the board's EEPROM. Assume a PCI board and set
- * <hdsbid> according to the number ofr found ports.
- */
- case 0:
- /* start by assuming 4-port for ZERO casing */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
- /* drop thru to set hdw_bid and alternate PCI CxT1E1 settings */
+ /*
+ * This case is entered as a result of the inability to obtain the
+ * <bid> from the board's EEPROM. Assume a PCI board and set
+ * <hdsbid> according to the number ofr found ports.
+ */
+ case 0:
+ /* start by assuming 4-port for ZERO casing */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
+ /* drop thru to set hdw_bid and alternate PCI CxT1E1 settings */
#endif
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1):
- /*
- * This Board ID is a generic identification. Use the number of
- * found ports to further define this hardware.
- */
- switch (ci->max_port)
- {
- default: /* shouldn't need a default, but have one
- * anyway */
- case 4:
- ci->hdw_bid = SBE_BID_PCI_C4T1E1; /* 0x04 - SBE wanPCI-C4T1E1 */
- break;
- case 2:
- ci->hdw_bid = SBE_BID_PCI_C2T1E1; /* 0x02 - SBE wanPCI-C2T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1);
- break;
- case 1:
- ci->hdw_bid = SBE_BID_PCI_C1T1E1; /* 0x01 - SBE wanPCI-C1T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1);
- break;
- }
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1):
- ci->hdw_bid = SBE_BID_PCI_C2T1E1; /* 0x02 - SBE wanPCI-C2T1E1 */
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1):
- ci->hdw_bid = SBE_BID_PCI_C1T1E1; /* 0x01 - SBE wanPCI-C1T1E1 */
- break;
- default:
- /*** bid = "<unknown>"; ***/
- ci->hdw_bid = SBE_BID_PMC_C4T1E1; /* 0x41 - SBE wanPTMC-C4T1E1 */
- break;
- }
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1):
+ /*
+ * This Board ID is a generic identification. Use the number of
+ * found ports to further define this hardware.
+ */
+ switch (ci->max_port) {
+ default: /* shouldn't need a default, but have one
+ * anyway */
+ case 4:
+ ci->hdw_bid = SBE_BID_PCI_C4T1E1; /* 0x04 - SBE wanPCI-C4T1E1 */
+ break;
+ case 2:
+ ci->hdw_bid = SBE_BID_PCI_C2T1E1; /* 0x02 - SBE wanPCI-C2T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1);
+ break;
+ case 1:
+ ci->hdw_bid = SBE_BID_PCI_C1T1E1; /* 0x01 - SBE wanPCI-C1T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1);
+ break;
+ }
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1):
+ ci->hdw_bid = SBE_BID_PCI_C2T1E1; /* 0x02 - SBE wanPCI-C2T1E1 */
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1):
+ ci->hdw_bid = SBE_BID_PCI_C1T1E1; /* 0x01 - SBE wanPCI-C1T1E1 */
+ break;
+ default:
+ /*** bid = "<unknown>"; ***/
+ ci->hdw_bid = SBE_BID_PMC_C4T1E1; /* 0x41 - SBE wanPTMC-C4T1E1 */
+ break;
+ }
}
/* given the presetting of hdw_bid, set the corresponding brd_id */
void
-sbeid_set_bdtype (ci_t *ci)
+sbeid_set_bdtype(ci_t *ci)
{
- /* set SBE's unique PCI VENDOR/DEVID */
- switch (ci->hdw_bid)
- {
- case SBE_BID_C1T3: /* SBE wanPMC-C1T3 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T3);
- break;
- case SBE_BID_C24TE1: /* SBE wanPTMC-C24TE1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_C24TE1);
- break;
- case SBE_BID_256T3_E1: /* SBE wanPTMC-256T3 E1 Version */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1);
- break;
- case SBE_BID_256T3_T1: /* SBE wanPTMC-256T3 T1 Version */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1);
- break;
- case SBE_BID_PMC_C4T1E1: /* 0xC4 - SBE wanPMC-C4T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1);
- break;
- case SBE_BID_PMC_C2T1E1: /* 0xC2 - SBE wanPMC-C2T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1);
- break;
- case SBE_BID_PMC_C1T1E1: /* 0xC1 - SBE wanPMC-C1T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1);
- break;
- case SBE_BID_PCI_C4T1E1: /* 0x04 - SBE wanPCI-C4T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
- break;
- case SBE_BID_PCI_C2T1E1: /* 0x02 - SBE wanPCI-C2T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1);
- break;
- case SBE_BID_PCI_C1T1E1: /* 0x01 - SBE wanPCI-C1T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1);
- break;
+ /* set SBE's unique PCI VENDOR/DEVID */
+ switch (ci->hdw_bid) {
+ case SBE_BID_C1T3: /* SBE wanPMC-C1T3 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T3);
+ break;
+ case SBE_BID_C24TE1: /* SBE wanPTMC-C24TE1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_C24TE1);
+ break;
+ case SBE_BID_256T3_E1: /* SBE wanPTMC-256T3 E1 Version */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1);
+ break;
+ case SBE_BID_256T3_T1: /* SBE wanPTMC-256T3 T1 Version */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1);
+ break;
+ case SBE_BID_PMC_C4T1E1: /* 0xC4 - SBE wanPMC-C4T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1);
+ break;
+ case SBE_BID_PMC_C2T1E1: /* 0xC2 - SBE wanPMC-C2T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1);
+ break;
+ case SBE_BID_PMC_C1T1E1: /* 0xC1 - SBE wanPMC-C1T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1);
+ break;
+ case SBE_BID_PCI_C4T1E1: /* 0x04 - SBE wanPCI-C4T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
+ break;
+ case SBE_BID_PCI_C2T1E1: /* 0x02 - SBE wanPCI-C2T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1);
+ break;
+ case SBE_BID_PCI_C1T1E1: /* 0x01 - SBE wanPCI-C1T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1);
+ break;
- default:
- /*** hdw_bid = "<unknown>"; ***/
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
- break;
- }
+ default:
+ /*** hdw_bid = "<unknown>"; ***/
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
+ break;
+ }
}
diff --git a/drivers/staging/dgap/dgap_conf.h b/drivers/staging/dgap/dgap_conf.h
index 8809701..484ed72 100644
--- a/drivers/staging/dgap/dgap_conf.h
+++ b/drivers/staging/dgap/dgap_conf.h
@@ -138,7 +138,7 @@
#define CU 91
#define PRINT 92
#define XPRINT 93
-#define CMAJOR 94
+#define CMAJOR 94
#define ALTPIN 95
#define STARTO 96
#define USEINTR 97
@@ -262,9 +262,9 @@ struct cnode {
} module;
char *ttyname;
-
+
char *cuname;
-
+
char *printname;
int majornumber;
diff --git a/drivers/staging/dgap/dgap_driver.c b/drivers/staging/dgap/dgap_driver.c
index 4c1515e..089d017 100644
--- a/drivers/staging/dgap/dgap_driver.c
+++ b/drivers/staging/dgap/dgap_driver.c
@@ -506,7 +506,7 @@ static int dgap_found_board(struct pci_dev *pdev, int id)
/* get the board structure and prep it */
brd = dgap_Board[dgap_NumBoards] =
- (struct board_t *) dgap_driver_kzmalloc(sizeof(struct board_t), GFP_KERNEL);
+ (struct board_t *) kzalloc(sizeof(struct board_t), GFP_KERNEL);
if (!brd) {
APR(("memory allocation for board structure failed\n"));
return(-ENOMEM);
@@ -514,7 +514,7 @@ static int dgap_found_board(struct pci_dev *pdev, int id)
/* make a temporary message buffer for the boot messages */
brd->msgbuf = brd->msgbuf_head =
- (char *) dgap_driver_kzmalloc(sizeof(char) * 8192, GFP_KERNEL);
+ (char *) kzalloc(sizeof(char) * 8192, GFP_KERNEL);
if(!brd->msgbuf) {
kfree(brd);
APR(("memory allocation for board msgbuf failed\n"));
@@ -925,20 +925,6 @@ static void dgap_init_globals(void)
/*
- * dgap_driver_kzmalloc()
- *
- * Malloc and clear memory,
- */
-void *dgap_driver_kzmalloc(size_t size, int priority)
-{
- void *p = kmalloc(size, priority);
- if(p)
- memset(p, 0, size);
- return(p);
-}
-
-
-/*
* dgap_mbuf()
*
* Used to print to the message buffer during board init.
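
dgap_driver_kzmalloc() was an open-coded kmalloc() followed by memset(); kzalloc() already returns zeroed memory with the same gfp flags, so the wrapper is deleted and its callers converted, here and in dgap_fep5.c further down. The replacement at a call site looks like this (the sizeof(*brd) spelling is the usual idiom, a small step beyond what the patch itself does):

	/* kzalloc() == kmalloc() + memset(0); needs <linux/slab.h> */
	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		return -ENOMEM;

The casts that remain in the converted calls, e.g. "(struct board_t *) kzalloc(...)", are unnecessary on a void * return in C and are usually dropped in a follow-up cleanup.
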
diff --git a/drivers/staging/dgap/dgap_driver.h b/drivers/staging/dgap/dgap_driver.h
index 7d631e8..2f7a55a7 100644
--- a/drivers/staging/dgap/dgap_driver.h
+++ b/drivers/staging/dgap/dgap_driver.h
@@ -578,7 +578,6 @@ struct channel_t {
*************************************************************************/
extern int dgap_ms_sleep(ulong ms);
-extern void *dgap_driver_kzmalloc(size_t size, int priority);
extern char *dgap_ioctl_name(int cmd);
extern void dgap_do_bios_load(struct board_t *brd, uchar __user *ubios, int len);
extern void dgap_do_fep_load(struct board_t *brd, uchar __user *ufep, int len);
diff --git a/drivers/staging/dgap/dgap_fep5.c b/drivers/staging/dgap/dgap_fep5.c
index 794cf9d..f75831a 100644
--- a/drivers/staging/dgap/dgap_fep5.c
+++ b/drivers/staging/dgap/dgap_fep5.c
@@ -6,16 +6,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*
* NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
*
@@ -39,6 +29,7 @@
#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
#include <linux/tty.h>
#include <linux/tty_flip.h> /* For tty_schedule_flip */
+#include <linux/slab.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
#include <linux/sched.h>
@@ -75,7 +66,7 @@ void dgap_do_config_load(uchar __user *uaddr, int len)
char buf[U2BSIZE];
int n;
- to_addr = dgap_config_buf = dgap_driver_kzmalloc(len + 1, GFP_ATOMIC);
+ to_addr = dgap_config_buf = kzalloc(len + 1, GFP_ATOMIC);
if (!dgap_config_buf) {
DPR_INIT(("dgap_do_config_load - unable to allocate memory for file\n"));
dgap_driver_state = DRIVER_NEED_CONFIG_LOAD;
@@ -99,7 +90,7 @@ void dgap_do_config_load(uchar __user *uaddr, int len)
to_addr += n;
from_addr += n;
n = U2BSIZE;
- }
+ }
dgap_config_buf[orig_len] = '\0';
@@ -130,8 +121,8 @@ int dgap_after_config_loaded(void)
/*
* allocate flip buffer for board.
*/
- dgap_Board[i]->flipbuf = dgap_driver_kzmalloc(MYFLIPLEN, GFP_ATOMIC);
- dgap_Board[i]->flipflagbuf = dgap_driver_kzmalloc(MYFLIPLEN, GFP_ATOMIC);
+ dgap_Board[i]->flipbuf = kzalloc(MYFLIPLEN, GFP_ATOMIC);
+ dgap_Board[i]->flipflagbuf = kzalloc(MYFLIPLEN, GFP_ATOMIC);
}
return rc;
@@ -166,9 +157,9 @@ static int dgap_usertoboard(struct board_t *brd, char *to_addr, char __user *fro
/* increment counts */
len -= n;
to_addr += n;
- from_addr += n;
+ from_addr += n;
n = U2BSIZE;
- }
+ }
return 0;
}
@@ -195,7 +186,7 @@ void dgap_do_bios_load(struct board_t *brd, uchar __user *ubios, int len)
*/
for (i = 0; i < 16; i++)
writeb(0, addr + POSTAREA + i);
-
+
/*
* Download bios
*/
@@ -364,7 +355,7 @@ static void dgap_do_reset_board(struct board_t *brd)
int i = 0;
if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase || !brd->re_map_port) {
- DPR_INIT(("dgap_do_reset_board() start. bad values. brd: %p mem: %p io: %p\n",
+ DPR_INIT(("dgap_do_reset_board() start. bad values. brd: %p mem: %p io: %p\n",
brd, brd ? brd->re_map_membase : 0, brd ? brd->re_map_port : 0));
return;
}
@@ -470,7 +461,7 @@ static void dgap_get_vpd(struct board_t *brd)
/*
* To get to the OTPROM memory, we have to send the boards base
- * address or'ed with 1 into the PCI Rom Address location.
+ * address or'ed with 1 into the PCI Rom Address location.
*/
magic = brd->membase | 0x01;
pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
@@ -492,7 +483,7 @@ static void dgap_get_vpd(struct board_t *brd)
* for the VPD offset.
*/
while (base_offset <= EXPANSION_ROM_SIZE) {
-
+
/*
* Lots of magic numbers here.
*
@@ -551,7 +542,7 @@ static void dgap_get_vpd(struct board_t *brd)
*/
void dgap_poll_tasklet(unsigned long data)
{
- struct board_t *bd = (struct board_t *) data;
+ struct board_t *bd = (struct board_t *) data;
ulong lock_flags;
ulong lock_flags2;
char *vaddr;
@@ -816,13 +807,13 @@ out:
*
*=======================================================================*/
void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint ncmds)
-{
+{
char *vaddr = NULL;
struct cm_t *cm_addr = NULL;
uint count;
uint n;
u16 head;
- u16 tail;
+ u16 tail;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
@@ -833,7 +824,7 @@ void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint n
if (ch->ch_bd->state == BOARD_FAILED) {
DPR_CORE(("%s:%d board is in failed state.\n", __FILE__, __LINE__));
return;
- }
+ }
/*
* Make sure the pointers are in range before
@@ -847,13 +838,13 @@ void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint n
cm_addr = (struct cm_t *) (vaddr + CMDBUF);
head = readw(&(cm_addr->cm_head));
- /*
+ /*
* Forget it if pointers out of range.
*/
if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
DPR_CORE(("%s:%d pointers out of range, failing board!\n", __FILE__, __LINE__));
ch->ch_bd->state = BOARD_FAILED;
- return;
+ return;
}
/*
@@ -869,7 +860,7 @@ void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint n
writew(head, &(cm_addr->cm_head));
/*
- * Wait if necessary before updating the head
+ * Wait if necessary before updating the head
* pointer to limit the number of outstanding
* commands to the FEP. If the time spent waiting
* is outlandish, declare the FEP dead.
@@ -890,14 +881,14 @@ void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint n
return;
}
udelay(10);
- }
+ }
}
/*=======================================================================
*
* dgap_cmdw - Sends a 1 word command to the FEP.
- *
+ *
* ch - Pointer to channel structure.
* cmd - Command to be sent.
* word - Integer containing word to be sent.
@@ -936,7 +927,7 @@ void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds)
cm_addr = (struct cm_t *) (vaddr + CMDBUF);
head = readw(&(cm_addr->cm_head));
- /*
+ /*
* Forget it if pointers out of range.
*/
if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
@@ -958,7 +949,7 @@ void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds)
/*
* Wait if necessary before updating the head
- * pointer to limit the number of outstanding
+ * pointer to limit the number of outstanding
* commands to the FEP. If the time spent waiting
* is outlandish, declare the FEP dead.
*/
@@ -978,7 +969,7 @@ void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds)
return;
}
udelay(10);
- }
+ }
}
@@ -986,7 +977,7 @@ void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds)
/*=======================================================================
*
* dgap_cmdw_ext - Sends a extended word command to the FEP.
- *
+ *
* ch - Pointer to channel structure.
* cmd - Command to be sent.
* word - Integer containing word to be sent.
@@ -1025,7 +1016,7 @@ static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
cm_addr = (struct cm_t *) (vaddr + CMDBUF);
head = readw(&(cm_addr->cm_head));
- /*
+ /*
* Forget it if pointers out of range.
*/
if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
@@ -1060,7 +1051,7 @@ static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
/*
* Wait if necessary before updating the head
- * pointer to limit the number of outstanding
+ * pointer to limit the number of outstanding
* commands to the FEP. If the time spent waiting
* is outlandish, declare the FEP dead.
*/
@@ -1080,7 +1071,7 @@ static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
return;
}
udelay(10);
- }
+ }
}
@@ -1102,7 +1093,7 @@ void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
-
+
/*
* Check parameters.
*/
@@ -1172,9 +1163,9 @@ uint dgap_get_custom_baud(struct channel_t *ch)
/*
* Go get from fep mem, what the fep
- * believes the custom baud rate is.
+ * believes the custom baud rate is.
*/
- offset = ((((*(unsigned short *)(vaddr + ECS_SEG)) << 4) +
+ offset = ((((*(unsigned short *)(vaddr + ECS_SEG)) << 4) +
(ch->ch_portnum * 0x28) + LINE_SPEED));
value = readw(vaddr + offset);
@@ -1210,7 +1201,7 @@ void dgap_firmware_reset_port(struct channel_t *ch)
/*=======================================================================
- *
+ *
* dgap_param - Set Digi parameters.
*
* struct tty_struct * - TTY for port.
@@ -1244,7 +1235,7 @@ int dgap_param(struct tty_struct *tty)
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return -ENXIO;
- bs = ch->ch_bs;
+ bs = ch->ch_bs;
if (!bs)
return -ENXIO;
@@ -1284,13 +1275,13 @@ int dgap_param(struct tty_struct *tty)
/*
* Now go get from fep mem, what the fep
- * believes the custom baud rate is.
+ * believes the custom baud rate is.
*/
ch->ch_baud_info = ch->ch_custom_speed = dgap_get_custom_baud(ch);
DPR_PARAM(("param: Got %d speed\n", ch->ch_custom_speed));
- /* Handle transition from B0 */
+ /* Handle transition from B0 */
if (ch->ch_flags & CH_BAUD0) {
ch->ch_flags &= ~(CH_BAUD0);
ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
@@ -1352,7 +1343,7 @@ int dgap_param(struct tty_struct *tty)
baud = 0;
}
- if (baud == 0)
+ if (baud == 0)
baud = 9600;
ch->ch_baud_info = baud;
@@ -1425,7 +1416,7 @@ int dgap_param(struct tty_struct *tty)
dgap_cmdw(ch, SCFLAG, (u16) cflag, 0);
}
- /* Handle transition from B0 */
+ /* Handle transition from B0 */
if (ch->ch_flags & CH_BAUD0) {
ch->ch_flags &= ~(CH_BAUD0);
ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
@@ -1475,7 +1466,7 @@ int dgap_param(struct tty_struct *tty)
if (ch->ch_digi.digi_flags & RTSPACE)
hflow |= D_RTS(ch);
if (ch->ch_digi.digi_flags & DTRPACE)
- hflow |= D_DTR(ch);
+ hflow |= D_DTR(ch);
if (ch->ch_digi.digi_flags & CTSPACE)
hflow |= D_CTS(ch);
if (ch->ch_digi.digi_flags & DSRPACE)
@@ -1488,7 +1479,7 @@ int dgap_param(struct tty_struct *tty)
/* Okay to have channel and board locks held calling this */
dgap_cmdb(ch, SHFLOW, (uchar) hflow, 0xff, 0);
- }
+ }
/*
@@ -1507,7 +1498,7 @@ int dgap_param(struct tty_struct *tty)
}
/*
- * Set modem control lines.
+ * Set modem control lines.
*/
mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
@@ -1524,12 +1515,12 @@ int dgap_param(struct tty_struct *tty)
}
/*
- * Read modem signals, and then call carrier function.
+ * Read modem signals, and then call carrier function.
*/
ch->ch_mistat = readb(&(bs->m_stat));
dgap_carrier(ch);
- /*
+ /*
* Set the start and stop characters.
*/
if (ch->ch_startc != ch->ch_fepstartc || ch->ch_stopc != ch->ch_fepstopc) {
@@ -1542,7 +1533,7 @@ int dgap_param(struct tty_struct *tty)
/*
* Set the Auxiliary start and stop characters.
- */
+ */
if (ch->ch_astartc != ch->ch_fepastartc || ch->ch_astopc != ch->ch_fepastopc) {
ch->ch_fepastartc = ch->ch_astartc;
ch->ch_fepastopc = ch->ch_astopc;
@@ -1609,7 +1600,7 @@ void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, unsigned char *
} else {
/* save value examination in next state */
ch->pscan_savechar = c;
- ch->pscan_state = 2;
+ ch->pscan_state = 2;
}
break;
@@ -1637,7 +1628,7 @@ void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, unsigned char *
count += 1;
ch->pscan_state = 0;
- }
+ }
}
*len = count;
DPR_PSCAN(("dgap_parity_scan finish\n"));
@@ -1721,9 +1712,8 @@ static int dgap_event(struct board_t *bd)
/*
* Make sure the interrupt is valid.
*/
- if ( port >= bd->nasync) {
+ if (port >= bd->nasync)
goto next;
- }
if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA))) {
goto next;
@@ -1779,7 +1769,7 @@ static int dgap_event(struct board_t *bd)
}
/*
- * Process Modem change signals.
+ * Process Modem change signals.
*/
if (reason & IFMODEM) {
ch->ch_mistat = modem;
@@ -1813,7 +1803,7 @@ static int dgap_event(struct board_t *bd)
ch->ch_tun.un_flags &= ~UN_LOW;
if (ch->ch_tun.un_flags & UN_ISOPEN) {
- if ((ch->ch_tun.un_tty->flags &
+ if ((ch->ch_tun.un_tty->flags &
(1 << TTY_DO_WRITE_WAKEUP)) &&
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
@@ -1841,7 +1831,7 @@ static int dgap_event(struct board_t *bd)
if (ch->ch_pun.un_flags & UN_LOW) {
ch->ch_pun.un_flags &= ~UN_LOW;
if (ch->ch_pun.un_flags & UN_ISOPEN) {
- if ((ch->ch_pun.un_tty->flags &
+ if ((ch->ch_pun.un_tty->flags &
(1 << TTY_DO_WRITE_WAKEUP)) &&
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
@@ -1879,7 +1869,7 @@ static int dgap_event(struct board_t *bd)
if (ch->ch_tun.un_flags & UN_EMPTY) {
ch->ch_tun.un_flags &= ~UN_EMPTY;
if (ch->ch_tun.un_flags & UN_ISOPEN) {
- if ((ch->ch_tun.un_tty->flags &
+ if ((ch->ch_tun.un_tty->flags &
(1 << TTY_DO_WRITE_WAKEUP)) &&
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
@@ -1905,7 +1895,7 @@ static int dgap_event(struct board_t *bd)
if (ch->ch_pun.un_flags & UN_EMPTY) {
ch->ch_pun.un_flags &= ~UN_EMPTY;
if (ch->ch_pun.un_flags & UN_ISOPEN) {
- if ((ch->ch_pun.un_tty->flags &
+ if ((ch->ch_pun.un_tty->flags &
(1 << TTY_DO_WRITE_WAKEUP)) &&
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
@@ -1945,4 +1935,4 @@ next:
DGAP_UNLOCK(bd->bd_lock, lock_flags);
return 0;
-}
+}
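
The dgap_cmdb()/dgap_cmdw() hunks above are whitespace-only, but the code they touch follows one pattern: write a command into a circular mailbox shared with the FEP, advance the head index, and back off when too many commands are outstanding. A rough standalone sketch of that bounded-ring idea — the sizes, names and the 16-command limit are invented for illustration and are not the FEP memory layout:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE       64      /* entries, power of two */
    #define MAX_OUTSTANDING 16      /* back off once this many are queued */

    struct cmd_ring {
            uint16_t head;          /* producer index (driver) */
            uint16_t tail;          /* consumer index (firmware) */
            uint16_t cmds[RING_SIZE];
    };

    static unsigned int ring_outstanding(const struct cmd_ring *r)
    {
            return (r->head - r->tail) & (RING_SIZE - 1);
    }

    static int ring_post(struct cmd_ring *r, uint16_t cmd)
    {
            if (ring_outstanding(r) >= MAX_OUTSTANDING)
                    return -1;      /* caller would poll/udelay() and retry */

            r->cmds[r->head] = cmd;
            r->head = (r->head + 1) & (RING_SIZE - 1);
            return 0;
    }

    int main(void)
    {
            struct cmd_ring r = { 0, 0, { 0 } };
            int rc = ring_post(&r, 0x42);

            printf("posted rc=%d, outstanding=%u\n", rc, ring_outstanding(&r));
            return 0;
    }

In the driver the real wait loop additionally gives up and marks the board failed if the backlog never drains, which is what the "declare the FEP dead" comments in the hunks refer to.
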
diff --git a/drivers/staging/dgap/dgap_parse.c b/drivers/staging/dgap/dgap_parse.c
index ff9d194..36fd93d 100644
--- a/drivers/staging/dgap/dgap_parse.c
+++ b/drivers/staging/dgap/dgap_parse.c
@@ -42,6 +42,7 @@
#include "dgap_types.h"
#include "dgap_fep5.h"
#include "dgap_driver.h"
+#include "dgap_parse.h"
#include "dgap_conf.h"
diff --git a/drivers/staging/dgap/dgap_trace.c b/drivers/staging/dgap/dgap_trace.c
index 0f9a956..a53db9e 100644
--- a/drivers/staging/dgap/dgap_trace.c
+++ b/drivers/staging/dgap/dgap_trace.c
@@ -17,15 +17,15 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
*
* This is shared code between Digi's CVS archive and the
* Linux Kernel sources.
* Changing the source just for reformatting needlessly breaks
* our CVS diff history.
*
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
*
*/
@@ -37,6 +37,7 @@
#include <linux/vmalloc.h>
#include "dgap_driver.h"
+#include "dgap_trace.h"
#define TRC_TO_CONSOLE 1
@@ -107,16 +108,16 @@ void dgap_tracef(const char *fmt, ...)
dgap_trcbufi = 0;
initd++;
- printk("dgap: tracing enabled - " TRC_DTRC
+ printk("dgap: tracing enabled - " TRC_DTRC
" 0x%lx 0x%x\n",
- (unsigned long)dgap_trcbuf,
+ (unsigned long)dgap_trcbuf,
dgap_trcbuf_size);
}
# if defined(TRC_ON_OVERFLOW_WRAP_AROUND)
/*
* This is the less CPU-intensive way to do things. We simply
- * wrap around before we fall off the end of the buffer. A
+ * wrap around before we fall off the end of the buffer. A
* tilde (~) demarcates the current end of the trace.
*
* This method should be used if you are concerned about race
@@ -131,14 +132,14 @@ void dgap_tracef(const char *fmt, ...)
dgap_trcbufi = 0;
}
- strcpy(&dgap_trcbuf[dgap_trcbufi], buf);
+ strcpy(&dgap_trcbuf[dgap_trcbufi], buf);
dgap_trcbufi += lenbuf;
dgap_trcbuf[dgap_trcbufi] = '~';
# elif defined(TRC_ON_OVERFLOW_SHIFT_BUFFER)
/*
* This is the more CPU-intensive way to do things. If we
- * venture into the last 1/8 of the buffer, we shift the
+ * venture into the last 1/8 of the buffer, we shift the
* last 7/8 of the buffer forward, wiping out the first 1/8.
* Advantage: No wrap-around, only truncation from the
* beginning.
diff --git a/drivers/staging/dgap/dgap_tty.c b/drivers/staging/dgap/dgap_tty.c
index 2a7a372..39fb4df 100644
--- a/drivers/staging/dgap/dgap_tty.c
+++ b/drivers/staging/dgap/dgap_tty.c
@@ -17,22 +17,22 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
*
* This is shared code between Digi's CVS archive and the
* Linux Kernel sources.
* Changing the source just for reformatting needlessly breaks
* our CVS diff history.
*
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
*/
/************************************************************************
- *
+ *
* This file implements the tty driver functionality for the
* FEP5 based product lines.
- *
+ *
************************************************************************
*
* $Id: dgap_tty.c,v 1.3 2011/06/23 12:11:31 markh Exp $
@@ -155,7 +155,7 @@ static const struct tty_operations dgap_tty_ops = {
.flush_chars = dgap_tty_flush_chars,
.ioctl = dgap_tty_ioctl,
.set_termios = dgap_tty_set_termios,
- .stop = dgap_tty_stop,
+ .stop = dgap_tty_stop,
.start = dgap_tty_start,
.throttle = dgap_tty_throttle,
.unthrottle = dgap_tty_unthrottle,
@@ -173,11 +173,11 @@ static const struct tty_operations dgap_tty_ops = {
/************************************************************************
- *
+ *
* TTY Initialization/Cleanup Functions
- *
+ *
************************************************************************/
-
+
/*
* dgap_tty_preinit()
*
@@ -187,7 +187,7 @@ int dgap_tty_preinit(void)
{
unsigned long flags;
- DGAP_LOCK(dgap_global_lock, flags);
+ DGAP_LOCK(dgap_global_lock, flags);
/*
* Allocate a buffer for doing the copy from user space to
@@ -202,7 +202,7 @@ int dgap_tty_preinit(void)
DPR_INIT(("unable to allocate tmp write buf"));
return (-ENOMEM);
}
-
+
DGAP_UNLOCK(dgap_global_lock, flags);
return(0);
}
@@ -226,14 +226,14 @@ int dgap_tty_register(struct board_t *brd)
brd->SerialDriver->name_base = 0;
brd->SerialDriver->major = 0;
brd->SerialDriver->minor_start = 0;
- brd->SerialDriver->type = TTY_DRIVER_TYPE_SERIAL;
- brd->SerialDriver->subtype = SERIAL_TYPE_NORMAL;
+ brd->SerialDriver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->SerialDriver->subtype = SERIAL_TYPE_NORMAL;
brd->SerialDriver->init_termios = DgapDefaultTermios;
brd->SerialDriver->driver_name = DRVSTR;
brd->SerialDriver->flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
/* The kernel wants space to store pointers to tty_structs */
- brd->SerialDriver->ttys = dgap_driver_kzmalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
+ brd->SerialDriver->ttys = kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
if (!brd->SerialDriver->ttys)
return(-ENOMEM);
@@ -259,14 +259,14 @@ int dgap_tty_register(struct board_t *brd)
brd->PrintDriver->name_base = 0;
brd->PrintDriver->major = 0;
brd->PrintDriver->minor_start = 0;
- brd->PrintDriver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->PrintDriver->type = TTY_DRIVER_TYPE_SERIAL;
brd->PrintDriver->subtype = SERIAL_TYPE_NORMAL;
brd->PrintDriver->init_termios = DgapDefaultTermios;
brd->PrintDriver->driver_name = DRVSTR;
brd->PrintDriver->flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
/* The kernel wants space to store pointers to tty_structs */
- brd->PrintDriver->ttys = dgap_driver_kzmalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
+ brd->PrintDriver->ttys = kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
if (!brd->PrintDriver->ttys)
return(-ENOMEM);
@@ -380,7 +380,7 @@ int dgap_tty_init(struct board_t *brd)
*/
for (i = 0; i < brd->nasync; i++) {
if (!brd->channels[i]) {
- brd->channels[i] = dgap_driver_kzmalloc(sizeof(struct channel_t), GFP_ATOMIC);
+ brd->channels[i] = kzalloc(sizeof(struct channel_t), GFP_ATOMIC);
if (!brd->channels[i]) {
DPR_CORE(("%s:%d Unable to allocate memory for channel struct\n",
__FILE__, __LINE__));
@@ -450,7 +450,7 @@ int dgap_tty_init(struct board_t *brd)
/*
* Set queue water marks, interrupt mask,
- * and general tty parameters.
+ * and general tty parameters.
*/
ch->ch_tlw = tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) : ch->ch_tsize / 2;
@@ -479,7 +479,7 @@ int dgap_tty_init(struct board_t *brd)
writew(0, &(ch->ch_bs->edelay));
else
writew(100, &(ch->ch_bs->edelay));
-
+
writeb(1, &(ch->ch_bs->idata));
}
@@ -506,7 +506,7 @@ void dgap_tty_post_uninit(void)
* dgap_tty_uninit()
*
* Uninitialize the TTY portion of this driver. Free all memory and
- * resources.
+ * resources.
*/
void dgap_tty_uninit(struct board_t *brd)
{
@@ -611,7 +611,7 @@ static void dgap_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *b
if (n == 0) {
return;
}
-
+
/*
* Copy as much data as will fit.
*/
@@ -661,9 +661,9 @@ static void dgap_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *b
/*=======================================================================
*
* dgap_input - Process received data.
- *
+ *
* ch - Pointer to channel structure.
- *
+ *
*=======================================================================*/
void dgap_input(struct channel_t *ch)
@@ -704,8 +704,8 @@ void dgap_input(struct channel_t *ch)
DGAP_LOCK(bd->bd_lock, lock_flags);
DGAP_LOCK(ch->ch_lock, lock_flags2);
- /*
- * Figure the number of characters in the buffer.
+ /*
+ * Figure the number of characters in the buffer.
* Exit immediately if none.
*/
@@ -775,13 +775,13 @@ void dgap_input(struct channel_t *ch)
len = min(len, (N_TTY_BUF_SIZE - 1));
ld = tty_ldisc_ref(tp);
-
+
#ifdef TTY_DONT_FLIP
/*
* If the DONT_FLIP flag is on, don't flush our buffer, and act
- * like the ld doesn't have any space to put the data right now.
+ * like the ld doesn't have any space to put the data right now.
*/
- if (test_bit(TTY_DONT_FLIP, &tp->flags))
+ if (test_bit(TTY_DONT_FLIP, &tp->flags))
len = 0;
#endif
@@ -879,9 +879,9 @@ void dgap_input(struct channel_t *ch)
}
-/************************************************************************
+/************************************************************************
* Determines when CARRIER changes state and takes appropriate
- * action.
+ * action.
************************************************************************/
void dgap_carrier(struct channel_t *ch)
{
@@ -889,7 +889,7 @@ void dgap_carrier(struct channel_t *ch)
int virt_carrier = 0;
int phys_carrier = 0;
-
+
DPR_CARR(("dgap_carrier called...\n"));
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
@@ -917,11 +917,11 @@ void dgap_carrier(struct channel_t *ch)
if (ch->ch_digi.digi_flags & DIGI_FORCEDCD) {
virt_carrier = 1;
- }
+ }
if (ch->ch_c_cflag & CLOCAL) {
virt_carrier = 1;
- }
+ }
DPR_CARR(("DCD: physical: %d virt: %d\n", phys_carrier, virt_carrier));
@@ -968,7 +968,7 @@ void dgap_carrier(struct channel_t *ch)
* "make pretend that carrier is there".
*/
if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) &&
- (phys_carrier == 0))
+ (phys_carrier == 0))
{
/*
@@ -991,7 +991,7 @@ void dgap_carrier(struct channel_t *ch)
tty_hangup(ch->ch_tun.un_tty);
}
- if (ch->ch_pun.un_open_count > 0) {
+ if (ch->ch_pun.un_open_count > 0) {
DPR_CARR(("Sending pr hangup\n"));
tty_hangup(ch->ch_pun.un_tty);
}
@@ -1002,7 +1002,7 @@ void dgap_carrier(struct channel_t *ch)
*/
if (virt_carrier == 1)
ch->ch_flags |= CH_FCAR;
- else
+ else
ch->ch_flags &= ~CH_FCAR;
if (phys_carrier == 1)
@@ -1013,9 +1013,9 @@ void dgap_carrier(struct channel_t *ch)
/************************************************************************
- *
+ *
* TTY Entry points and helper functions
- *
+ *
************************************************************************/
/*
@@ -1165,7 +1165,7 @@ static int dgap_tty_open(struct tty_struct *tty, struct file *file)
*/
dgap_param(tty);
- /*
+ /*
* follow protocol for opening port
*/
@@ -1195,13 +1195,13 @@ static int dgap_tty_open(struct tty_struct *tty, struct file *file)
}
-/*
+/*
* dgap_block_til_ready()
*
* Wait for DCD, if needed.
*/
static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch)
-{
+{
int retval = 0;
struct un_t *un = NULL;
ulong lock_flags;
@@ -1246,7 +1246,7 @@ static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struc
* If either unit is in the middle of the fragile part of close,
* we just cannot touch the channel safely.
* Go back to sleep, knowing that when the channel can be
- * touched safely, the close routine will signal the
+ * touched safely, the close routine will signal the
* ch_wait_flags to wake us back up.
*/
if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING)) {
@@ -1354,7 +1354,7 @@ static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struc
* dgap_tty_hangup()
*
* Hangup the port. Like a close, but don't wait for output to drain.
- */
+ */
static void dgap_tty_hangup(struct tty_struct *tty)
{
struct board_t *bd;
@@ -1436,7 +1436,7 @@ static void dgap_tty_close(struct tty_struct *tty, struct file *file)
*/
APR(("tty->count is 1, un open count is %d\n", un->un_open_count));
un->un_open_count = 1;
- }
+ }
if (--un->un_open_count < 0) {
APR(("bad serial port open count of %d\n", un->un_open_count));
@@ -1497,7 +1497,7 @@ static void dgap_tty_close(struct tty_struct *tty, struct file *file)
dgap_cmdb( ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0 );
/*
- * Go to sleep to ensure RTS/DTR
+ * Go to sleep to ensure RTS/DTR
* have been dropped for modems to see it.
*/
if (ch->ch_close_delay) {
@@ -1535,7 +1535,7 @@ static void dgap_tty_close(struct tty_struct *tty, struct file *file)
wake_up_interruptible(&un->un_flags_wait);
DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
+
DPR_BASIC(("dgap_tty_close - complete\n"));
}
@@ -1637,7 +1637,7 @@ static int dgap_tty_chars_in_buffer(struct tty_struct *tty)
}
}
- DPR_WRITE(("dgap_tty_chars_in_buffer. Port: %x - %d (head: %d tail: %d tsize: %d)\n",
+ DPR_WRITE(("dgap_tty_chars_in_buffer. Port: %x - %d (head: %d tail: %d tsize: %d)\n",
ch->ch_portnum, chars, thead, ttail, ch->ch_tsize));
return(chars);
}
@@ -1702,14 +1702,14 @@ static int dgap_wait_for_drain(struct tty_struct *tty)
}
-/*
+/*
* dgap_maxcps_room
*
* Reduces bytes_available to the max number of characters
* that can be sent currently given the maxcps value, and
* returns the new bytes_available. This only affects printer
* output.
- */
+ */
static int dgap_maxcps_room(struct tty_struct *tty, int bytes_available)
{
struct channel_t *ch = NULL;
@@ -1750,7 +1750,7 @@ static int dgap_maxcps_room(struct tty_struct *tty, int bytes_available)
}
else {
/* no room in the buffer */
- cps_limit = 0;
+ cps_limit = 0;
}
bytes_available = min(cps_limit, bytes_available);
@@ -1793,7 +1793,7 @@ static inline void dgap_set_firmware_event(struct un_t *un, unsigned int event)
* dgap_tty_write_room()
*
* Return space available in Tx buffer
- */
+ */
static int dgap_tty_write_room(struct tty_struct *tty)
{
struct channel_t *ch = NULL;
@@ -1831,7 +1831,7 @@ static int dgap_tty_write_room(struct tty_struct *tty)
ret = dgap_maxcps_room(tty, ret);
/*
- * If we are printer device, leave space for
+ * If we are printer device, leave space for
* possibly both the on and off strings.
*/
if (un->un_type == DGAP_PRINT) {
@@ -1856,7 +1856,7 @@ static int dgap_tty_write_room(struct tty_struct *tty)
*/
dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
+
DPR_WRITE(("dgap_tty_write_room - %d tail: %d head: %d\n", ret, tail, head));
return(ret);
@@ -1867,7 +1867,7 @@ static int dgap_tty_write_room(struct tty_struct *tty)
* dgap_tty_put_char()
*
* Put a character into ch->ch_buf
- *
+ *
* - used by the line discipline for OPOST processing
*/
static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
@@ -2094,7 +2094,7 @@ static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf, int
if (from_user) {
DGAP_UNLOCK(ch->ch_lock, lock_flags);
up(&dgap_TmpWriteSem);
- }
+ }
else {
DGAP_UNLOCK(ch->ch_lock, lock_flags);
}
@@ -2206,12 +2206,12 @@ static int dgap_tty_tiocmset(struct tty_struct *tty, struct file *file,
if (set & TIOCM_RTS) {
ch->ch_mforce |= D_RTS(ch);
ch->ch_mval |= D_RTS(ch);
- }
+ }
if (set & TIOCM_DTR) {
ch->ch_mforce |= D_DTR(ch);
ch->ch_mval |= D_DTR(ch);
- }
+ }
if (clear & TIOCM_RTS) {
ch->ch_mforce |= D_RTS(ch);
@@ -2316,7 +2316,7 @@ static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout)
/*
* dgap_send_xchar()
- *
+ *
* send a high priority character, called by ld.
*/
static void dgap_tty_send_xchar(struct tty_struct *tty, char c)
@@ -2529,7 +2529,7 @@ static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command, uns
/*
- * dgap_tty_digigeta()
+ * dgap_tty_digigeta()
*
* Ioctl to get the information for ditty.
*
@@ -2571,7 +2571,7 @@ static int dgap_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retin
/*
- * dgap_tty_digiseta()
+ * dgap_tty_digiseta()
*
* Ioctl to set the information for ditty.
*
@@ -2614,10 +2614,10 @@ static int dgap_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_i
memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t));
- if (ch->ch_digi.digi_maxcps < 1)
+ if (ch->ch_digi.digi_maxcps < 1)
ch->ch_digi.digi_maxcps = 1;
- if (ch->ch_digi.digi_maxcps > 10000)
+ if (ch->ch_digi.digi_maxcps > 10000)
ch->ch_digi.digi_maxcps = 10000;
if (ch->ch_digi.digi_bufsize < 10)
@@ -2647,7 +2647,7 @@ static int dgap_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_i
/*
- * dgap_tty_digigetedelay()
+ * dgap_tty_digigetedelay()
*
* Ioctl to get the current edelay setting.
*
@@ -2689,7 +2689,7 @@ static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo)
/*
- * dgap_tty_digisetedelay()
+ * dgap_tty_digisetedelay()
*
* Ioctl to set the EDELAY setting
*
@@ -2783,7 +2783,7 @@ static int dgap_tty_digigetcustombaud(struct tty_struct *tty, int __user *retinf
/*
- * dgap_tty_digisetcustombaud()
+ * dgap_tty_digisetcustombaud()
*
* Ioctl to set the custom baud rate setting
*/
@@ -2898,7 +2898,7 @@ static void dgap_tty_throttle(struct tty_struct *tty)
un = tty->driver_data;
if (!un || un->magic != DGAP_UNIT_MAGIC)
return;
-
+
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
@@ -2938,7 +2938,7 @@ static void dgap_tty_unthrottle(struct tty_struct *tty)
un = tty->driver_data;
if (!un || un->magic != DGAP_UNIT_MAGIC)
return;
-
+
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
@@ -2979,7 +2979,7 @@ static void dgap_tty_start(struct tty_struct *tty)
un = tty->driver_data;
if (!un || un->magic != DGAP_UNIT_MAGIC)
return;
-
+
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
@@ -3016,7 +3016,7 @@ static void dgap_tty_stop(struct tty_struct *tty)
un = tty->driver_data;
if (!un || un->magic != DGAP_UNIT_MAGIC)
return;
-
+
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
@@ -3039,7 +3039,7 @@ static void dgap_tty_stop(struct tty_struct *tty)
}
-/*
+/*
* dgap_tty_flush_chars()
*
* Flush the cook buffer
@@ -3066,7 +3066,7 @@ static void dgap_tty_flush_chars(struct tty_struct *tty)
un = tty->driver_data;
if (!un || un->magic != DGAP_UNIT_MAGIC)
return;
-
+
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
@@ -3092,7 +3092,7 @@ static void dgap_tty_flush_chars(struct tty_struct *tty)
/*
* dgap_tty_flush_buffer()
- *
+ *
* Flush Tx buffer (make in == out)
*/
static void dgap_tty_flush_buffer(struct tty_struct *tty)
@@ -3110,7 +3110,7 @@ static void dgap_tty_flush_buffer(struct tty_struct *tty)
un = tty->driver_data;
if (!un || un->magic != DGAP_UNIT_MAGIC)
return;
-
+
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
@@ -3153,7 +3153,7 @@ static void dgap_tty_flush_buffer(struct tty_struct *tty)
* The IOCTL function and all of its helpers
*
*****************************************************************************/
-
+
/*
* dgap_tty_ioctl()
*
@@ -3186,7 +3186,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return (-ENODEV);
- DPR_IOCTL(("dgap_tty_ioctl start on port %d - cmd %s (%x), arg %lx\n",
+ DPR_IOCTL(("dgap_tty_ioctl start on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
DGAP_LOCK(bd->bd_lock, lock_flags);
@@ -3205,7 +3205,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
case TCSBRK:
/*
- * TCSBRK is SVID version: non-zero arg --> no break
+ * TCSBRK is SVID version: non-zero arg --> no break
* this behaviour is exploited by tcdrain().
*
* According to POSIX.1 spec (7.2.2.1.2) breaks should be
@@ -3236,7 +3236,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGAP_UNLOCK(ch->ch_lock, lock_flags2);
DGAP_UNLOCK(bd->bd_lock, lock_flags);
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
return(0);
@@ -3270,7 +3270,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGAP_UNLOCK(ch->ch_lock, lock_flags2);
DGAP_UNLOCK(bd->bd_lock, lock_flags);
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
return(0);
@@ -3303,11 +3303,11 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGAP_UNLOCK(ch->ch_lock, lock_flags2);
DGAP_UNLOCK(bd->bd_lock, lock_flags);
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
return 0;
-
+
case TIOCCBRK:
/*
* FEP5 doesn't support turning off a break unconditionally.
@@ -3343,7 +3343,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGAP_UNLOCK(bd->bd_lock, lock_flags);
return(0);
-
+
case TIOCMGET:
DGAP_UNLOCK(ch->ch_lock, lock_flags2);
DGAP_UNLOCK(bd->bd_lock, lock_flags);
@@ -3359,8 +3359,8 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/*
* Here are any additional ioctl's that we want to implement
*/
-
- case TCFLSH:
+
+ case TCFLSH:
/*
* The linux tty driver doesn't have a flush
* input routine for the driver, assuming all backed
@@ -3369,7 +3369,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
* act on the ioctl, but then lie and say we didn't
* so the line discipline will process the flush
* also.
- */
+ */
rc = tty_check_change(tty);
if (rc) {
DGAP_UNLOCK(ch->ch_lock, lock_flags2);
@@ -3407,13 +3407,13 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
tty_wakeup(tty);
DGAP_LOCK(bd->bd_lock, lock_flags);
DGAP_LOCK(ch->ch_lock, lock_flags2);
- }
+ }
- /* pretend we didn't recognize this IOCTL */
+ /* pretend we didn't recognize this IOCTL */
DGAP_UNLOCK(ch->ch_lock, lock_flags2);
DGAP_UNLOCK(bd->bd_lock, lock_flags);
- DPR_IOCTL(("dgap_tty_ioctl (LINE:%d) finish on port %d - cmd %s (%x), arg %lx\n",
+ DPR_IOCTL(("dgap_tty_ioctl (LINE:%d) finish on port %d - cmd %s (%x), arg %lx\n",
__LINE__, ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
return(-ENOIOCTLCMD);
@@ -3445,7 +3445,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
return(-EINTR);
}
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
/* pretend we didn't recognize this */
@@ -3462,7 +3462,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
}
/* pretend we didn't recognize this */
- return(-ENOIOCTLCMD);
+ return(-ENOIOCTLCMD);
case TCXONC:
/*
@@ -3572,7 +3572,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGAP_UNLOCK(bd->bd_lock, lock_flags);
DPR_IOCTL(("dgap_tty_ioctl - in default\n"));
- DPR_IOCTL(("dgap_tty_ioctl end - cmd %s (%x), arg %lx\n",
+ DPR_IOCTL(("dgap_tty_ioctl end - cmd %s (%x), arg %lx\n",
dgap_ioctl_name(cmd), cmd, arg));
return(-ENOIOCTLCMD);
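
Within the dgap_tty.c hunks, dgap_maxcps_room() throttles printer output to a configured characters-per-second budget with a bounded amount of credit. A loose sketch of that kind of limiter, with invented names, no locking, and not the driver's actual arithmetic:

    #include <linux/jiffies.h>
    #include <linux/kernel.h>

    /*
     * Sketch of a CPS budget: allow at most 'bufsize' characters of credit,
     * refilled at 'cps' characters per second.  '*deadline' persists per port.
     */
    static int cps_room(unsigned long *deadline, int cps, int bufsize, int wanted)
    {
            unsigned long now = jiffies;
            unsigned long full = now + (HZ * bufsize) / cps;
            int room;

            if (time_before(*deadline, now)) {
                    *deadline = now;                /* port was idle: full credit */
                    room = bufsize;
            } else if (time_before(*deadline, full)) {
                    room = ((full - *deadline) * cps) / HZ;
            } else {
                    room = 0;                       /* budget exhausted for now */
            }

            room = min(room, wanted);
            *deadline += (HZ * room) / cps;         /* spend the credit we grant */
            return room;
    }
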
diff --git a/drivers/staging/dgap/downld.c b/drivers/staging/dgap/downld.c
index 638c5da..1f4aa2e 100644
--- a/drivers/staging/dgap/downld.c
+++ b/drivers/staging/dgap/downld.c
@@ -24,7 +24,7 @@
**
** This is the daemon that sends the fep, bios, and concentrator images
** from user space to the driver.
-** BUGS:
+** BUGS:
** If the file changes in the middle of the download, you probably
** will get what you deserve.
**
@@ -121,7 +121,7 @@ struct downld_t *dp; /* conc. download */
/*
- * The same for either the FEP or the BIOS.
+ * The same for either the FEP or the BIOS.
* Append the downldio header, issue the ioctl, then free
* the buffer. Not horribly CPU efficient, but quite RAM efficient.
*/
@@ -136,7 +136,7 @@ void squirt(int req_type, int bdid, struct image_info *ii)
/*
* If this binary comes from a file, stat it to see how
* large it is. Yes, we intentionally do this each
- * time for the binary may change between loads.
+ * time for the binary may change between loads.
*/
if (ii->pathname) {
@@ -144,7 +144,7 @@ void squirt(int req_type, int bdid, struct image_info *ii)
if (sfd < 0 ) {
myperror(ii->pathname);
- goto squirt_end;
+ goto squirt_end;
}
if (fstat(sfd, &sb) == -1 ) {
@@ -152,7 +152,7 @@ void squirt(int req_type, int bdid, struct image_info *ii)
goto squirt_end;
}
- ii->len = sb.st_size ;
+ ii->len = sb.st_size;
}
size_buf = ii->len + sizeof(struct downldio);
@@ -165,7 +165,7 @@ void squirt(int req_type, int bdid, struct image_info *ii)
dliop = (struct downldio *) malloc(size_buf);
if (dliop == NULL) {
- fprintf(stderr,"%s: can't get %d bytes of memory; aborting\n",
+ fprintf(stderr,"%s: can't get %d bytes of memory; aborting\n",
pgm, size_buf);
exit (1);
}
@@ -185,7 +185,7 @@ void squirt(int req_type, int bdid, struct image_info *ii)
if (debugflag)
printf("sending %d bytes of %s %s from %s\n",
- ii->len,
+ ii->len,
(ii->type == IFEP) ? "FEP" : (ii->type == IBIOS) ? "BIOS" : "CONFIG",
ii->name ? ii->name : "",
(ii->pathname) ? ii->pathname : "internal image" );
@@ -209,13 +209,13 @@ squirt_end:
/*
- * See if we need to reload the download image in core
- *
+ * See if we need to reload the download image in core
+ *
*/
void consider_file_rescan(struct image_info *ii)
{
- int sfd ;
- int len ;
+ int sfd;
+ int len;
struct stat sb;
/* This operation only makes sense when we're working from a file */
@@ -232,14 +232,14 @@ void consider_file_rescan(struct image_info *ii)
myperror(ii->pathname);
exit(1);
}
-
- /* If the file hasn't changed since we last did this,
- * and we have not done a free() on the image, bail
+
+ /* If the file hasn't changed since we last did this,
+ * and we have not done a free() on the image, bail
*/
if (ii->image && (sb.st_mtime == ii->mtime))
goto end_rescan;
- ii->len = len = sb.st_size ;
+ ii->len = len = sb.st_size;
/* Record the timestamp of the file */
ii->mtime = sb.st_mtime;
@@ -249,12 +249,12 @@ void consider_file_rescan(struct image_info *ii)
* have a memory leak.
*/
if ( ii->image ) {
- free( ii->image );
+ free( ii->image );
/* ii->image = NULL; */ /* not necessary */
}
- /* This image will be kept only long enough for the
- * download to happen. After sending the last block,
+ /* This image will be kept only long enough for the
+ * download to happen. After sending the last block,
* it will be freed
*/
ii->image = malloc(len) ;
@@ -267,14 +267,14 @@ void consider_file_rescan(struct image_info *ii)
}
if (read(sfd, ii->image, len) < len) {
- fprintf(stderr,"%s: read error on %s; aborting\n",
+ fprintf(stderr,"%s: read error on %s; aborting\n",
pgm, ii->pathname);
exit (1);
}
end_rescan:
close(sfd);
-
+
}
}
@@ -284,12 +284,12 @@ end_rescan:
struct image_info * find_conc_image()
{
- int x ;
- struct image_info *i = NULL ;
+ int x;
+ struct image_info *i = NULL;
for ( x = 0; x < nimages; x++ ) {
i=&image_list[x];
-
+
if(i->type != ICONC)
continue;
@@ -305,8 +305,8 @@ struct image_info * find_conc_image()
*/
if ((dp->dl_type != 'P' ) && ( ip->dl_srev == dp->dl_srev ))
return i;
- }
- return NULL ;
+ }
+ return NULL;
}
@@ -378,7 +378,7 @@ int main(int argc, char **argv)
** the list before built in images so that the command line images
** can override the built in ones.
*/
-
+
/* allocate space for the list */
nimages = argc - 2;
@@ -390,15 +390,15 @@ int main(int argc, char **argv)
nimages += count;
/* Really should just remove the variable "image_list".... robertl */
- image_list = images ;
-
+ image_list = images;
+
/* get the images from the command line */
for(x = 2; x < argc; x++) {
- int xx;
+ int xx;
/*
- * strip off any leading path information for
- * determining file type
+ * strip off any leading path information for
+ * determining file type
*/
if( (fname = strrchr(argv[x],'/')) == NULL)
fname = argv[x];
@@ -406,18 +406,18 @@ int main(int argc, char **argv)
fname++; /* skip the slash */
for (xx = 0; xx < count; xx++) {
- if (strcmp(fname, images[xx].fname) == 0 ) {
+ if (strcmp(fname, images[xx].fname) == 0 ) {
images[xx].pathname = argv[x];
/* image should be NULL until */
/* space is malloced */
- images[xx].image = NULL ;
+ images[xx].image = NULL;
}
}
}
sleep(3);
-
+
/*
** Endless loop: get a request from the fep, and service that request.
*/
@@ -425,7 +425,7 @@ int main(int argc, char **argv)
/* get the request */
if (debugflag)
printf("b4 get ioctl...");
-
+
if (ioctl(fd,DIGI_DLREQ_GET, &dlio) == -1 ) {
if (errorprint) {
fprintf(stderr,
@@ -438,7 +438,7 @@ int main(int argc, char **argv)
if (debugflag)
printf("dlio.req_type is %d bd %d\n",
dlio.req_type,dlio.bdid);
-
+
switch(dlio.req_type) {
case DLREQ_BIOS:
/*
@@ -447,18 +447,18 @@ int main(int argc, char **argv)
for ( x = 0; x < nimages; x++ ) {
if(image_list[x].type != IBIOS)
continue;
-
- if ((dlio.image.fi.type & FAMILY) ==
+
+ if ((dlio.image.fi.type & FAMILY) ==
image_list[x].family) {
-
- if ( image_list[x].family == T_CX ) {
- if ((dlio.image.fi.type & BUSTYPE)
+
+ if ( image_list[x].family == T_CX ) {
+ if ((dlio.image.fi.type & BUSTYPE)
== T_PCIBUS ) {
- if ( image_list[x].subtype
+ if ( image_list[x].subtype
== T_PCIBUS )
break;
}
- else {
+ else {
break;
}
}
@@ -466,15 +466,15 @@ int main(int argc, char **argv)
/* If subtype of image is T_PCIBUS, it is */
/* a PCI EPC image, so the board must */
/* have bus type T_PCIBUS to match */
- if ((dlio.image.fi.type & BUSTYPE)
+ if ((dlio.image.fi.type & BUSTYPE)
== T_PCIBUS ) {
- if ( image_list[x].subtype
+ if ( image_list[x].subtype
== T_PCIBUS )
break;
}
- else {
+ else {
/* NON PCI EPC doesn't use PCI image */
- if ( image_list[x].subtype
+ if ( image_list[x].subtype
!= T_PCIBUS )
break;
}
@@ -484,12 +484,12 @@ int main(int argc, char **argv)
}
else if ((dlio.image.fi.type & SUBTYPE) == image_list[x].subtype) {
/* PCXR board will break out of the loop here */
- if ( image_list[x].subtype == T_PCXR ) {
+ if ( image_list[x].subtype == T_PCXR ) {
break;
}
}
}
-
+
if ( x >= nimages) {
/*
** no valid images exist
@@ -514,7 +514,7 @@ int main(int argc, char **argv)
}
squirt(dlio.req_type, dlio.bdid, &image_list[x]);
break ;
-
+
case DLREQ_FEP:
/*
** find the fep image for this type
@@ -522,17 +522,17 @@ int main(int argc, char **argv)
for ( x = 0; x < nimages; x++ ) {
if(image_list[x].type != IFEP)
continue;
- if( (dlio.image.fi.type & FAMILY) ==
+ if( (dlio.image.fi.type & FAMILY) ==
image_list[x].family ) {
- if ( image_list[x].family == T_CX ) {
+ if ( image_list[x].family == T_CX ) {
/* C/X PCI board */
- if ((dlio.image.fi.type & BUSTYPE)
+ if ((dlio.image.fi.type & BUSTYPE)
== T_PCIBUS ) {
if ( image_list[x].subtype
== T_PCIBUS )
break;
}
- else {
+ else {
/* Regular CX */
break;
}
@@ -541,15 +541,15 @@ int main(int argc, char **argv)
/* If subtype of image is T_PCIBUS, it is */
/* a PCI EPC image, so the board must */
/* have bus type T_PCIBUS to match */
- if ((dlio.image.fi.type & BUSTYPE)
+ if ((dlio.image.fi.type & BUSTYPE)
== T_PCIBUS ) {
- if ( image_list[x].subtype
+ if ( image_list[x].subtype
== T_PCIBUS )
break;
}
- else {
+ else {
/* NON PCI EPC doesn't use PCI image */
- if ( image_list[x].subtype
+ if ( image_list[x].subtype
!= T_PCIBUS )
break;
}
@@ -559,12 +559,12 @@ int main(int argc, char **argv)
}
else if ((dlio.image.fi.type & SUBTYPE) == image_list[x].subtype) {
/* PCXR board will break out of the loop here */
- if ( image_list[x].subtype == T_PCXR ) {
+ if ( image_list[x].subtype == T_PCXR ) {
break;
}
}
}
-
+
if ( x >= nimages) {
/*
** no valid images exist
@@ -613,7 +613,7 @@ int main(int argc, char **argv)
}
break;
-
+
case DLREQ_CONFIG:
for ( x = 0; x < nimages; x++ ) {
if(image_list[x].type != ICONFIG)
@@ -658,15 +658,15 @@ int main(int argc, char **argv)
*/
for ( x = 0; x < nimages; x++ ) {
ii=&image_list[x];
-
+
if(image_list[x].type != ICONC)
continue;
-
+
consider_file_rescan(ii) ;
-
+
ip = (struct downld_t *) image_list[x].image;
if (ip == NULL) continue;
-
+
/*
* When I removed Clusterport, I kept only the
* code that I was SURE wasn't ClusterPort.
@@ -674,11 +674,11 @@ int main(int argc, char **argv)
*/
if ((dp->dl_type != 'P' ) &&
- (ip->dl_lrev <= dp->dl_lrev ) &&
+ (ip->dl_lrev <= dp->dl_lrev ) &&
( dp->dl_lrev <= ip->dl_hrev))
break;
}
-
+
if ( x >= nimages ) {
/*
** No valid images exist
@@ -691,7 +691,7 @@ int main(int argc, char **argv)
}
continue;
}
-
+
} else {
/*
** find image version required
@@ -706,40 +706,40 @@ int main(int argc, char **argv)
continue;
}
}
-
+
/*
** download block of image
*/
-
+
offset = 1024 * dp->dl_seq;
-
+
/*
** test if block requested within image
*/
- if ( offset < ii->len ) {
-
+ if ( offset < ii->len ) {
+
/*
** if it is, determine block size, set segment,
** set size, set pointers, and copy block
*/
if (( bsize = ii->len - offset ) > 1024 )
bsize = 1024;
-
+
/*
** copy image version info to download area
*/
dp->dl_srev = ip->dl_srev;
dp->dl_lrev = ip->dl_lrev;
dp->dl_hrev = ip->dl_hrev;
-
+
dp->dl_seg = (64 * dp->dl_seq) + ip->dl_seg;
dp->dl_size = bsize;
-
+
down = (char *)&dp->dl_data[0];
image = (char *)((char *)ip + offset);
-
+
memcpy(down, image, bsize);
- }
+ }
else {
/*
** Image has been downloaded, set segment and
@@ -747,24 +747,24 @@ int main(int argc, char **argv)
*/
dp->dl_seg = ip->dl_seg;
dp->dl_size = 0;
-
+
/* Now, we can release the concentrator */
/* image from memory if we're running */
/* from filesystem images */
-
+
if (ii->pathname)
if (ii->image) {
free(ii->image);
- ii->image = NULL ;
- }
+ ii->image = NULL;
+ }
}
-
+
if (debugflag)
printf(
"sending conc dl section %d to %s from %s\n",
dp->dl_seq, ii->name,
ii->pathname ? ii->pathname : "Internal Image");
-
+
if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) {
if (errorprint) {
fprintf(stderr,
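
consider_file_rescan() in downld.c re-reads a firmware image from disk only when stat() reports an st_mtime different from the cached copy, or when the image was freed after the previous download. A compact userspace sketch of that cache-by-mtime idea, with invented names and simplified error handling:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    static char  *cached_image;
    static time_t cached_mtime;

    /* Return the file's contents, reloading only if it changed on disk. */
    static char *load_if_changed(const char *path, size_t *len)
    {
            struct stat sb;
            FILE *fp;

            if (stat(path, &sb) == -1)
                    return NULL;

            if (cached_image && sb.st_mtime == cached_mtime) {
                    *len = (size_t)sb.st_size;      /* cache still fresh */
                    return cached_image;
            }

            free(cached_image);
            cached_image = malloc(sb.st_size ? sb.st_size : 1);
            if (!cached_image)
                    return NULL;

            fp = fopen(path, "rb");
            if (!fp || fread(cached_image, 1, sb.st_size, fp) != (size_t)sb.st_size) {
                    if (fp)
                            fclose(fp);
                    free(cached_image);
                    cached_image = NULL;
                    return NULL;
            }
            fclose(fp);

            cached_mtime = sb.st_mtime;
            *len = (size_t)sb.st_size;
            return cached_image;
    }
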
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index fdc1aab..708adbb 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -119,7 +119,10 @@ static inline void cls_set_cts_flow_control(struct channel_t *ch)
/* Write old LCR value back out, which turns enhanced access off */
writeb(lcrb, &ch->ch_cls_uart->lcr);
- /* Enable interrupts for CTS flow, turn off interrupts for received XOFF chars */
+ /*
+ * Enable interrupts for CTS flow, turn off interrupts for
+ * received XOFF chars
+ */
ier |= (UART_EXAR654_IER_CTSDSR);
ier &= ~(UART_EXAR654_IER_XOFF);
writeb(ier, &ch->ch_cls_uart->ier);
@@ -167,7 +170,10 @@ static inline void cls_set_ixon_flow_control(struct channel_t *ch)
/* Write old LCR value back out, which turns enhanced access off */
writeb(lcrb, &ch->ch_cls_uart->lcr);
- /* Disable interrupts for CTS flow, turn on interrupts for received XOFF chars */
+ /*
+ * Disable interrupts for CTS flow, turn on interrupts for
+ * received XOFF chars
+ */
ier &= ~(UART_EXAR654_IER_CTSDSR);
ier |= (UART_EXAR654_IER_XOFF);
writeb(ier, &ch->ch_cls_uart->ier);
@@ -207,7 +213,10 @@ static inline void cls_set_no_output_flow_control(struct channel_t *ch)
/* Write old LCR value back out, which turns enhanced access off */
writeb(lcrb, &ch->ch_cls_uart->lcr);
- /* Disable interrupts for CTS flow, turn off interrupts for received XOFF chars */
+ /*
+ * Disable interrupts for CTS flow, turn off interrupts for
+ * received XOFF chars
+ */
ier &= ~(UART_EXAR654_IER_CTSDSR);
ier &= ~(UART_EXAR654_IER_XOFF);
writeb(ier, &ch->ch_cls_uart->ier);
@@ -220,8 +229,8 @@ static inline void cls_set_no_output_flow_control(struct channel_t *ch)
&ch->ch_cls_uart->isr_fcr);
ch->ch_r_watermark = 0;
- ch->ch_t_tlevel = 16;
- ch->ch_r_tlevel = 16;
+ ch->ch_t_tlevel = 16;
+ ch->ch_r_tlevel = 16;
}
@@ -350,8 +359,8 @@ static inline void cls_set_no_input_flow_control(struct channel_t *ch)
UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
&ch->ch_cls_uart->isr_fcr);
- ch->ch_t_tlevel = 16;
- ch->ch_r_tlevel = 16;
+ ch->ch_t_tlevel = 16;
+ ch->ch_r_tlevel = 16;
}
@@ -380,12 +389,13 @@ static inline void cls_clear_break(struct channel_t *ch, int force)
/* Turn break off, and unset some variables */
if (ch->ch_flags & CH_BREAK_SENDING) {
- if ((jiffies >= ch->ch_stop_sending_break) || force) {
+ if (time_after(jiffies, ch->ch_stop_sending_break) || force) {
uchar temp = readb(&ch->ch_cls_uart->lcr);
- writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr);
+ writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr);
ch->ch_flags &= ~(CH_BREAK_SENDING);
ch->ch_stop_sending_break = 0;
- DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", jiffies));
+ DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n",
+ jiffies));
}
}
DGNC_UNLOCK(ch->ch_lock, lock_flags);
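
The cls_clear_break() hunk above replaces a raw "jiffies >= ch->ch_stop_sending_break" test with time_after(), which compares via signed subtraction and therefore keeps working when the jiffies counter wraps. A minimal sketch of the idiom — the helper names and the 250 ms window are illustrative only:

    #include <linux/jiffies.h>
    #include <linux/printk.h>

    /* Arm a wrap-safe deadline roughly 250 ms in the future. */
    static unsigned long example_arm_break_window(void)
    {
            return jiffies + msecs_to_jiffies(250);
    }

    /* Later, test it with time_after() instead of ">=". */
    static void example_check_break_window(unsigned long deadline)
    {
            if (time_after(jiffies, deadline))
                    pr_info("break window has elapsed\n");
    }
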
@@ -420,7 +430,8 @@ static inline void cls_parse_isr(struct dgnc_board *brd, uint port)
if (isr & UART_IIR_NO_INT)
break;
- DPR_INTR(("%s:%d port: %x isr: %x\n", __FILE__, __LINE__, port, isr));
+ DPR_INTR(("%s:%d port: %x isr: %x\n", __FILE__, __LINE__,
+ port, isr));
/* Receive Interrupt pending */
if (isr & (UART_IIR_RDI | UART_IIR_RDI_TIMEOUT)) {
@@ -473,11 +484,11 @@ static void cls_param(struct tty_struct *tty)
uchar uart_lcr = 0;
uchar ier = 0;
uchar uart_ier = 0;
- uint baud = 9600;
+ uint baud = 9600;
int quot = 0;
- struct dgnc_board *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
- struct un_t *un;
+ struct un_t *un;
if (!tty || tty->magic != TTY_MAGIC)
return;
@@ -495,7 +506,8 @@ static void cls_param(struct tty_struct *tty)
return;
DPR_PARAM(("param start: tdev: %x cflags: %x oflags: %x iflags: %x\n",
- ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag, ch->ch_c_iflag));
+ ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag,
+ ch->ch_c_iflag));
/*
* If baud rate is zero, flush queues, and set mval to drop DTR.
@@ -506,7 +518,7 @@ static void cls_param(struct tty_struct *tty)
ch->ch_w_head = ch->ch_w_tail = 0;
cls_flush_uart_write(ch);
- cls_flush_uart_read(ch);
+ cls_flush_uart_read(ch);
/* The baudrate is B0 so all modem lines are to be dropped. */
ch->ch_flags |= (CH_BAUD0);
@@ -558,8 +570,12 @@ static void cls_param(struct tty_struct *tty)
4800, 9600, 19200, 38400 }
};
- /* Only use the TXPrint baud rate if the terminal unit is NOT open */
- if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGNC_PRINT))
+ /*
+ * Only use the TXPrint baud rate if the terminal
+ * unit is NOT open
+ */
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
+ (un->un_type == DGNC_PRINT))
baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
else
baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
@@ -572,7 +588,8 @@ static void cls_param(struct tty_struct *tty)
jindex = baud;
- if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16)) {
+ if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) &&
+ (jindex < 16)) {
baud = bauds[iindex][jindex];
} else {
DPR_IOCTL(("baud indices were out of range (%d)(%d)",
@@ -598,13 +615,11 @@ static void cls_param(struct tty_struct *tty)
}
}
- if (ch->ch_c_cflag & PARENB) {
+ if (ch->ch_c_cflag & PARENB)
lcr |= UART_LCR_PARITY;
- }
- if (!(ch->ch_c_cflag & PARODD)) {
+ if (!(ch->ch_c_cflag & PARODD))
lcr |= UART_LCR_EPAR;
- }
/*
* Not all platforms support mark/space parity,
@@ -648,31 +663,28 @@ static void cls_param(struct tty_struct *tty)
writeb((quot & 0xff), &ch->ch_cls_uart->txrx);
writeb((quot >> 8), &ch->ch_cls_uart->ier);
writeb(lcr, &ch->ch_cls_uart->lcr);
- }
+ }
if (uart_lcr != lcr)
writeb(lcr, &ch->ch_cls_uart->lcr);
- if (ch->ch_c_cflag & CREAD) {
+ if (ch->ch_c_cflag & CREAD)
ier |= (UART_IER_RDI | UART_IER_RLSI);
- }
- else {
+ else
ier &= ~(UART_IER_RDI | UART_IER_RLSI);
- }
/*
* Have the UART interrupt on modem signal changes ONLY when
* we are in hardware flow control mode, or CLOCAL/FORCEDCD is not set.
*/
- if ((ch->ch_digi.digi_flags & CTSPACE) || (ch->ch_digi.digi_flags & RTSPACE) ||
- (ch->ch_c_cflag & CRTSCTS) || !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) ||
+ if ((ch->ch_digi.digi_flags & CTSPACE) ||
+ (ch->ch_digi.digi_flags & RTSPACE) ||
+ (ch->ch_c_cflag & CRTSCTS) ||
+ !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) ||
!(ch->ch_c_cflag & CLOCAL))
- {
- ier |= UART_IER_MSI;
- }
- else {
- ier &= ~UART_IER_MSI;
- }
+ ier |= UART_IER_MSI;
+ else
+ ier &= ~UART_IER_MSI;
ier |= UART_IER_THRI;
@@ -681,29 +693,33 @@ static void cls_param(struct tty_struct *tty)
if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
cls_set_cts_flow_control(ch);
- }
- else if (ch->ch_c_iflag & IXON) {
- /* If start/stop is set to disable, then we should disable flow control */
- if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ } else if (ch->ch_c_iflag & IXON) {
+ /*
+ * If start/stop is set to disable, then we should
+ * disable flow control
+ */
+ if ((ch->ch_startc == _POSIX_VDISABLE) ||
+ (ch->ch_stopc == _POSIX_VDISABLE))
cls_set_no_output_flow_control(ch);
else
cls_set_ixon_flow_control(ch);
- }
- else {
+ } else {
cls_set_no_output_flow_control(ch);
}
if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
cls_set_rts_flow_control(ch);
- }
- else if (ch->ch_c_iflag & IXOFF) {
- /* If start/stop is set to disable, then we should disable flow control */
- if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ } else if (ch->ch_c_iflag & IXOFF) {
+ /*
+ * If start/stop is set to disable, then we should disable
+ * flow control
+ */
+ if ((ch->ch_startc == _POSIX_VDISABLE) ||
+ (ch->ch_stopc == _POSIX_VDISABLE))
cls_set_no_input_flow_control(ch);
else
cls_set_ixoff_flow_control(ch);
- }
- else {
+ } else {
cls_set_no_input_flow_control(ch);
}
@@ -719,7 +735,7 @@ static void cls_param(struct tty_struct *tty)
*/
static void cls_tasklet(unsigned long data)
{
- struct dgnc_board *bd = (struct dgnc_board *) data;
+ struct dgnc_board *bd = (struct dgnc_board *) data;
struct channel_t *ch;
ulong lock_flags;
int i;
@@ -802,7 +818,8 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
unsigned long lock_flags;
if (!brd) {
- APR(("Received interrupt (%d) with null board associated\n", irq));
+ APR(("Received interrupt (%d) with null board associated\n",
+ irq));
return IRQ_NONE;
}
@@ -810,7 +827,9 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
* Check to make sure its for us.
*/
if (brd->magic != DGNC_BOARD_MAGIC) {
- APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n", irq));
+ APR((
+ "Received interrupt (%d) with a board pointer "
+ "that wasn't ours!\n", irq));
return IRQ_NONE;
}
@@ -826,7 +845,9 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
/* If 0, no interrupts pending */
if (!poll_reg) {
- DPR_INTR(("Kernel interrupted to me, but no pending interrupts...\n"));
+ DPR_INTR((
+ "Kernel interrupted to me, but no pending "
+ "interrupts...\n"));
DGNC_UNLOCK(brd->bd_intr_lock, lock_flags);
return IRQ_NONE;
}
@@ -834,9 +855,8 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
DPR_INTR(("%s:%d poll_reg: %x\n", __FILE__, __LINE__, poll_reg));
/* Parse each port to find out what caused the interrupt */
- for (i = 0; i < brd->nasync; i++) {
+ for (i = 0; i < brd->nasync; i++)
cls_parse_isr(brd, i);
- }
/*
* Schedule tasklet to more in-depth servicing at a better time.
@@ -868,8 +888,8 @@ static void cls_enable_receiver(struct channel_t *ch)
static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
{
- int qleft = 0;
- uchar linestatus = 0;
+ int qleft = 0;
+ uchar linestatus = 0;
uchar error_mask = 0;
ushort head;
ushort tail;
@@ -885,7 +905,8 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
tail = ch->ch_r_tail;
/* Store how much space we have left in the queue */
- if ((qleft = tail - head - 1) < 0)
+ qleft = (tail - head - 1);
+ if (qleft < 0)
qleft += RQUEUEMASK + 1;
/*
@@ -912,9 +933,9 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
}
/*
- * If our queue is full, we have no choice but to drop some data.
- * The assumption is that HWFLOW or SWFLOW should have stopped
- * things way way before we got to this point.
+ * If our queue is full, we have no choice but to drop some
+ * data. The assumption is that HWFLOW or SWFLOW should have
+ * stopped things way way before we got to this point.
*
* I decided that I wanted to ditch the oldest data first,
* I hope thats okay with everyone? Yes? Good.
@@ -928,13 +949,16 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
qleft++;
}
- ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE | UART_LSR_FE);
+ ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE
+ | UART_LSR_FE);
ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx);
- dgnc_sniff_nowait_nolock(ch, "UART READ", ch->ch_rqueue + head, 1);
+ dgnc_sniff_nowait_nolock(ch, "UART READ",
+ ch->ch_rqueue + head, 1);
qleft--;
- DPR_READ(("DATA/LSR pair: %x %x\n", ch->ch_rqueue[head], ch->ch_equeue[head]));
+ DPR_READ(("DATA/LSR pair: %x %x\n", ch->ch_rqueue[head],
+ ch->ch_equeue[head]));
if (ch->ch_equeue[head] & UART_LSR_PE)
ch->ch_err_parity++;
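
The qleft computation split out of the if-condition above is the usual circular-buffer free-space formula for a ring that keeps one slot empty. A self-contained sketch, with a stand-in RQUEUEMASK for a power-of-two queue size:

#include <assert.h>

#define RQUEUEMASK 0x1fff   /* stand-in: queue size 0x2000, minus one */

/* Free space in a ring that keeps one slot empty to tell full from empty. */
static int rqueue_free_space(unsigned short head, unsigned short tail)
{
	int qleft = tail - head - 1;

	if (qleft < 0)
		qleft += RQUEUEMASK + 1;
	return qleft;
}

int main(void)
{
	assert(rqueue_free_space(0, 0) == RQUEUEMASK);   /* empty ring */
	assert(rqueue_free_space(10, 11) == 0);          /* full ring */
	return 0;
}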
@@ -966,22 +990,19 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
{
ulong lock_flags;
struct channel_t *ch;
- struct un_t *un;
+ struct un_t *un;
int rc = 0;
- if (!tty || tty->magic != TTY_MAGIC) {
+ if (!tty || tty->magic != TTY_MAGIC)
return -ENXIO;
- }
un = (struct un_t *) tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC) {
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
return -ENXIO;
- }
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return -ENXIO;
- }
DGNC_LOCK(ch->ch_lock, lock_flags);
un->un_flags |= UN_EMPTY;
@@ -990,24 +1011,25 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
/*
* NOTE: Do something with time passed in.
*/
- rc = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0));
+ rc = wait_event_interruptible(un->un_flags_wait,
+ ((un->un_flags & UN_EMPTY) == 0));
/* If ret is non-zero, user ctrl-c'ed us */
if (rc)
DPR_IOCTL(("%d Drain - User ctrl c'ed\n", __LINE__));
- return rc;
+ return rc;
}
/* Channel lock MUST be held before calling this function! */
static void cls_flush_uart_write(struct channel_t *ch)
{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- }
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_cls_uart->isr_fcr);
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT),
+ &ch->ch_cls_uart->isr_fcr);
udelay(10);
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
@@ -1017,9 +1039,8 @@ static void cls_flush_uart_write(struct channel_t *ch)
/* Channel lock MUST be held before calling this function! */
static void cls_flush_uart_read(struct channel_t *ch)
{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- }
/*
* For complete POSIX compatibility, we should be purging the
@@ -1032,7 +1053,8 @@ static void cls_flush_uart_read(struct channel_t *ch)
* So for now, we will leave the code #ifdef'ed out...
*/
#if 0
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_cls_uart->isr_fcr);
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
#endif
udelay(10);
}
@@ -1059,7 +1081,8 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
}
/* If port is "stopped", don't send any data to the UART */
- if ((ch->ch_flags & CH_FORCED_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) {
+ if ((ch->ch_flags & CH_FORCED_STOP) ||
+ (ch->ch_flags & CH_BREAK_SENDING)) {
DGNC_UNLOCK(ch->ch_lock, lock_flags);
return;
}
@@ -1071,10 +1094,10 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
n = 32;
- /* cache head and tail of queue */
- head = ch->ch_w_head & WQUEUEMASK;
- tail = ch->ch_w_tail & WQUEUEMASK;
- qlen = (head - tail) & WQUEUEMASK;
+ /* cache head and tail of queue */
+ head = ch->ch_w_head & WQUEUEMASK;
+ tail = ch->ch_w_tail & WQUEUEMASK;
+ qlen = (head - tail) & WQUEUEMASK;
/* Find minimum of the FIFO space, versus queue length */
n = min(n, qlen);
@@ -1083,7 +1106,8 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
/*
* If RTS Toggle mode is on, turn on RTS now if not already set,
- * and make sure we get an event when the data transfer has completed.
+ * and make sure we get an event when the data transfer has
+ * completed.
*/
if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
if (!(ch->ch_mostat & UART_MCR_RTS)) {
@@ -1095,7 +1119,8 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
/*
* If DTR Toggle mode is on, turn on DTR now if not already set,
- * and make sure we get an event when the data transfer has completed.
+ * and make sure we get an event when the data transfer has
+ * completed.
*/
if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
if (!(ch->ch_mostat & UART_MCR_DTR)) {
@@ -1105,7 +1130,8 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
ch->ch_tun.un_flags |= (UN_EMPTY);
}
writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_cls_uart->txrx);
- dgnc_sniff_nowait_nolock(ch, "UART WRITE", ch->ch_wqueue + ch->ch_w_tail, 1);
+ dgnc_sniff_nowait_nolock(ch, "UART WRITE",
+ ch->ch_wqueue + ch->ch_w_tail, 1);
DPR_WRITE(("Tx data: %x\n", ch->ch_wqueue[ch->ch_w_tail]));
ch->ch_w_tail++;
ch->ch_w_tail &= WQUEUEMASK;
@@ -1125,17 +1151,20 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
static void cls_parse_modem(struct channel_t *ch, uchar signals)
{
- volatile uchar msignals = signals;
+ uchar msignals = signals;
+ ulong lock_flags;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- DPR_MSIGS(("cls_parse_modem: port: %d signals: %d\n", ch->ch_portnum, msignals));
+ DPR_MSIGS(("cls_parse_modem: port: %d signals: %d\n",
+ ch->ch_portnum, msignals));
/*
* Do altpin switching. Altpin switches DCD and DSR.
* This prolly breaks DSRPACE, so we should be more clever here.
*/
+ DGNC_LOCK(ch->ch_lock, lock_flags);
if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
uchar mswap = signals;
if (mswap & UART_MSR_DDCD) {
@@ -1155,10 +1184,15 @@ static void cls_parse_modem(struct channel_t *ch, uchar signals)
msignals |= UART_MSR_DCD;
}
}
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
- /* Scrub off lower bits. They signify delta's, which I don't care about */
+ /*
+	 * Scrub off lower bits. They signify deltas, which I don't
+ * care about
+ */
signals &= 0xf0;
+ DGNC_LOCK(ch->ch_lock, lock_flags);
if (msignals & UART_MSR_DCD)
ch->ch_mistat |= UART_MSR_DCD;
else
@@ -1178,9 +1212,11 @@ static void cls_parse_modem(struct channel_t *ch, uchar signals)
ch->ch_mistat |= UART_MSR_CTS;
else
ch->ch_mistat &= ~UART_MSR_CTS;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
- DPR_MSIGS(("Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n",
+ DPR_MSIGS((
+ "Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n",
ch->ch_portnum,
!!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR),
!!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS),
@@ -1204,7 +1240,7 @@ static void cls_assert_modem_signals(struct channel_t *ch)
if (ch->ch_flags & CH_LOOPBACK)
out |= UART_MCR_LOOP;
- writeb(out, &ch->ch_cls_uart->mcr);
+ writeb(out, &ch->ch_cls_uart->mcr);
/* Give time for the UART to actually drop the signals */
udelay(10);
@@ -1219,7 +1255,7 @@ static void cls_send_start_character(struct channel_t *ch)
if (ch->ch_startc != _POSIX_VDISABLE) {
ch->ch_xon_sends++;
writeb(ch->ch_startc, &ch->ch_cls_uart->txrx);
- }
+ }
}
@@ -1231,7 +1267,7 @@ static void cls_send_stop_character(struct channel_t *ch)
if (ch->ch_stopc != _POSIX_VDISABLE) {
ch->ch_xoff_sends++;
writeb(ch->ch_stopc, &ch->ch_cls_uart->txrx);
- }
+ }
}
@@ -1259,10 +1295,11 @@ static void cls_uart_init(struct channel_t *ch)
/* Write old LCR value back out, which turns enhanced access off */
writeb(lcrb, &ch->ch_cls_uart->lcr);
- /* Clear out UART and FIFO */
+ /* Clear out UART and FIFO */
readb(&ch->ch_cls_uart->txrx);
- writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), &ch->ch_cls_uart->isr_fcr);
+ writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT),
+ &ch->ch_cls_uart->isr_fcr);
udelay(10);
ch->ch_flags |= (CH_FIFO_ENABLED | CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
@@ -1302,8 +1339,7 @@ static uint cls_get_uart_bytes_left(struct channel_t *ch)
if (ch->ch_flags & CH_TX_FIFO_EMPTY)
tasklet_schedule(&ch->ch_bd->helper_tasklet);
left = 1;
- }
- else {
+ } else {
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
left = 0;
}
@@ -1333,10 +1369,11 @@ static void cls_send_break(struct channel_t *ch, int msecs)
writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr);
ch->ch_flags &= ~(CH_BREAK_SENDING);
ch->ch_stop_sending_break = 0;
- DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", jiffies));
+ DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n",
+ jiffies));
}
return;
- }
+ }
/*
* Set the time we should stop sending the break.
@@ -1350,7 +1387,9 @@ static void cls_send_break(struct channel_t *ch, int msecs)
uchar temp = readb(&ch->ch_cls_uart->lcr);
writeb((temp | UART_LCR_SBC), &ch->ch_cls_uart->lcr);
ch->ch_flags |= (CH_BREAK_SENDING);
- DPR_IOCTL(("Port %d. Starting UART_LCR_SBC! start: %lx should end: %lx\n",
+ DPR_IOCTL((
+ "Port %d. Starting UART_LCR_SBC! start: %lx "
+ "should end: %lx\n",
ch->ch_portnum, jiffies, ch->ch_stop_sending_break));
}
}
@@ -1373,8 +1412,8 @@ static void cls_send_immediate_char(struct channel_t *ch, unsigned char c)
static void cls_vpd(struct dgnc_board *brd)
{
- ulong vpdbase; /* Start of io base of the card */
- u8 __iomem *re_map_vpdbase;/* Remapped memory of the card */
+ ulong vpdbase; /* Start of io base of the card */
+ u8 __iomem *re_map_vpdbase;/* Remapped memory of the card */
int i = 0;
@@ -1389,12 +1428,12 @@ static void cls_vpd(struct dgnc_board *brd)
if (!re_map_vpdbase)
return;
- /* Store the VPD into our buffer */
- for (i = 0; i < 0x40; i++) {
+ /* Store the VPD into our buffer */
+ for (i = 0; i < 0x40; i++) {
brd->vpd[i] = readb(re_map_vpdbase + i);
- printk("%x ", brd->vpd[i]);
- }
- printk("\n");
+ pr_info("%x ", brd->vpd[i]);
+ }
+ pr_info("\n");
if (re_map_vpdbase)
iounmap(re_map_vpdbase);
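
Note that the printk-to-pr_info conversion above still makes one log call per VPD byte plus a bare trailing newline, which tends to come out as one log record per byte. If a readable dump is the goal, one common alternative is to format a line into a local buffer first; a hedged userspace sketch of that approach, with printf standing in for the kernel log call:

#include <stdio.h>
#include <stdint.h>

#define VPD_LEN 0x40

/* Dump the VPD bytes as one hex line per 16 bytes instead of one call per byte. */
static void dump_vpd(const uint8_t *vpd)
{
	char line[3 * 16 + 1];
	int i, n;

	for (i = 0; i < VPD_LEN; i += 16) {
		n = 0;
		for (int j = 0; j < 16 && i + j < VPD_LEN; j++)
			n += snprintf(line + n, sizeof(line) - n, "%02x ",
				      vpd[i + j]);
		printf("vpd: %s\n", line);
	}
}

int main(void)
{
	uint8_t vpd[VPD_LEN];

	for (int i = 0; i < VPD_LEN; i++)
		vpd[i] = (uint8_t)i;
	dump_vpd(vpd);
	return 0;
}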
diff --git a/drivers/staging/dgnc/dgnc_trace.c b/drivers/staging/dgnc/dgnc_trace.c
index a98b7d4..2f62f2a 100644
--- a/drivers/staging/dgnc/dgnc_trace.c
+++ b/drivers/staging/dgnc/dgnc_trace.c
@@ -35,6 +35,7 @@
#include <linux/vmalloc.h>
#include "dgnc_driver.h"
+#include "dgnc_trace.h"
#define TRC_TO_CONSOLE 1
@@ -63,16 +64,16 @@ void dgnc_tracef(const char *fmt, ...)
void dgnc_tracef(const char *fmt, ...)
{
- va_list ap;
- char buf[TRC_MAXMSG+1];
- size_t lenbuf;
- int i;
- static int failed = FALSE;
+ va_list ap;
+ char buf[TRC_MAXMSG+1];
+ size_t lenbuf;
+ int i;
+ static int failed = FALSE;
# if defined(TRC_TO_KMEM)
unsigned long flags;
#endif
- if(failed)
+ if (failed)
return;
# if defined(TRC_TO_KMEM)
DGNC_LOCK(dgnc_tracef_lock, flags);
@@ -86,7 +87,7 @@ void dgnc_tracef(const char *fmt, ...)
# if defined(TRC_TO_KMEM)
{
- static int initd=0;
+ static int initd = 0;
/*
* Now, in addition to (or instead of) printing this stuff out
@@ -95,7 +96,7 @@ void dgnc_tracef(const char *fmt, ...)
*/
if (!initd) {
dgnc_trcbuf = (char *) vmalloc(dgnc_trcbuf_size);
- if(!dgnc_trcbuf) {
+ if (!dgnc_trcbuf) {
failed = TRUE;
printk("dgnc: tracing init failed!\n");
return;
@@ -179,6 +180,6 @@ void dgnc_tracef(const char *fmt, ...)
*/
void dgnc_tracer_free(void)
{
- if(dgnc_trcbuf)
+ if (dgnc_trcbuf)
vfree(dgnc_trcbuf);
}
diff --git a/drivers/staging/dgrp/dgrp_driver.c b/drivers/staging/dgrp/dgrp_driver.c
index 08eedf0..b60a8da 100644
--- a/drivers/staging/dgrp/dgrp_driver.c
+++ b/drivers/staging/dgrp/dgrp_driver.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/tty.h>
-#include <linux/init.h>
/*
* PortServer includes
diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c
index 33ac7fb..1f61b89 100644
--- a/drivers/staging/dgrp/dgrp_net_ops.c
+++ b/drivers/staging/dgrp/dgrp_net_ops.c
@@ -2232,6 +2232,177 @@ done:
return rtn;
}
+/*
+ * Common Packet Handling code
+ */
+
+static void handle_data_in_packet(struct nd_struct *nd, struct ch_struct *ch,
+ long dlen, long plen, int n1, u8 *dbuf)
+{
+ char *error;
+ long n;
+ long remain;
+ u8 *buf;
+ u8 *b;
+
+ remain = nd->nd_remain;
+ nd->nd_tx_work = 1;
+
+ /*
+ * Otherwise data should appear only when we are
+ * in the CS_READY state.
+ */
+
+ if (ch->ch_state < CS_READY) {
+ error = "Data received before RWIN established";
+ nd->nd_remain = 0;
+ nd->nd_state = NS_SEND_ERROR;
+ nd->nd_error = error;
+ }
+
+ /*
+ * Assure that the data received is within the
+ * allowable window.
+ */
+
+ n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
+
+ if (dlen > n) {
+ error = "Receive data overrun";
+ nd->nd_remain = 0;
+ nd->nd_state = NS_SEND_ERROR;
+ nd->nd_error = error;
+ }
+
+ /*
+ * If we received 3 or less characters,
+ * assume it is a human typing, and set RTIME
+ * to 10 milliseconds.
+ *
+ * If we receive 10 or more characters,
+ * assume it's not a human typing, and set RTIME
+ * to 100 milliseconds.
+ */
+
+ if (ch->ch_edelay != DGRP_RTIME) {
+ if (ch->ch_rtime != ch->ch_edelay) {
+ ch->ch_rtime = ch->ch_edelay;
+ ch->ch_flag |= CH_PARAM;
+ }
+ } else if (dlen <= 3) {
+ if (ch->ch_rtime != 10) {
+ ch->ch_rtime = 10;
+ ch->ch_flag |= CH_PARAM;
+ }
+ } else {
+ if (ch->ch_rtime != DGRP_RTIME) {
+ ch->ch_rtime = DGRP_RTIME;
+ ch->ch_flag |= CH_PARAM;
+ }
+ }
+
+ /*
+ * If a portion of the packet is outside the
+ * buffer, shorten the effective length of the
+ * data packet to be the amount of data received.
+ */
+
+ if (remain < plen)
+ dlen -= plen - remain;
+
+ /*
+ * Detect if receive flush is now complete.
+ */
+
+ if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
+ ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
+ ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
+ ch->ch_flag &= ~CH_RX_FLUSH;
+ }
+
+ /*
+ * If we are ready to receive, move the data into
+ * the receive buffer.
+ */
+
+ ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
+
+ if (ch->ch_state == CS_READY &&
+ (ch->ch_tun.un_open_count != 0) &&
+ (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
+ (ch->ch_cflag & CF_CREAD) != 0 &&
+ (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
+ (ch->ch_send & RR_RX_FLUSH) == 0) {
+
+ if (ch->ch_rin + dlen >= RBUF_MAX) {
+ n = RBUF_MAX - ch->ch_rin;
+
+ memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
+
+ ch->ch_rin = 0;
+ dbuf += n;
+ dlen -= n;
+ }
+
+ memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
+
+ ch->ch_rin += dlen;
+
+
+ /*
+ * If we are not in fastcook mode, or
+ * if there is a fastcook thread
+ * waiting for data, send the data to
+ * the line discipline.
+ */
+
+ if ((ch->ch_flag & CH_FAST_READ) == 0 ||
+ ch->ch_inwait != 0) {
+ dgrp_input(ch);
+ }
+
+ /*
+ * If there is a read thread waiting
+ * in select, and we are in fastcook
+ * mode, wake him up.
+ */
+
+ if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
+ (ch->ch_flag & CH_FAST_READ) != 0)
+ wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
+
+ /*
+ * Wake any thread waiting in the
+ * fastcook loop.
+ */
+
+ if ((ch->ch_flag & CH_INPUT) != 0) {
+ ch->ch_flag &= ~CH_INPUT;
+ wake_up_interruptible(&ch->ch_flag_wait);
+ }
+ }
+
+ /*
+ * Fabricate and insert a data packet header to
+ * precede the remaining data when it comes in.
+ */
+
+ if (remain < plen) {
+ dlen = plen - remain;
+ b = buf;
+
+ b[0] = 0x90 + n1;
+ put_unaligned_be16(dlen, b + 1);
+
+ remain = 3;
+ if (remain > 0 && b != buf)
+ memcpy(buf, b, remain);
+
+ nd->nd_remain = remain;
+ return;
+ }
+}
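
Inside the new handle_data_in_packet() the receive copy has to handle the case where dlen runs past the end of the circular receive buffer (the RBUF_MAX split above). A standalone sketch of that wrap-around copy, using a small stand-in buffer size:

#include <string.h>
#include <stdio.h>

#define RBUF_MAX 16   /* stand-in; the driver's ring is much larger */

static unsigned char rbuf[RBUF_MAX];
static int rin;       /* producer index into rbuf */

/* Copy dlen bytes into the ring, splitting the copy at the wrap point. */
static void rbuf_put(const unsigned char *dbuf, long dlen)
{
	if (rin + dlen >= RBUF_MAX) {
		long n = RBUF_MAX - rin;

		memcpy(rbuf + rin, dbuf, n);
		rin = 0;
		dbuf += n;
		dlen -= n;
	}
	memcpy(rbuf + rin, dbuf, dlen);
	rin += dlen;
}

int main(void)
{
	unsigned char data[20];

	memset(data, 0xab, sizeof(data));
	rbuf_put(data, 12);
	rbuf_put(data, 10);   /* second call wraps past RBUF_MAX */
	printf("rin = %d\n", rin);
	return 0;
}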
+
/**
* dgrp_receive() -- decode data packets received from the remote PortServer.
* @nd: pointer to a node structure
@@ -2306,7 +2477,8 @@ static void dgrp_receive(struct nd_struct *nd)
plen = dlen + 1;
dbuf = b + 1;
- goto data;
+ handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
+ break;
/*
* Process 2-byte header data packet.
@@ -2320,7 +2492,8 @@ static void dgrp_receive(struct nd_struct *nd)
plen = dlen + 2;
dbuf = b + 2;
- goto data;
+ handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
+ break;
/*
* Process 3-byte header data packet.
@@ -2335,159 +2508,6 @@ static void dgrp_receive(struct nd_struct *nd)
dbuf = b + 3;
- /*
- * Common packet handling code.
- */
-
-data:
- nd->nd_tx_work = 1;
-
- /*
- * Otherwise data should appear only when we are
- * in the CS_READY state.
- */
-
- if (ch->ch_state < CS_READY) {
- error = "Data received before RWIN established";
- goto prot_error;
- }
-
- /*
- * Assure that the data received is within the
- * allowable window.
- */
-
- n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
-
- if (dlen > n) {
- error = "Receive data overrun";
- goto prot_error;
- }
-
- /*
- * If we received 3 or less characters,
- * assume it is a human typing, and set RTIME
- * to 10 milliseconds.
- *
- * If we receive 10 or more characters,
- * assume its not a human typing, and set RTIME
- * to 100 milliseconds.
- */
-
- if (ch->ch_edelay != DGRP_RTIME) {
- if (ch->ch_rtime != ch->ch_edelay) {
- ch->ch_rtime = ch->ch_edelay;
- ch->ch_flag |= CH_PARAM;
- }
- } else if (dlen <= 3) {
- if (ch->ch_rtime != 10) {
- ch->ch_rtime = 10;
- ch->ch_flag |= CH_PARAM;
- }
- } else {
- if (ch->ch_rtime != DGRP_RTIME) {
- ch->ch_rtime = DGRP_RTIME;
- ch->ch_flag |= CH_PARAM;
- }
- }
-
- /*
- * If a portion of the packet is outside the
- * buffer, shorten the effective length of the
- * data packet to be the amount of data received.
- */
-
- if (remain < plen)
- dlen -= plen - remain;
-
- /*
- * Detect if receive flush is now complete.
- */
-
- if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
- ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
- ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
- ch->ch_flag &= ~CH_RX_FLUSH;
- }
-
- /*
- * If we are ready to receive, move the data into
- * the receive buffer.
- */
-
- ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
-
- if (ch->ch_state == CS_READY &&
- (ch->ch_tun.un_open_count != 0) &&
- (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
- (ch->ch_cflag & CF_CREAD) != 0 &&
- (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
- (ch->ch_send & RR_RX_FLUSH) == 0) {
-
- if (ch->ch_rin + dlen >= RBUF_MAX) {
- n = RBUF_MAX - ch->ch_rin;
-
- memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
-
- ch->ch_rin = 0;
- dbuf += n;
- dlen -= n;
- }
-
- memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
-
- ch->ch_rin += dlen;
-
-
- /*
- * If we are not in fastcook mode, or
- * if there is a fastcook thread
- * waiting for data, send the data to
- * the line discipline.
- */
-
- if ((ch->ch_flag & CH_FAST_READ) == 0 ||
- ch->ch_inwait != 0) {
- dgrp_input(ch);
- }
-
- /*
- * If there is a read thread waiting
- * in select, and we are in fastcook
- * mode, wake him up.
- */
-
- if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
- (ch->ch_flag & CH_FAST_READ) != 0)
- wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
-
- /*
- * Wake any thread waiting in the
- * fastcook loop.
- */
-
- if ((ch->ch_flag & CH_INPUT) != 0) {
- ch->ch_flag &= ~CH_INPUT;
-
- wake_up_interruptible(&ch->ch_flag_wait);
- }
- }
-
- /*
- * Fabricate and insert a data packet header to
- * preced the remaining data when it comes in.
- */
-
- if (remain < plen) {
- dlen = plen - remain;
- b = buf;
-
- b[0] = 0x90 + n1;
- put_unaligned_be16(dlen, b + 1);
-
- remain = 3;
- goto done;
- }
break;
/*
diff --git a/drivers/staging/dgrp/dgrp_tty.c b/drivers/staging/dgrp/dgrp_tty.c
index 0d52de3..7a9694c 100644
--- a/drivers/staging/dgrp/dgrp_tty.c
+++ b/drivers/staging/dgrp/dgrp_tty.c
@@ -371,7 +371,7 @@ static void drp_param(struct ch_struct *ch)
ch->ch_flag |= CH_BAUD0;
}
} else if (ch->ch_custom_speed) {
- ch->ch_brate = PORTSERVER_DIVIDEND / ch->ch_custom_speed ;
+ ch->ch_brate = PORTSERVER_DIVIDEND / ch->ch_custom_speed;
if (ch->ch_flag & CH_BAUD0) {
ch->ch_mout |= DM_DTR | DM_RTS;
@@ -752,7 +752,7 @@ static int dgrp_tty_open(struct tty_struct *tty, struct file *file)
if (ch->ch_open_error != 0 && otype == ch->ch_otype) {
retval = (ch->ch_open_error <= 2) ?
- delay_error : -ENXIO ;
+ delay_error : -ENXIO;
goto unlock;
}
diff --git a/drivers/staging/dwc2/TODO b/drivers/staging/dwc2/TODO
deleted file mode 100644
index 282470d..0000000
--- a/drivers/staging/dwc2/TODO
+++ /dev/null
@@ -1,33 +0,0 @@
-TODO:
- - Dan Carpenter would like to see some cleanups to the microframe
- scheduler code:
- http://www.mail-archive.com/linux-usb@vger.kernel.org/msg26650.html
-
- - Should merge the NAK holdoff patch from Raspberry Pi
- (http://marc.info/?l=linux-usb&m=137625067103833). But as it stands
- that patch is incomplete, it needs more investigation to see if it
- can be made to work for non-Raspberry Pi platforms that lack the
- special FIQ interrupt that the Pi has. Without this patch, the driver
- has a high interrupt rate (8K/sec).
-
- - The Raspberry Pi platform needs to have support for its FIQ interrupt
- added, to get the same level of functionality as the downstream
- driver. The raspberrypi.org developers have indicated they are
- willing to help with that.
-
- - Some of the default driver parameters (see 'struct dwc2_core_params'
- in core.h) won't work for many platforms. So DT attributes will need
- to be added for some of these. But that can be done as-needed as new
- platforms are added.
-
- - Eventually the driver should be merged with the s3c-hsotg peripheral
- mode driver, so that both modes of operation can be supported with a
- single driver. But I think that can wait till after the driver has
- been moved to mainline.
-
- - After that, OTG support can be added. I'm not sure how much demand
- there is for that, though, so I have that as a low priority.
-
-Please send any patches for this driver to Paul Zimmerman <paulz@synopsys.com>
-and Greg Kroah-Hartman <gregkh@linuxfoundation.org>. And please CC linux-usb
-<linux-usb@vger.kernel.org> too.
diff --git a/drivers/staging/et131x/README b/drivers/staging/et131x/README
index 8da96a6..3befc45 100644
--- a/drivers/staging/et131x/README
+++ b/drivers/staging/et131x/README
@@ -13,10 +13,6 @@ TODO:
- Implement NAPI support
- In et131x_tx(), don't return NETDEV_TX_BUSY, just drop the packet with kfree_skb().
- Reduce the number of split lines by careful consideration of variable names etc.
- - Do this in et131x.c:
- struct fbr_lookup *fbr;
- fbr = rx_local->fbr[id];
- Then replace all the instances of "rx_local->fbr[id]" with fbr.
Please send patches to:
Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index ab8b29d..e516bb6 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -54,7 +54,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -813,20 +812,21 @@ static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
/* Setup the receive dma configuration register for normal operation */
u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
+ struct rx_ring *rx_ring = &adapter->rx_ring;
- if (adapter->rx_ring.fbr[1]->buffsize == 4096)
+ if (rx_ring->fbr[1]->buffsize == 4096)
csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
- else if (adapter->rx_ring.fbr[1]->buffsize == 8192)
+ else if (rx_ring->fbr[1]->buffsize == 8192)
csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
- else if (adapter->rx_ring.fbr[1]->buffsize == 16384)
+ else if (rx_ring->fbr[1]->buffsize == 16384)
csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;
csr |= ET_RXDMA_CSR_FBR0_ENABLE;
- if (adapter->rx_ring.fbr[0]->buffsize == 256)
+ if (rx_ring->fbr[0]->buffsize == 256)
csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
- else if (adapter->rx_ring.fbr[0]->buffsize == 512)
+ else if (rx_ring->fbr[0]->buffsize == 512)
csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
- else if (adapter->rx_ring.fbr[0]->buffsize == 1024)
+ else if (rx_ring->fbr[0]->buffsize == 1024)
csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
writel(csr, &adapter->regs->rxdma.csr);
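
The rx_ring convenience pointer above leaves the buffer-size decoding unchanged: each free buffer ring's size maps onto a pair of LO/HI bits in the receive DMA CSR. A small standalone sketch of that mapping, with illustrative stand-in bit values rather than the real register definitions:

#include <stdio.h>
#include <stdint.h>

/* Stand-in CSR bits for free buffer ring 1; values are illustrative only. */
#define CSR_FBR1_SIZE_LO  0x01
#define CSR_FBR1_SIZE_HI  0x02

/* Encode an FBR1 buffer size (4096/8192/16384) into its two CSR size bits. */
static uint32_t fbr1_size_bits(uint32_t buffsize)
{
	switch (buffsize) {
	case 4096:
		return CSR_FBR1_SIZE_LO;
	case 8192:
		return CSR_FBR1_SIZE_HI;
	case 16384:
		return CSR_FBR1_SIZE_LO | CSR_FBR1_SIZE_HI;
	default:
		return 0;   /* any other size leaves both bits clear */
	}
}

int main(void)
{
	printf("0x%x\n", fbr1_size_bits(16384));
	return 0;
}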
@@ -968,7 +968,7 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
/* Set up the if mode bits */
cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
- if (phydev && phydev->speed == SPEED_1000) {
+ if (phydev->speed == SPEED_1000) {
cfg2 |= ET_MAC_CFG2_IFMODE_1000;
/* Phy mode bit */
ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
@@ -999,11 +999,11 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;
/* Turn on duplex if needed */
- if (phydev && phydev->duplex == DUPLEX_FULL)
+ if (phydev->duplex == DUPLEX_FULL)
cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;
ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
- if (phydev && phydev->duplex == DUPLEX_HALF)
+ if (phydev->duplex == DUPLEX_HALF)
ifctrl |= ET_MAC_IFCTRL_GHDMODE;
writel(ifctrl, &mac->if_ctrl);
@@ -1039,9 +1039,7 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
*/
static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
- u32 pmcsr;
-
- pmcsr = readl(&adapter->regs->global.pm_csr);
+ u32 pmcsr = readl(&adapter->regs->global.pm_csr);
return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}
@@ -1351,8 +1349,6 @@ static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
* @addr: the address of the transceiver
* @reg: the register to read
* @value: pointer to a 16-bit value in which the value will be stored
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
*/
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
u8 reg, u16 *value)
@@ -1425,10 +1421,6 @@ static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
* @adapter: pointer to our private adapter structure
* @reg: the register to read
* @value: 16-bit value to write
- *
- * FIXME: one caller in netdev still
- *
- * Return 0 on success, errno on failure (as defined in errno.h)
*/
static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
@@ -1494,10 +1486,10 @@ static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
return status;
}
-/* Still used from _mac for BIT_READ */
-static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
- u16 action, u16 regnum, u16 bitnum,
- u8 *value)
+static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
+ u16 regnum,
+ u16 bitnum,
+ u8 *value)
{
u16 reg;
u16 mask = 1 << bitnum;
@@ -1505,22 +1497,7 @@ static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
/* Read the requested register */
et131x_mii_read(adapter, regnum, &reg);
- switch (action) {
- case TRUEPHY_BIT_READ:
- *value = (reg & mask) >> bitnum;
- break;
-
- case TRUEPHY_BIT_SET:
- et131x_mii_write(adapter, regnum, reg | mask);
- break;
-
- case TRUEPHY_BIT_CLEAR:
- et131x_mii_write(adapter, regnum, reg & ~mask);
- break;
-
- default:
- break;
- }
+ *value = (reg & mask) >> bitnum;
}
static void et1310_config_flow_control(struct et131x_adapter *adapter)
@@ -1532,27 +1509,19 @@ static void et1310_config_flow_control(struct et131x_adapter *adapter)
} else {
char remote_pause, remote_async_pause;
- et1310_phy_access_mii_bit(adapter,
- TRUEPHY_BIT_READ, 5, 10, &remote_pause);
- et1310_phy_access_mii_bit(adapter,
- TRUEPHY_BIT_READ, 5, 11,
- &remote_async_pause);
+ et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause);
+ et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause);
- if ((remote_pause == TRUEPHY_BIT_SET) &&
- (remote_async_pause == TRUEPHY_BIT_SET)) {
+ if (remote_pause && remote_async_pause) {
adapter->flowcontrol = adapter->wanted_flow;
- } else if ((remote_pause == TRUEPHY_BIT_SET) &&
- (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
+ } else if (remote_pause && !remote_async_pause) {
if (adapter->wanted_flow == FLOW_BOTH)
adapter->flowcontrol = FLOW_BOTH;
else
adapter->flowcontrol = FLOW_NONE;
- } else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
- (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
+ } else if (!remote_pause && !remote_async_pause) {
adapter->flowcontrol = FLOW_NONE;
- } else {/* if (remote_pause == TRUEPHY_CLEAR_BIT &&
- * remote_async_pause == TRUEPHY_SET_BIT)
- */
+ } else {
if (adapter->wanted_flow == FLOW_BOTH)
adapter->flowcontrol = FLOW_RXONLY;
else
@@ -1561,9 +1530,7 @@ static void et1310_config_flow_control(struct et131x_adapter *adapter)
}
}
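
The simplified branches above resolve the link partner's pause and asymmetric-pause advertisement bits against the flow control we asked for. A compact standalone sketch of the same decision table, using stand-in FLOW_* values:

#include <stdio.h>

enum flow { FLOW_NONE, FLOW_RXONLY, FLOW_TXONLY, FLOW_BOTH };

/* Resolve flow control from the remote pause bits, as in
 * et1310_config_flow_control() after this cleanup. */
static enum flow resolve_flow(enum flow wanted, int remote_pause,
			      int remote_async_pause)
{
	if (remote_pause && remote_async_pause)
		return wanted;
	if (remote_pause && !remote_async_pause)
		return wanted == FLOW_BOTH ? FLOW_BOTH : FLOW_NONE;
	if (!remote_pause && !remote_async_pause)
		return FLOW_NONE;
	/* !remote_pause && remote_async_pause */
	return wanted == FLOW_BOTH ? FLOW_RXONLY : FLOW_NONE;
}

int main(void)
{
	printf("%d\n", resolve_flow(FLOW_BOTH, 0, 1));   /* -> FLOW_RXONLY */
	return 0;
}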
-/* et1310_update_macstat_host_counters - Update the local copy of the statistics
- * @adapter: pointer to the adapter structure
- */
+/* et1310_update_macstat_host_counters - Update local copy of the statistics */
static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
struct ce_stats *stats = &adapter->stats;
@@ -1589,7 +1556,6 @@ static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
}
/* et1310_handle_macstat_interrupt
- * @adapter: pointer to the adapter structure
*
* One of the MACSTAT counters has wrapped. Update the local copy of
* the statistics held in the adapter structure, checking the "wrap"
@@ -1679,7 +1645,7 @@ static int et131x_mdio_reset(struct mii_bus *bus)
return 0;
}
-/* et1310_phy_power_down - PHY power control
+/* et1310_phy_power_switch - PHY power control
* @adapter: device to control
* @down: true for off/false for back on
*
@@ -1688,7 +1654,7 @@ static int et131x_mdio_reset(struct mii_bus *bus)
* Can't you see that this code processed
* Phy power, phy power..
*/
-static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
+static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
{
u16 data;
@@ -1699,10 +1665,7 @@ static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
et131x_mii_write(adapter, MII_BMCR, data);
}
-/* et131x_xcvr_init - Init the phy if we are setting it into force mode
- * @adapter: pointer to our private adapter structure
- *
- */
+/* et131x_xcvr_init - Init the phy if we are setting it into force mode */
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
u16 lcr2;
@@ -1731,7 +1694,6 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter)
}
/* et131x_configure_global_regs - configure JAGCore global regs
- * @adapter: pointer to our adapter structure
*
* Used to configure the global registers on the JAGCore
*/
@@ -1776,9 +1738,7 @@ static void et131x_configure_global_regs(struct et131x_adapter *adapter)
writel(0, &regs->watchdog_timer);
}
-/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
- * @adapter: pointer to our adapter structure
- */
+/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */
static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
@@ -1821,6 +1781,7 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
u32 __iomem *min_des;
u32 __iomem *base_hi;
u32 __iomem *base_lo;
+ struct fbr_lookup *fbr = rx_local->fbr[id];
if (id == 0) {
num_des = &rx_dma->fbr0_num_des;
@@ -1837,12 +1798,10 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
}
/* Now's the best time to initialize FBR contents */
- fbr_entry =
- (struct fbr_desc *) rx_local->fbr[id]->ring_virtaddr;
- for (entry = 0;
- entry < rx_local->fbr[id]->num_entries; entry++) {
- fbr_entry->addr_hi = rx_local->fbr[id]->bus_high[entry];
- fbr_entry->addr_lo = rx_local->fbr[id]->bus_low[entry];
+ fbr_entry = fbr->ring_virtaddr;
+ for (entry = 0; entry < fbr->num_entries; entry++) {
+ fbr_entry->addr_hi = fbr->bus_high[entry];
+ fbr_entry->addr_lo = fbr->bus_low[entry];
fbr_entry->word2 = entry;
fbr_entry++;
}
@@ -1850,19 +1809,16 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
/* Set the address and parameters of Free buffer ring 1 and 0
* into the 1310's registers
*/
- writel(upper_32_bits(rx_local->fbr[id]->ring_physaddr),
- base_hi);
- writel(lower_32_bits(rx_local->fbr[id]->ring_physaddr),
- base_lo);
- writel(rx_local->fbr[id]->num_entries - 1, num_des);
+ writel(upper_32_bits(fbr->ring_physaddr), base_hi);
+ writel(lower_32_bits(fbr->ring_physaddr), base_lo);
+ writel(fbr->num_entries - 1, num_des);
writel(ET_DMA10_WRAP, full_offset);
/* This variable tracks the free buffer ring 1 full position,
* so it has to match the above.
*/
- rx_local->fbr[id]->local_full = ET_DMA10_WRAP;
- writel(((rx_local->fbr[id]->num_entries *
- LO_MARK_PERCENT_FOR_RX) / 100) - 1,
+ fbr->local_full = ET_DMA10_WRAP;
+ writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
min_des);
}
@@ -1884,7 +1840,6 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
}
/* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
- * @adapter: pointer to our private adapter structure
*
* Configure the transmit engine with the ring buffers we have created
* and prepare it for use.
@@ -1892,33 +1847,26 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
/* Load the hardware with the start of the transmit descriptor ring. */
- writel(upper_32_bits(adapter->tx_ring.tx_desc_ring_pa),
- &txdma->pr_base_hi);
- writel(lower_32_bits(adapter->tx_ring.tx_desc_ring_pa),
- &txdma->pr_base_lo);
+ writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
+ writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);
/* Initialise the transmit DMA engine */
writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
/* Load the completion writeback physical address */
- writel(upper_32_bits(adapter->tx_ring.tx_status_pa),
- &txdma->dma_wb_base_hi);
- writel(lower_32_bits(adapter->tx_ring.tx_status_pa),
- &txdma->dma_wb_base_lo);
+ writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
+ writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);
- *adapter->tx_ring.tx_status = 0;
+ *tx_ring->tx_status = 0;
writel(0, &txdma->service_request);
- adapter->tx_ring.send_idx = 0;
+ tx_ring->send_idx = 0;
}
-/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
+/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */
static void et131x_adapter_setup(struct et131x_adapter *adapter)
{
/* Configure the JAGCore */
@@ -1938,13 +1886,11 @@ static void et131x_adapter_setup(struct et131x_adapter *adapter)
et1310_config_macstat_regs(adapter);
- et1310_phy_power_down(adapter, 0);
+ et1310_phy_power_switch(adapter, 0);
et131x_xcvr_init(adapter);
}
-/* et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
- * @adapter: pointer to our private adapter structure
- */
+/* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */
static void et131x_soft_reset(struct et131x_adapter *adapter)
{
u32 reg;
@@ -1965,7 +1911,6 @@ static void et131x_soft_reset(struct et131x_adapter *adapter)
}
/* et131x_enable_interrupts - enable interrupt
- * @adapter: et131x device
*
* Enable the appropriate interrupts on the ET131x according to our
* configuration
@@ -1976,7 +1921,7 @@ static void et131x_enable_interrupts(struct et131x_adapter *adapter)
/* Enable all global interrupts */
if (adapter->flowcontrol == FLOW_TXONLY ||
- adapter->flowcontrol == FLOW_BOTH)
+ adapter->flowcontrol == FLOW_BOTH)
mask = INT_MASK_ENABLE;
else
mask = INT_MASK_ENABLE_NO_FLOW;
@@ -1985,7 +1930,6 @@ static void et131x_enable_interrupts(struct et131x_adapter *adapter)
}
/* et131x_disable_interrupts - interrupt disable
- * @adapter: et131x device
*
* Block all interrupts from the et131x device at the device itself
*/
@@ -1995,9 +1939,7 @@ static void et131x_disable_interrupts(struct et131x_adapter *adapter)
writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}
-/* et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310
- * @adapter: pointer to our adapter structure
- */
+/* et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310 */
static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
/* Setup the tramsmit dma configuration register */
@@ -2005,9 +1947,7 @@ static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
&adapter->regs->txdma.csr);
}
-/* et131x_enable_txrx - Enable tx/rx queues
- * @netdev: device to be enabled
- */
+/* et131x_enable_txrx - Enable tx/rx queues */
static void et131x_enable_txrx(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -2024,9 +1964,7 @@ static void et131x_enable_txrx(struct net_device *netdev)
netif_start_queue(netdev);
}
-/* et131x_disable_txrx - Disable tx/rx queues
- * @netdev: device to be disabled
- */
+/* et131x_disable_txrx - Disable tx/rx queues */
static void et131x_disable_txrx(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -2042,18 +1980,12 @@ static void et131x_disable_txrx(struct net_device *netdev)
et131x_disable_interrupts(adapter);
}
-/* et131x_init_send - Initialize send data structures
- * @adapter: pointer to our private adapter structure
- */
+/* et131x_init_send - Initialize send data structures */
static void et131x_init_send(struct et131x_adapter *adapter)
{
- struct tcb *tcb;
u32 ct;
- struct tx_ring *tx_ring;
-
- /* Setup some convenience pointers */
- tx_ring = &adapter->tx_ring;
- tcb = adapter->tx_ring.tcb_ring;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
+ struct tcb *tcb = tx_ring->tcb_ring;
tx_ring->tcb_qhead = tcb;
@@ -2076,7 +2008,6 @@ static void et131x_init_send(struct et131x_adapter *adapter)
}
/* et1310_enable_phy_coma - called when network cable is unplugged
- * @adapter: pointer to our adapter structure
*
* driver receive an phy status change interrupt while in D0 and check that
* phy_status is down.
@@ -2104,11 +2035,6 @@ static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
/* Save the GbE PHY speed and duplex modes. Need to restore this
* when cable is plugged back in
*/
- /* TODO - when PM is re-enabled, check if we need to
- * perform a similar task as this -
- * adapter->pdown_speed = adapter->ai_force_speed;
- * adapter->pdown_duplex = adapter->ai_force_duplex;
- */
/* Stop sending packets. */
spin_lock_irqsave(&adapter->send_hw_lock, flags);
@@ -2128,9 +2054,7 @@ static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
writel(pmcsr, &adapter->regs->global.pm_csr);
}
-/* et1310_disable_phy_coma - Disable the Phy Coma Mode
- * @adapter: pointer to our adapter structure
- */
+/* et1310_disable_phy_coma - Disable the Phy Coma Mode */
static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
u32 pmcsr;
@@ -2145,11 +2069,6 @@ static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
/* Restore the GbE PHY speed and duplex modes;
* Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
*/
- /* TODO - when PM is re-enabled, check if we need to
- * perform a similar task as this -
- * adapter->ai_force_speed = adapter->pdown_speed;
- * adapter->ai_force_duplex = adapter->pdown_duplex;
- */
/* Re-initialize the send structures */
et131x_init_send(adapter);
@@ -2183,15 +2102,12 @@ static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
tmp_free_buff_ring ^= ET_DMA10_WRAP;
}
/* For the 1023 case */
- tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
+ tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP);
*free_buff_ring = tmp_free_buff_ring;
return tmp_free_buff_ring;
}
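
bump_free_buff_ring() above works on a 10-bit ring index whose extra top bit flips on every wrap, so hardware and software can tell a full ring from an empty one. A standalone sketch of equivalent arithmetic, with stand-in mask values (the real driver's increment-then-check ordering may differ slightly):

#include <stdio.h>
#include <stdint.h>

#define ET_DMA10_MASK 0x3ff    /* low 10 bits: the index itself */
#define ET_DMA10_WRAP 0x400    /* toggles each time the index wraps */
#define INDEX10(x)    ((x) & ET_DMA10_MASK)

/* Advance the index by one, toggling the wrap bit when it passes 'limit'. */
static uint32_t bump(uint32_t *ring_index, uint32_t limit)
{
	uint32_t tmp = *ring_index;

	if (INDEX10(tmp) < limit) {
		tmp++;
	} else {
		tmp &= ~ET_DMA10_MASK;      /* back to index 0 ... */
		tmp ^= ET_DMA10_WRAP;       /* ... on the other "lap" */
	}
	tmp &= (ET_DMA10_MASK | ET_DMA10_WRAP);
	*ring_index = tmp;
	return tmp;
}

int main(void)
{
	uint32_t idx = 1022;

	bump(&idx, 1023);
	bump(&idx, 1023);
	printf("0x%x\n", idx);   /* index 0 with the wrap bit set */
	return 0;
}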
/* et131x_rx_dma_memory_alloc
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success and errno on failure (as defined in errno.h)
*
* Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
* and the Packet Status Ring.
@@ -2203,10 +2119,8 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
u32 bufsize;
u32 pktstat_ringsize;
u32 fbr_chunksize;
- struct rx_ring *rx_ring;
-
- /* Setup some convenience pointers */
- rx_ring = &adapter->rx_ring;
+ struct rx_ring *rx_ring = &adapter->rx_ring;
+ struct fbr_lookup *fbr;
/* Alloc memory for the lookup table */
rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
@@ -2247,20 +2161,18 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
rx_ring->fbr[1]->num_entries = 128;
}
- adapter->rx_ring.psr_num_entries =
- adapter->rx_ring.fbr[0]->num_entries +
- adapter->rx_ring.fbr[1]->num_entries;
+ rx_ring->psr_num_entries = rx_ring->fbr[0]->num_entries +
+ rx_ring->fbr[1]->num_entries;
for (id = 0; id < NUM_FBRS; id++) {
+ fbr = rx_ring->fbr[id];
/* Allocate an area of memory for Free Buffer Ring */
- bufsize =
- (sizeof(struct fbr_desc) * rx_ring->fbr[id]->num_entries);
- rx_ring->fbr[id]->ring_virtaddr =
- dma_alloc_coherent(&adapter->pdev->dev,
- bufsize,
- &rx_ring->fbr[id]->ring_physaddr,
- GFP_KERNEL);
- if (!rx_ring->fbr[id]->ring_virtaddr) {
+ bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
+ fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
+ bufsize,
+ &fbr->ring_physaddr,
+ GFP_KERNEL);
+ if (!fbr->ring_virtaddr) {
dev_err(&adapter->pdev->dev,
"Cannot alloc memory for Free Buffer Ring %d\n", id);
return -ENOMEM;
@@ -2268,25 +2180,25 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
}
for (id = 0; id < NUM_FBRS; id++) {
- fbr_chunksize = (FBR_CHUNKS * rx_ring->fbr[id]->buffsize);
+ fbr = rx_ring->fbr[id];
+ fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);
- for (i = 0;
- i < (rx_ring->fbr[id]->num_entries / FBR_CHUNKS); i++) {
+ for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
dma_addr_t fbr_tmp_physaddr;
- rx_ring->fbr[id]->mem_virtaddrs[i] = dma_alloc_coherent(
+ fbr->mem_virtaddrs[i] = dma_alloc_coherent(
&adapter->pdev->dev, fbr_chunksize,
- &rx_ring->fbr[id]->mem_physaddrs[i],
+ &fbr->mem_physaddrs[i],
GFP_KERNEL);
- if (!rx_ring->fbr[id]->mem_virtaddrs[i]) {
+ if (!fbr->mem_virtaddrs[i]) {
dev_err(&adapter->pdev->dev,
"Could not alloc memory\n");
return -ENOMEM;
}
/* See NOTE in "Save Physical Address" comment above */
- fbr_tmp_physaddr = rx_ring->fbr[id]->mem_physaddrs[i];
+ fbr_tmp_physaddr = fbr->mem_physaddrs[i];
for (j = 0; j < FBR_CHUNKS; j++) {
u32 index = (i * FBR_CHUNKS) + j;
@@ -2294,26 +2206,25 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
/* Save the Virtual address of this index for
* quick access later
*/
- rx_ring->fbr[id]->virt[index] =
- (u8 *) rx_ring->fbr[id]->mem_virtaddrs[i] +
- (j * rx_ring->fbr[id]->buffsize);
+ fbr->virt[index] = (u8 *)fbr->mem_virtaddrs[i] +
+ (j * fbr->buffsize);
/* now store the physical address in the
* descriptor so the device can access it
*/
- rx_ring->fbr[id]->bus_high[index] =
+ fbr->bus_high[index] =
upper_32_bits(fbr_tmp_physaddr);
- rx_ring->fbr[id]->bus_low[index] =
+ fbr->bus_low[index] =
lower_32_bits(fbr_tmp_physaddr);
- fbr_tmp_physaddr += rx_ring->fbr[id]->buffsize;
+ fbr_tmp_physaddr += fbr->buffsize;
}
}
}
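
The loop above carves each DMA chunk into FBR_CHUNKS fixed-size buffers and records a per-buffer virtual address plus the bus address split into high and low 32-bit halves. A standalone sketch of that bookkeeping, with small stand-in sizes and malloc standing in for dma_alloc_coherent:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define FBR_CHUNKS  4      /* stand-in: buffers carved out of each chunk */
#define NUM_ENTRIES 8
#define BUFFSIZE    256

static uint8_t  *virt[NUM_ENTRIES];
static uint32_t  bus_high[NUM_ENTRIES];
static uint32_t  bus_low[NUM_ENTRIES];

int main(void)
{
	for (int i = 0; i < NUM_ENTRIES / FBR_CHUNKS; i++) {
		/* One allocation per chunk; the driver uses dma_alloc_coherent. */
		uint8_t *chunk = malloc(FBR_CHUNKS * BUFFSIZE);
		uint64_t bus = (uint64_t)(uintptr_t)chunk;   /* pretend bus address */

		for (int j = 0; j < FBR_CHUNKS; j++) {
			int index = i * FBR_CHUNKS + j;

			virt[index]     = chunk + j * BUFFSIZE;
			bus_high[index] = (uint32_t)(bus >> 32);
			bus_low[index]  = (uint32_t)bus;
			bus += BUFFSIZE;
		}
	}
	printf("entry 5 at offset %ld in its chunk\n",
	       (long)(virt[5] - virt[4]));
	return 0;
}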
/* Allocate an area of memory for FIFO of Packet Status ring entries */
pktstat_ringsize =
- sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
+ sizeof(struct pkt_stat_desc) * rx_ring->psr_num_entries;
rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
pktstat_ringsize,
@@ -2325,8 +2236,6 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
"Cannot alloc memory for Packet Status Ring\n");
return -ENOMEM;
}
- pr_info("Packet Status Ring %llx\n",
- (unsigned long long) rx_ring->ps_ring_physaddr);
/* NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
* ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
@@ -2345,7 +2254,6 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
return -ENOMEM;
}
rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
- pr_info("PRS %llx\n", (unsigned long long)rx_ring->rx_status_bus);
/* The RFDs are going to be put on lists later on, so initialize the
* lists now.
@@ -2354,9 +2262,7 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
return 0;
}
-/* et131x_rx_dma_memory_free - Free all memory allocated within this module.
- * @adapter: pointer to our private adapter structure
- */
+/* et131x_rx_dma_memory_free - Free all memory allocated within this module */
static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
u8 id;
@@ -2364,17 +2270,15 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
u32 bufsize;
u32 pktstat_ringsize;
struct rfd *rfd;
- struct rx_ring *rx_ring;
-
- /* Setup some convenience pointers */
- rx_ring = &adapter->rx_ring;
+ struct rx_ring *rx_ring = &adapter->rx_ring;
+ struct fbr_lookup *fbr;
/* Free RFDs and associated packet descriptors */
WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
while (!list_empty(&rx_ring->recv_list)) {
- rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
- struct rfd, list_node);
+ rfd = list_entry(rx_ring->recv_list.next,
+ struct rfd, list_node);
list_del(&rfd->list_node);
rfd->skb = NULL;
@@ -2383,40 +2287,41 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
/* Free Free Buffer Rings */
for (id = 0; id < NUM_FBRS; id++) {
- if (!rx_ring->fbr[id]->ring_virtaddr)
+ fbr = rx_ring->fbr[id];
+
+ if (!fbr->ring_virtaddr)
continue;
/* First the packet memory */
for (index = 0;
- index < (rx_ring->fbr[id]->num_entries / FBR_CHUNKS);
+ index < fbr->num_entries / FBR_CHUNKS;
index++) {
- if (rx_ring->fbr[id]->mem_virtaddrs[index]) {
- bufsize =
- rx_ring->fbr[id]->buffsize * FBR_CHUNKS;
+ if (fbr->mem_virtaddrs[index]) {
+ bufsize = fbr->buffsize * FBR_CHUNKS;
dma_free_coherent(&adapter->pdev->dev,
- bufsize,
- rx_ring->fbr[id]->mem_virtaddrs[index],
- rx_ring->fbr[id]->mem_physaddrs[index]);
+ bufsize,
+ fbr->mem_virtaddrs[index],
+ fbr->mem_physaddrs[index]);
- rx_ring->fbr[id]->mem_virtaddrs[index] = NULL;
+ fbr->mem_virtaddrs[index] = NULL;
}
}
- bufsize =
- sizeof(struct fbr_desc) * rx_ring->fbr[id]->num_entries;
+ bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
- dma_free_coherent(&adapter->pdev->dev, bufsize,
- rx_ring->fbr[id]->ring_virtaddr,
- rx_ring->fbr[id]->ring_physaddr);
+ dma_free_coherent(&adapter->pdev->dev,
+ bufsize,
+ fbr->ring_virtaddr,
+ fbr->ring_physaddr);
- rx_ring->fbr[id]->ring_virtaddr = NULL;
+ fbr->ring_virtaddr = NULL;
}
/* Free Packet Status Ring */
if (rx_ring->ps_ring_virtaddr) {
pktstat_ringsize = sizeof(struct pkt_stat_desc) *
- adapter->rx_ring.psr_num_entries;
+ rx_ring->psr_num_entries;
dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
rx_ring->ps_ring_virtaddr,
@@ -2441,20 +2346,12 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
rx_ring->num_ready_recv = 0;
}
-/* et131x_init_recv - Initialize receive data structures.
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success and errno on failure (as defined in errno.h)
- */
+/* et131x_init_recv - Initialize receive data structures */
static int et131x_init_recv(struct et131x_adapter *adapter)
{
struct rfd *rfd;
u32 rfdct;
- u32 numrfd = 0;
- struct rx_ring *rx_ring;
-
- /* Setup some convenience pointers */
- rx_ring = &adapter->rx_ring;
+ struct rx_ring *rx_ring = &adapter->rx_ring;
/* Setup each RFD */
for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
@@ -2467,24 +2364,18 @@ static int et131x_init_recv(struct et131x_adapter *adapter)
/* Add this RFD to the recv_list */
list_add_tail(&rfd->list_node, &rx_ring->recv_list);
- /* Increment both the available RFD's, and the total RFD's. */
+ /* Increment the available RFD's */
rx_ring->num_ready_recv++;
- numrfd++;
}
return 0;
}
-/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
- * @adapter: pointer to our adapter structure
- */
+/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
struct phy_device *phydev = adapter->phydev;
- if (!phydev)
- return;
-
/* For version B silicon, we do not use the RxDMA timer for 10 and 100
* Mbits/s line rates. We do not enable and RxDMA interrupt coalescing.
*/
@@ -2505,11 +2396,13 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
u16 buff_index = rfd->bufferindex;
u8 ring_index = rfd->ringindex;
unsigned long flags;
+ struct fbr_lookup *fbr = rx_local->fbr[ring_index];
/* We don't use any of the OOB data besides status. Otherwise, we
* need to clean up OOB data
*/
- if (buff_index < rx_local->fbr[ring_index]->num_entries) {
+ if (buff_index < fbr->num_entries) {
+ u32 free_buff_ring;
u32 __iomem *offset;
struct fbr_desc *next;
@@ -2520,22 +2413,20 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
else
offset = &rx_dma->fbr1_full_offset;
- next = (struct fbr_desc *)
- (rx_local->fbr[ring_index]->ring_virtaddr) +
- INDEX10(rx_local->fbr[ring_index]->local_full);
+ next = (struct fbr_desc *)(fbr->ring_virtaddr) +
+ INDEX10(fbr->local_full);
/* Handle the Free Buffer Ring advancement here. Write
* the PA / Buffer Index for the returned buffer into
* the oldest (next to be freed)FBR entry
*/
- next->addr_hi = rx_local->fbr[ring_index]->bus_high[buff_index];
- next->addr_lo = rx_local->fbr[ring_index]->bus_low[buff_index];
+ next->addr_hi = fbr->bus_high[buff_index];
+ next->addr_lo = fbr->bus_low[buff_index];
next->word2 = buff_index;
- writel(bump_free_buff_ring(
- &rx_local->fbr[ring_index]->local_full,
- rx_local->fbr[ring_index]->num_entries - 1),
- offset);
+ free_buff_ring = bump_free_buff_ring(&fbr->local_full,
+ fbr->num_entries - 1);
+ writel(free_buff_ring, offset);
spin_unlock_irqrestore(&adapter->fbr_lock, flags);
} else {
@@ -2555,7 +2446,6 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
}
/* nic_rx_pkts - Checks the hardware for available packets
- * @adapter: pointer to our adapter
*
* Returns rfd, a pointer to our MPRFD.
*
@@ -2580,6 +2470,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
u32 word0;
u32 word1;
struct sk_buff *skb;
+ struct fbr_lookup *fbr;
/* RX Status block is written by the DMA engine prior to every
* interrupt. It contains the next to be used entry in the Packet
@@ -2601,6 +2492,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
*/
len = psr->word1 & 0xFFFF;
ring_index = (psr->word1 >> 26) & 0x03;
+ fbr = rx_local->fbr[ring_index];
buff_index = (psr->word1 >> 16) & 0x3FF;
word0 = psr->word0;
@@ -2616,8 +2508,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);
- if (ring_index > 1 ||
- buff_index > rx_local->fbr[ring_index]->num_entries - 1) {
+ if (ring_index > 1 || buff_index > fbr->num_entries - 1) {
/* Illegal buffer or ring index cannot be used by S/W*/
dev_err(&adapter->pdev->dev,
"NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
@@ -2629,7 +2520,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
spin_lock_irqsave(&adapter->rcv_lock, flags);
element = rx_local->recv_list.next;
- rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
+ rfd = list_entry(element, struct rfd, list_node);
if (!rfd) {
spin_unlock_irqrestore(&adapter->rcv_lock, flags);
@@ -2670,7 +2561,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
&& !(adapter->packet_filter & ET131X_PACKET_TYPE_PROMISCUOUS)
&& !(adapter->packet_filter &
ET131X_PACKET_TYPE_ALL_MULTICAST)) {
- buf = rx_local->fbr[ring_index]->virt[buff_index];
+ buf = fbr->virt[buff_index];
/* Loop through our list to see if the destination
* address of this packet matches one in our list.
@@ -2708,7 +2599,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
adapter->stats.unicast_pkts_rcvd++;
}
- if (len == 0) {
+ if (!len) {
rfd->len = 0;
goto out;
}
@@ -2723,9 +2614,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
adapter->net_stats.rx_bytes += rfd->len;
- memcpy(skb_put(skb, rfd->len),
- rx_local->fbr[ring_index]->virt[buff_index],
- rfd->len);
+ memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len);
skb->protocol = eth_type_trans(skb, adapter->netdev);
skb->ip_summed = CHECKSUM_NONE;
@@ -2737,7 +2626,6 @@ out:
}
/* et131x_handle_recv_interrupt - Interrupt handler for receive processing
- * @adapter: pointer to our adapter
*
* Assumption, Rcv spinlock has been acquired.
*/
@@ -2746,11 +2634,12 @@ static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
struct rfd *rfd = NULL;
u32 count = 0;
bool done = true;
+ struct rx_ring *rx_ring = &adapter->rx_ring;
/* Process up to available RFD's */
while (count < NUM_PACKETS_HANDLED) {
- if (list_empty(&adapter->rx_ring.recv_list)) {
- WARN_ON(adapter->rx_ring.num_ready_recv != 0);
+ if (list_empty(&rx_ring->recv_list)) {
+ WARN_ON(rx_ring->num_ready_recv != 0);
done = false;
break;
}
@@ -2774,25 +2663,22 @@ static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
adapter->net_stats.rx_packets++;
/* Set the status on the packet, either resources or success */
- if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK)
+ if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
dev_warn(&adapter->pdev->dev, "RFD's are running out\n");
count++;
}
if (count == NUM_PACKETS_HANDLED || !done) {
- adapter->rx_ring.unfinished_receives = true;
+ rx_ring->unfinished_receives = true;
writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
&adapter->regs->global.watchdog_timer);
} else
/* Watchdog timer will disable itself if appropriate. */
- adapter->rx_ring.unfinished_receives = false;
+ rx_ring->unfinished_receives = false;
}
/* et131x_tx_dma_memory_alloc
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success and errno on failure (as defined in errno.h).
*
* Allocates memory that will be visible both to the device and to the CPU.
* The OS will pass us packets, pointers to which we will insert in the Tx
@@ -2806,18 +2692,17 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
struct tx_ring *tx_ring = &adapter->tx_ring;
/* Allocate memory for the TCB's (Transmit Control Block) */
- adapter->tx_ring.tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
- GFP_ATOMIC | GFP_DMA);
- if (!adapter->tx_ring.tcb_ring)
+ tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
+ GFP_ATOMIC | GFP_DMA);
+ if (!tx_ring->tcb_ring)
return -ENOMEM;
desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
- tx_ring->tx_desc_ring =
- (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
- desc_size,
- &tx_ring->tx_desc_ring_pa,
- GFP_KERNEL);
- if (!adapter->tx_ring.tx_desc_ring) {
+ tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
+ desc_size,
+ &tx_ring->tx_desc_ring_pa,
+ GFP_KERNEL);
+ if (!tx_ring->tx_desc_ring) {
dev_err(&adapter->pdev->dev,
"Cannot alloc memory for Tx Ring\n");
return -ENOMEM;
@@ -2835,51 +2720,46 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
sizeof(u32),
&tx_ring->tx_status_pa,
GFP_KERNEL);
- if (!adapter->tx_ring.tx_status_pa) {
+ if (!tx_ring->tx_status_pa) {
dev_err(&adapter->pdev->dev,
- "Cannot alloc memory for Tx status block\n");
+ "Cannot alloc memory for Tx status block\n");
return -ENOMEM;
}
return 0;
}
-/* et131x_tx_dma_memory_free - Free all memory allocated within this module
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success and errno on failure (as defined in errno.h).
- */
+/* et131x_tx_dma_memory_free - Free all memory allocated within this module */
static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
int desc_size = 0;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
- if (adapter->tx_ring.tx_desc_ring) {
+ if (tx_ring->tx_desc_ring) {
/* Free memory relating to Tx rings here */
desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
dma_free_coherent(&adapter->pdev->dev,
- desc_size,
- adapter->tx_ring.tx_desc_ring,
- adapter->tx_ring.tx_desc_ring_pa);
- adapter->tx_ring.tx_desc_ring = NULL;
+ desc_size,
+ tx_ring->tx_desc_ring,
+ tx_ring->tx_desc_ring_pa);
+ tx_ring->tx_desc_ring = NULL;
}
/* Free memory for the Tx status block */
- if (adapter->tx_ring.tx_status) {
+ if (tx_ring->tx_status) {
dma_free_coherent(&adapter->pdev->dev,
- sizeof(u32),
- adapter->tx_ring.tx_status,
- adapter->tx_ring.tx_status_pa);
+ sizeof(u32),
+ tx_ring->tx_status,
+ tx_ring->tx_status_pa);
- adapter->tx_ring.tx_status = NULL;
+ tx_ring->tx_status = NULL;
}
/* Free the memory for the tcb structures */
- kfree(adapter->tx_ring.tcb_ring);
+ kfree(tx_ring->tcb_ring);
}
/* nic_send_packet - NIC specific send handler for version B silicon.
* @adapter: pointer to our adapter
* @tcb: pointer to struct tcb
- *
- * Returns 0 or errno.
*/
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
@@ -2893,6 +2773,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
unsigned long flags;
struct phy_device *phydev = adapter->phydev;
dma_addr_t dma_addr;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
/* Part of the optimizations of this send routine restrict us to
* sending 24 fragments at a pass. In practice we should never see
@@ -2968,11 +2849,11 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
}
if (phydev && phydev->speed == SPEED_1000) {
- if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
+ if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
/* Last element & Interrupt flag */
desc[frag - 1].flags =
TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
- adapter->tx_ring.since_irq = 0;
+ tx_ring->since_irq = 0;
} else { /* Last element */
desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
}
@@ -2982,12 +2863,12 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
desc[0].flags |= TXDESC_FLAG_FIRSTPKT;
- tcb->index_start = adapter->tx_ring.send_idx;
+ tcb->index_start = tx_ring->send_idx;
tcb->stale = 0;
spin_lock_irqsave(&adapter->send_hw_lock, flags);
- thiscopy = NUM_DESC_PER_RING_TX - INDEX10(adapter->tx_ring.send_idx);
+ thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);
if (thiscopy >= frag) {
remainder = 0;
@@ -2996,52 +2877,51 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
remainder = frag - thiscopy;
}
- memcpy(adapter->tx_ring.tx_desc_ring +
- INDEX10(adapter->tx_ring.send_idx), desc,
+ memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
+ desc,
sizeof(struct tx_desc) * thiscopy);
- add_10bit(&adapter->tx_ring.send_idx, thiscopy);
+ add_10bit(&tx_ring->send_idx, thiscopy);
- if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
- INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
- adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
- adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
+ if (INDEX10(tx_ring->send_idx) == 0 ||
+ INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
+ tx_ring->send_idx &= ~ET_DMA10_MASK;
+ tx_ring->send_idx ^= ET_DMA10_WRAP;
}
if (remainder) {
- memcpy(adapter->tx_ring.tx_desc_ring,
+ memcpy(tx_ring->tx_desc_ring,
desc + thiscopy,
sizeof(struct tx_desc) * remainder);
- add_10bit(&adapter->tx_ring.send_idx, remainder);
+ add_10bit(&tx_ring->send_idx, remainder);
}
- if (INDEX10(adapter->tx_ring.send_idx) == 0) {
- if (adapter->tx_ring.send_idx)
+ if (INDEX10(tx_ring->send_idx) == 0) {
+ if (tx_ring->send_idx)
tcb->index = NUM_DESC_PER_RING_TX - 1;
else
tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
} else
- tcb->index = adapter->tx_ring.send_idx - 1;
+ tcb->index = tx_ring->send_idx - 1;
spin_lock(&adapter->tcb_send_qlock);
- if (adapter->tx_ring.send_tail)
- adapter->tx_ring.send_tail->next = tcb;
+ if (tx_ring->send_tail)
+ tx_ring->send_tail->next = tcb;
else
- adapter->tx_ring.send_head = tcb;
+ tx_ring->send_head = tcb;
- adapter->tx_ring.send_tail = tcb;
+ tx_ring->send_tail = tcb;
WARN_ON(tcb->next != NULL);
- adapter->tx_ring.used++;
+ tx_ring->used++;
spin_unlock(&adapter->tcb_send_qlock);
/* Write the new write pointer back to the device. */
- writel(adapter->tx_ring.send_idx,
- &adapter->regs->txdma.service_request);
+ writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);
/* For Gig only, we use Tx Interrupt coalescing. Enable the software
* timer to wake us up if this packet isn't followed by N more.
@@ -3056,19 +2936,16 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
}
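nic_send_packet() above treats send_idx as a packed value: the low bits are the ring position (extracted by INDEX10()) and one extra bit (ET_DMA10_WRAP) flips every time the index wraps, so producer and consumer can distinguish a full ring from an empty one. A simplified, self-contained sketch of that convention (the constants below are illustrative, not the driver's actual definitions):

	#define DEMO_RING_ENTRIES	512		/* ring size, illustrative */
	#define DEMO_IDX_MASK		0x03ff		/* low 10 bits: position in ring */
	#define DEMO_WRAP_FLAG		0x0400		/* toggles on every wrap-around */

	/* Advance a packed index by n slots (n must not exceed the ring size). */
	static unsigned int demo_add_10bit(unsigned int idx, unsigned int n)
	{
		unsigned int pos = (idx & DEMO_IDX_MASK) + n;
		unsigned int wrap = idx & DEMO_WRAP_FLAG;

		if (pos >= DEMO_RING_ENTRIES) {
			pos -= DEMO_RING_ENTRIES;
			wrap ^= DEMO_WRAP_FLAG;	/* crossed the end: flip the flag */
		}
		return wrap | pos;
	}

The completion path further down checks (serviced ^ tcb->index) & ET_DMA10_WRAP to know whether the hardware's service index and a TCB's index belong to the same wrap generation before comparing positions.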
/* send_packet - Do the work to send a packet
- * @skb: the packet(s) to send
- * @adapter: a pointer to the device's private adapter structure
- *
- * Return 0 in almost all cases; non-zero value in extreme hard failure only.
*
* Assumption: Send spinlock has been acquired
*/
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
{
int status;
- struct tcb *tcb = NULL;
+ struct tcb *tcb;
u16 *shbufva;
unsigned long flags;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
/* All packets must have at least a MAC address and a protocol type */
if (skb->len < ETH_HLEN)
@@ -3077,17 +2954,17 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
/* Get a TCB for this packet */
spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
- tcb = adapter->tx_ring.tcb_qhead;
+ tcb = tx_ring->tcb_qhead;
if (tcb == NULL) {
spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
return -ENOMEM;
}
- adapter->tx_ring.tcb_qhead = tcb->next;
+ tx_ring->tcb_qhead = tcb->next;
- if (adapter->tx_ring.tcb_qhead == NULL)
- adapter->tx_ring.tcb_qtail = NULL;
+ if (tx_ring->tcb_qhead == NULL)
+ tx_ring->tcb_qtail = NULL;
spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
@@ -3111,30 +2988,26 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
if (status != 0) {
spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
- if (adapter->tx_ring.tcb_qtail)
- adapter->tx_ring.tcb_qtail->next = tcb;
+ if (tx_ring->tcb_qtail)
+ tx_ring->tcb_qtail->next = tcb;
else
/* Apparently ready Q is empty. */
- adapter->tx_ring.tcb_qhead = tcb;
+ tx_ring->tcb_qhead = tcb;
- adapter->tx_ring.tcb_qtail = tcb;
+ tx_ring->tcb_qtail = tcb;
spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
return status;
}
- WARN_ON(adapter->tx_ring.used > NUM_TCB);
+ WARN_ON(tx_ring->used > NUM_TCB);
return 0;
}
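send_packet() above takes a TCB from a simple singly linked ready list (tcb_qhead/tcb_qtail) and pushes it back on failure; free_send_packet() returns completed TCBs to the same list. A compact sketch of that list discipline, with the spinlocking left to the caller as in the driver (type and function names are placeholders):

	struct demo_tcb { struct demo_tcb *next; };

	struct demo_queue {
		struct demo_tcb *head;
		struct demo_tcb *tail;
	};

	/* Pop the next free TCB, or NULL if the ready list is empty. */
	static struct demo_tcb *demo_pop(struct demo_queue *q)
	{
		struct demo_tcb *tcb = q->head;

		if (tcb) {
			q->head = tcb->next;
			if (!q->head)
				q->tail = NULL;	/* list is now empty */
			tcb->next = NULL;
		}
		return tcb;
	}

	/* Return a TCB to the tail of the ready list. */
	static void demo_push_tail(struct demo_queue *q, struct demo_tcb *tcb)
	{
		tcb->next = NULL;
		if (q->tail)
			q->tail->next = tcb;
		else
			q->head = tcb;		/* list was empty */
		q->tail = tcb;
	}

In the driver both operations run under tcb_ready_qlock with interrupts disabled, since the completion side can refill the list from the ISR work handler.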
-/* et131x_send_packets - This function is called by the OS to send packets
- * @skb: the packet(s) to send
- * @netdev:device on which to TX the above packet(s)
- *
- * Return 0 in almost all cases; non-zero value in extreme hard failure only
- */
+/* et131x_send_packets - This function is called by the OS to send packets */
static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
int status = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct tx_ring *tx_ring = &adapter->tx_ring;
/* Send these packets
*
@@ -3143,7 +3016,7 @@ static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
*/
/* TCB is not available */
- if (adapter->tx_ring.used >= NUM_TCB) {
+ if (tx_ring->used >= NUM_TCB) {
/* NOTE: If there's an error on send, no need to queue the
* packet under Linux; if we just send an error up to the
* netif layer, it will resend the skb to us.
@@ -3187,6 +3060,7 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
unsigned long flags;
struct tx_desc *desc = NULL;
struct net_device_stats *stats = &adapter->net_stats;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
u64 dma_addr;
if (tcb->flags & FMP_DEST_BROAD)
@@ -3204,9 +3078,8 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
* they point to
*/
do {
- desc = (struct tx_desc *)
- (adapter->tx_ring.tx_desc_ring +
- INDEX10(tcb->index_start));
+ desc = tx_ring->tx_desc_ring +
+ INDEX10(tcb->index_start);
dma_addr = desc->addr_lo;
dma_addr |= (u64)desc->addr_hi << 32;
@@ -3221,8 +3094,7 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
tcb->index_start &= ~ET_DMA10_MASK;
tcb->index_start ^= ET_DMA10_WRAP;
}
- } while (desc != (adapter->tx_ring.tx_desc_ring +
- INDEX10(tcb->index)));
+ } while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));
dev_kfree_skb_any(tcb->skb);
}
@@ -3234,20 +3106,19 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
adapter->net_stats.tx_packets++;
- if (adapter->tx_ring.tcb_qtail)
- adapter->tx_ring.tcb_qtail->next = tcb;
+ if (tx_ring->tcb_qtail)
+ tx_ring->tcb_qtail->next = tcb;
else
/* Apparently ready Q is empty. */
- adapter->tx_ring.tcb_qhead = tcb;
+ tx_ring->tcb_qhead = tcb;
- adapter->tx_ring.tcb_qtail = tcb;
+ tx_ring->tcb_qtail = tcb;
spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
- WARN_ON(adapter->tx_ring.used < 0);
+ WARN_ON(tx_ring->used < 0);
}
/* et131x_free_busy_send_packets - Free and complete the stopped active sends
- * @adapter: pointer to our adapter
*
* Assumption - Send spinlock has been acquired
*/
@@ -3256,21 +3127,22 @@ static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
struct tcb *tcb;
unsigned long flags;
u32 freed = 0;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
/* Any packets being sent? Check the first TCB on the send list */
spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
- tcb = adapter->tx_ring.send_head;
+ tcb = tx_ring->send_head;
while (tcb != NULL && freed < NUM_TCB) {
struct tcb *next = tcb->next;
- adapter->tx_ring.send_head = next;
+ tx_ring->send_head = next;
if (next == NULL)
- adapter->tx_ring.send_tail = NULL;
+ tx_ring->send_tail = NULL;
- adapter->tx_ring.used--;
+ tx_ring->used--;
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
@@ -3279,18 +3151,17 @@ static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
- tcb = adapter->tx_ring.send_head;
+ tcb = tx_ring->send_head;
}
WARN_ON(freed == NUM_TCB);
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
- adapter->tx_ring.used = 0;
+ tx_ring->used = 0;
}
/* et131x_handle_send_interrupt - Interrupt handler for sending processing
- * @adapter: pointer to our adapter
*
* Re-claim the send resources, complete sends and get more to send from
* the send wait queue.
@@ -3303,6 +3174,7 @@ static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
u32 serviced;
struct tcb *tcb;
u32 index;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
serviced = readl(&adapter->regs->txdma.new_service_complete);
index = INDEX10(serviced);
@@ -3312,41 +3184,41 @@ static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
*/
spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
- tcb = adapter->tx_ring.send_head;
+ tcb = tx_ring->send_head;
while (tcb &&
((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
index < INDEX10(tcb->index)) {
- adapter->tx_ring.used--;
- adapter->tx_ring.send_head = tcb->next;
+ tx_ring->used--;
+ tx_ring->send_head = tcb->next;
if (tcb->next == NULL)
- adapter->tx_ring.send_tail = NULL;
+ tx_ring->send_tail = NULL;
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
free_send_packet(adapter, tcb);
spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
/* Goto the next packet */
- tcb = adapter->tx_ring.send_head;
+ tcb = tx_ring->send_head;
}
while (tcb &&
!((serviced ^ tcb->index) & ET_DMA10_WRAP)
&& index > (tcb->index & ET_DMA10_MASK)) {
- adapter->tx_ring.used--;
- adapter->tx_ring.send_head = tcb->next;
+ tx_ring->used--;
+ tx_ring->send_head = tcb->next;
if (tcb->next == NULL)
- adapter->tx_ring.send_tail = NULL;
+ tx_ring->send_tail = NULL;
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
free_send_packet(adapter, tcb);
spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
/* Goto the next packet */
- tcb = adapter->tx_ring.send_head;
+ tcb = tx_ring->send_head;
}
/* Wake up the queue when we hit a low-water mark */
- if (adapter->tx_ring.used <= NUM_TCB / 3)
+ if (tx_ring->used <= NUM_TCB / 3)
netif_wake_queue(adapter->netdev);
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
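The wake-up above pairs with the stop in et131x_tx() further down: the transmit path stops the netif queue when nearly all TCBs are in flight, and the completion path wakes it again once usage falls to a low-water mark (NUM_TCB / 3 here). A hedged sketch of the pattern with made-up thresholds:

	#include <linux/netdevice.h>

	#define DEMO_NUM_TCB		64
	#define DEMO_HIGH_WATER		(DEMO_NUM_TCB - 1)
	#define DEMO_LOW_WATER		(DEMO_NUM_TCB / 3)

	/* Called from the transmit path with the current in-flight count. */
	static void demo_maybe_stop(struct net_device *ndev, int used)
	{
		if (used >= DEMO_HIGH_WATER && !netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
	}

	/* Called from the completion path after reclaiming finished sends. */
	static void demo_maybe_wake(struct net_device *ndev, int used)
	{
		if (used <= DEMO_LOW_WATER)
			netif_wake_queue(ndev);
	}

Keeping the wake threshold well below the stop threshold adds hysteresis, so the queue does not flap on and off for every completed packet.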
@@ -3548,9 +3420,7 @@ static struct ethtool_ops et131x_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-/* et131x_hwaddr_init - set up the MAC Address on the ET1310
- * @adapter: pointer to our private adapter structure
- */
+/* et131x_hwaddr_init - set up the MAC Address on the ET1310 */
static void et131x_hwaddr_init(struct et131x_adapter *adapter)
{
/* If have our default mac from init and no mac address from
@@ -3580,14 +3450,12 @@ static void et131x_hwaddr_init(struct et131x_adapter *adapter)
}
/* et131x_pci_init - initial PCI setup
- * @adapter: pointer to our private adapter structure
- * @pdev: our PCI device
*
* Perform the initial setup of PCI registers and if possible initialise
* the MAC address. At this point the I/O registers have yet to be mapped
*/
static int et131x_pci_init(struct et131x_adapter *adapter,
- struct pci_dev *pdev)
+ struct pci_dev *pdev)
{
u16 max_payload;
int i, rc;
@@ -3704,21 +3572,14 @@ static void et131x_error_timer_handler(unsigned long data)
mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
}
-/* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
- * @adapter: pointer to our private adapter structure
- */
+/* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx */
static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
- /* Free DMA memory */
et131x_tx_dma_memory_free(adapter);
et131x_rx_dma_memory_free(adapter);
}
/* et131x_adapter_memory_alloc
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success, errno on failure (as defined in errno.h).
- *
* Allocate all the memory blocks for send, receive and others.
*/
static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
@@ -3727,14 +3588,14 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
/* Allocate memory for the Tx Ring */
status = et131x_tx_dma_memory_alloc(adapter);
- if (status != 0) {
+ if (status) {
dev_err(&adapter->pdev->dev,
"et131x_tx_dma_memory_alloc FAILED\n");
return status;
}
/* Receive buffer memory allocation */
status = et131x_rx_dma_memory_alloc(adapter);
- if (status != 0) {
+ if (status) {
dev_err(&adapter->pdev->dev,
"et131x_rx_dma_memory_alloc FAILED\n");
et131x_tx_dma_memory_free(adapter);
@@ -3744,8 +3605,7 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
/* Init receive data structures */
status = et131x_init_recv(adapter);
if (status) {
- dev_err(&adapter->pdev->dev,
- "et131x_init_recv FAILED\n");
+ dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
et131x_adapter_memory_free(adapter);
}
return status;
@@ -3756,97 +3616,89 @@ static void et131x_adjust_link(struct net_device *netdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
struct phy_device *phydev = adapter->phydev;
- if (phydev && phydev->link != adapter->link) {
- /* Check to see if we are in coma mode and if
- * so, disable it because we will not be able
- * to read PHY values until we are out.
- */
- if (et1310_in_phy_coma(adapter))
- et1310_disable_phy_coma(adapter);
-
- adapter->link = phydev->link;
- phy_print_status(phydev);
-
- if (phydev->link) {
- adapter->boot_coma = 20;
- if (phydev && phydev->speed == SPEED_10) {
- /* NOTE - Is there a way to query this without
- * TruePHY?
- * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
- * EMI_TRUEPHY_A13O) {
- */
- u16 register18;
-
- et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
- &register18);
- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
- register18 | 0x4);
- et131x_mii_write(adapter, PHY_INDEX_REG,
- register18 | 0x8402);
- et131x_mii_write(adapter, PHY_DATA_REG,
- register18 | 511);
- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
- register18);
- }
+ if (!phydev)
+ return;
+ if (phydev->link == adapter->link)
+ return;
- et1310_config_flow_control(adapter);
+ /* Check to see if we are in coma mode and if
+ * so, disable it because we will not be able
+ * to read PHY values until we are out.
+ */
+ if (et1310_in_phy_coma(adapter))
+ et1310_disable_phy_coma(adapter);
- if (phydev && phydev->speed == SPEED_1000 &&
- adapter->registry_jumbo_packet > 2048) {
- u16 reg;
+ adapter->link = phydev->link;
+ phy_print_status(phydev);
- et131x_mii_read(adapter, PHY_CONFIG, &reg);
- reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
- reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
- et131x_mii_write(adapter, PHY_CONFIG, reg);
- }
+ if (phydev->link) {
+ adapter->boot_coma = 20;
+ if (phydev->speed == SPEED_10) {
+ u16 register18;
+
+ et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
+ &register18);
+ et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+ register18 | 0x4);
+ et131x_mii_write(adapter, PHY_INDEX_REG,
+ register18 | 0x8402);
+ et131x_mii_write(adapter, PHY_DATA_REG,
+ register18 | 511);
+ et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+ register18);
+ }
- et131x_set_rx_dma_timer(adapter);
- et1310_config_mac_regs2(adapter);
- } else {
- adapter->boot_coma = 0;
+ et1310_config_flow_control(adapter);
- if (phydev->speed == SPEED_10) {
- /* NOTE - Is there a way to query this without
- * TruePHY?
- * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
- * EMI_TRUEPHY_A13O)
- */
- u16 register18;
-
- et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
- &register18);
- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
- register18 | 0x4);
- et131x_mii_write(adapter, PHY_INDEX_REG,
- register18 | 0x8402);
- et131x_mii_write(adapter, PHY_DATA_REG,
- register18 | 511);
- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
- register18);
- }
+ if (phydev->speed == SPEED_1000 &&
+ adapter->registry_jumbo_packet > 2048) {
+ u16 reg;
- /* Free the packets being actively sent & stopped */
- et131x_free_busy_send_packets(adapter);
+ et131x_mii_read(adapter, PHY_CONFIG, &reg);
+ reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
+ reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
+ et131x_mii_write(adapter, PHY_CONFIG, reg);
+ }
- /* Re-initialize the send structures */
- et131x_init_send(adapter);
+ et131x_set_rx_dma_timer(adapter);
+ et1310_config_mac_regs2(adapter);
+ } else {
+ adapter->boot_coma = 0;
+
+ if (phydev->speed == SPEED_10) {
+ u16 register18;
+
+ et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
+ &register18);
+ et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+ register18 | 0x4);
+ et131x_mii_write(adapter, PHY_INDEX_REG,
+ register18 | 0x8402);
+ et131x_mii_write(adapter, PHY_DATA_REG,
+ register18 | 511);
+ et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+ register18);
+ }
- /* Bring the device back to the state it was during
- * init prior to autonegotiation being complete. This
- * way, when we get the auto-neg complete interrupt,
- * we can complete init by calling config_mac_regs2.
- */
- et131x_soft_reset(adapter);
+ /* Free the packets being actively sent & stopped */
+ et131x_free_busy_send_packets(adapter);
- /* Setup ET1310 as per the documentation */
- et131x_adapter_setup(adapter);
+ /* Re-initialize the send structures */
+ et131x_init_send(adapter);
- /* perform reset of tx/rx */
- et131x_disable_txrx(netdev);
- et131x_enable_txrx(netdev);
- }
+ /* Bring the device back to the state it was during
+ * init prior to autonegotiation being complete. This
+ * way, when we get the auto-neg complete interrupt,
+ * we can complete init by calling config_mac_regs2.
+ */
+ et131x_soft_reset(adapter);
+
+ /* Setup ET1310 as per the documentation */
+ et131x_adapter_setup(adapter);
+ /* perform reset of tx/rx */
+ et131x_disable_txrx(netdev);
+ et131x_enable_txrx(netdev);
}
}
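The adjust_link rewrite above is a pure restructuring: the outer if (phydev && phydev->link != adapter->link) is replaced by two early returns, which removes one indentation level from the whole body without changing behaviour. Sketch of the guard-clause shape (types and names are illustrative):

	struct demo_phy { int link; };
	struct demo_link_state { int link; };

	static void demo_adjust_link(struct demo_link_state *st, struct demo_phy *phydev)
	{
		if (!phydev)
			return;			/* no PHY attached */
		if (phydev->link == st->link)
			return;			/* nothing changed */

		st->link = phydev->link;
		/* ... link-up / link-down handling stays at the top indent level ... */
	}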
@@ -3883,21 +3735,20 @@ static int et131x_mii_probe(struct net_device *netdev)
phydev->advertising = phydev->supported;
adapter->phydev = phydev;
- dev_info(&adapter->pdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ dev_info(&adapter->pdev->dev,
+ "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
phydev->drv->name, dev_name(&phydev->dev));
return 0;
}
/* et131x_adapter_init
- * @adapter: pointer to the private adapter struct
- * @pdev: pointer to the PCI device
*
* Initialize the data structures for the et131x_adapter object and link
* them together with the platform provided device structures.
*/
static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
- struct pci_dev *pdev)
+ struct pci_dev *pdev)
{
static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
@@ -3925,7 +3776,6 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
}
/* et131x_pci_remove
- * @pdev: a pointer to the device's pci_dev structure
*
* Registered in the pci_driver structure, this function is called when the
* PCI subsystem detects that a PCI device which matches the information
@@ -3952,9 +3802,7 @@ static void et131x_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-/* et131x_up - Bring up a device for use.
- * @netdev: device to be opened
- */
+/* et131x_up - Bring up a device for use. */
static void et131x_up(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -3963,9 +3811,7 @@ static void et131x_up(struct net_device *netdev)
phy_start(adapter->phydev);
}
-/* et131x_down - Bring down the device
- * @netdev: device to be brought down
- */
+/* et131x_down - Bring down the device */
static void et131x_down(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4022,7 +3868,9 @@ static irqreturn_t et131x_isr(int irq, void *dev_id)
{
bool handled = true;
struct net_device *netdev = (struct net_device *)dev_id;
- struct et131x_adapter *adapter = NULL;
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct rx_ring *rx_ring = &adapter->rx_ring;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
u32 status;
if (!netif_device_present(netdev)) {
@@ -4030,8 +3878,6 @@ static irqreturn_t et131x_isr(int irq, void *dev_id)
goto out;
}
- adapter = netdev_priv(netdev);
-
/* If the adapter is in low power state, then it should not
* recognize any interrupt
*/
@@ -4061,13 +3907,13 @@ static irqreturn_t et131x_isr(int irq, void *dev_id)
/* This is our interrupt, so process accordingly */
if (status & ET_INTR_WATCHDOG) {
- struct tcb *tcb = adapter->tx_ring.send_head;
+ struct tcb *tcb = tx_ring->send_head;
if (tcb)
if (++tcb->stale > 1)
status |= ET_INTR_TXDMA_ISR;
- if (adapter->rx_ring.unfinished_receives)
+ if (rx_ring->unfinished_receives)
status |= ET_INTR_RXDMA_XFR_DONE;
else if (tcb == NULL)
writel(0, &adapter->regs->global.watchdog_timer);
@@ -4075,7 +3921,7 @@ static irqreturn_t et131x_isr(int irq, void *dev_id)
status &= ~ET_INTR_WATCHDOG;
}
- if (status == 0) {
+ if (!status) {
/* This interrupt has in some way been "handled" by
* the ISR. Either it was a spurious Rx interrupt, or
* it was a Tx interrupt that has been filtered by
@@ -4101,7 +3947,6 @@ out:
}
/* et131x_isr_handler - The ISR handler
- * @p_adapter, a pointer to the device's private adapter structure
*
* scheduled to run in a deferred context by the ISR. This is where the ISR's
* work actually gets done.
@@ -4125,17 +3970,15 @@ static void et131x_isr_handler(struct work_struct *work)
if (status & ET_INTR_RXDMA_XFR_DONE)
et131x_handle_recv_interrupt(adapter);
- status &= 0xffffffd7;
+ status &= ~(ET_INTR_TXDMA_ERR | ET_INTR_RXDMA_XFR_DONE);
if (!status)
goto out;
/* Handle the TXDMA Error interrupt */
if (status & ET_INTR_TXDMA_ERR) {
- u32 txdma_err;
-
/* Following read also clears the register (COR) */
- txdma_err = readl(&iomem->txdma.tx_dma_error);
+ u32 txdma_err = readl(&iomem->txdma.tx_dma_error);
dev_warn(&adapter->pdev->dev,
"TXDMA_ERR interrupt, error = %d\n",
@@ -4281,11 +4124,7 @@ out:
et131x_enable_interrupts(adapter);
}
-/* et131x_stats - Return the current device statistics.
- * @netdev: device whose stats are being queried
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
+/* et131x_stats - Return the current device statistics */
static struct net_device_stats *et131x_stats(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4327,11 +4166,7 @@ static struct net_device_stats *et131x_stats(struct net_device *netdev)
return stats;
}
-/* et131x_open - Open the device for use.
- * @netdev: device to be opened
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
+/* et131x_open - Open the device for use. */
static int et131x_open(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4360,11 +4195,7 @@ static int et131x_open(struct net_device *netdev)
return result;
}
-/* et131x_close - Close the device
- * @netdev: device to be closed
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
+/* et131x_close - Close the device */
static int et131x_close(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4382,8 +4213,6 @@ static int et131x_close(struct net_device *netdev)
* @netdev: device on which the control request is being made
* @reqbuf: a pointer to the IOCTL request buffer
* @cmd: the IOCTL command code
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
*/
static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
int cmd)
@@ -4400,8 +4229,6 @@ static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
* @adapter: pointer to our private adapter structure
*
* FIXME: lot of dups with MAC code
- *
- * Returns 0 on success, errno on failure
*/
static int et131x_set_packet_filter(struct et131x_adapter *adapter)
{
@@ -4460,9 +4287,7 @@ static int et131x_set_packet_filter(struct et131x_adapter *adapter)
return status;
}
-/* et131x_multicast - The handler to configure multicasting on the interface
- * @netdev: a pointer to a net_device struct representing the device
- */
+/* et131x_multicast - The handler to configure multicasting on the interface */
static void et131x_multicast(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4522,27 +4347,21 @@ static void et131x_multicast(struct net_device *netdev)
* NOTE - This block will always update the multicast_list with the
* hardware, even if the addresses aren't the same.
*/
- if (packet_filter != adapter->packet_filter) {
- /* Call the device's filter function */
+ if (packet_filter != adapter->packet_filter)
et131x_set_packet_filter(adapter);
- }
+
spin_unlock_irqrestore(&adapter->lock, flags);
}
-/* et131x_tx - The handler to tx a packet on the device
- * @skb: data to be Tx'd
- * @netdev: device on which data is to be Tx'd
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
+/* et131x_tx - The handler to tx a packet on the device */
static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
{
int status = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct tx_ring *tx_ring = &adapter->tx_ring;
/* stop the queue if it's getting full */
- if (adapter->tx_ring.used >= NUM_TCB - 1 &&
- !netif_queue_stopped(netdev))
+ if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
netif_stop_queue(netdev);
/* Save the timestamp for the TX timeout watchdog */
@@ -4562,7 +4381,6 @@ static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
}
/* et131x_tx_timeout - Timeout handler
- * @netdev: a pointer to a net_device struct representing the device
*
* The handler called when a Tx request times out. The timeout period is
* specified by the 'tx_timeo" element in the net_device structure (see
@@ -4571,6 +4389,7 @@ static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
static void et131x_tx_timeout(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct tx_ring *tx_ring = &adapter->tx_ring;
struct tcb *tcb;
unsigned long flags;
@@ -4593,7 +4412,7 @@ static void et131x_tx_timeout(struct net_device *netdev)
/* Is send stuck? */
spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
- tcb = adapter->tx_ring.send_head;
+ tcb = tx_ring->send_head;
if (tcb != NULL) {
tcb->count++;
@@ -4619,12 +4438,7 @@ static void et131x_tx_timeout(struct net_device *netdev)
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
-/* et131x_change_mtu - The handler called to change the MTU for the device
- * @netdev: device whose MTU is to be changed
- * @new_mtu: the desired MTU
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
+/* et131x_change_mtu - The handler called to change the MTU for the device */
static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
{
int result = 0;
@@ -4669,22 +4483,13 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
return result;
}
-/* et131x_set_mac_addr - handler to change the MAC address for the device
- * @netdev: device whose MAC is to be changed
- * @new_mac: the desired MAC address
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- *
- * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14
- */
+/* et131x_set_mac_addr - handler to change the MAC address for the device */
static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
{
int result = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
struct sockaddr *address = new_mac;
- /* begin blux */
-
if (adapter == NULL)
return -ENODEV;
@@ -4746,15 +4551,13 @@ static const struct net_device_ops et131x_netdev_ops = {
* @pdev: a pointer to the device's pci_dev structure
* @ent: this device's entry in the pci_device_id table
*
- * Returns 0 on success, errno on failure (as defined in errno.h)
- *
* Registered in the pci_driver structure, this function is called when the
* PCI subsystem finds a new PCI device which matches the information
* contained in the pci_device_id table. This routine is the equivalent to
* a device insertion routine.
*/
static int et131x_pci_setup(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
struct net_device *netdev;
struct et131x_adapter *adapter;
@@ -4930,7 +4733,7 @@ err_disable:
goto out;
}
-static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
+static const struct pci_device_id et131x_pci_table[] = {
{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
{0,}
diff --git a/drivers/staging/et131x/et131x.h b/drivers/staging/et131x/et131x.h
index bbe78a7..2ac6e99 100644
--- a/drivers/staging/et131x/et131x.h
+++ b/drivers/staging/et131x/et131x.h
@@ -1668,43 +1668,3 @@ struct address_map {
#define LED_100TX_SHIFT 4
/* MI Register 29 - 31: Reserved Reg(0x1D - 0x1E) */
-
-/* Defines for PHY access routines */
-
-/* Define bit operation flags */
-#define TRUEPHY_BIT_CLEAR 0
-#define TRUEPHY_BIT_SET 1
-#define TRUEPHY_BIT_READ 2
-
-/* Define read/write operation flags */
-#ifndef TRUEPHY_READ
-#define TRUEPHY_READ 0
-#define TRUEPHY_WRITE 1
-#define TRUEPHY_MASK 2
-#endif
-
-/* Define master/slave configuration values */
-#define TRUEPHY_CFG_SLAVE 0
-#define TRUEPHY_CFG_MASTER 1
-
-/* Define MDI/MDI-X settings */
-#define TRUEPHY_MDI 0
-#define TRUEPHY_MDIX 1
-#define TRUEPHY_AUTO_MDI_MDIX 2
-
-/* Define 10Base-T link polarities */
-#define TRUEPHY_POLARITY_NORMAL 0
-#define TRUEPHY_POLARITY_INVERTED 1
-
-/* Define auto-negotiation results */
-#define TRUEPHY_ANEG_NOT_COMPLETE 0
-#define TRUEPHY_ANEG_COMPLETE 1
-#define TRUEPHY_ANEG_DISABLED 2
-
-/* Define duplex advertisement flags */
-#define TRUEPHY_ADV_DUPLEX_NONE 0x00
-#define TRUEPHY_ADV_DUPLEX_FULL 0x01
-#define TRUEPHY_ADV_DUPLEX_HALF 0x02
-#define TRUEPHY_ADV_DUPLEX_BOTH \
- (TRUEPHY_ADV_DUPLEX_FULL | TRUEPHY_ADV_DUPLEX_HALF)
-
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index 817f837..edd5cef 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -35,7 +35,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kobject.h>
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index 074b0e5..0e499ce 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -34,7 +34,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/boot.h b/drivers/staging/ft1000/ft1000-pcmcia/boot.h
index 9dce54e..60c015c 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/boot.h
+++ b/drivers/staging/ft1000/ft1000-pcmcia/boot.h
@@ -1,158 +1,158 @@
-//---------------------------------------------------------------------------
-// FT1000 driver for Flarion Flash OFDM NIC Device
-//
-// Copyright (C) 2002 Flarion Technologies, All rights reserved.
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License as published by the Free
-// Software Foundation; either version 2 of the License, or (at your option) any
-// later version. This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-// more details. You should have received a copy of the GNU General Public
-// License along with this program; if not, write to the
-// Free Software Foundation, Inc., 59 Temple Place -
-// Suite 330, Boston, MA 02111-1307, USA.
-//---------------------------------------------------------------------------
-//
-// File: boot.h
-//
-// Description: boatloader
-//
-// History:
-// 1/11/05 Whc Ported to Linux.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+ FT1000 driver for Flarion Flash OFDM NIC Device
+
+ Copyright (C) 2002 Flarion Technologies, All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option) any
+ later version. This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details. You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the
+ Free Software Foundation, Inc., 59 Temple Place -
+ Suite 330, Boston, MA 02111-1307, USA.
+ ---------------------------------------------------------------------------
+
+ File: boot.h
+
+ Description: bootloader
+
+ History:
+ 1/11/05 Whc Ported to Linux.
+
+---------------------------------------------------------------------------*/
#ifndef _BOOTH_
#define _BOOTH_
-// Official bootloader
-static unsigned char bootimage [] = {
-0x00,0x00,0x01,0x5E,0x00,0x00
-,0x00,0x00,0x00,0x00,0x02,0xD7
-,0x00,0x00,0x01,0x5E,0x46,0xB3
-,0xE6,0x02,0x00,0x98,0xE6,0x8C
-,0x00,0x98,0xFB,0x92,0xFF,0xFF
-,0x98,0xFB,0x94,0xFF,0xFF,0x98
-,0xFB,0x06,0x08,0x00,0x98,0xFB
-,0x96,0x84,0x00,0x98,0xFB,0x08
-,0x1C,0x00,0x98,0xFB,0x51,0x25
-,0x10,0x1C,0x00,0xE6,0x51,0x01
-,0x07,0xFD,0x4C,0xFF,0x20,0xF5
-,0x51,0x02,0x20,0x08,0x00,0x4C
-,0xFF,0x20,0x3C,0x00,0xC0,0x64
-,0x98,0xC0,0x66,0x98,0xC0,0x68
-,0x98,0xC0,0x6A,0x98,0xC0,0x6C
-,0x98,0x90,0x08,0x90,0x09,0x90
-,0x0A,0x90,0x0B,0x90,0x0C,0x90
-,0x0D,0x90,0x0E,0x90,0x0F,0x90
-,0x04,0x90,0x06,0xFB,0x51,0x22
-,0x16,0x08,0x03,0xFB,0x51,0x52
-,0x16,0x08,0x04,0xFB,0x51,0x24
-,0x2B,0x08,0x06,0xFB,0x51,0x54
-,0x2B,0x08,0x07,0xFB,0x51,0x24
-,0x2B,0x08,0x09,0xFB,0x51,0x54
-,0x2B,0x08,0x0A,0xFB,0x51,0x12
-,0x16,0x08,0x0C,0xFB,0x51,0x52
-,0x16,0x08,0x0D,0x78,0x00,0x00
-,0x00,0x16,0x00,0x00,0xEC,0x31
-,0xAE,0x00,0x00,0x81,0x4C,0x0F
-,0xE6,0x43,0xFF,0xEC,0x31,0x4E
-,0x00,0x00,0x91,0xEC,0x31,0xAE
-,0x00,0x00,0x91,0x4C,0x0F,0xE6
-,0x43,0xFF,0xEC,0x31,0x5E,0x00
-,0x00,0xA1,0xEB,0x31,0x08,0x00
-,0x00,0xA6,0xEB,0x31,0x08,0x00
-,0x00,0xAC,0x3C,0x00,0xEB,0x31
-,0x08,0x00,0x00,0xA8,0x76,0xFE
-,0xFE,0x08,0xEB,0x31,0x08,0x20
-,0x00,0x00,0x76,0xFF,0xFF,0x18
-,0xED,0x31,0x08,0x20,0x00,0x00
-,0x26,0x10,0x04,0x10,0xF5,0x3C
-,0x01,0x3C,0x00,0x08,0x01,0x12
-,0x3C,0x11,0x3C,0x00,0x08,0x01
-,0x0B,0x08,0x00,0x6D,0xEC,0x31
-,0xAE,0x20,0x00,0x06,0xED,0x4D
-,0x08,0x00,0x00,0x67,0x80,0x6F
-,0x00,0x01,0x0B,0x6F,0x00,0x02
-,0x2E,0x76,0xEE,0x01,0x48,0x06
-,0x01,0x39,0xED,0x4D,0x18,0x00
-,0x02,0xED,0x4D,0x08,0x00,0x04
-,0x14,0x06,0xA4,0xED,0x31,0x22
-,0x00,0x00,0xAC,0x76,0xEE,0x07
-,0x48,0x6D,0x22,0x01,0x1E,0x08
-,0x01,0x58,0xEB,0x31,0x08,0x00
-,0x00,0xAC,0x06,0xFF,0xBA,0x3C
-,0x00,0xEB,0x31,0x08,0x20,0x00
-,0x04,0x3C,0x30,0xEB,0x31,0x08
-,0x20,0x00,0x02,0x3C,0x10,0xEB
-,0x31,0x08,0x20,0x00,0x00,0xED
-,0x31,0x08,0x20,0x00,0x00,0x04
-,0x10,0xF7,0xED,0x31,0x08,0x00
-,0x00,0xA2,0x91,0x00,0x9C,0x3C
-,0x80,0xEB,0x31,0x08,0x20,0x00
-,0x04,0x3C,0x20,0xEB,0x31,0x08
-,0x20,0x00,0x02,0x3C,0x10,0xEB
-,0x31,0x08,0x20,0x00,0x00,0xED
-,0x31,0x08,0x20,0x00,0x00,0x04
-,0x10,0xF7,0xED,0x31,0x08,0x20
-,0x00,0x04,0x42,0x10,0x90,0x08
-,0xEC,0x31,0xAE,0x20,0x00,0x06
-,0xA4,0x41,0x08,0x00,0xB6,0xED
-,0x41,0x28,0x7D,0xFF,0xFF,0x22
-,0xB3,0x40,0x98,0x2A,0x32,0xEB
-,0x41,0x28,0xB4,0x43,0xFC,0x05
-,0xFF,0xE6,0xA0,0x31,0x20,0x00
-,0x06,0xEB,0x31,0x08,0x20,0x00
-,0x04,0x3C,0x20,0xEB,0x31,0x08
-,0x20,0x00,0x02,0x3C,0x10,0xEB
-,0x31,0x08,0x20,0x00,0x00,0xED
-,0x31,0x08,0x20,0x00,0x00,0x04
-,0x10,0xF7,0xED,0x31,0x08,0x20
-,0x00,0x04,0x42,0x10,0x90,0x08
-,0xEC,0x31,0xAE,0x20,0x00,0x06
-,0xA4,0x41,0x08,0x00,0x68,0xED
-,0x41,0x28,0x7D,0xFF,0xFF,0x22
-,0xB3,0x40,0x98,0x2A,0x32,0xEB
-,0x41,0x28,0xB4,0x43,0xFC,0x05
-,0xFF,0xE6,0x48,0x04,0xEB,0x31
-,0x08,0x20,0x00,0x04,0xEB,0x31
-,0x18,0x20,0x00,0x02,0x3C,0x11
-,0xEB,0x31,0x18,0x20,0x00,0x00
-,0xED,0x31,0x08,0x20,0x00,0x00
-,0x04,0x10,0xF7,0xED,0x31,0x08
-,0x20,0x00,0x02,0x66,0x00,0x6F
-,0x00,0x01,0x16,0x76,0xEE,0x06
-,0x48,0x4A,0x1E,0x48,0x04,0xED
-,0x31,0x08,0x20,0x00,0x04,0xEB
-,0x31,0x08,0x00,0x00,0xA4,0x48
-,0x04,0xED,0x31,0x08,0x20,0x00
-,0x04,0xEB,0x31,0x08,0x00,0x00
-,0xA2,0x48,0x04,0x20,0x20,0x4A
-,0x7C,0x46,0x82,0x50,0x05,0x50
-,0x15,0xB5,0x1E,0x98,0xED,0x31
-,0x08,0x00,0x00,0xA8,0x10,0x47
-,0x3B,0x2C,0x01,0xDB,0x40,0x11
-,0x98,0xC1,0x1E,0x98,0x10,0x07
-,0x30,0xF9,0x40,0x07,0x18,0x98
-,0x2A,0x10,0xEB,0x31,0x08,0x00
-,0x00,0xA8,0xA4,0x1E,0x98,0xBB
-,0x1E,0x98,0x50,0x14,0x50,0x04
-,0x46,0x83,0x48,0x04,0x02,0x01
-,0x00,0x50,0x05,0x50,0x15,0x10
-,0x87,0x3F,0x90,0x2B,0x18,0x01
-,0x00,0xC0,0x31,0x00,0x00,0xAE
-,0xDF,0x41,0x00,0x08,0x00,0x1A
-,0x42,0x11,0x67,0x01,0xDF,0x41
-,0x02,0x08,0x00,0x10,0x42,0x11
-,0x62,0x01,0xB4,0x43,0x4A,0x68
-,0x50,0x14,0x50,0x04,0x24,0x10
-,0x48,0x04,0xF2,0x31,0x00,0x01
-,0x00,0x00,0xAE,0xF6,0x31,0x00
-,0x01,0x00,0x00,0xAE,0x62,0xE4
-,0xE5,0x61,0x04,0x48,0x04,0xE5
-,0x63,0x05,0x48,0x04,0x20,0x20
-,0x00,0x00,0x00,0x00
+/* Official bootloader */
+static unsigned char bootimage[] = {
+ 0x00, 0x00, 0x01, 0x5E, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0xD7,
+ 0x00, 0x00, 0x01, 0x5E, 0x46, 0xB3,
+ 0xE6, 0x02, 0x00, 0x98, 0xE6, 0x8C,
+ 0x00, 0x98, 0xFB, 0x92, 0xFF, 0xFF,
+ 0x98, 0xFB, 0x94, 0xFF, 0xFF, 0x98,
+ 0xFB, 0x06, 0x08, 0x00, 0x98, 0xFB,
+ 0x96, 0x84, 0x00, 0x98, 0xFB, 0x08,
+ 0x1C, 0x00, 0x98, 0xFB, 0x51, 0x25,
+ 0x10, 0x1C, 0x00, 0xE6, 0x51, 0x01,
+ 0x07, 0xFD, 0x4C, 0xFF, 0x20, 0xF5,
+ 0x51, 0x02, 0x20, 0x08, 0x00, 0x4C,
+ 0xFF, 0x20, 0x3C, 0x00, 0xC0, 0x64,
+ 0x98, 0xC0, 0x66, 0x98, 0xC0, 0x68,
+ 0x98, 0xC0, 0x6A, 0x98, 0xC0, 0x6C,
+ 0x98, 0x90, 0x08, 0x90, 0x09, 0x90,
+ 0x0A, 0x90, 0x0B, 0x90, 0x0C, 0x90,
+ 0x0D, 0x90, 0x0E, 0x90, 0x0F, 0x90,
+ 0x04, 0x90, 0x06, 0xFB, 0x51, 0x22,
+ 0x16, 0x08, 0x03, 0xFB, 0x51, 0x52,
+ 0x16, 0x08, 0x04, 0xFB, 0x51, 0x24,
+ 0x2B, 0x08, 0x06, 0xFB, 0x51, 0x54,
+ 0x2B, 0x08, 0x07, 0xFB, 0x51, 0x24,
+ 0x2B, 0x08, 0x09, 0xFB, 0x51, 0x54,
+ 0x2B, 0x08, 0x0A, 0xFB, 0x51, 0x12,
+ 0x16, 0x08, 0x0C, 0xFB, 0x51, 0x52,
+ 0x16, 0x08, 0x0D, 0x78, 0x00, 0x00,
+ 0x00, 0x16, 0x00, 0x00, 0xEC, 0x31,
+ 0xAE, 0x00, 0x00, 0x81, 0x4C, 0x0F,
+ 0xE6, 0x43, 0xFF, 0xEC, 0x31, 0x4E,
+ 0x00, 0x00, 0x91, 0xEC, 0x31, 0xAE,
+ 0x00, 0x00, 0x91, 0x4C, 0x0F, 0xE6,
+ 0x43, 0xFF, 0xEC, 0x31, 0x5E, 0x00,
+ 0x00, 0xA1, 0xEB, 0x31, 0x08, 0x00,
+ 0x00, 0xA6, 0xEB, 0x31, 0x08, 0x00,
+ 0x00, 0xAC, 0x3C, 0x00, 0xEB, 0x31,
+ 0x08, 0x00, 0x00, 0xA8, 0x76, 0xFE,
+ 0xFE, 0x08, 0xEB, 0x31, 0x08, 0x20,
+ 0x00, 0x00, 0x76, 0xFF, 0xFF, 0x18,
+ 0xED, 0x31, 0x08, 0x20, 0x00, 0x00,
+ 0x26, 0x10, 0x04, 0x10, 0xF5, 0x3C,
+ 0x01, 0x3C, 0x00, 0x08, 0x01, 0x12,
+ 0x3C, 0x11, 0x3C, 0x00, 0x08, 0x01,
+ 0x0B, 0x08, 0x00, 0x6D, 0xEC, 0x31,
+ 0xAE, 0x20, 0x00, 0x06, 0xED, 0x4D,
+ 0x08, 0x00, 0x00, 0x67, 0x80, 0x6F,
+ 0x00, 0x01, 0x0B, 0x6F, 0x00, 0x02,
+ 0x2E, 0x76, 0xEE, 0x01, 0x48, 0x06,
+ 0x01, 0x39, 0xED, 0x4D, 0x18, 0x00,
+ 0x02, 0xED, 0x4D, 0x08, 0x00, 0x04,
+ 0x14, 0x06, 0xA4, 0xED, 0x31, 0x22,
+ 0x00, 0x00, 0xAC, 0x76, 0xEE, 0x07,
+ 0x48, 0x6D, 0x22, 0x01, 0x1E, 0x08,
+ 0x01, 0x58, 0xEB, 0x31, 0x08, 0x00,
+ 0x00, 0xAC, 0x06, 0xFF, 0xBA, 0x3C,
+ 0x00, 0xEB, 0x31, 0x08, 0x20, 0x00,
+ 0x04, 0x3C, 0x30, 0xEB, 0x31, 0x08,
+ 0x20, 0x00, 0x02, 0x3C, 0x10, 0xEB,
+ 0x31, 0x08, 0x20, 0x00, 0x00, 0xED,
+ 0x31, 0x08, 0x20, 0x00, 0x00, 0x04,
+ 0x10, 0xF7, 0xED, 0x31, 0x08, 0x00,
+ 0x00, 0xA2, 0x91, 0x00, 0x9C, 0x3C,
+ 0x80, 0xEB, 0x31, 0x08, 0x20, 0x00,
+ 0x04, 0x3C, 0x20, 0xEB, 0x31, 0x08,
+ 0x20, 0x00, 0x02, 0x3C, 0x10, 0xEB,
+ 0x31, 0x08, 0x20, 0x00, 0x00, 0xED,
+ 0x31, 0x08, 0x20, 0x00, 0x00, 0x04,
+ 0x10, 0xF7, 0xED, 0x31, 0x08, 0x20,
+ 0x00, 0x04, 0x42, 0x10, 0x90, 0x08,
+ 0xEC, 0x31, 0xAE, 0x20, 0x00, 0x06,
+ 0xA4, 0x41, 0x08, 0x00, 0xB6, 0xED,
+ 0x41, 0x28, 0x7D, 0xFF, 0xFF, 0x22,
+ 0xB3, 0x40, 0x98, 0x2A, 0x32, 0xEB,
+ 0x41, 0x28, 0xB4, 0x43, 0xFC, 0x05,
+ 0xFF, 0xE6, 0xA0, 0x31, 0x20, 0x00,
+ 0x06, 0xEB, 0x31, 0x08, 0x20, 0x00,
+ 0x04, 0x3C, 0x20, 0xEB, 0x31, 0x08,
+ 0x20, 0x00, 0x02, 0x3C, 0x10, 0xEB,
+ 0x31, 0x08, 0x20, 0x00, 0x00, 0xED,
+ 0x31, 0x08, 0x20, 0x00, 0x00, 0x04,
+ 0x10, 0xF7, 0xED, 0x31, 0x08, 0x20,
+ 0x00, 0x04, 0x42, 0x10, 0x90, 0x08,
+ 0xEC, 0x31, 0xAE, 0x20, 0x00, 0x06,
+ 0xA4, 0x41, 0x08, 0x00, 0x68, 0xED,
+ 0x41, 0x28, 0x7D, 0xFF, 0xFF, 0x22,
+ 0xB3, 0x40, 0x98, 0x2A, 0x32, 0xEB,
+ 0x41, 0x28, 0xB4, 0x43, 0xFC, 0x05,
+ 0xFF, 0xE6, 0x48, 0x04, 0xEB, 0x31,
+ 0x08, 0x20, 0x00, 0x04, 0xEB, 0x31,
+ 0x18, 0x20, 0x00, 0x02, 0x3C, 0x11,
+ 0xEB, 0x31, 0x18, 0x20, 0x00, 0x00,
+ 0xED, 0x31, 0x08, 0x20, 0x00, 0x00,
+ 0x04, 0x10, 0xF7, 0xED, 0x31, 0x08,
+ 0x20, 0x00, 0x02, 0x66, 0x00, 0x6F,
+ 0x00, 0x01, 0x16, 0x76, 0xEE, 0x06,
+ 0x48, 0x4A, 0x1E, 0x48, 0x04, 0xED,
+ 0x31, 0x08, 0x20, 0x00, 0x04, 0xEB,
+ 0x31, 0x08, 0x00, 0x00, 0xA4, 0x48,
+ 0x04, 0xED, 0x31, 0x08, 0x20, 0x00,
+ 0x04, 0xEB, 0x31, 0x08, 0x00, 0x00,
+ 0xA2, 0x48, 0x04, 0x20, 0x20, 0x4A,
+ 0x7C, 0x46, 0x82, 0x50, 0x05, 0x50,
+ 0x15, 0xB5, 0x1E, 0x98, 0xED, 0x31,
+ 0x08, 0x00, 0x00, 0xA8, 0x10, 0x47,
+ 0x3B, 0x2C, 0x01, 0xDB, 0x40, 0x11,
+ 0x98, 0xC1, 0x1E, 0x98, 0x10, 0x07,
+ 0x30, 0xF9, 0x40, 0x07, 0x18, 0x98,
+ 0x2A, 0x10, 0xEB, 0x31, 0x08, 0x00,
+ 0x00, 0xA8, 0xA4, 0x1E, 0x98, 0xBB,
+ 0x1E, 0x98, 0x50, 0x14, 0x50, 0x04,
+ 0x46, 0x83, 0x48, 0x04, 0x02, 0x01,
+ 0x00, 0x50, 0x05, 0x50, 0x15, 0x10,
+ 0x87, 0x3F, 0x90, 0x2B, 0x18, 0x01,
+ 0x00, 0xC0, 0x31, 0x00, 0x00, 0xAE,
+ 0xDF, 0x41, 0x00, 0x08, 0x00, 0x1A,
+ 0x42, 0x11, 0x67, 0x01, 0xDF, 0x41,
+ 0x02, 0x08, 0x00, 0x10, 0x42, 0x11,
+ 0x62, 0x01, 0xB4, 0x43, 0x4A, 0x68,
+ 0x50, 0x14, 0x50, 0x04, 0x24, 0x10,
+ 0x48, 0x04, 0xF2, 0x31, 0x00, 0x01,
+ 0x00, 0x00, 0xAE, 0xF6, 0x31, 0x00,
+ 0x01, 0x00, 0x00, 0xAE, 0x62, 0xE4,
+ 0xE5, 0x61, 0x04, 0x48, 0x04, 0xE5,
+ 0x63, 0x05, 0x48, 0x04, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0x00
};
#endif
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index 29d0a72..d6421b9 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -15,7 +15,7 @@
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place -
Suite 330, Boston, MA 02111-1307, USA.
------------------------------------------------------------------------------*/
+ -------------------------------------------------------------------------*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -80,19 +80,19 @@ MODULE_SUPPORTED_DEVICE("FT1000");
#define MAX_RCV_LOOP 100
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_read_fifo_len
-// Description: This function will read the ASIC Uplink FIFO status register
-// which will return the number of bytes remaining in the Uplink FIFO.
-// Sixteen bytes are subtracted to make sure that the ASIC does not
-// reach its threshold.
-// Input:
-// dev - network device structure
-// Output:
-// value - number of bytes available in the ASIC Uplink FIFO.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_read_fifo_len
+ Description: This function will read the ASIC Uplink FIFO status register
+ which will return the number of bytes remaining in the Uplink FIFO.
+ Sixteen bytes are subtracted to make sure that the ASIC does not
+ reach its threshold.
+ Input:
+ dev - network device structure
+ Output:
+ value - number of bytes available in the ASIC Uplink FIFO.
+
+ -------------------------------------------------------------------------*/
static inline u16 ft1000_read_fifo_len(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -103,25 +103,25 @@ static inline u16 ft1000_read_fifo_len(struct net_device *dev)
return (ft1000_read_reg(dev, FT1000_REG_MAG_UFSR) - 16);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_read_dpram
-// Description: This function will read the specific area of dpram
-// (Electrabuzz ASIC only)
-// Input:
-// dev - device structure
-// offset - index of dpram
-// Output:
-// value - value of dpram
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_read_dpram
+ Description: This function will read the specific area of dpram
+ (Electrabuzz ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ Output:
+ value - value of dpram
+
+ -------------------------------------------------------------------------*/
u16 ft1000_read_dpram(struct net_device *dev, int offset)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
u16 data;
- // Provide mutual exclusive access while reading ASIC registers.
+ /* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock, flags);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
data = ft1000_read_reg(dev, FT1000_REG_DPRAM_DATA);
@@ -130,54 +130,54 @@ u16 ft1000_read_dpram(struct net_device *dev, int offset)
return (data);
}
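ft1000_read_dpram() and its siblings implement an indexed (address/data pair) register access: an offset is written to the DPRAM address register and the value is then read from or written to the data register, with dpram_lock held so another CPU or the ISR cannot retarget the address register in between. A generic sketch of the idiom (register offsets and accessors below are placeholders, not the ft1000's):

	#include <linux/io.h>
	#include <linux/netdevice.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	static u16 demo_indexed_read(struct net_device *dev, spinlock_t *lock,
				     unsigned long addr_reg, unsigned long data_reg,
				     u16 offset)
	{
		unsigned long flags;
		u16 data;

		spin_lock_irqsave(lock, flags);	/* keep the address/data pair atomic */
		outw(offset, dev->base_addr + addr_reg);
		data = inw(dev->base_addr + data_reg);
		spin_unlock_irqrestore(lock, flags);

		return data;
	}

The same lock also covers the write helpers, which is why a read cannot observe a half-finished write sequence.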
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_write_dpram
-// Description: This function will write to a specific area of dpram
-// (Electrabuzz ASIC only)
-// Input:
-// dev - device structure
-// offset - index of dpram
-// value - value to write
-// Output:
-// none.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_write_dpram
+ Description: This function will write to a specific area of dpram
+ (Electrabuzz ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ value - value to write
+ Output:
+ none.
+
+ -------------------------------------------------------------------------*/
static inline void ft1000_write_dpram(struct net_device *dev,
int offset, u16 value)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
- // Provide mutual exclusive access while reading ASIC registers.
+ /* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock, flags);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, value);
spin_unlock_irqrestore(&info->dpram_lock, flags);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_read_dpram_mag_16
-// Description: This function will read the specific area of dpram
-// (Magnemite ASIC only)
-// Input:
-// dev - device structure
-// offset - index of dpram
-// Output:
-// value - value of dpram
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_read_dpram_mag_16
+ Description: This function will read the specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ Output:
+ value - value of dpram
+
+ -------------------------------------------------------------------------*/
u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
u16 data;
- // Provide mutual exclusive access while reading ASIC registers.
+ /* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock, flags);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
- // check if we want to read upper or lower 32-bit word
+ /* check if we want to read upper or lower 32-bit word */
if (Index) {
data = ft1000_read_reg(dev, FT1000_REG_MAG_DPDATAL);
} else {
@@ -188,26 +188,26 @@ u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
return (data);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_write_dpram_mag_16
-// Description: This function will write to a specific area of dpram
-// (Magnemite ASIC only)
-// Input:
-// dev - device structure
-// offset - index of dpram
-// value - value to write
-// Output:
-// none.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_write_dpram_mag_16
+ Description: This function will write to a specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ value - value to write
+ Output:
+ none.
+
+ -------------------------------------------------------------------------*/
static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
int offset, u16 value, int Index)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
- // Provide mutual exclusive access while reading ASIC registers.
+ /* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock, flags);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
if (Index) {
@@ -218,25 +218,25 @@ static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
spin_unlock_irqrestore(&info->dpram_lock, flags);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_read_dpram_mag_32
-// Description: This function will read the specific area of dpram
-// (Magnemite ASIC only)
-// Input:
-// dev - device structure
-// offset - index of dpram
-// Output:
-// value - value of dpram
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_read_dpram_mag_32
+ Description: This function will read the specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ Output:
+ value - value of dpram
+
+ -------------------------------------------------------------------------*/
u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
u32 data;
- // Provide mutual exclusive access while reading ASIC registers.
+ /* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock, flags);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
data = inl(dev->base_addr + FT1000_REG_MAG_DPDATAL);
@@ -245,41 +245,41 @@ u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
return (data);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_write_dpram_mag_32
-// Description: This function will write to a specific area of dpram
-// (Magnemite ASIC only)
-// Input:
-// dev - device structure
-// offset - index of dpram
-// value - value to write
-// Output:
-// none.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_write_dpram_mag_32
+ Description: This function will write to a specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ value - value to write
+ Output:
+ none.
+
+ -------------------------------------------------------------------------*/
void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
- // Provide mutual exclusive access while reading ASIC registers.
+ /* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock, flags);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
outl(value, dev->base_addr + FT1000_REG_MAG_DPDATAL);
spin_unlock_irqrestore(&info->dpram_lock, flags);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_enable_interrupts
-// Description: This function will enable interrupts base on the current interrupt mask.
-// Input:
-// dev - device structure
-// Output:
-// None.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_enable_interrupts
+ Description: This function will enable interrupts based on the current interrupt mask.
+ Input:
+ dev - device structure
+ Output:
+ None.
+
+ -------------------------------------------------------------------------*/
static void ft1000_enable_interrupts(struct net_device *dev)
{
u16 tempword;
@@ -292,16 +292,16 @@ static void ft1000_enable_interrupts(struct net_device *dev)
tempword);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_disable_interrupts
-// Description: This function will disable all interrupts.
-// Input:
-// dev - device structure
-// Output:
-// None.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_disable_interrupts
+ Description: This function will disable all interrupts.
+ Input:
+ dev - device structure
+ Output:
+ None.
+
+ -------------------------------------------------------------------------*/
static void ft1000_disable_interrupts(struct net_device *dev)
{
u16 tempword;
@@ -314,17 +314,17 @@ static void ft1000_disable_interrupts(struct net_device *dev)
tempword);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_reset_asic
-// Description: This function will call the Card Service function to reset the
-// ASIC.
-// Input:
-// dev - device structure
-// Output:
-// none
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_reset_asic
+ Description: This function will call the Card Service function to reset the
+ ASIC.
+ Input:
+ dev - device structure
+ Output:
+ none
+
+ -------------------------------------------------------------------------*/
static void ft1000_reset_asic(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -335,21 +335,23 @@ static void ft1000_reset_asic(struct net_device *dev)
(*info->ft1000_reset) (pcmcia->link);
- // Let's use the register provided by the Magnemite ASIC to reset the
- // ASIC and DSP.
+ /*
+ * Let's use the register provided by the Magnemite ASIC to reset the
+ * ASIC and DSP.
+ */
if (info->AsicID == MAGNEMITE_ID) {
ft1000_write_reg(dev, FT1000_REG_RESET,
(DSP_RESET_BIT | ASIC_RESET_BIT));
}
mdelay(1);
if (info->AsicID == ELECTRABUZZ_ID) {
- // set watermark to -1 in order to not generate an interrupt
+ /* set watermark to -1 in order to not generate an interrupt */
ft1000_write_reg(dev, FT1000_REG_WATERMARK, 0xffff);
} else {
- // set watermark to -1 in order to not generate an interrupt
+ /* set watermark to -1 in order to not generate an interrupt */
ft1000_write_reg(dev, FT1000_REG_MAG_WATERMARK, 0xffff);
}
- // clear interrupts
+ /* clear interrupts */
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword);
ft1000_write_reg(dev, FT1000_REG_SUP_ISR, tempword);
@@ -358,17 +360,17 @@ static void ft1000_reset_asic(struct net_device *dev)
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_reset_card
-// Description: This function will reset the card
-// Input:
-// dev - device structure
-// Output:
-// status - false (card reset fail)
-// true (card reset successful)
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_reset_card
+ Description: This function will reset the card
+ Input:
+ dev - device structure
+ Output:
+ status - false (card reset fail)
+ true (card reset successful)
+
+ -------------------------------------------------------------------------*/
static int ft1000_reset_card(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -384,9 +386,9 @@ static int ft1000_reset_card(struct net_device *dev)
info->squeseqnum = 0;
ft1000_disable_interrupts(dev);
-// del_timer(&poll_timer);
+ /* del_timer(&poll_timer); */
- // Make sure we free any memory reserve for provisioning
+ /* Make sure we free any memory reserve for provisioning */
while (list_empty(&info->prov_list) == 0) {
DEBUG(0,
"ft1000_hw:ft1000_reset_card:deleting provisioning record\n");
@@ -406,7 +408,7 @@ static int ft1000_reset_card(struct net_device *dev)
(DSP_RESET_BIT | ASIC_RESET_BIT));
}
- // Copy DSP session record into info block if this is not a coldstart
+ /* Copy DSP session record into info block if this is not a coldstart */
if (ft1000_card_present == 1) {
spin_lock_irqsave(&info->dpram_lock, flags);
if (info->AsicID == ELECTRABUZZ_ID) {
@@ -430,29 +432,29 @@ static int ft1000_reset_card(struct net_device *dev)
DEBUG(1, "ft1000_hw:ft1000_reset_card:resetting ASIC\n");
mdelay(10);
- //reset ASIC
+ /* reset ASIC */
ft1000_reset_asic(dev);
DEBUG(1, "ft1000_hw:ft1000_reset_card:downloading dsp image\n");
if (info->AsicID == MAGNEMITE_ID) {
- // Put dsp in reset and take ASIC out of reset
+ /* Put dsp in reset and take ASIC out of reset */
DEBUG(0,
"ft1000_hw:ft1000_reset_card:Put DSP in reset and take ASIC out of reset\n");
ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT);
- // Setting MAGNEMITE ASIC to big endian mode
+ /* Setting MAGNEMITE ASIC to big endian mode */
ft1000_write_reg(dev, FT1000_REG_SUP_CTRL, HOST_INTF_BE);
- // Download bootloader
+ /* Download bootloader */
card_bootload(dev);
- // Take DSP out of reset
+ /* Take DSP out of reset */
ft1000_write_reg(dev, FT1000_REG_RESET, 0);
- // FLARION_DSP_ACTIVE;
+ /* FLARION_DSP_ACTIVE; */
mdelay(10);
DEBUG(0, "ft1000_hw:ft1000_reset_card:Take DSP out of reset\n");
- // Wait for 0xfefe indicating dsp ready before starting download
+ /* Wait for 0xfefe indicating dsp ready before starting download */
for (i = 0; i < 50; i++) {
tempword =
ft1000_read_dpram_mag_16(dev, FT1000_MAG_DPRAM_FEFE,
@@ -470,7 +472,7 @@ static int ft1000_reset_card(struct net_device *dev)
}
} else {
- // Take DSP out of reset
+ /* Take DSP out of reset */
ft1000_write_reg(dev, FT1000_REG_RESET, ~DSP_RESET_BIT);
mdelay(10);
}
@@ -485,17 +487,19 @@ static int ft1000_reset_card(struct net_device *dev)
mdelay(10);
if (info->AsicID == ELECTRABUZZ_ID) {
- // Need to initialize the FIFO length counter to zero in order to sync up
- // with the DSP
+ /*
+ * Need to initialize the FIFO length counter to zero in order to sync up
+ * with the DSP
+ */
info->fifo_cnt = 0;
ft1000_write_dpram(dev, FT1000_FIFO_LEN, info->fifo_cnt);
- // Initialize DSP heartbeat area to ho
+ /* Initialize DSP heartbeat area to ho */
ft1000_write_dpram(dev, FT1000_HI_HO, ho);
tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
DEBUG(1, "ft1000_hw:ft1000_reset_asic:hi_ho value = 0x%x\n",
tempword);
} else {
- // Initialize DSP heartbeat area to ho
+ /* Initialize DSP heartbeat area to ho */
ft1000_write_dpram_mag_16(dev, FT1000_MAG_HI_HO, ho_mag,
FT1000_MAG_HI_HO_INDX);
tempword =
@@ -509,40 +513,44 @@ static int ft1000_reset_card(struct net_device *dev)
ft1000_enable_interrupts(dev);
/* Schedule heartbeat process to run every 2 seconds */
-// poll_timer.expires = jiffies + (2*HZ);
-// poll_timer.data = (u_long)dev;
-// add_timer(&poll_timer);
+ /* poll_timer.expires = jiffies + (2*HZ); */
+ /* poll_timer.data = (u_long)dev; */
+ /* add_timer(&poll_timer); */
return true;
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_chkcard
-// Description: This function will check if the device is presently available on
-// the system.
-// Input:
-// dev - device structure
-// Output:
-// status - false (device is not present)
-// true (device is present)
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_chkcard
+ Description: This function will check if the device is presently available on
+ the system.
+ Input:
+ dev - device structure
+ Output:
+ status - false (device is not present)
+ true (device is present)
+
+ -------------------------------------------------------------------------*/
static int ft1000_chkcard(struct net_device *dev)
{
u16 tempword;
- // Mask register is used to check for device presence since it is never
- // set to zero.
+ /*
+ * Mask register is used to check for device presence since it is never
+ * set to zero.
+ */
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK);
if (tempword == 0) {
DEBUG(1,
"ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n");
return false;
}
- // The system will return the value of 0xffff for the version register
- // if the device is not present.
+ /*
+ * The system will return the value of 0xffff for the version register
+ * if the device is not present.
+ */
tempword = ft1000_read_reg(dev, FT1000_REG_ASIC_ID);
if (tempword == 0xffff) {
DEBUG(1,
@@ -553,17 +561,17 @@ static int ft1000_chkcard(struct net_device *dev)
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_hbchk
-// Description: This function will perform the heart beat check of the DSP as
-// well as the ASIC.
-// Input:
-// dev - device structure
-// Output:
-// none
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_hbchk
+ Description: This function will perform the heartbeat check of the DSP as
+ well as the ASIC.
+ Input:
+ dev - device structure
+ Output:
+ none
+
+ -------------------------------------------------------------------------*/
static void ft1000_hbchk(u_long data)
{
struct net_device *dev = (struct net_device *)data;
@@ -574,7 +582,7 @@ static void ft1000_hbchk(u_long data)
info = netdev_priv(dev);
if (info->CardReady == 1) {
- // Perform dsp heartbeat check
+ /* Perform dsp heartbeat check */
if (info->AsicID == ELECTRABUZZ_ID) {
tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
} else {
@@ -585,7 +593,7 @@ static void ft1000_hbchk(u_long data)
}
DEBUG(1, "ft1000_hw:ft1000_hbchk:hi_ho value = 0x%x\n",
tempword);
- // Let's perform another check if ho is not detected
+ /* Let's perform another check if ho is not detected */
if (tempword != ho) {
if (info->AsicID == ELECTRABUZZ_ID) {
tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
@@ -639,7 +647,7 @@ static void ft1000_hbchk(u_long data)
}
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- // Let's check doorbell again if fail
+ /* Let's check doorbell again if fail */
if (tempword & FT1000_DB_HB) {
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
}
@@ -686,8 +694,10 @@ static void ft1000_hbchk(u_long data)
add_timer(&poll_timer);
return;
}
- // Set dedicated area to hi and ring appropriate doorbell according
- // to hi/ho heartbeat protocol
+ /*
+ * Set dedicated area to hi and ring appropriate doorbell according
+ * to hi/ho heartbeat protocol
+ */
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_dpram(dev, FT1000_HI_HO, hi);
} else {
@@ -703,7 +713,7 @@ static void ft1000_hbchk(u_long data)
(dev, FT1000_MAG_HI_HO,
FT1000_MAG_HI_HO_INDX));
}
- // Let's write hi again if fail
+ /* Let's write hi again if fail */
if (tempword != hi) {
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_dpram(dev, FT1000_HI_HO, hi);
@@ -774,14 +784,14 @@ static void ft1000_hbchk(u_long data)
add_timer(&poll_timer);
}
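
The hunks above implement the hi/ho heartbeat that the comments refer to: the host expects to find ho in the dedicated DPRAM area, writes hi back, and rings the heartbeat doorbell. A minimal user-space sketch of one poll cycle follows; the dpram array, the HI/HO values, the slot index and the doorbell bit are stand-ins for the real FT1000 registers, and the assumption that the DSP answers a hi write with ho before the next poll is inferred from the surrounding code rather than stated in the patch.

#include <stdio.h>
#include <stdint.h>

#define HI 0x6869        /* stand-in for the driver's hi value */
#define HO 0x686f        /* stand-in for the driver's ho value */
#define HI_HO_SLOT 0     /* stand-in for FT1000_HI_HO / FT1000_MAG_HI_HO */
#define DB_HB (1 << 0)   /* stand-in for the FT1000_DB_HB doorbell bit */

static uint16_t dpram[4];   /* fake dual-port RAM shared with the "DSP" */
static uint16_t doorbell;

/* One heartbeat poll, modelled on ft1000_hbchk(): expect ho, write hi,
 * ring the heartbeat doorbell, and report whether the DSP kept up. */
static int heartbeat_poll(void)
{
	if (dpram[HI_HO_SLOT] != HO)
		return -1;              /* DSP did not answer the previous hi */
	dpram[HI_HO_SLOT] = HI;         /* hand the token back to the DSP */
	doorbell |= DB_HB;              /* tell the DSP to look at it */
	return 0;
}

/* Fake DSP side: consume the doorbell and answer hi with ho. */
static void fake_dsp(void)
{
	if ((doorbell & DB_HB) && dpram[HI_HO_SLOT] == HI) {
		dpram[HI_HO_SLOT] = HO;
		doorbell &= ~DB_HB;
	}
}

int main(void)
{
	dpram[HI_HO_SLOT] = HO;         /* the reset path initializes the area to ho */
	for (int i = 0; i < 3; i++) {
		printf("poll %d: %s\n", i, heartbeat_poll() ? "DSP missed" : "alive");
		fake_dsp();
	}
	return 0;
}
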
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_send_cmd
-// Description:
-// Input:
-// Output:
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_send_cmd
+ Description:
+ Input:
+ Output:
+
+ -------------------------------------------------------------------------*/
static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, u16 qtype)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -790,17 +800,19 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
unsigned long flags;
size += sizeof(struct pseudo_hdr);
- // check for odd byte and increment to 16-bit word align value
+ /* check for odd byte and increment to 16-bit word align value */
if ((size & 0x0001)) {
size++;
}
DEBUG(1, "FT1000:ft1000_send_cmd:total length = %d\n", size);
DEBUG(1, "FT1000:ft1000_send_cmd:length = %d\n", ntohs(*ptempbuffer));
- // put message into slow queue area
- // All messages are in the form total_len + pseudo header + message body
+ /*
+ * put message into slow queue area
+ * All messages are in the form total_len + pseudo header + message body
+ */
spin_lock_irqsave(&info->dpram_lock, flags);
- // Make sure SLOWQ doorbell is clear
+ /* Make sure SLOWQ doorbell is clear */
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
i=0;
while (tempword & FT1000_DB_DPRAM_TX) {
@@ -816,9 +828,9 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
FT1000_DPRAM_TX_BASE);
- // Write total length to dpram
+ /* Write total length to dpram */
ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, size);
- // Write pseudo header and messgae body
+ /* Write pseudo header and message body */
for (i = 0; i < (size >> 1); i++) {
DEBUG(1, "FT1000:ft1000_send_cmd:data %d = 0x%x\n", i,
*ptempbuffer);
@@ -828,9 +840,9 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
} else {
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
FT1000_DPRAM_MAG_TX_BASE);
- // Write total length to dpram
+ /* Write total length to dpram */
ft1000_write_reg(dev, FT1000_REG_MAG_DPDATAH, htons(size));
- // Write pseudo header and messgae body
+ /* Write pseudo header and message body */
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
FT1000_DPRAM_MAG_TX_BASE + 1);
for (i = 0; i < (size >> 2); i++) {
@@ -850,23 +862,23 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
}
spin_unlock_irqrestore(&info->dpram_lock, flags);
- // ring doorbell to notify DSP that we have a message ready
+ /* ring doorbell to notify DSP that we have a message ready */
ft1000_write_reg(dev, FT1000_REG_DOORBELL, FT1000_DB_DPRAM_TX);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_receive_cmd
-// Description: This function will read a message from the dpram area.
-// Input:
-// dev - network device structure
-// pbuffer - caller supply address to buffer
-// pnxtph - pointer to next pseudo header
-// Output:
-// Status = 0 (unsuccessful)
-// = 1 (successful)
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_receive_cmd
+ Description: This function will read a message from the dpram area.
+ Input:
+ dev - network device structure
+ pbuffer - caller supplied address of buffer
+ pnxtph - pointer to next pseudo header
+ Output:
+ Status = 0 (unsuccessful)
+ = 1 (successful)
+
+ -------------------------------------------------------------------------*/
static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
int maxsz, u16 *pnxtph)
{
@@ -919,7 +931,7 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
FT1000_REG_MAG_DPDATAH);
pbuffer++;
}
- //copy odd aligned word
+ /* copy odd aligned word */
*pbuffer = inw(dev->base_addr + FT1000_REG_MAG_DPDATAL);
DEBUG(1, "ft1000_hw:received data = 0x%x\n", *pbuffer);
pbuffer++;
@@ -928,14 +940,16 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
pbuffer++;
}
if (size & 0x0001) {
- //copy odd byte from fifo
+ /* copy odd byte from fifo */
tempword = ft1000_read_reg(dev, FT1000_REG_DPRAM_DATA);
*pbuffer = ntohs(tempword);
}
spin_unlock_irqrestore(&info->dpram_lock, flags);
- // Check if pseudo header checksum is good
- // Calculate pseudo header checksum
+ /*
+ * Check if pseudo header checksum is good
+ * Calculate pseudo header checksum
+ */
tempword = *ppseudohdr++;
for (i = 1; i < 7; i++) {
tempword ^= *ppseudohdr++;
@@ -943,24 +957,24 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
if ((tempword != *ppseudohdr)) {
DEBUG(1,
"FT1000:ft1000_receive_cmd:Pseudo header checksum mismatch\n");
- // Drop this message
+ /* Drop this message */
return false;
}
return true;
}
}
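
The checksum test converted in this hunk XORs the first seven 16-bit words of the pseudo header and compares the result against the eighth word, both when building commands (ft1000_send_cmd) and when validating received ones (ft1000_receive_cmd). A minimal stand-alone sketch, with the header reduced to a bare array of eight words and the field values invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* XOR of words 0..6 must equal word 7, mirroring the checksum loop in
 * ft1000_receive_cmd() and the generation loop in ft1000_send_cmd(). */
static int pseudo_hdr_checksum_ok(const uint16_t hdr[8])
{
	uint16_t chk = hdr[0];
	int i;

	for (i = 1; i < 7; i++)
		chk ^= hdr[i];
	return chk == hdr[7];
}

int main(void)
{
	/* invented field values; only the XOR relation matters here */
	uint16_t hdr[8] = { 0x0012, 0x1000, 0x0000, 0x0001, 0x0000, 0x0000, 0x0000, 0 };

	hdr[7] = hdr[0] ^ hdr[1] ^ hdr[2] ^ hdr[3] ^ hdr[4] ^ hdr[5] ^ hdr[6];
	printf("valid header:     %s\n", pseudo_hdr_checksum_ok(hdr) ? "keep" : "drop");

	hdr[3] ^= 0x0100;	/* corrupt one field */
	printf("corrupted header: %s\n", pseudo_hdr_checksum_ok(hdr) ? "keep" : "drop");
	return 0;
}
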
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_proc_drvmsg
-// Description: This function will process the various driver messages.
-// Input:
-// dev - device structure
-// pnxtph - pointer to next pseudo header
-// Output:
-// none
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_proc_drvmsg
+ Description: This function will process the various driver messages.
+ Input:
+ dev - device structure
+ pnxtph - pointer to next pseudo header
+ Output:
+ none
+
+ -------------------------------------------------------------------------*/
static void ft1000_proc_drvmsg(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -988,7 +1002,7 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
}
if ( ft1000_receive_cmd(dev, &cmdbuffer[0], MAX_CMD_SQSIZE, &tempword) ) {
- // Get the message type which is total_len + PSEUDO header + msgtype + message body
+ /* Get the message type which is total_len + PSEUDO header + msgtype + message body */
pdrvmsg = (struct drv_msg *) & cmdbuffer[0];
msgtype = ntohs(pdrvmsg->type);
DEBUG(1, "Command message type = 0x%x\n", msgtype);
@@ -999,7 +1013,7 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
mdelay(25);
while (list_empty(&info->prov_list) == 0) {
DEBUG(0, "Sending a provisioning message\n");
- // Make sure SLOWQ doorbell is clear
+ /* Make sure SLOWQ doorbell is clear */
tempword =
ft1000_read_reg(dev, FT1000_REG_DOORBELL);
i = 0;
@@ -1018,10 +1032,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
pmsg = (u16 *) ptr->pprov_data;
ppseudo_hdr = (struct pseudo_hdr *) pmsg;
- // Insert slow queue sequence number
+ /* Insert slow queue sequence number */
ppseudo_hdr->seq_num = info->squeseqnum++;
ppseudo_hdr->portsrc = 0;
- // Calculate new checksum
+ /* Calculate new checksum */
ppseudo_hdr->checksum = *pmsg++;
DEBUG(1, "checksum = 0x%x\n",
ppseudo_hdr->checksum);
@@ -1036,8 +1050,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
kfree(ptr->pprov_data);
kfree(ptr);
}
- // Indicate adapter is ready to take application messages after all
- // provisioning messages are sent
+ /*
+ * Indicate adapter is ready to take application messages after all
+ * provisioning messages are sent
+ */
info->CardReady = 1;
break;
case MEDIA_STATE:
@@ -1118,8 +1134,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
break;
case DSP_GET_INFO:
DEBUG(1, "FT1000:drivermsg:Got DSP_GET_INFO\n");
- // copy dsp info block to dsp
- // allow any outstanding ioctl to finish
+ /*
+ * copy dsp info block to dsp
+ * allow any outstanding ioctl to finish
+ */
mdelay(10);
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
@@ -1132,8 +1150,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
}
if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
- // Put message into Slow Queue
- // Form Pseudo header
+ /*
+ * Put message into Slow Queue
+ * Form Pseudo header
+ */
pmsg = (u16 *) info->DSPInfoBlk;
ppseudo_hdr = (struct pseudo_hdr *) pmsg;
ppseudo_hdr->length =
@@ -1147,11 +1167,11 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
ppseudo_hdr->rsvd1 = 0;
ppseudo_hdr->rsvd2 = 0;
ppseudo_hdr->qos_class = 0;
- // Insert slow queue sequence number
+ /* Insert slow queue sequence number */
ppseudo_hdr->seq_num = info->squeseqnum++;
- // Insert application id
+ /* Insert application id */
ppseudo_hdr->portsrc = 0;
- // Calculate new checksum
+ /* Calculate new checksum */
ppseudo_hdr->checksum = *pmsg++;
for (i = 1; i < 7; i++) {
ppseudo_hdr->checksum ^= *pmsg++;
@@ -1165,8 +1185,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
break;
case GET_DRV_ERR_RPT_MSG:
DEBUG(1, "FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n");
- // copy driver error message to dsp
- // allow any outstanding ioctl to finish
+ /*
+ * copy driver error message to dsp
+ * allow any outstanding ioctl to finish
+ */
mdelay(10);
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
@@ -1179,8 +1201,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
}
if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
- // Put message into Slow Queue
- // Form Pseudo header
+ /*
+ * Put message into Slow Queue
+ * Form Pseudo header
+ */
pmsg = (u16 *) & tempbuffer[0];
ppseudo_hdr = (struct pseudo_hdr *) pmsg;
ppseudo_hdr->length = htons(0x0012);
@@ -1193,11 +1217,11 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
ppseudo_hdr->rsvd1 = 0;
ppseudo_hdr->rsvd2 = 0;
ppseudo_hdr->qos_class = 0;
- // Insert slow queue sequence number
+ /* Insert slow queue sequence number */
ppseudo_hdr->seq_num = info->squeseqnum++;
- // Insert application id
+ /* Insert application id */
ppseudo_hdr->portsrc = 0;
- // Calculate new checksum
+ /* Calculate new checksum */
ppseudo_hdr->checksum = *pmsg++;
for (i=1; i<7; i++) {
ppseudo_hdr->checksum ^= *pmsg++;
@@ -1228,18 +1252,18 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
}
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_parse_dpram_msg
-// Description: This function will parse the message received from the DSP
-// via the DPRAM interface.
-// Input:
-// dev - device structure
-// Output:
-// status - FAILURE
-// SUCCESS
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_parse_dpram_msg
+ Description: This function will parse the message received from the DSP
+ via the DPRAM interface.
+ Input:
+ dev - device structure
+ Output:
+ status - FAILURE
+ SUCCESS
+
+ -------------------------------------------------------------------------*/
static int ft1000_parse_dpram_msg(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -1255,7 +1279,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
DEBUG(1, "Doorbell = 0x%x\n", doorbell);
if (doorbell & FT1000_ASIC_RESET_REQ) {
- // Copy DSP session record from info block
+ /* Copy DSP session record from info block */
spin_lock_irqsave(&info->dpram_lock, flags);
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
@@ -1274,7 +1298,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
}
spin_unlock_irqrestore(&info->dpram_lock, flags);
- // clear ASIC RESET request
+ /* clear ASIC RESET request */
ft1000_write_reg(dev, FT1000_REG_DOORBELL,
FT1000_ASIC_RESET_REQ);
DEBUG(1, "Got an ASIC RESET Request\n");
@@ -1282,7 +1306,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
FT1000_ASIC_RESET_DSP);
if (info->AsicID == MAGNEMITE_ID) {
- // Setting MAGNEMITE ASIC to big endian mode
+ /* Setting MAGNEMITE ASIC to big endian mode */
ft1000_write_reg(dev, FT1000_REG_SUP_CTRL,
HOST_INTF_BE);
}
@@ -1315,8 +1339,10 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
if ((total_len < MAX_CMD_SQSIZE) && (total_len > sizeof(struct pseudo_hdr))) {
total_len += nxtph;
cnt = 0;
- // ft1000_read_reg will return a value that needs to be byteswap
- // in order to get DSP_QID_OFFSET.
+ /*
+ * ft1000_read_reg will return a value that needs to be byteswapped
+ * in order to get DSP_QID_OFFSET.
+ */
if (info->AsicID == ELECTRABUZZ_ID) {
portid =
(ft1000_read_dpram
@@ -1332,7 +1358,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
DEBUG(1, "DSP_QID = 0x%x\n", portid);
if (portid == DRIVERID) {
- // We are assumming one driver message from the DSP at a time.
+ /* We are assuming one driver message from the DSP at a time. */
ft1000_proc_drvmsg(dev);
}
}
@@ -1340,7 +1366,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
}
if (doorbell & FT1000_DB_COND_RESET) {
- // Reset ASIC and DSP
+ /* Reset ASIC and DSP */
if (info->AsicID == ELECTRABUZZ_ID) {
info->DSP_TIME[0] =
ft1000_read_dpram(dev, FT1000_DSP_TIMER0);
@@ -1370,7 +1396,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
ft1000_write_reg(dev, FT1000_REG_DOORBELL,
FT1000_DB_COND_RESET);
}
- // let's clear any unexpected doorbells from DSP
+ /* let's clear any unexpected doorbells from DSP */
doorbell =
doorbell & ~(FT1000_DB_DPRAM_RX | FT1000_ASIC_RESET_REQ |
FT1000_DB_COND_RESET | 0xff00);
@@ -1383,18 +1409,18 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_flush_fifo
-// Description: This function will flush one packet from the downlink
-// FIFO.
-// Input:
-// dev - device structure
-// drv_err - driver error causing the flush fifo
-// Output:
-// None.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_flush_fifo
+ Description: This function will flush one packet from the downlink
+ FIFO.
+ Input:
+ dev - device structure
+ drv_err - driver error causing the flush fifo
+ Output:
+ None.
+
+ -------------------------------------------------------------------------*/
static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -1432,7 +1458,7 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
ft1000_reset_card(dev);
return;
} else {
- // Flush corrupted pkt from FIFO
+ /* Flush corrupted pkt from FIFO */
i = 0;
do {
if (info->AsicID == ELECTRABUZZ_ID) {
@@ -1447,8 +1473,10 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
inw(dev->base_addr + FT1000_REG_MAG_DFSR);
}
i++;
- // This should never happen unless the ASIC is broken.
- // We must reset to recover.
+ /*
+ * This should never happen unless the ASIC is broken.
+ * We must reset to recover.
+ */
if ((i > 2048) || (tempword == 0)) {
if (info->AsicID == ELECTRABUZZ_ID) {
info->DSP_TIME[0] =
@@ -1482,17 +1510,19 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
FT1000_MAG_DSP_TIMER3_INDX);
}
if (tempword == 0) {
- // Let's check if ASIC reads are still ok by reading the Mask register
- // which is never zero at this point of the code.
+ /*
+ * Let's check if ASIC reads are still ok by reading the Mask register
+ * which is never zero at this point of the code.
+ */
tempword =
inw(dev->base_addr +
FT1000_REG_SUP_IMASK);
if (tempword == 0) {
- // This indicates that we can not communicate with the ASIC
+ /* This indicates that we can not communicate with the ASIC */
info->DrvErrNum =
FIFO_FLUSH_BADCNT;
} else {
- // Let's assume that we really flush the FIFO
+ /* Let's assume that we really flushed the FIFO */
pcmcia->PktIntfErr++;
return;
}
@@ -1506,9 +1536,9 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
if (info->AsicID == ELECTRABUZZ_ID) {
i++;
DEBUG(0, "Flushing FIFO complete = %x\n", tempword);
- // Flush last word in FIFO.
+ /* Flush last word in FIFO. */
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
- // Update FIFO counter for DSP
+ /* Update FIFO counter for DSP */
i = i * 2;
DEBUG(0, "Flush Data byte count to dsp = %d\n", i);
info->fifo_cnt += i;
@@ -1516,7 +1546,7 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
info->fifo_cnt);
} else {
DEBUG(0, "Flushing FIFO complete = %x\n", tempword);
- // Flush last word in FIFO
+ /* Flush last word in FIFO */
templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
tempword = inw(dev->base_addr + FT1000_REG_SUP_STAT);
DEBUG(0, "FT1000_REG_SUP_STAT = 0x%x\n", tempword);
@@ -1529,19 +1559,19 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
}
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_copy_up_pkt
-// Description: This function will pull Flarion packets out of the Downlink
-// FIFO and convert it to an ethernet packet. The ethernet packet will
-// then be deliver to the TCP/IP stack.
-// Input:
-// dev - device structure
-// Output:
-// status - FAILURE
-// SUCCESS
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_copy_up_pkt
+ Description: This function will pull a Flarion packet out of the Downlink
+ FIFO and convert it to an ethernet packet. The ethernet packet will
+ then be delivered to the TCP/IP stack.
+ Input:
+ dev - device structure
+ Output:
+ status - FAILURE
+ SUCCESS
+
+ -------------------------------------------------------------------------*/
static int ft1000_copy_up_pkt(struct net_device *dev)
{
u16 tempword;
@@ -1556,7 +1586,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
u32 templong;
DEBUG(1, "ft1000_copy_up_pkt\n");
- // Read length
+ /* Read length */
if (info->AsicID == ELECTRABUZZ_ID) {
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
len = tempword;
@@ -1570,7 +1600,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
if (len > ENET_MAX_SIZE) {
DEBUG(0, "size of ethernet packet invalid\n");
if (info->AsicID == MAGNEMITE_ID) {
- // Read High word to complete 32 bit access
+ /* Read High word to complete 32 bit access */
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
}
ft1000_flush_fifo(dev, DSP_PKTLEN_INFO);
@@ -1582,7 +1612,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
if (skb == NULL) {
DEBUG(0, "No Network buffers available\n");
- // Read High word to complete 32 bit access
+ /* Read High word to complete 32 bit access */
if (info->AsicID == MAGNEMITE_ID) {
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
}
@@ -1592,13 +1622,13 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
}
pbuffer = (u8 *) skb_put(skb, len + 12);
- // Pseudo header
+ /* Pseudo header */
if (info->AsicID == ELECTRABUZZ_ID) {
for (i = 1; i < 7; i++) {
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
chksum ^= tempword;
}
- // read checksum value
+ /* read checksum value */
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
} else {
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
@@ -1625,7 +1655,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
DEBUG(1, "Pseudo = 0x%x\n", tempword);
chksum ^= tempword;
- // read checksum value
+ /* read checksum value */
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
DEBUG(1, "Pseudo = 0x%x\n", tempword);
}
@@ -1638,10 +1668,10 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
kfree_skb(skb);
return FAILURE;
}
- //subtract the number of bytes read already
+ /* subtract the number of bytes read already */
ptemp = pbuffer;
- // fake MAC address
+ /* fake MAC address */
*pbuffer++ = dev->dev_addr[0];
*pbuffer++ = dev->dev_addr[1];
*pbuffer++ = dev->dev_addr[2];
@@ -1666,7 +1696,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
}
}
- // Need to read one more word if odd byte
+ /* Need to read one more word if odd byte */
if (len & 0x0001) {
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
*pbuffer++ = (u8) (tempword >> 8);
@@ -1679,7 +1709,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
*ptemplong++ = templong;
}
- // Need to read one more word if odd align.
+ /* Need to read one more word if odd align. */
if (len & 0x0003) {
templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
DEBUG(1, "Data = 0x%8x\n", templong);
@@ -1699,11 +1729,11 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
netif_rx(skb);
info->stats.rx_packets++;
- // Add on 12 bytes for MAC address which was removed
+ /* Add on 12 bytes for MAC address which was removed */
info->stats.rx_bytes += (len + 12);
if (info->AsicID == ELECTRABUZZ_ID) {
- // track how many bytes have been read from FIFO - round up to 16 bit word
+ /* track how many bytes have been read from FIFO - round up to 16 bit word */
tempword = len + 16;
if (tempword & 0x01)
tempword++;
@@ -1715,21 +1745,21 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
return SUCCESS;
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_copy_down_pkt
-// Description: This function will take an ethernet packet and convert it to
-// a Flarion packet prior to sending it to the ASIC Downlink
-// FIFO.
-// Input:
-// dev - device structure
-// packet - address of ethernet packet
-// len - length of IP packet
-// Output:
-// status - FAILURE
-// SUCCESS
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_copy_down_pkt
+ Description: This function will take an ethernet packet and convert it to
+ a Flarion packet prior to sending it to the ASIC Downlink
+ FIFO.
+ Input:
+ dev - device structure
+ packet - address of ethernet packet
+ len - length of IP packet
+ Output:
+ status - FAILURE
+ SUCCESS
+
+ -------------------------------------------------------------------------*/
static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -1744,7 +1774,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
DEBUG(1, "ft1000_hw: copy_down_pkt()\n");
- // Check if there is room on the FIFO
+ /* Check if there is room on the FIFO */
if (len > ft1000_read_fifo_len(dev)) {
udelay(10);
if (len > ft1000_read_fifo_len(dev)) {
@@ -1769,15 +1799,15 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
return SUCCESS;
}
}
- // Create pseudo header and send pseudo/ip to hardware
+ /* Create pseudo header and send pseudo/ip to hardware */
if (info->AsicID == ELECTRABUZZ_ID) {
pseudo.blk.length = len;
} else {
pseudo.blk.length = ntohs(len);
}
- pseudo.blk.source = DSPID; // Need to swap to get in correct order
+ pseudo.blk.source = DSPID; /* Need to swap to get in correct order */
pseudo.blk.destination = HOSTID;
- pseudo.blk.portdest = NETWORKID; // Need to swap to get in correct order
+ pseudo.blk.portdest = NETWORKID; /* Need to swap to get in correct order */
pseudo.blk.portsrc = DSPAIRID;
pseudo.blk.sh_str_id = 0;
pseudo.blk.control = 0;
@@ -1791,14 +1821,14 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
pseudo.blk.checksum ^= pseudo.buff[i];
}
- // Production Mode
+ /* Production Mode */
if (info->AsicID == ELECTRABUZZ_ID) {
- // copy first word to UFIFO_BEG reg
+ /* copy first word to UFIFO_BEG reg */
ft1000_write_reg(dev, FT1000_REG_UFIFO_BEG, pseudo.buff[0]);
DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 0 BEG = 0x%04x\n",
pseudo.buff[0]);
- // copy subsequent words to UFIFO_MID reg
+ /* copy subsequent words to UFIFO_MID reg */
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[1]);
DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 1 MID = 0x%04x\n",
pseudo.buff[1]);
@@ -1821,7 +1851,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 7 MID = 0x%04x\n",
pseudo.buff[7]);
- // Write PPP type + IP Packet into Downlink FIFO
+ /* Write PPP type + IP Packet into Downlink FIFO */
for (i = 0; i < (len >> 1) - 1; i++) {
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID,
htons(*packet));
@@ -1831,7 +1861,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
packet++;
}
- // Check for odd byte
+ /* Check for odd byte */
if (len & 0x0001) {
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID,
htons(*packet));
@@ -1870,12 +1900,12 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
*(u32 *) & pseudo.buff[6]);
plong = (u32 *) packet;
- // Write PPP type + IP Packet into Downlink FIFO
+ /* Write PPP type + IP Packet into Downlink FIFO */
for (i = 0; i < (len >> 2); i++) {
outl(*plong++, dev->base_addr + FT1000_REG_MAG_UFDR);
}
- // Check for odd alignment
+ /* Check for odd alignment */
if (len & 0x0003) {
DEBUG(1,
"ft1000_hw:ft1000_copy_down_pkt:data = 0x%8x\n",
@@ -1886,7 +1916,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
}
info->stats.tx_packets++;
- // Add 14 bytes for MAC address plus ethernet type
+ /* Add 14 bytes for MAC address plus ethernet type */
info->stats.tx_bytes += (len + 14);
return SUCCESS;
}
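
Several hunks in ft1000_send_cmd and ft1000_copy_down_pkt pad an odd byte count up to the next 16-bit word (size & 0x0001) and, on the Magnemite path, transfer a trailing partial dword whenever len & 0x0003 is set. A small sketch of that rounding, using made-up lengths:

#include <stdio.h>

/* Round a byte count up to a whole 16-bit word, as ft1000_send_cmd() does
 * before copying ("check for odd byte and increment to 16-bit word align"). */
static int round_to_word(int len)
{
	return (len & 0x0001) ? len + 1 : len;
}

/* Round a byte count up to a whole 32-bit dword; this is the effect of the
 * "check for odd alignment" trailer handling on the Magnemite FIFO path. */
static int round_to_dword(int len)
{
	return (len & 0x0003) ? (len | 0x0003) + 1 : len;
}

int main(void)
{
	int samples[] = { 60, 61, 62, 63 };	/* arbitrary example lengths */
	int i;

	for (i = 0; i < 4; i++)
		printf("len %2d -> word-aligned %2d, dword-aligned %2d\n",
		       samples[i], round_to_word(samples[i]), round_to_dword(samples[i]));
	return 0;
}
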
@@ -1931,7 +1961,7 @@ static int ft1000_close(struct net_device *dev)
ft1000_disable_interrupts(dev);
ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT);
- //reset ASIC
+ /* reset ASIC */
ft1000_reset_asic(dev);
}
return 0;
@@ -1995,10 +2025,10 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
ft1000_disable_interrupts(dev);
- // Read interrupt type
+ /* Read interrupt type */
inttype = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
- // Make sure we process all interrupt before leaving the ISR due to the edge trigger interrupt type
+ /* Make sure we process all interrupts before leaving the ISR due to the edge trigger interrupt type */
while (inttype) {
if (inttype & ISR_DOORBELL_PEND)
ft1000_parse_dpram_msg(dev);
@@ -2008,7 +2038,7 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
cnt = 0;
do {
- // Check if we have packets in the Downlink FIFO
+ /* Check if we have packets in the Downlink FIFO */
if (info->AsicID == ELECTRABUZZ_ID) {
tempword =
ft1000_read_reg(dev,
@@ -2027,12 +2057,12 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
} while (cnt < MAX_RCV_LOOP);
}
- // clear interrupts
+ /* clear interrupts */
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword);
ft1000_write_reg(dev, FT1000_REG_SUP_ISR, tempword);
- // Read interrupt type
+ /* Read interrupt type */
inttype = ft1000_read_reg (dev, FT1000_REG_SUP_ISR);
DEBUG(1,"ft1000_hw: interrupt status register after clear = 0x%x\n",inttype);
}
@@ -2044,7 +2074,7 @@ void stop_ft1000_card(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
struct prov_record *ptr;
-// int cnt;
+ /* int cnt; */
DEBUG(0, "ft1000_hw: stop_ft1000_card()\n");
@@ -2053,7 +2083,7 @@ void stop_ft1000_card(struct net_device *dev)
netif_stop_queue(dev);
ft1000_disable_interrupts(dev);
- // Make sure we free any memory reserve for provisioning
+ /* Make sure we free any memory reserve for provisioning */
while (list_empty(&info->prov_list) == 0) {
ptr = list_entry(info->prov_list.next, struct prov_record, list);
list_del(&ptr->list);
@@ -2109,7 +2139,7 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
struct ft1000_pcmcia *pcmcia;
struct net_device *dev;
- static const struct net_device_ops ft1000ops = // Slavius 21.10.2009 due to kernel changes
+ static const struct net_device_ops ft1000ops = /* Slavius 21.10.2009 due to kernel changes */
{
.ndo_open = &ft1000_open,
.ndo_stop = &ft1000_close,
@@ -2169,12 +2199,12 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
info->squeseqnum = 0;
-// dev->hard_start_xmit = &ft1000_start_xmit;
-// dev->get_stats = &ft1000_stats;
-// dev->open = &ft1000_open;
-// dev->stop = &ft1000_close;
+ /* dev->hard_start_xmit = &ft1000_start_xmit; */
+ /* dev->get_stats = &ft1000_stats; */
+ /* dev->open = &ft1000_open; */
+ /* dev->stop = &ft1000_close; */
- dev->netdev_ops = &ft1000ops; // Slavius 21.10.2009 due to kernel changes
+ dev->netdev_ops = &ft1000ops; /* Slavius 21.10.2009 due to kernel changes */
DEBUG(0, "device name = %s\n", dev->name);
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
index 68a55ce..ffdc7f5 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
@@ -560,6 +560,8 @@ static long ft1000_ioctl(struct file *file, unsigned int command,
/* Get the length field to see how many bytes to copy */
result = get_user(msgsz, (__u16 __user *)argp);
+ if (result)
+ break;
msgsz = ntohs(msgsz);
/* DEBUG("FT1000:ft1000_ioctl: length of message = %d\n", msgsz); */
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
index 12f333f..cab9cdf 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
@@ -4,7 +4,6 @@
* This file is part of Express Card USB Driver
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -55,7 +54,7 @@
#define MAX_LENGTH 0x7f0
-// Temporary download mechanism for Magnemite
+/* Temporary download mechanism for Magnemite */
#define DWNLD_MAG_TYPE_LOC 0x00
#define DWNLD_MAG_LEN_LOC 0x01
#define DWNLD_MAG_ADDR_LOC 0x02
@@ -74,36 +73,36 @@
#define HANDSHAKE_MAG_TIMEOUT_VALUE 0xF1F1
-// New Magnemite downloader
+/* New Magnemite downloader */
#define DWNLD_MAG1_HANDSHAKE_LOC 0x00
#define DWNLD_MAG1_TYPE_LOC 0x01
#define DWNLD_MAG1_SIZE_LOC 0x02
#define DWNLD_MAG1_PS_HDR_LOC 0x03
struct dsp_file_hdr {
- long version_id; // Version ID of this image format.
- long package_id; // Package ID of code release.
- long build_date; // Date/time stamp when file was built.
- long commands_offset; // Offset to attached commands in Pseudo Hdr format.
- long loader_offset; // Offset to bootloader code.
- long loader_code_address; // Start address of bootloader.
- long loader_code_end; // Where bootloader code ends.
- long loader_code_size;
- long version_data_offset; // Offset were scrambled version data begins.
- long version_data_size; // Size, in words, of scrambled version data.
- long nDspImages; // Number of DSP images in file.
+ long version_id; /* Version ID of this image format. */
+ long package_id; /* Package ID of code release. */
+ long build_date; /* Date/time stamp when file was built. */
+ long commands_offset; /* Offset to attached commands in Pseudo Hdr format. */
+ long loader_offset; /* Offset to bootloader code. */
+ long loader_code_address; /* Start address of bootloader. */
+ long loader_code_end; /* Where bootloader code ends. */
+ long loader_code_size;
+ long version_data_offset; /* Offset where scrambled version data begins. */
+ long version_data_size; /* Size, in words, of scrambled version data. */
+ long nDspImages; /* Number of DSP images in file. */
};
#pragma pack(1)
struct dsp_image_info {
- long coff_date; // Date/time when DSP Coff image was built.
- long begin_offset; // Offset in file where image begins.
- long end_offset; // Offset in file where image begins.
- long run_address; // On chip Start address of DSP code.
- long image_size; // Size of image.
- long version; // Embedded version # of DSP code.
- unsigned short checksum; // DSP File checksum
- unsigned short pad1;
+ long coff_date; /* Date/time when DSP Coff image was built. */
+ long begin_offset; /* Offset in file where image begins. */
+ long end_offset; /* Offset in file where image ends. */
+ long run_address; /* On chip Start address of DSP code. */
+ long image_size; /* Size of image. */
+ long version; /* Embedded version # of DSP code. */
+ unsigned short checksum; /* DSP File checksum */
+ unsigned short pad1;
};
@@ -151,7 +150,7 @@ static int check_usb_db(struct ft1000_usb *ft1000dev)
}
}
- return HANDSHAKE_MAG_TIMEOUT_VALUE;
+ return -1;
}
/* gets the handshake and compares it with the expected value */
@@ -172,9 +171,8 @@ static u16 get_handshake(struct ft1000_usb *ft1000dev, u16 expected_value)
ft1000dev->fcodeldr);
ft1000dev->fcodeldr = 0;
status = check_usb_db(ft1000dev);
- if (status != STATUS_SUCCESS) {
+ if (status != 0) {
DEBUG("get_handshake: check_usb_db failed\n");
- status = STATUS_FAILURE;
break;
}
status = ft1000_write_register(ft1000dev,
@@ -202,7 +200,7 @@ static u16 get_handshake(struct ft1000_usb *ft1000dev, u16 expected_value)
}
/* write the handshake value to the handshake location */
-static void put_handshake(struct ft1000_usb *ft1000dev,u16 handshake_value)
+static void put_handshake(struct ft1000_usb *ft1000dev, u16 handshake_value)
{
u32 tempx;
u16 tempword;
@@ -268,11 +266,12 @@ static u16 get_handshake_usb(struct ft1000_usb *ft1000dev, u16 expected_value)
return HANDSHAKE_TIMEOUT_VALUE;
}
-static void put_handshake_usb(struct ft1000_usb *ft1000dev,u16 handshake_value)
+static void put_handshake_usb(struct ft1000_usb *ft1000dev, u16 handshake_value)
{
int i;
- for (i=0; i<1000; i++);
+ for (i = 0; i < 1000; i++)
+ ;
}
static u16 get_request_type(struct ft1000_usb *ft1000dev)
@@ -450,7 +449,7 @@ static int write_dpram32_and_check(struct ft1000_usb *ft1000dev,
static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
long word_length)
{
- int status = STATUS_SUCCESS;
+ int status = 0;
u16 dpram;
int loopcnt, i;
u16 tempword;
@@ -499,7 +498,7 @@ static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
} else {
status = write_dpram32_and_check(ft1000dev, tempbuffer,
dpram);
- if (status != STATUS_SUCCESS) {
+ if (status != 0) {
DEBUG("FT1000:download:Write failed tempbuffer[31] = 0x%x\n", tempbuffer[31]);
break;
}
@@ -509,9 +508,9 @@ static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
return status;
}
-static void usb_dnld_complete (struct urb *urb)
+static void usb_dnld_complete(struct urb *urb)
{
- //DEBUG("****** usb_dnld_complete\n");
+ /* DEBUG("****** usb_dnld_complete\n"); */
}
/* writes a block of DSP image to DPRAM
@@ -523,7 +522,7 @@ static void usb_dnld_complete (struct urb *urb)
static int write_blk_fifo(struct ft1000_usb *ft1000dev, u16 **pUsFile,
u8 **pUcFile, long word_length)
{
- int Status = STATUS_SUCCESS;
+ int Status = 0;
int byte_length;
byte_length = word_length * 4;
@@ -586,12 +585,12 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
/*NdisMSleep (100); */
if (word_length > MAX_LENGTH) {
DEBUG("FT1000:download:Download error: Max length exceeded\n");
- return STATUS_FAILURE;
+ return -1;
}
if ((word_length * 2 + (long)c_file) > (long)endpoint) {
/* Error, beyond boot code range.*/
DEBUG("FT1000:download:Download error: Requested len=%d exceeds BOOT code boundary.\n", (int)word_length);
- return STATUS_FAILURE;
+ return -1;
}
if (word_length & 0x1)
word_length++;
@@ -601,11 +600,11 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
status = write_blk(ft1000dev, s_file, c_file, word_length);
/*DEBUG("write_blk returned %d\n", status); */
} else {
- write_blk_fifo(ft1000dev, s_file, c_file, word_length);
+ status = write_blk_fifo(ft1000dev, s_file, c_file, word_length);
if (ft1000dev->usbboot == 0)
ft1000dev->usbboot++;
if (ft1000dev->usbboot == 1)
- ft1000_write_dpram16(ft1000dev,
+ status |= ft1000_write_dpram16(ft1000dev,
DWNLD_MAG1_PS_HDR_LOC, 0, 0);
}
return status;
@@ -615,7 +614,7 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
u32 FileLength)
{
- int status = STATUS_SUCCESS;
+ int status = 0;
u32 state;
u16 handshake;
struct pseudo_hdr *pseudo_header;
@@ -651,9 +650,9 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
ft1000dev->usbboot = 0;
ft1000dev->dspalive = 0xffff;
- //
- // Get version id of file, at first 4 bytes of file, for newer files.
- //
+ /*
+ * Get version id of file, at first 4 bytes of file, for newer files.
+ */
state = STATE_START_DWNLD;
@@ -670,7 +669,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
loader_code_size = file_hdr->loader_code_size;
correct_version = false;
- while ((status == STATUS_SUCCESS) && (state != STATE_DONE_FILE)) {
+ while ((status == 0) && (state != STATE_DONE_FILE)) {
switch (state) {
case STATE_START_DWNLD:
status = scram_start_dwnld(ft1000dev, &handshake,
@@ -702,8 +701,8 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
/* Reposition ptrs to beginning of code section */
s_file = (u16 *) (boot_end);
c_file = (u8 *) (boot_end);
- //DEBUG("FT1000:download:s_file = 0x%8x\n", (int)s_file);
- //DEBUG("FT1000:download:c_file = 0x%8x\n", (int)c_file);
+ /* DEBUG("FT1000:download:s_file = 0x%8x\n", (int)s_file); */
+ /* DEBUG("FT1000:download:c_file = 0x%8x\n", (int)c_file); */
state = STATE_CODE_DWNLD;
ft1000dev->fcodeldr = 1;
break;
@@ -717,7 +716,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
DEBUG
("FT1000:download:Download error: Bad request type=%d in BOOT download state.\n",
request);
- status = STATUS_FAILURE;
+ status = -1;
break;
}
if (ft1000dev->usbboot)
@@ -729,13 +728,13 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
} else {
DEBUG
("FT1000:download:Download error: Handshake failed\n");
- status = STATUS_FAILURE;
+ status = -1;
}
break;
case STATE_CODE_DWNLD:
- //DEBUG("FT1000:STATE_CODE_DWNLD\n");
+ /* DEBUG("FT1000:STATE_CODE_DWNLD\n"); */
ft1000dev->bootmode = 0;
if (ft1000dev->usbboot)
handshake =
@@ -773,7 +772,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
} else {
DEBUG
("FT1000:download:Download error: Got Run address request before image offset request.\n");
- status = STATUS_FAILURE;
+ status = -1;
break;
}
break;
@@ -789,7 +788,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
} else {
DEBUG
("FT1000:download:Download error: Got Size request before image offset request.\n");
- status = STATUS_FAILURE;
+ status = -1;
break;
}
break;
@@ -805,11 +804,11 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
state = STATE_DONE_DWNLD;
break;
case REQUEST_CODE_SEGMENT:
- //DEBUG("FT1000:download: REQUEST_CODE_SEGMENT - CODELOADER\n");
+ /* DEBUG("FT1000:download: REQUEST_CODE_SEGMENT - CODELOADER\n"); */
if (!correct_version) {
DEBUG
("FT1000:download:Download error: Got Code Segment request before image offset request.\n");
- status = STATUS_FAILURE;
+ status = -1;
break;
}
@@ -823,7 +822,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
case REQUEST_MAILBOX_DATA:
DEBUG
("FT1000:download: REQUEST_MAILBOX_DATA\n");
- // Convert length from byte count to word count. Make sure we round up.
+ /* Convert length from byte count to word count. Make sure we round up. */
word_length =
(long)(pft1000info->DSPInfoBlklen +
1) / 2;
@@ -836,7 +835,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
* Position ASIC DPRAM auto-increment pointer.
*/
- data = (u16 *) & mailbox_data->data[0];
+ data = (u16 *) &mailbox_data->data[0];
dpram = (u16) DWNLD_MAG1_PS_HDR_LOC;
if (word_length & 0x1)
word_length++;
@@ -850,7 +849,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
status =
fix_ft1000_write_dpram32
(ft1000dev, dpram++,
- (u8 *) & templong);
+ (u8 *) &templong);
}
break;
@@ -939,7 +938,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
}
dsp_img_info++;
- } //end of for
+ } /* end of for */
if (!correct_version) {
/*
@@ -948,7 +947,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
DEBUG
("FT1000:download:Download error: Bad Version Request = 0x%x.\n",
(int)requested_version);
- status = STATUS_FAILURE;
+ status = -1;
break;
}
break;
@@ -957,7 +956,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
DEBUG
("FT1000:download:Download error: Bad request type=%d in CODE download state.\n",
request);
- status = STATUS_FAILURE;
+ status = -1;
break;
}
if (ft1000dev->usbboot)
@@ -969,7 +968,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
} else {
DEBUG
("FT1000:download:Download error: Handshake failed\n");
- status = STATUS_FAILURE;
+ status = -1;
}
break;
@@ -1002,7 +1001,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
(u32) (pseudo_header_len +
sizeof(struct
pseudo_hdr)));
- // link provisioning data
+ /* link provisioning data */
pprov_record =
kmalloc(sizeof(struct prov_record),
GFP_ATOMIC);
@@ -1013,7 +1012,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
list,
&pft1000info->
prov_list);
- // Move to next entry if available
+ /* Move to next entry if available */
c_file =
(u8 *) ((unsigned long)
c_file +
@@ -1026,14 +1025,14 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
}
} else {
kfree(pbuffer);
- status = STATUS_FAILURE;
+ status = -1;
}
} else {
- status = STATUS_FAILURE;
+ status = -1;
}
} else {
/* Checksum did not compute */
- status = STATUS_FAILURE;
+ status = -1;
}
DEBUG
("ft1000:download: after STATE_SECTION_PROV, state = %d, status= %d\n",
@@ -1046,23 +1045,23 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
break;
default:
- status = STATUS_FAILURE;
+ status = -1;
break;
} /* End Switch */
- if (status != STATUS_SUCCESS)
+ if (status != 0)
break;
/****
// Check if Card is present
status = Harley_Read_Register(&temp, FT1000_REG_SUP_IMASK);
if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0x0000) ) {
- break;
+ break;
}
status = Harley_Read_Register(&temp, FT1000_REG_ASIC_ID);
if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0xffff) ) {
- break;
+ break;
}
****/
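
Throughout ft1000_download.c the driver-private STATUS_SUCCESS/STATUS_FAILURE values are replaced by the usual kernel convention of 0 for success and a negative value for failure, so callers such as the scram_dnldr() state loop can simply test for non-zero and stop at the first error. A stand-alone sketch of that pattern; the stage functions are invented for illustration:

#include <stdio.h>

/* Each stage returns 0 on success or -1 on failure, mirroring the
 * post-patch convention used by scram_dnldr() and its helpers. */
static int stage_boot(void) { return 0; }
static int stage_code(void) { return -1; }	/* simulate a failing stage */
static int stage_prov(void) { return 0; }

int main(void)
{
	int status = 0;

	status = stage_boot();
	if (status == 0)
		status = stage_code();
	if (status == 0)
		status = stage_prov();		/* skipped once status != 0 */

	printf("download %s (status = %d)\n", status ? "failed" : "succeeded", status);
	return status ? 1 : 0;
}
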
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index 0d4931b..a433e33 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -1,12 +1,9 @@
-//=====================================================
-// CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
-//
-//
-// This file is part of Express Card USB Driver
-//
-// $Id:
-//====================================================
-#include <linux/init.h>
+/* CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
+*
+*
+* This file is part of Express Card USB Driver
+*/
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -27,7 +24,9 @@
#define HARLEY_READ_OPERATION 0xc1
#define HARLEY_WRITE_OPERATION 0x41
-//#define JDEBUG
+#if 0
+#define JDEBUG
+#endif
static int ft1000_submit_rx_urb(struct ft1000_info *info);
@@ -35,32 +34,22 @@ static u8 tempbuffer[1600];
#define MAX_RCV_LOOP 100
-//---------------------------------------------------------------------------
-// Function: ft1000_control
-//
-// Parameters: ft1000_usb - device structure
-// pipe - usb control message pipe
-// request - control request
-// requesttype - control message request type
-// value - value to be written or 0
-// index - register index
-// data - data buffer to hold the read/write values
-// size - data size
-// timeout - control message time out value
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function sends a control message via USB interface synchronously
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* send a control message via USB interface synchronously
+* Parameters: ft1000_usb - device structure
+* pipe - usb control message pipe
+* request - control request
+* requesttype - control message request type
+* value - value to be written or 0
+* index - register index
+* data - data buffer to hold the read/write values
+* size - data size
+* timeout - control message time out value
+*/
static int ft1000_control(struct ft1000_usb *ft1000dev, unsigned int pipe,
u8 request, u8 requesttype, u16 value, u16 index,
void *data, u16 size, int timeout)
{
- u16 ret;
+ int ret;
if ((ft1000dev == NULL) || (ft1000dev->dev == NULL)) {
DEBUG("ft1000dev or ft1000dev->dev == NULL, failure\n");
@@ -76,26 +65,11 @@ static int ft1000_control(struct ft1000_usb *ft1000dev, unsigned int pipe,
return ret;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_read_register
-//
-// Parameters: ft1000_usb - device structure
-// Data - data buffer to hold the value read
-// nRegIndex - register index
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function returns the value in a register
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-
-int ft1000_read_register(struct ft1000_usb *ft1000dev, u16* Data,
+/* returns the value in a register */
+int ft1000_read_register(struct ft1000_usb *ft1000dev, u16 *Data,
u16 nRegIndx)
{
- int ret = STATUS_SUCCESS;
+ int ret = 0;
ret = ft1000_control(ft1000dev,
usb_rcvctrlpipe(ft1000dev->dev, 0),
@@ -110,25 +84,11 @@ int ft1000_read_register(struct ft1000_usb *ft1000dev, u16* Data,
return ret;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_write_register
-//
-// Parameters: ft1000_usb - device structure
-// value - value to write into a register
-// nRegIndex - register index
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function writes the value in a register
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* writes the value in a register */
int ft1000_write_register(struct ft1000_usb *ft1000dev, u16 value,
u16 nRegIndx)
{
- int ret = STATUS_SUCCESS;
+ int ret = 0;
ret = ft1000_control(ft1000dev,
usb_sndctrlpipe(ft1000dev->dev, 0),
@@ -143,27 +103,11 @@ int ft1000_write_register(struct ft1000_usb *ft1000dev, u16 value,
return ret;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_read_dpram32
-//
-// Parameters: ft1000_usb - device structure
-// indx - starting address to read
-// buffer - data buffer to hold the data read
-// cnt - number of byte read from DPRAM
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function read a number of bytes from DPRAM
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-
+/* read a number of bytes from DPRAM */
int ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
u16 cnt)
{
- int ret = STATUS_SUCCESS;
+ int ret = 0;
ret = ft1000_control(ft1000dev,
usb_rcvctrlpipe(ft1000dev->dev, 0),
@@ -178,26 +122,11 @@ int ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
return ret;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_write_dpram32
-//
-// Parameters: ft1000_usb - device structure
-// indx - starting address to write the data
-// buffer - data buffer to write into DPRAM
-// cnt - number of bytes to write
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function writes into DPRAM a number of bytes
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* writes into DPRAM a number of bytes */
int ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
u16 cnt)
{
- int ret = STATUS_SUCCESS;
+ int ret = 0;
if (cnt % 4)
cnt += cnt - (cnt % 4);
@@ -215,26 +144,11 @@ int ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
return ret;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_read_dpram16
-//
-// Parameters: ft1000_usb - device structure
-// indx - starting address to read
-// buffer - data buffer to hold the data read
-// hightlow - high or low 16 bit word
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function read 16 bits from DPRAM
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* read 16 bits from DPRAM */
int ft1000_read_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
u8 highlow)
{
- int ret = STATUS_SUCCESS;
+ int ret = 0;
u8 request;
if (highlow == 0)
@@ -255,25 +169,11 @@ int ft1000_read_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
return ret;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_write_dpram16
-//
-// Parameters: ft1000_usb - device structure
-// indx - starting address to write the data
-// value - 16bits value to write
-// hightlow - high or low 16 bit word
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function writes into DPRAM a number of bytes
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-int ft1000_write_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u16 value, u8 highlow)
+/* write 16 bits into DPRAM */
+int ft1000_write_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u16 value,
+ u8 highlow)
{
- int ret = STATUS_SUCCESS;
+ int ret = 0;
u8 request;
if (highlow == 0)
@@ -294,33 +194,18 @@ int ft1000_write_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u16 value, u8 h
return ret;
}
-//---------------------------------------------------------------------------
-// Function: fix_ft1000_read_dpram32
-//
-// Parameters: ft1000_usb - device structure
-// indx - starting address to read
-// buffer - data buffer to hold the data read
-//
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function read DPRAM 4 words at a time
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* read DPRAM 4 words at a time */
int fix_ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx,
u8 *buffer)
{
u8 buf[16];
u16 pos;
- int ret = STATUS_SUCCESS;
+ int ret = 0;
pos = (indx / 4) * 4;
ret = ft1000_read_dpram32(ft1000dev, pos, buf, 16);
- if (ret == STATUS_SUCCESS) {
+ if (ret == 0) {
pos = (indx % 4) * 4;
*buffer++ = buf[pos++];
*buffer++ = buf[pos++];
@@ -338,22 +223,7 @@ int fix_ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx,
}
-//---------------------------------------------------------------------------
-// Function: fix_ft1000_write_dpram32
-//
-// Parameters: ft1000_usb - device structure
-// indx - starting address to write
-// buffer - data buffer to write
-//
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function write to DPRAM 4 words at a time
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* write to DPRAM 4 words at a time */
int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer)
{
u16 pos1;
@@ -362,13 +232,13 @@ int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer)
u8 buf[32];
u8 resultbuffer[32];
u8 *pdata;
- int ret = STATUS_SUCCESS;
+ int ret = 0;
pos1 = (indx / 4) * 4;
pdata = buffer;
ret = ft1000_read_dpram32(ft1000dev, pos1, buf, 16);
- if (ret == STATUS_SUCCESS) {
+ if (ret == 0) {
pos2 = (indx % 4)*4;
buf[pos2++] = *buffer++;
buf[pos2++] = *buffer++;
@@ -382,24 +252,24 @@ int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer)
ret = ft1000_read_dpram32(ft1000dev, pos1, (u8 *)&resultbuffer[0], 16);
- if (ret == STATUS_SUCCESS) {
+ if (ret == 0) {
buffer = pdata;
for (i = 0; i < 16; i++) {
if (buf[i] != resultbuffer[i])
- ret = STATUS_FAILURE;
+ ret = -1;
}
}
- if (ret == STATUS_FAILURE) {
+ if (ret == -1) {
ret = ft1000_write_dpram32(ft1000dev, pos1,
(u8 *)&tempbuffer[0], 16);
ret = ft1000_read_dpram32(ft1000dev, pos1,
(u8 *)&resultbuffer[0], 16);
- if (ret == STATUS_SUCCESS) {
+ if (ret == 0) {
buffer = pdata;
for (i = 0; i < 16; i++) {
if (tempbuffer[i] != resultbuffer[i]) {
- ret = STATUS_FAILURE;
+ ret = -1;
DEBUG("%s Failed to write\n",
__func__);
}
@@ -410,20 +280,10 @@ int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer)
return ret;
}
-
-//------------------------------------------------------------------------
-//
-// Function: card_reset_dsp
-//
-// Synopsis: This function is called to reset or activate the DSP
-//
-// Arguments: value - reset or activate
-//
-// Returns: None
-//-----------------------------------------------------------------------
+/* reset or activate the DSP */
static void card_reset_dsp(struct ft1000_usb *ft1000dev, bool value)
{
- u16 status = STATUS_SUCCESS;
+ int status = 0;
u16 tempword;
status = ft1000_write_register(ft1000dev, HOST_INTF_BE,
@@ -457,21 +317,11 @@ static void card_reset_dsp(struct ft1000_usb *ft1000dev, bool value)
}
}
-//---------------------------------------------------------------------------
-// Function: card_send_command
-//
-// Parameters: ft1000_usb - device structure
-// ptempbuffer - command buffer
-// size - command buffer size
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function sends a command to ASIC
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* send a command to ASIC
+ * Parameters: ft1000_usb - device structure
+ *             ptempbuffer - command buffer
+ *             size - command buffer size
+ */
void card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
int size)
{
@@ -486,7 +336,7 @@ void card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL);
if (temp & 0x0100)
- msleep(10);
+ usleep_range(900, 1100);
/* check for odd word */
size = size + 2;
@@ -496,29 +346,21 @@ void card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
size += 4 - (size % 4);
ft1000_write_dpram32(ft1000dev, 0, commandbuf, size);
- msleep(1);
+ usleep_range(900, 1100);
ft1000_write_register(ft1000dev, FT1000_DB_DPRAM_TX,
FT1000_REG_DOORBELL);
- msleep(1);
+ usleep_range(900, 1100);
ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL);
- if ((temp & 0x0100) == 0) {
- //DEBUG("card_send_command: Message sent\n");
- }
+#if 0
+ if ((temp & 0x0100) == 0)
+ DEBUG("card_send_command: Message sent\n");
+#endif
}
-//--------------------------------------------------------------------------
-//
-// Function: dsp_reload
-//
-// Synopsis: This function is called to load or reload the DSP
-//
-// Arguments: ft1000dev - device structure
-//
-// Returns: None
-//-----------------------------------------------------------------------
+/* load or reload the DSP */
int dsp_reload(struct ft1000_usb *ft1000dev)
{
int status;
@@ -559,7 +401,7 @@ int dsp_reload(struct ft1000_usb *ft1000dev)
/* call codeloader */
status = scram_dnldr(ft1000dev, pFileStart, FileLength);
- if (status != STATUS_SUCCESS)
+ if (status != 0)
return -EIO;
msleep(1000);
@@ -569,17 +411,7 @@ int dsp_reload(struct ft1000_usb *ft1000dev)
return 0;
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_reset_asic
-// Description: This function will call the Card Service function to reset the
-// ASIC.
-// Input:
-// dev - device structure
-// Output:
-// none
-//
-//---------------------------------------------------------------------------
+/* call the Card Service function to reset the ASIC. */
static void ft1000_reset_asic(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -607,18 +439,6 @@ static void ft1000_reset_asic(struct net_device *dev)
DEBUG("ft1000_hw: interrupt status register = 0x%x\n", tempword);
}
-
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_reset_card
-// Description: This function will reset the card
-// Input:
-// dev - device structure
-// Output:
-// status - FALSE (card reset fail)
-// TRUE (card reset successful)
-//
-//---------------------------------------------------------------------------
static int ft1000_reset_card(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -666,19 +486,7 @@ static int ft1000_reset_card(struct net_device *dev)
return TRUE;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_usb_transmit_complete
-//
-// Parameters: urb - transmitted usb urb
-//
-//
-// Returns: none
-//
-// Description: This is the callback function when a urb is transmitted
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* callback function when a urb is transmitted */
static void ft1000_usb_transmit_complete(struct urb *urb)
{
@@ -690,22 +498,10 @@ static void ft1000_usb_transmit_complete(struct urb *urb)
netif_wake_queue(ft1000dev->net);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_copy_down_pkt
-// Description: This function will take an ethernet packet and convert it to
-// a Flarion packet prior to sending it to the ASIC Downlink
-// FIFO.
-// Input:
-// dev - device structure
-// packet - address of ethernet packet
-// len - length of IP packet
-// Output:
-// status - FAILURE
-// SUCCESS
-//
-//---------------------------------------------------------------------------
-static int ft1000_copy_down_pkt(struct net_device *netdev, u8 * packet, u16 len)
+/* take an ethernet packet and convert it to a Flarion
+ * packet prior to sending it to the ASIC Downlink FIFO.
+ */
+static int ft1000_copy_down_pkt(struct net_device *netdev, u8 *packet, u16 len)
{
struct ft1000_info *pInfo = netdev_priv(netdev);
struct ft1000_usb *pFt1000Dev = pInfo->priv;
@@ -769,20 +565,10 @@ static int ft1000_copy_down_pkt(struct net_device *netdev, u8 * packet, u16 len)
return 0;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_start_xmit
-//
-// Parameters: skb - socket buffer to be sent
-// dev - network device
-//
-//
-// Returns: none
-//
-// Description: transmit a ethernet packet
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* transmit an ethernet packet
+ * Parameters: skb - socket buffer to be sent
+ *             dev - network device
+ */
static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ft1000_info *pInfo = netdev_priv(dev);
@@ -827,20 +613,7 @@ err:
return NETDEV_TX_OK;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_open
-//
-// Parameters:
-// dev - network device
-//
-//
-// Returns: none
-//
-// Description: open the network driver
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* open the network driver */
static int ft1000_open(struct net_device *dev)
{
struct ft1000_info *pInfo = netdev_priv(dev);
@@ -871,29 +644,14 @@ static struct net_device_stats *ft1000_netdev_stats(struct net_device *dev)
return &(info->stats);
}
-static const struct net_device_ops ftnet_ops =
-{
+static const struct net_device_ops ftnet_ops = {
.ndo_open = &ft1000_open,
.ndo_stop = &ft1000_close,
.ndo_start_xmit = &ft1000_start_xmit,
.ndo_get_stats = &ft1000_netdev_stats,
};
-//---------------------------------------------------------------------------
-// Function: init_ft1000_netdev
-//
-// Parameters: ft1000dev - device structure
-//
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function initialize the network device
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-
+/* initialize the network device */
static int ft1000_reset(void *dev)
{
ft1000_reset_card(dev);
@@ -931,14 +689,14 @@ int init_ft1000_netdev(struct ft1000_usb *ft1000dev)
card_nr[1] = '\0';
ret_val = kstrtou8(card_nr, 10, &gCardIndex);
if (ret_val) {
- printk(KERN_ERR "Can't parse netdev\n");
+ netdev_err(ft1000dev->net, "Can't parse netdev\n");
goto err_net;
}
ft1000dev->CardNumber = gCardIndex;
DEBUG("card number = %d\n", ft1000dev->CardNumber);
} else {
- printk(KERN_ERR "ft1000: Invalid device name\n");
+ netdev_err(ft1000dev->net, "ft1000: Invalid device name\n");
ret_val = -ENXIO;
goto err_net;
}
@@ -1014,20 +772,7 @@ err_net:
return ret_val;
}
-//---------------------------------------------------------------------------
-// Function: reg_ft1000_netdev
-//
-// Parameters: ft1000dev - device structure
-//
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function register the network driver
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* register the network driver */
int reg_ft1000_netdev(struct ft1000_usb *ft1000dev,
struct usb_interface *intf)
{
@@ -1060,19 +805,9 @@ int reg_ft1000_netdev(struct ft1000_usb *ft1000dev,
return 0;
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_copy_up_pkt
-// Description: This function will take a packet from the FIFO up link and
-// convert it into an ethernet packet and deliver it to the IP stack
-// Input:
-// urb - the receiving usb urb
-//
-// Output:
-// status - FAILURE
-// SUCCESS
-//
-//---------------------------------------------------------------------------
+/* take a packet from the FIFO up link and
+ * convert it into an ethernet packet and deliver it to the IP stack
+ */
static int ft1000_copy_up_pkt(struct urb *urb)
{
struct ft1000_info *info = urb->context;
@@ -1090,9 +825,9 @@ static int ft1000_copy_up_pkt(struct urb *urb)
if (ft1000dev->status & FT1000_STATUS_CLOSING) {
DEBUG("network driver is closed, return\n");
- return STATUS_SUCCESS;
+ return 0;
}
- // Read length
+ /* Read length */
len = urb->transfer_buffer_length;
lena = urb->actual_length;
@@ -1105,7 +840,7 @@ static int ft1000_copy_up_pkt(struct urb *urb)
if (tempword != *chksum) {
info->stats.rx_errors++;
ft1000_submit_rx_urb(info);
- return STATUS_FAILURE;
+ return -1;
}
skb = dev_alloc_skb(len + 12 + 2);
@@ -1114,7 +849,7 @@ static int ft1000_copy_up_pkt(struct urb *urb)
DEBUG("ft1000_copy_up_pkt: No Network buffers available\n");
info->stats.rx_errors++;
ft1000_submit_rx_urb(info);
- return STATUS_FAILURE;
+ return -1;
}
pbuffer = (u8 *) skb_put(skb, len + 12);
@@ -1151,23 +886,11 @@ static int ft1000_copy_up_pkt(struct urb *urb)
ft1000_submit_rx_urb(info);
- return SUCCESS;
+ return 0;
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_submit_rx_urb
-// Description: the receiving function of the network driver
-//
-// Input:
-// info - a private structure contains the device information
-//
-// Output:
-// status - FAILURE
-// SUCCESS
-//
-//---------------------------------------------------------------------------
+/* the receiving function of the network driver */
static int ft1000_submit_rx_urb(struct ft1000_info *info)
{
int result;
@@ -1196,20 +919,7 @@ static int ft1000_submit_rx_urb(struct ft1000_info *info)
return 0;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_close
-//
-// Parameters:
-// net - network device
-//
-//
-// Returns: none
-//
-// Description: close the network driver
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* close the network driver */
int ft1000_close(struct net_device *net)
{
struct ft1000_info *pInfo = netdev_priv(net);
@@ -1227,26 +937,14 @@ int ft1000_close(struct net_device *net)
return 0;
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_chkcard
-// Description: This function will check if the device is presently available on
-// the system.
-// Input:
-// dev - device structure
-// Output:
-// status - FALSE (device is not present)
-// TRUE (device is present)
-//
-//---------------------------------------------------------------------------
+/* check if the device is presently available on the system. */
static int ft1000_chkcard(struct ft1000_usb *dev)
{
u16 tempword;
- u16 status;
+ int status;
if (dev->fCondResetPend) {
- DEBUG
- ("ft1000_hw:ft1000_chkcard:Card is being reset, return FALSE\n");
+ DEBUG("ft1000_hw:ft1000_chkcard:Card is being reset, return FALSE\n");
return TRUE;
}
/* Mask register is used to check for device presence since it is never
@@ -1254,8 +952,7 @@ static int ft1000_chkcard(struct ft1000_usb *dev)
*/
status = ft1000_read_register(dev, &tempword, FT1000_REG_SUP_IMASK);
if (tempword == 0) {
- DEBUG
- ("ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n");
+ DEBUG("ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n");
return FALSE;
}
/* The system will return the value of 0xffff for the version register
@@ -1264,30 +961,22 @@ static int ft1000_chkcard(struct ft1000_usb *dev)
status = ft1000_read_register(dev, &tempword, FT1000_REG_ASIC_ID);
if (tempword != 0x1b01) {
dev->status |= FT1000_STATUS_CLOSING;
- DEBUG
- ("ft1000_hw:ft1000_chkcard: Version = 0xffff Card not detected\n");
+ DEBUG("ft1000_hw:ft1000_chkcard: Version = 0xffff Card not detected\n");
return FALSE;
}
return TRUE;
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_receive_cmd
-// Description: This function will read a message from the dpram area.
-// Input:
-// dev - network device structure
-// pbuffer - caller supply address to buffer
-// pnxtph - pointer to next pseudo header
-// Output:
-// Status = 0 (unsuccessful)
-// = 1 (successful)
-//
-//---------------------------------------------------------------------------
+/* read a message from the dpram area.
+ * Input:
+ *   dev - network device structure
+ *   pbuffer - caller-supplied buffer address
+ */
static bool ft1000_receive_cmd(struct ft1000_usb *dev, u16 *pbuffer,
- int maxsz, u16 *pnxtph)
+ int maxsz)
{
- u16 size, ret;
+ u16 size;
+ int ret;
u16 *ppseudohdr;
int i;
u16 tempword;
@@ -1359,7 +1048,7 @@ static int ft1000_dsp_prov(void *arg)
struct prov_record *ptr;
struct pseudo_hdr *ppseudo_hdr;
u16 *pmsg;
- u16 status;
+ int status;
u16 TempShortBuf[256];
DEBUG("*** DspProv Entered\n");
@@ -1381,7 +1070,7 @@ static int ft1000_dsp_prov(void *arg)
i++;
if (i == 10) {
DEBUG("FT1000:ft1000_dsp_prov:message drop\n");
- return STATUS_FAILURE;
+ return -1;
}
ft1000_read_register(dev, &tempword,
FT1000_REG_DOORBELL);
@@ -1405,9 +1094,8 @@ static int ft1000_dsp_prov(void *arg)
ppseudo_hdr->portsrc = 0;
/* Calculate new checksum */
ppseudo_hdr->checksum = *pmsg++;
- for (i = 1; i < 7; i++) {
+ for (i = 1; i < 7; i++)
ppseudo_hdr->checksum ^= *pmsg++;
- }
TempShortBuf[0] = 0;
TempShortBuf[1] = htons(len);
@@ -1425,7 +1113,7 @@ static int ft1000_dsp_prov(void *arg)
kfree(ptr->pprov_data);
kfree(ptr);
}
- msleep(10);
+ usleep_range(9000, 11000);
}
DEBUG("DSP Provisioning List Entry finished\n");
@@ -1435,7 +1123,7 @@ static int ft1000_dsp_prov(void *arg)
dev->fProvComplete = true;
info->CardReady = 1;
- return STATUS_SUCCESS;
+ return 0;
}
static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
@@ -1449,7 +1137,7 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
u16 i;
struct pseudo_hdr *ppseudo_hdr;
u16 *pmsg;
- u16 status;
+ int status;
union {
u8 byte[2];
u16 wrd;
@@ -1457,7 +1145,7 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
char *cmdbuffer = kmalloc(1600, GFP_KERNEL);
if (!cmdbuffer)
- return STATUS_FAILURE;
+ return -1;
status = ft1000_read_dpram32(dev, 0x200, cmdbuffer, size);
@@ -1481,154 +1169,179 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
DEBUG("ft1000_proc_drvmsg:Command message type = 0x%x\n", msgtype);
switch (msgtype) {
case MEDIA_STATE:{
- DEBUG
- ("ft1000_proc_drvmsg:Command message type = MEDIA_STATE");
-
- pmediamsg = (struct media_msg *)&cmdbuffer[0];
- if (info->ProgConStat != 0xFF) {
- if (pmediamsg->state) {
- DEBUG("Media is up\n");
- if (info->mediastate == 0) {
- if (dev->NetDevRegDone) {
- netif_wake_queue(dev->
- net);
- }
- info->mediastate = 1;
- }
- } else {
- DEBUG("Media is down\n");
- if (info->mediastate == 1) {
- info->mediastate = 0;
- if (dev->NetDevRegDone) {
- }
- info->ConTm = 0;
- }
+ DEBUG("ft1000_proc_drvmsg:Command message type = MEDIA_STATE");
+ pmediamsg = (struct media_msg *)&cmdbuffer[0];
+ if (info->ProgConStat != 0xFF) {
+ if (pmediamsg->state) {
+ DEBUG("Media is up\n");
+ if (info->mediastate == 0) {
+ if (dev->NetDevRegDone)
+ netif_wake_queue(dev->net);
+ info->mediastate = 1;
}
} else {
DEBUG("Media is down\n");
if (info->mediastate == 1) {
info->mediastate = 0;
- info->ConTm = 0;
+ if (dev->NetDevRegDone)
+ info->ConTm = 0;
}
}
- break;
+ } else {
+ DEBUG("Media is down\n");
+ if (info->mediastate == 1) {
+ info->mediastate = 0;
+ info->ConTm = 0;
+ }
}
+ break;
+ }
case DSP_INIT_MSG:{
- DEBUG
- ("ft1000_proc_drvmsg:Command message type = DSP_INIT_MSG");
-
- pdspinitmsg = (struct dsp_init_msg *)&cmdbuffer[2];
- memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ);
- DEBUG("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
- info->DspVer[0], info->DspVer[1], info->DspVer[2],
- info->DspVer[3]);
- memcpy(info->HwSerNum, pdspinitmsg->HwSerNum,
- HWSERNUMSZ);
- memcpy(info->Sku, pdspinitmsg->Sku, SKUSZ);
- memcpy(info->eui64, pdspinitmsg->eui64, EUISZ);
- DEBUG("EUI64=%2x.%2x.%2x.%2x.%2x.%2x.%2x.%2x\n",
- info->eui64[0], info->eui64[1], info->eui64[2],
- info->eui64[3], info->eui64[4], info->eui64[5],
- info->eui64[6], info->eui64[7]);
- dev->net->dev_addr[0] = info->eui64[0];
- dev->net->dev_addr[1] = info->eui64[1];
- dev->net->dev_addr[2] = info->eui64[2];
- dev->net->dev_addr[3] = info->eui64[5];
- dev->net->dev_addr[4] = info->eui64[6];
- dev->net->dev_addr[5] = info->eui64[7];
-
- if (ntohs(pdspinitmsg->length) ==
- (sizeof(struct dsp_init_msg) - 20)) {
- memcpy(info->ProductMode,
- pdspinitmsg->ProductMode, MODESZ);
- memcpy(info->RfCalVer, pdspinitmsg->RfCalVer,
- CALVERSZ);
- memcpy(info->RfCalDate, pdspinitmsg->RfCalDate,
- CALDATESZ);
- DEBUG("RFCalVer = 0x%2x 0x%2x\n",
- info->RfCalVer[0], info->RfCalVer[1]);
- }
- break;
+ DEBUG("ft1000_proc_drvmsg:Command message type = DSP_INIT_MSG");
+ pdspinitmsg = (struct dsp_init_msg *)&cmdbuffer[2];
+ memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ);
+ DEBUG("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
+ info->DspVer[0], info->DspVer[1], info->DspVer[2],
+ info->DspVer[3]);
+ memcpy(info->HwSerNum, pdspinitmsg->HwSerNum,
+ HWSERNUMSZ);
+ memcpy(info->Sku, pdspinitmsg->Sku, SKUSZ);
+ memcpy(info->eui64, pdspinitmsg->eui64, EUISZ);
+ DEBUG("EUI64=%2x.%2x.%2x.%2x.%2x.%2x.%2x.%2x\n",
+ info->eui64[0], info->eui64[1], info->eui64[2],
+ info->eui64[3], info->eui64[4], info->eui64[5],
+ info->eui64[6], info->eui64[7]);
+ dev->net->dev_addr[0] = info->eui64[0];
+ dev->net->dev_addr[1] = info->eui64[1];
+ dev->net->dev_addr[2] = info->eui64[2];
+ dev->net->dev_addr[3] = info->eui64[5];
+ dev->net->dev_addr[4] = info->eui64[6];
+ dev->net->dev_addr[5] = info->eui64[7];
+
+ if (ntohs(pdspinitmsg->length) ==
+ (sizeof(struct dsp_init_msg) - 20)) {
+ memcpy(info->ProductMode, pdspinitmsg->ProductMode,
+ MODESZ);
+ memcpy(info->RfCalVer, pdspinitmsg->RfCalVer, CALVERSZ);
+ memcpy(info->RfCalDate, pdspinitmsg->RfCalDate,
+ CALDATESZ);
+ DEBUG("RFCalVer = 0x%2x 0x%2x\n", info->RfCalVer[0],
+ info->RfCalVer[1]);
}
+ break;
+ }
case DSP_PROVISION:{
- DEBUG
- ("ft1000_proc_drvmsg:Command message type = DSP_PROVISION\n");
+ DEBUG("ft1000_proc_drvmsg:Command message type = DSP_PROVISION\n");
- /* kick off dspprov routine to start provisioning
- * Send provisioning data to DSP
- */
- if (list_empty(&info->prov_list) == 0) {
- dev->fProvComplete = false;
- status = ft1000_dsp_prov(dev);
- if (status != STATUS_SUCCESS)
- goto out;
- } else {
- dev->fProvComplete = true;
- status =
- ft1000_write_register(dev, FT1000_DB_HB,
- FT1000_REG_DOORBELL);
- DEBUG
- ("FT1000:drivermsg:No more DSP provisioning data in dsp image\n");
- }
- DEBUG("ft1000_proc_drvmsg:DSP PROVISION is done\n");
- break;
+ /* kick off dspprov routine to start provisioning
+ * Send provisioning data to DSP
+ */
+ if (list_empty(&info->prov_list) == 0) {
+ dev->fProvComplete = false;
+ status = ft1000_dsp_prov(dev);
+ if (status != 0)
+ goto out;
+ } else {
+ dev->fProvComplete = true;
+ status = ft1000_write_register(dev, FT1000_DB_HB,
+ FT1000_REG_DOORBELL);
+ DEBUG("FT1000:drivermsg:No more DSP provisioning data in dsp image\n");
}
+ DEBUG("ft1000_proc_drvmsg:DSP PROVISION is done\n");
+ break;
+ }
case DSP_STORE_INFO:{
- DEBUG
- ("ft1000_proc_drvmsg:Command message type = DSP_STORE_INFO");
-
- DEBUG("FT1000:drivermsg:Got DSP_STORE_INFO\n");
- tempword = ntohs(pdrvmsg->length);
- info->DSPInfoBlklen = tempword;
- if (tempword < (MAX_DSP_SESS_REC - 4)) {
- pmsg = (u16 *) &pdrvmsg->data[0];
- for (i = 0; i < ((tempword + 1) / 2); i++) {
- DEBUG
- ("FT1000:drivermsg:dsp info data = 0x%x\n",
- *pmsg);
- info->DSPInfoBlk[i + 10] = *pmsg++;
- }
- } else {
- info->DSPInfoBlklen = 0;
+ DEBUG("ft1000_proc_drvmsg:Command message type = DSP_STORE_INFO");
+ DEBUG("FT1000:drivermsg:Got DSP_STORE_INFO\n");
+ tempword = ntohs(pdrvmsg->length);
+ info->DSPInfoBlklen = tempword;
+ if (tempword < (MAX_DSP_SESS_REC - 4)) {
+ pmsg = (u16 *) &pdrvmsg->data[0];
+ for (i = 0; i < ((tempword + 1) / 2); i++) {
+ DEBUG("FT1000:drivermsg:dsp info data = 0x%x\n", *pmsg);
+ info->DSPInfoBlk[i + 10] = *pmsg++;
}
- break;
+ } else {
+ info->DSPInfoBlklen = 0;
}
+ break;
+ }
case DSP_GET_INFO:{
- DEBUG("FT1000:drivermsg:Got DSP_GET_INFO\n");
- /* copy dsp info block to dsp */
- dev->DrvMsgPend = 1;
- /* allow any outstanding ioctl to finish */
+ DEBUG("FT1000:drivermsg:Got DSP_GET_INFO\n");
+ /* copy dsp info block to dsp */
+ dev->DrvMsgPend = 1;
+ /* allow any outstanding ioctl to finish */
+ mdelay(10);
+ status = ft1000_read_register(dev, &tempword,
+ FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX) {
mdelay(10);
- status =
- ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
+ status = ft1000_read_register(dev, &tempword,
+ FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
mdelay(10);
- status =
- ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- mdelay(10);
- status =
- ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX)
- break;
- }
+ status = ft1000_read_register(dev, &tempword,
+ FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX)
+ break;
}
- /* Put message into Slow Queue
- * Form Pseudo header
- */
- pmsg = (u16 *) info->DSPInfoBlk;
- *pmsg++ = 0;
- *pmsg++ =
- htons(info->DSPInfoBlklen + 20 +
- info->DSPInfoBlklen);
- ppseudo_hdr =
- (struct pseudo_hdr *)(u16 *) &info->DSPInfoBlk[2];
- ppseudo_hdr->length =
- htons(info->DSPInfoBlklen + 4 +
- info->DSPInfoBlklen);
+ }
+ /* Put message into Slow Queue Form Pseudo header */
+ pmsg = (u16 *) info->DSPInfoBlk;
+ *pmsg++ = 0;
+ *pmsg++ = htons(info->DSPInfoBlklen + 20 + info->DSPInfoBlklen);
+ ppseudo_hdr =
+ (struct pseudo_hdr *)(u16 *) &info->DSPInfoBlk[2];
+ ppseudo_hdr->length = htons(info->DSPInfoBlklen + 4
+ + info->DSPInfoBlklen);
+ ppseudo_hdr->source = 0x10;
+ ppseudo_hdr->destination = 0x20;
+ ppseudo_hdr->portdest = 0;
+ ppseudo_hdr->portsrc = 0;
+ ppseudo_hdr->sh_str_id = 0;
+ ppseudo_hdr->control = 0;
+ ppseudo_hdr->rsvd1 = 0;
+ ppseudo_hdr->rsvd2 = 0;
+ ppseudo_hdr->qos_class = 0;
+ /* Insert slow queue sequence number */
+ ppseudo_hdr->seq_num = info->squeseqnum++;
+ /* Insert application id */
+ ppseudo_hdr->portsrc = 0;
+ /* Calculate new checksum */
+ ppseudo_hdr->checksum = *pmsg++;
+ for (i = 1; i < 7; i++)
+ ppseudo_hdr->checksum ^= *pmsg++;
+
+ info->DSPInfoBlk[10] = 0x7200;
+ info->DSPInfoBlk[11] = htons(info->DSPInfoBlklen);
+ status = ft1000_write_dpram32(dev, 0,
+ (u8 *)&info->DSPInfoBlk[0],
+ (unsigned short)(info->DSPInfoBlklen + 22));
+ status = ft1000_write_register(dev, FT1000_DB_DPRAM_TX,
+ FT1000_REG_DOORBELL);
+ dev->DrvMsgPend = 0;
+ break;
+ }
+ case GET_DRV_ERR_RPT_MSG:{
+ DEBUG("FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n");
+ /* copy driver error message to dsp */
+ dev->DrvMsgPend = 1;
+ /* allow any outstanding ioctl to finish */
+ mdelay(10);
+ status = ft1000_read_register(dev, &tempword,
+ FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX) {
+ mdelay(10);
+ status = ft1000_read_register(dev, &tempword,
+ FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX)
+ mdelay(10);
+ }
+ if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
+ /* Put message into Slow Queue Form Pseudo header */
+ pmsg = (u16 *) &tempbuffer[0];
+ ppseudo_hdr = (struct pseudo_hdr *)pmsg;
+ ppseudo_hdr->length = htons(0x0012);
ppseudo_hdr->source = 0x10;
ppseudo_hdr->destination = 0x20;
ppseudo_hdr->portdest = 0;
@@ -1647,293 +1360,245 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
for (i = 1; i < 7; i++)
ppseudo_hdr->checksum ^= *pmsg++;
- info->DSPInfoBlk[10] = 0x7200;
- info->DSPInfoBlk[11] = htons(info->DSPInfoBlklen);
- status =
- ft1000_write_dpram32(dev, 0,
- (u8 *) &info->DSPInfoBlk[0],
- (unsigned short)(info->
- DSPInfoBlklen
- + 22));
- status =
- ft1000_write_register(dev, FT1000_DB_DPRAM_TX,
- FT1000_REG_DOORBELL);
- dev->DrvMsgPend = 0;
-
- break;
+ pmsg = (u16 *) &tempbuffer[16];
+ *pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
+ *pmsg++ = htons(0x000e);
+ *pmsg++ = htons(info->DSP_TIME[0]);
+ *pmsg++ = htons(info->DSP_TIME[1]);
+ *pmsg++ = htons(info->DSP_TIME[2]);
+ *pmsg++ = htons(info->DSP_TIME[3]);
+ convert.byte[0] = info->DspVer[0];
+ convert.byte[1] = info->DspVer[1];
+ *pmsg++ = convert.wrd;
+ convert.byte[0] = info->DspVer[2];
+ convert.byte[1] = info->DspVer[3];
+ *pmsg++ = convert.wrd;
+ *pmsg++ = htons(info->DrvErrNum);
+
+ card_send_command(dev, (unsigned char *)&tempbuffer[0],
+ (u16)(0x0012 + PSEUDOSZ));
+ info->DrvErrNum = 0;
}
-
- case GET_DRV_ERR_RPT_MSG:{
- DEBUG("FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n");
- /* copy driver error message to dsp */
- dev->DrvMsgPend = 1;
- /* allow any outstanding ioctl to finish */
- mdelay(10);
- status =
- ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- mdelay(10);
- status =
- ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX)
- mdelay(10);
- }
-
- if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
- /* Put message into Slow Queue
- * Form Pseudo header
- */
- pmsg = (u16 *) &tempbuffer[0];
- ppseudo_hdr = (struct pseudo_hdr *)pmsg;
- ppseudo_hdr->length = htons(0x0012);
- ppseudo_hdr->source = 0x10;
- ppseudo_hdr->destination = 0x20;
- ppseudo_hdr->portdest = 0;
- ppseudo_hdr->portsrc = 0;
- ppseudo_hdr->sh_str_id = 0;
- ppseudo_hdr->control = 0;
- ppseudo_hdr->rsvd1 = 0;
- ppseudo_hdr->rsvd2 = 0;
- ppseudo_hdr->qos_class = 0;
- /* Insert slow queue sequence number */
- ppseudo_hdr->seq_num = info->squeseqnum++;
- /* Insert application id */
- ppseudo_hdr->portsrc = 0;
- /* Calculate new checksum */
- ppseudo_hdr->checksum = *pmsg++;
- for (i = 1; i < 7; i++)
- ppseudo_hdr->checksum ^= *pmsg++;
-
- pmsg = (u16 *) &tempbuffer[16];
- *pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
- *pmsg++ = htons(0x000e);
- *pmsg++ = htons(info->DSP_TIME[0]);
- *pmsg++ = htons(info->DSP_TIME[1]);
- *pmsg++ = htons(info->DSP_TIME[2]);
- *pmsg++ = htons(info->DSP_TIME[3]);
- convert.byte[0] = info->DspVer[0];
- convert.byte[1] = info->DspVer[1];
- *pmsg++ = convert.wrd;
- convert.byte[0] = info->DspVer[2];
- convert.byte[1] = info->DspVer[3];
- *pmsg++ = convert.wrd;
- *pmsg++ = htons(info->DrvErrNum);
-
- card_send_command(dev,
- (unsigned char *)&tempbuffer[0],
- (u16) (0x0012 + PSEUDOSZ));
- info->DrvErrNum = 0;
- }
- dev->DrvMsgPend = 0;
-
- break;
- }
-
+ dev->DrvMsgPend = 0;
+ break;
+ }
default:
break;
}
- status = STATUS_SUCCESS;
+ status = 0;
out:
kfree(cmdbuffer);
DEBUG("return from ft1000_proc_drvmsg\n");
return status;
}
-int ft1000_poll(void* dev_id)
+/* Check which application has registered for dsp broadcast messages */
+static int dsp_broadcast_msg_id(struct ft1000_usb *dev)
{
- struct ft1000_usb *dev = (struct ft1000_usb *)dev_id;
- struct ft1000_info *info = netdev_priv(dev->net);
+ struct dpram_blk *pdpram_blk;
+ unsigned long flags;
+ int i;
- u16 tempword;
- u16 status;
- u16 size;
- int i;
- u16 data;
- u16 modulo;
- u16 portid;
- u16 nxtph;
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if ((dev->app_info[i].DspBCMsgFlag)
+ && (dev->app_info[i].fileobject)
+ && (dev->app_info[i].NumOfMsg
+ < MAX_MSG_LIMIT)) {
+ pdpram_blk = ft1000_get_buffer(&freercvpool);
+ if (pdpram_blk == NULL) {
+ DEBUG("Out of memory in free receive command pool\n");
+ dev->app_info[i].nRxMsgMiss++;
+ return -1;
+ }
+ if (ft1000_receive_cmd(dev, pdpram_blk->pbuffer,
+ MAX_CMD_SQSIZE)) {
+ /* Put message into the
+ * appropriate application block
+ */
+ dev->app_info[i].nRxMsg++;
+ spin_lock_irqsave(&free_buff_lock, flags);
+ list_add_tail(&pdpram_blk->list,
+ &dev->app_info[i].app_sqlist);
+ dev->app_info[i].NumOfMsg++;
+ spin_unlock_irqrestore(&free_buff_lock, flags);
+ wake_up_interruptible(&dev->app_info[i]
+ .wait_dpram_msg);
+ } else {
+ dev->app_info[i].nRxMsgMiss++;
+ ft1000_free_buffer(pdpram_blk, &freercvpool);
+ DEBUG("pdpram_blk::ft1000_get_buffer NULL\n");
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
+
+static int handle_misc_portid(struct ft1000_usb *dev)
+{
struct dpram_blk *pdpram_blk;
- struct pseudo_hdr *ppseudo_hdr;
- unsigned long flags;
-
- if (ft1000_chkcard(dev) == FALSE) {
- DEBUG("ft1000_poll::ft1000_chkcard: failed\n");
- return STATUS_FAILURE;
- }
-
- status = ft1000_read_register (dev, &tempword, FT1000_REG_DOORBELL);
-
- if ( !status )
- {
-
- if (tempword & FT1000_DB_DPRAM_RX) {
-
- status = ft1000_read_dpram16(dev, 0x200, (u8 *)&data, 0);
- size = ntohs(data) + 16 + 2;
- if (size % 4) {
- modulo = 4 - (size % 4);
- size = size + modulo;
- }
- status = ft1000_read_dpram16(dev, 0x201, (u8 *)&portid, 1);
- portid &= 0xff;
-
- if (size < MAX_CMD_SQSIZE) {
- switch (portid)
- {
- case DRIVERID:
- DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_DPRAM_RX : portid DRIVERID\n");
-
- status = ft1000_proc_drvmsg (dev, size);
- if (status != STATUS_SUCCESS )
- return status;
- break;
- case DSPBCMSGID:
- // This is a dsp broadcast message
- // Check which application has registered for dsp broadcast messages
-
- for (i=0; i<MAX_NUM_APP; i++) {
- if ( (dev->app_info[i].DspBCMsgFlag) && (dev->app_info[i].fileobject) &&
- (dev->app_info[i].NumOfMsg < MAX_MSG_LIMIT) )
- {
- nxtph = FT1000_DPRAM_RX_BASE + 2;
- pdpram_blk = ft1000_get_buffer (&freercvpool);
- if (pdpram_blk != NULL) {
- if ( ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE, &nxtph) ) {
- ppseudo_hdr = (struct pseudo_hdr *)pdpram_blk->pbuffer;
- // Put message into the appropriate application block
- dev->app_info[i].nRxMsg++;
- spin_lock_irqsave(&free_buff_lock, flags);
- list_add_tail(&pdpram_blk->list, &dev->app_info[i].app_sqlist);
- dev->app_info[i].NumOfMsg++;
- spin_unlock_irqrestore(&free_buff_lock, flags);
- wake_up_interruptible(&dev->app_info[i].wait_dpram_msg);
- }
- else {
- dev->app_info[i].nRxMsgMiss++;
- // Put memory back to free pool
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- DEBUG("pdpram_blk::ft1000_get_buffer NULL\n");
- }
- }
- else {
- DEBUG("Out of memory in free receive command pool\n");
- dev->app_info[i].nRxMsgMiss++;
- }
- }
- }
- break;
- default:
- pdpram_blk = ft1000_get_buffer (&freercvpool);
-
- if (pdpram_blk != NULL) {
- if ( ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE, &nxtph) ) {
- ppseudo_hdr = (struct pseudo_hdr *)pdpram_blk->pbuffer;
- // Search for correct application block
- for (i=0; i<MAX_NUM_APP; i++) {
- if (dev->app_info[i].app_id == ppseudo_hdr->portdest) {
- break;
- }
- }
-
- if (i == MAX_NUM_APP) {
- DEBUG("FT1000:ft1000_parse_dpram_msg: No application matching id = %d\n", ppseudo_hdr->portdest);
- // Put memory back to free pool
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- }
- else {
- if (dev->app_info[i].NumOfMsg > MAX_MSG_LIMIT) {
- // Put memory back to free pool
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- }
- else {
- dev->app_info[i].nRxMsg++;
- // Put message into the appropriate application block
- list_add_tail(&pdpram_blk->list, &dev->app_info[i].app_sqlist);
- dev->app_info[i].NumOfMsg++;
- }
- }
- }
- else {
- // Put memory back to free pool
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- }
- }
- else {
- DEBUG("Out of memory in free receive command pool\n");
- }
- break;
- }
- }
- else {
- DEBUG("FT1000:dpc:Invalid total length for SlowQ = %d\n", size);
- }
- status = ft1000_write_register (dev, FT1000_DB_DPRAM_RX, FT1000_REG_DOORBELL);
- }
- else if (tempword & FT1000_DSP_ASIC_RESET) {
-
- // Let's reset the ASIC from the Host side as well
- status = ft1000_write_register (dev, ASIC_RESET_BIT, FT1000_REG_RESET);
- status = ft1000_read_register (dev, &tempword, FT1000_REG_RESET);
- i = 0;
- while (tempword & ASIC_RESET_BIT) {
- status = ft1000_read_register (dev, &tempword, FT1000_REG_RESET);
- msleep(10);
- i++;
- if (i==100)
- break;
- }
- if (i==100) {
- DEBUG("Unable to reset ASIC\n");
- return STATUS_SUCCESS;
- }
- msleep(10);
- // Program WMARK register
- status = ft1000_write_register (dev, 0x600, FT1000_REG_MAG_WATERMARK);
- // clear ASIC reset doorbell
- status = ft1000_write_register (dev, FT1000_DSP_ASIC_RESET, FT1000_REG_DOORBELL);
- msleep(10);
- }
- else if (tempword & FT1000_ASIC_RESET_REQ) {
- DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_ASIC_RESET_REQ\n");
-
- // clear ASIC reset request from DSP
- status = ft1000_write_register (dev, FT1000_ASIC_RESET_REQ, FT1000_REG_DOORBELL);
- status = ft1000_write_register (dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL);
- // copy dsp session record from Adapter block
- status = ft1000_write_dpram32 (dev, 0, (u8 *)&info->DSPSess.Rec[0], 1024);
- // Program WMARK register
- status = ft1000_write_register (dev, 0x600, FT1000_REG_MAG_WATERMARK);
- // ring doorbell to tell DSP that ASIC is out of reset
- status = ft1000_write_register (dev, FT1000_ASIC_RESET_DSP, FT1000_REG_DOORBELL);
- }
- else if (tempword & FT1000_DB_COND_RESET) {
- DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_COND_RESET\n");
-
- if (!dev->fAppMsgPend) {
- // Reset ASIC and DSP
-
- status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER0, (u8 *)&(info->DSP_TIME[0]), FT1000_MAG_DSP_TIMER0_INDX);
- status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER1, (u8 *)&(info->DSP_TIME[1]), FT1000_MAG_DSP_TIMER1_INDX);
- status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER2, (u8 *)&(info->DSP_TIME[2]), FT1000_MAG_DSP_TIMER2_INDX);
- status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER3, (u8 *)&(info->DSP_TIME[3]), FT1000_MAG_DSP_TIMER3_INDX);
- info->CardReady = 0;
- info->DrvErrNum = DSP_CONDRESET_INFO;
- DEBUG("ft1000_hw:DSP conditional reset requested\n");
- info->ft1000_reset(dev->net);
- }
- else {
- dev->fProvComplete = false;
- dev->fCondResetPend = true;
- }
-
- ft1000_write_register(dev, FT1000_DB_COND_RESET, FT1000_REG_DOORBELL);
- }
-
- }
-
- return STATUS_SUCCESS;
+ int i;
+
+ pdpram_blk = ft1000_get_buffer(&freercvpool);
+ if (pdpram_blk == NULL) {
+ DEBUG("Out of memory in free receive command pool\n");
+ return -1;
+ }
+ if (!ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE))
+ goto exit_failure;
+ /* Search for correct application block */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if (dev->app_info[i].app_id == ((struct pseudo_hdr *)
+ pdpram_blk->pbuffer)->portdest)
+ break;
+ }
+ if (i == MAX_NUM_APP) {
+ DEBUG("FT1000:ft1000_parse_dpram_msg: No application matching id = %d\n", ((struct pseudo_hdr *)pdpram_blk->pbuffer)->portdest);
+ goto exit_failure;
+ } else if (dev->app_info[i].NumOfMsg > MAX_MSG_LIMIT) {
+ goto exit_failure;
+ } else {
+ dev->app_info[i].nRxMsg++;
+ /* Put message into the appropriate application block */
+ list_add_tail(&pdpram_blk->list, &dev->app_info[i].app_sqlist);
+ dev->app_info[i].NumOfMsg++;
+ }
+ return 0;
+
+exit_failure:
+ ft1000_free_buffer(pdpram_blk, &freercvpool);
+ return -1;
+}
+
+int ft1000_poll(void *dev_id)
+{
+ struct ft1000_usb *dev = (struct ft1000_usb *)dev_id;
+ struct ft1000_info *info = netdev_priv(dev->net);
+ u16 tempword;
+ int status;
+ u16 size;
+ int i;
+ u16 data;
+ u16 modulo;
+ u16 portid;
+
+ if (ft1000_chkcard(dev) == FALSE) {
+ DEBUG("ft1000_poll::ft1000_chkcard: failed\n");
+ return -1;
+ }
+ status = ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL);
+ if (!status) {
+ if (tempword & FT1000_DB_DPRAM_RX) {
+ status = ft1000_read_dpram16(dev,
+ 0x200, (u8 *)&data, 0);
+ size = ntohs(data) + 16 + 2;
+ if (size % 4) {
+ modulo = 4 - (size % 4);
+ size = size + modulo;
+ }
+ status = ft1000_read_dpram16(dev, 0x201,
+ (u8 *)&portid, 1);
+ portid &= 0xff;
+ if (size < MAX_CMD_SQSIZE) {
+ switch (portid) {
+ case DRIVERID:
+ DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_DPRAM_RX : portid DRIVERID\n");
+ status = ft1000_proc_drvmsg(dev, size);
+ if (status != 0)
+ return status;
+ break;
+ case DSPBCMSGID:
+ status = dsp_broadcast_msg_id(dev);
+ break;
+ default:
+ status = handle_misc_portid(dev);
+ break;
+ }
+ } else
+ DEBUG("FT1000:dpc:Invalid total length for SlowQ = %d\n", size);
+ status = ft1000_write_register(dev,
+ FT1000_DB_DPRAM_RX,
+ FT1000_REG_DOORBELL);
+ } else if (tempword & FT1000_DSP_ASIC_RESET) {
+ /* Let's reset the ASIC from the Host side as well */
+ status = ft1000_write_register(dev, ASIC_RESET_BIT,
+ FT1000_REG_RESET);
+ status = ft1000_read_register(dev, &tempword,
+ FT1000_REG_RESET);
+ i = 0;
+ while (tempword & ASIC_RESET_BIT) {
+ status = ft1000_read_register(dev, &tempword,
+ FT1000_REG_RESET);
+ usleep_range(9000, 11000);
+ i++;
+ if (i == 100)
+ break;
+ }
+ if (i == 100) {
+ DEBUG("Unable to reset ASIC\n");
+ return 0;
+ }
+ usleep_range(9000, 11000);
+ /* Program WMARK register */
+ status = ft1000_write_register(dev, 0x600,
+ FT1000_REG_MAG_WATERMARK);
+ /* clear ASIC reset doorbell */
+ status = ft1000_write_register(dev,
+ FT1000_DSP_ASIC_RESET,
+ FT1000_REG_DOORBELL);
+ usleep_range(9000, 11000);
+ } else if (tempword & FT1000_ASIC_RESET_REQ) {
+ DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_ASIC_RESET_REQ\n");
+ /* clear ASIC reset request from DSP */
+ status = ft1000_write_register(dev,
+ FT1000_ASIC_RESET_REQ,
+ FT1000_REG_DOORBELL);
+ status = ft1000_write_register(dev, HOST_INTF_BE,
+ FT1000_REG_SUP_CTRL);
+ /* copy dsp session record from Adapter block */
+ status = ft1000_write_dpram32(dev, 0,
+ (u8 *)&info->DSPSess.Rec[0], 1024);
+ status = ft1000_write_register(dev, 0x600,
+ FT1000_REG_MAG_WATERMARK);
+ /* ring doorbell to tell DSP that
+ * ASIC is out of reset
+ */
+ status = ft1000_write_register(dev,
+ FT1000_ASIC_RESET_DSP,
+ FT1000_REG_DOORBELL);
+ } else if (tempword & FT1000_DB_COND_RESET) {
+ DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_COND_RESET\n");
+ if (!dev->fAppMsgPend) {
+ /* Reset ASIC and DSP */
+ status = ft1000_read_dpram16(dev,
+ FT1000_MAG_DSP_TIMER0,
+ (u8 *)&(info->DSP_TIME[0]),
+ FT1000_MAG_DSP_TIMER0_INDX);
+ status = ft1000_read_dpram16(dev,
+ FT1000_MAG_DSP_TIMER1,
+ (u8 *)&(info->DSP_TIME[1]),
+ FT1000_MAG_DSP_TIMER1_INDX);
+ status = ft1000_read_dpram16(dev,
+ FT1000_MAG_DSP_TIMER2,
+ (u8 *)&(info->DSP_TIME[2]),
+ FT1000_MAG_DSP_TIMER2_INDX);
+ status = ft1000_read_dpram16(dev,
+ FT1000_MAG_DSP_TIMER3,
+ (u8 *)&(info->DSP_TIME[3]),
+ FT1000_MAG_DSP_TIMER3_INDX);
+ info->CardReady = 0;
+ info->DrvErrNum = DSP_CONDRESET_INFO;
+ DEBUG("ft1000_hw:DSP conditional reset requested\n");
+ info->ft1000_reset(dev->net);
+ } else {
+ dev->fProvComplete = false;
+ dev->fCondResetPend = true;
+ }
+ ft1000_write_register(dev, FT1000_DB_COND_RESET,
+ FT1000_REG_DOORBELL);
+ }
+ }
+ return 0;
}
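
For readers tracing the rewritten ft1000_poll() above, the condensed sketch below shows the doorbell dispatch pattern it now implements: read FT1000_REG_DOORBELL, fetch the slow-queue message size and port id from DPRAM, route by port id, then acknowledge the doorbell. All identifiers are taken from the hunks above; the helper itself is illustrative only (not part of the patch) and error handling is trimmed.

/* Illustrative only -- condensed from the rewritten ft1000_poll() above. */
static int ft1000_poll_doorbell_sketch(struct ft1000_usb *dev)
{
        u16 doorbell, data, portid, size;
        int status;

        status = ft1000_read_register(dev, &doorbell, FT1000_REG_DOORBELL);
        if (status || !(doorbell & FT1000_DB_DPRAM_RX))
                return status;

        /* slow-queue message length (rounded up to a 4-byte multiple) and
         * port id live in DPRAM at 0x200 and 0x201
         */
        ft1000_read_dpram16(dev, 0x200, (u8 *)&data, 0);
        size = ntohs(data) + 16 + 2;
        if (size % 4)
                size += 4 - (size % 4);
        ft1000_read_dpram16(dev, 0x201, (u8 *)&portid, 1);
        portid &= 0xff;

        if (size < MAX_CMD_SQSIZE) {
                switch (portid) {
                case DRIVERID:
                        status = ft1000_proc_drvmsg(dev, size);
                        break;
                case DSPBCMSGID:
                        status = dsp_broadcast_msg_id(dev);
                        break;
                default:
                        status = handle_misc_portid(dev);
                        break;
                }
        }

        /* acknowledge the doorbell so the DSP can post the next message */
        return ft1000_write_register(dev, FT1000_DB_DPRAM_RX,
                                     FT1000_REG_DOORBELL);
}
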
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
index 5ead942..2575d0d 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
@@ -33,13 +33,13 @@
#define seq_putx(m, message, size, var) \
seq_printf(m, message); \
- for(i = 0; i < (size - 1); i++) \
+ for (i = 0; i < (size - 1); i++) \
seq_printf(m, "%02x:", var[i]); \
seq_printf(m, "%02x\n", var[i])
#define seq_putd(m, message, size, var) \
seq_printf(m, message); \
- for(i = 0; i < (size - 1); i++) \
+ for (i = 0; i < (size - 1); i++) \
seq_printf(m, "%d.", var[i]); \
seq_printf(m, "%d\n", var[i])
@@ -47,14 +47,14 @@
#define FTNET_PROC init_net.proc_net
-int ft1000_read_dpram16 (struct ft1000_usb *ft1000dev, u16 indx,
+int ft1000_read_dpram16(struct ft1000_usb *ft1000dev, u16 indx,
u8 *buffer, u8 highlow);
static int ft1000ReadProc(struct seq_file *m, void *v)
{
- static const char *status[] = {
- "Idle (Disconnect)",
+ static const char *status[] = {
+ "Idle (Disconnect)",
"Searching",
"Active (Connected)",
"Waiting for L2",
@@ -127,10 +127,10 @@ static int ft1000ReadProc(struct seq_file *m, void *v)
}
seq_printf(m, "Connection Time: %02ld:%02ld:%02ld\n",
- ((delta / 3600) % 24), ((delta / 60) % 60), (delta % 60));
+ ((delta / 3600) % 24), ((delta / 60) % 60), (delta % 60));
seq_printf(m, "Connection Time[s]: %ld\n", delta);
seq_printf(m, "Asic ID: %s\n",
- (info->AsicID) == ELECTRABUZZ_ID ? "ELECTRABUZZ ASIC" : "MAGNEMITE ASIC");
+ (info->AsicID) == ELECTRABUZZ_ID ? "ELECTRABUZZ ASIC" : "MAGNEMITE ASIC");
seq_putx(m, "SKU: ", SKUSZ, info->Sku);
seq_putx(m, "EUI64: ", EUISZ, info->eui64);
seq_putd(m, "DSP version number: ", DSPVERSZ, info->DspVer);
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
index a8dd1e5..e40763e 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
@@ -7,7 +7,6 @@
* $Id:
*====================================================
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
@@ -45,13 +44,13 @@ static int ft1000_poll_thread(void *arg)
msleep(10);
if (!gPollingfailed) {
ret = ft1000_poll(arg);
- if (ret != STATUS_SUCCESS) {
+ if (ret != 0) {
DEBUG("ft1000_poll_thread: polling failed\n");
gPollingfailed = true;
}
}
}
- return STATUS_SUCCESS;
+ return 0;
}
static int ft1000_probe(struct usb_interface *interface,
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
index e8d00a9..a6fdd524 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
@@ -11,8 +11,6 @@
#define PSEUDOSZ 16
-#define SUCCESS 0x00
-
struct app_info_block {
u32 nTxMsg; /* DPRAM msg sent to DSP with app_id */
u32 nRxMsg; /* DPRAM msg rcv from dsp with app_id */
@@ -31,9 +29,6 @@ struct app_info_block {
#define FALSE 0
#define TRUE 1
-#define STATUS_SUCCESS 0
-#define STATUS_FAILURE 0x1001
-
#define FT1000_STATUS_CLOSING 0x01
#define DSPBCMSGID 0x10
diff --git a/drivers/staging/fwserial/Kconfig b/drivers/staging/fwserial/Kconfig
index a0812d9..9c7c926 100644
--- a/drivers/staging/fwserial/Kconfig
+++ b/drivers/staging/fwserial/Kconfig
@@ -9,3 +9,23 @@ config FIREWIRE_SERIAL
To compile this driver as a module, say M here: the module will
be called firewire-serial.
+
+if FIREWIRE_SERIAL
+
+config FWTTY_MAX_TOTAL_PORTS
+ int "Maximum number of serial ports supported"
+ default "64"
+ help
+ Set this to the maximum number of serial ports you want the
+ firewire-serial driver to support.
+
+config FWTTY_MAX_CARD_PORTS
+ int "Maximum number of serial ports supported per adapter"
+ range 0 FWTTY_MAX_TOTAL_PORTS
+ default "32"
+ help
+ Set this to the maximum number of serial ports each firewire
+ adapter supports. The actual number of serial ports registered
+ is set with the module parameter "ttys".
+
+endif
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 62df009..8af136e 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -136,14 +136,14 @@ static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
#ifdef FWTTY_PROFILING
-static void profile_fifo_avail(struct fwtty_port *port, unsigned *stat)
+static void fwtty_profile_fifo(struct fwtty_port *port, unsigned *stat)
{
spin_lock_bh(&port->lock);
- profile_size_distrib(stat, dma_fifo_avail(&port->tx_fifo));
+ fwtty_profile_data(stat, dma_fifo_avail(&port->tx_fifo));
spin_unlock_bh(&port->lock);
}
-static void dump_profile(struct seq_file *m, struct stats *stats)
+static void fwtty_dump_profile(struct seq_file *m, struct stats *stats)
{
/* for each stat, print sum of 0 to 2^k, then individually */
int k = 4;
@@ -183,8 +183,8 @@ static void dump_profile(struct seq_file *m, struct stats *stats)
}
#else
-#define profile_fifo_avail(port, stat)
-#define dump_profile(m, stats)
+#define fwtty_profile_fifo(port, stat)
+#define fwtty_dump_profile(m, stats)
#endif
/*
@@ -456,16 +456,27 @@ static int fwtty_write_port_status(struct fwtty_port *port)
return err;
}
-static void __fwtty_throttle(struct fwtty_port *port, struct tty_struct *tty)
+static void fwtty_throttle_port(struct fwtty_port *port)
{
+ struct tty_struct *tty;
unsigned old;
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+
+ spin_lock_bh(&port->lock);
+
old = port->mctrl;
port->mctrl |= OOB_RX_THROTTLE;
if (C_CRTSCTS(tty))
port->mctrl &= ~TIOCM_RTS;
if (~old & OOB_RX_THROTTLE)
__fwtty_write_port_status(port);
+
+ spin_unlock_bh(&port->lock);
+
+ tty_kref_put(tty);
}
/**
@@ -532,80 +543,14 @@ static void fwtty_emit_breaks(struct work_struct *work)
port->icount.brk += brk;
}
-static void fwtty_pushrx(struct work_struct *work)
-{
- struct fwtty_port *port = to_port(work, push);
- struct tty_struct *tty;
- struct buffered_rx *buf, *next;
- int n, c = 0;
-
- spin_lock_bh(&port->lock);
- list_for_each_entry_safe(buf, next, &port->buf_list, list) {
- n = tty_insert_flip_string_fixed_flag(&port->port, buf->data,
- TTY_NORMAL, buf->n);
- c += n;
- port->buffered -= n;
- if (n < buf->n) {
- if (n > 0) {
- memmove(buf->data, buf->data + n, buf->n - n);
- buf->n -= n;
- }
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- __fwtty_throttle(port, tty);
- tty_kref_put(tty);
- }
- break;
- } else {
- list_del(&buf->list);
- kfree(buf);
- }
- }
- if (c > 0)
- tty_flip_buffer_push(&port->port);
-
- if (list_empty(&port->buf_list))
- clear_bit(BUFFERING_RX, &port->flags);
- spin_unlock_bh(&port->lock);
-}
-
-static int fwtty_buffer_rx(struct fwtty_port *port, unsigned char *d, size_t n)
-{
- struct buffered_rx *buf;
- size_t size = (n + sizeof(struct buffered_rx) + 0xFF) & ~0xFF;
-
- if (port->buffered + n > HIGH_WATERMARK) {
- fwtty_err_ratelimited(port, "overflowed rx buffer: buffered: %d new: %zu wtrmk: %d\n",
- port->buffered, n, HIGH_WATERMARK);
- return 0;
- }
- buf = kmalloc(size, GFP_ATOMIC);
- if (!buf)
- return 0;
- INIT_LIST_HEAD(&buf->list);
- buf->n = n;
- memcpy(buf->data, d, n);
-
- spin_lock_bh(&port->lock);
- list_add_tail(&buf->list, &port->buf_list);
- port->buffered += n;
- if (port->buffered > port->stats.watermark)
- port->stats.watermark = port->buffered;
- set_bit(BUFFERING_RX, &port->flags);
- spin_unlock_bh(&port->lock);
-
- return n;
-}
-
static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
{
- struct tty_struct *tty;
int c, n = len;
unsigned lsr;
int err = 0;
fwtty_dbg(port, "%d\n", n);
- profile_size_distrib(port->stats.reads, n);
+ fwtty_profile_data(port->stats.reads, n);
if (port->write_only) {
n = 0;
@@ -636,31 +581,24 @@ static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
goto out;
}
- if (!test_bit(BUFFERING_RX, &port->flags)) {
- c = tty_insert_flip_string_fixed_flag(&port->port, data,
- TTY_NORMAL, n);
- if (c > 0)
- tty_flip_buffer_push(&port->port);
- n -= c;
-
- if (n) {
- /* start buffering and throttling */
- n -= fwtty_buffer_rx(port, &data[c], n);
-
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- spin_lock_bh(&port->lock);
- __fwtty_throttle(port, tty);
- spin_unlock_bh(&port->lock);
- tty_kref_put(tty);
- }
- }
- } else
- n -= fwtty_buffer_rx(port, data, n);
+ c = tty_insert_flip_string_fixed_flag(&port->port, data, TTY_NORMAL, n);
+ if (c > 0)
+ tty_flip_buffer_push(&port->port);
+ n -= c;
if (n) {
port->overrun = true;
err = -EIO;
+ fwtty_err_ratelimited(port, "flip buffer overrun\n");
+
+ } else {
+ /* throttle the sender if remaining flip buffer space has
+ * reached high watermark to avoid losing data which may be
+ * in-flight. Since the AR request context is 32k, that much
+ * data may have _already_ been acked.
+ */
+ if (tty_buffer_space_avail(&port->port) < HIGH_WATERMARK)
+ fwtty_throttle_port(port);
}
out:
@@ -821,7 +759,7 @@ static int fwtty_tx(struct fwtty_port *port, bool drain)
if (n == -EAGAIN)
++port->stats.tx_stall;
else if (n == -ENODATA)
- profile_size_distrib(port->stats.txns, 0);
+ fwtty_profile_data(port->stats.txns, 0);
else {
++port->stats.fifo_errs;
fwtty_err_ratelimited(port, "fifo err: %d\n",
@@ -830,7 +768,7 @@ static int fwtty_tx(struct fwtty_port *port, bool drain)
break;
}
- profile_size_distrib(port->stats.txns, txn->dma_pended.len);
+ fwtty_profile_data(port->stats.txns, txn->dma_pended.len);
fwtty_send_txn_async(peer, txn, TCODE_WRITE_BLOCK_REQUEST,
peer->fifo_addr, txn->dma_pended.data,
@@ -1101,20 +1039,13 @@ static int fwtty_port_activate(struct tty_port *tty_port,
static void fwtty_port_shutdown(struct tty_port *tty_port)
{
struct fwtty_port *port = to_port(tty_port, port);
- struct buffered_rx *buf, *next;
/* TODO: cancel outstanding transactions */
cancel_delayed_work_sync(&port->emit_breaks);
cancel_delayed_work_sync(&port->drain);
- cancel_work_sync(&port->push);
spin_lock_bh(&port->lock);
- list_for_each_entry_safe(buf, next, &port->buf_list, list) {
- list_del(&buf->list);
- kfree(buf);
- }
- port->buffered = 0;
port->flags = 0;
port->break_ctl = 0;
port->overrun = 0;
@@ -1184,7 +1115,7 @@ static int fwtty_write(struct tty_struct *tty, const unsigned char *buf, int c)
int n, len;
fwtty_dbg(port, "%d\n", c);
- profile_size_distrib(port->stats.writes, c);
+ fwtty_profile_data(port->stats.writes, c);
spin_lock_bh(&port->lock);
n = dma_fifo_in(&port->tx_fifo, buf, c);
@@ -1262,9 +1193,7 @@ static void fwtty_unthrottle(struct tty_struct *tty)
fwtty_dbg(port, "CRTSCTS: %d\n", (C_CRTSCTS(tty) != 0));
- profile_fifo_avail(port, port->stats.unthrottle);
-
- schedule_work(&port->push);
+ fwtty_profile_fifo(port, port->stats.unthrottle);
spin_lock_bh(&port->lock);
port->mctrl &= ~OOB_RX_THROTTLE;
@@ -1523,15 +1452,14 @@ static void fwtty_debugfs_show_port(struct seq_file *m, struct fwtty_port *port)
seq_printf(m, " dr:%d st:%d err:%d lost:%d", stats.dropped,
stats.tx_stall, stats.fifo_errs, stats.lost);
- seq_printf(m, " pkts:%d thr:%d wtrmk:%d", stats.sent, stats.throttled,
- stats.watermark);
+ seq_printf(m, " pkts:%d thr:%d", stats.sent, stats.throttled);
if (port->port.console) {
seq_puts(m, "\n ");
(*port->fwcon_ops->proc_show)(m, port->con_data);
}
- dump_profile(m, &port->stats);
+ fwtty_dump_profile(m, &port->stats);
}
static void fwtty_debugfs_show_peer(struct seq_file *m, struct fwtty_peer *peer)
@@ -2297,13 +2225,12 @@ static int fwserial_create(struct fw_unit *unit)
port->index = FWTTY_INVALID_INDEX;
port->port.ops = &fwtty_port_ops;
port->serial = serial;
+ tty_buffer_set_limit(&port->port, 128 * 1024);
spin_lock_init(&port->lock);
INIT_DELAYED_WORK(&port->drain, fwtty_drain_tx);
INIT_DELAYED_WORK(&port->emit_breaks, fwtty_emit_breaks);
INIT_WORK(&port->hangup, fwtty_do_hangup);
- INIT_WORK(&port->push, fwtty_pushrx);
- INIT_LIST_HEAD(&port->buf_list);
init_waitqueue_head(&port->wait_tx);
port->max_payload = link_speed_to_max_payload(SCODE_100);
dma_fifo_init(&port->tx_fifo);
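
The comment added in the fwtty_rx() hunk above spells out the new backpressure model: the buffered-rx machinery is removed, received data goes straight into the tty flip buffer (whose per-port limit is raised to 128 KiB via tty_buffer_set_limit()), and the remote sender is throttled once tty_buffer_space_avail() drops below HIGH_WATERMARK. The reduced sketch below restates that receive path with locking and statistics trimmed; it is illustrative only, not part of the patch.

/* Illustrative only -- reduced from the fwtty_rx()/fwtty_throttle_port()
 * hunks above; locking, stats and status-byte handling are trimmed.
 */
static int fwtty_rx_sketch(struct fwtty_port *port, unsigned char *data,
                           size_t len)
{
        int c, n = len;

        /* hand the data straight to the flip buffer */
        c = tty_insert_flip_string_fixed_flag(&port->port, data, TTY_NORMAL, n);
        if (c > 0)
                tty_flip_buffer_push(&port->port);
        n -= c;

        if (n) {
                /* flip buffer overrun: the remainder is dropped */
                port->overrun = true;
                return -EIO;
        }

        /* throttle the remote sender while in-flight data (up to the 32k AR
         * request context) may still arrive
         */
        if (tty_buffer_space_avail(&port->port) < HIGH_WATERMARK)
                fwtty_throttle_port(port);

        return 0;
}
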
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index 2463501..54f7f9b 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -22,14 +22,14 @@
#ifdef FWTTY_PROFILING
#define DISTRIBUTION_MAX_SIZE 8192
#define DISTRIBUTION_MAX_INDEX (ilog2(DISTRIBUTION_MAX_SIZE) + 1)
-static inline void profile_size_distrib(unsigned stat[], unsigned val)
+static inline void fwtty_profile_data(unsigned stat[], unsigned val)
{
int n = (val) ? min(ilog2(val) + 1, DISTRIBUTION_MAX_INDEX) : 0;
++stat[n];
}
#else
#define DISTRIBUTION_MAX_INDEX 0
-#define profile_size_distrib(st, n)
+#define fwtty_profile_data(st, n)
#endif
/* Parameters for both VIRT_CABLE_PLUG & VIRT_CABLE_PLUG_RSP mgmt codes */
@@ -166,7 +166,6 @@ struct stats {
unsigned sent;
unsigned lost;
unsigned throttled;
- unsigned watermark;
unsigned reads[DISTRIBUTION_MAX_INDEX + 1];
unsigned writes[DISTRIBUTION_MAX_INDEX + 1];
unsigned txns[DISTRIBUTION_MAX_INDEX + 1];
@@ -183,12 +182,6 @@ struct fwconsole_ops {
#define FWCON_NOTIFY_ATTACH 1
#define FWCON_NOTIFY_DETACH 2
-struct buffered_rx {
- struct list_head list;
- size_t n;
- unsigned char data[0];
-};
-
/**
* fwtty_port: structure used to track/represent underlying tty_port
* @port: underlying tty_port
@@ -223,11 +216,6 @@ struct buffered_rx {
* The work can race with the writer but concurrent sending is
* prevented with the IN_TX flag. Scheduled under lock to
* limit scheduling when fifo has just been drained.
- * @push: work responsible for pushing buffered rx to the ldisc.
- * rx can become buffered if the tty buffer is filled before the
- * ldisc throttles the sender.
- * @buf_list: list of buffered rx yet to be sent to ldisc
- * @buffered: byte count of buffered rx
* @tx_fifo: fifo used to store & block-up writes for dma to remote
* @max_payload: max bytes transmissable per dma (based on peer's max_payload)
* @status_mask: UART_LSR_* bitmask significant to rx (based on termios)
@@ -267,9 +255,6 @@ struct fwtty_port {
spinlock_t lock;
unsigned mctrl;
struct delayed_work drain;
- struct work_struct push;
- struct list_head buf_list;
- int buffered;
struct dma_fifo tx_fifo;
int max_payload;
unsigned status_mask;
@@ -291,7 +276,6 @@ struct fwtty_port {
/* bit #s for flags field */
#define IN_TX 0
#define STOP_TX 1
-#define BUFFERING_RX 2
/* bitmasks for special mctrl/mstatus bits */
#define OOB_RX_THROTTLE 0x00010000
@@ -307,8 +291,8 @@ struct fwtty_port {
#define FREQ_BREAKS (HZ / 50)
/* Ports are allocated in blocks of num_ports for each fw_card */
-#define MAX_CARD_PORTS 32 /* max # of ports per card */
-#define MAX_TOTAL_PORTS 64 /* max # of ports total */
+#define MAX_CARD_PORTS CONFIG_FWTTY_MAX_CARD_PORTS
+#define MAX_TOTAL_PORTS CONFIG_FWTTY_MAX_TOTAL_PORTS
/* tuning parameters */
#define FWTTY_PORT_TXFIFO_LEN 4096
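
fwtty_profile_data() (the renamed profile_size_distrib()) buckets each transfer size into its power-of-two bin up to DISTRIBUTION_MAX_SIZE. A plain C model of the same bucketing, with ilog2_u() standing in for the kernel's ilog2():

#include <stdio.h>

#define DIST_MAX_SIZE   8192
#define DIST_MAX_INDEX  14      /* ilog2(8192) + 1 */

static int ilog2_u(unsigned int v)      /* stand-in for the kernel's ilog2() */
{
        int n = -1;

        while (v) {
                v >>= 1;
                n++;
        }
        return n;
}

static void profile_data(unsigned int stat[], unsigned int val)
{
        int n = val ? ilog2_u(val) + 1 : 0;

        if (n > DIST_MAX_INDEX)
                n = DIST_MAX_INDEX;
        ++stat[n];
}

int main(void)
{
        unsigned int reads[DIST_MAX_INDEX + 1] = { 0 };

        profile_data(reads, 0);         /* lands in bucket 0           */
        profile_data(reads, 512);       /* lands in bucket 10          */
        profile_data(reads, 9000);      /* clamped to the last bucket  */
        printf("%u %u %u\n", reads[0], reads[10], reads[DIST_MAX_INDEX]);
        return 0;
}
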
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index c57a6ba..74a0360 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -44,18 +44,6 @@
*/
#define DEFAULT_MTU_SIZE 1500
-#define gdm_dev_endian(n) (\
- n->phy_dev->get_endian(n->phy_dev->priv_dev))
-
-#define gdm_lte_hci_send(n, d, l) (\
- n->phy_dev->send_hci_func(n->phy_dev->priv_dev, d, l, NULL, NULL))
-
-#define gdm_lte_sdu_send(n, d, l, c, b, i, t) (\
- n->phy_dev->send_sdu_func(n->phy_dev->priv_dev, d, l, n->pdn_table.dft_eps_id, 0, c, b, i, t))
-
-#define gdm_lte_rcv_with_cb(n, c, b, e) (\
- n->rcv_func(n->priv_dev, c, b, e))
-
#define IP_VERSION_4 4
#define IP_VERSION_6 6
@@ -458,13 +446,11 @@ static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
sscanf(dev->name, "lte%d", &idx);
- ret = gdm_lte_sdu_send(nic,
- data_buf,
- data_len,
- tx_complete,
- nic,
- idx,
- nic_type);
+ ret = nic->phy_dev->send_sdu_func(nic->phy_dev->priv_dev,
+ data_buf, data_len,
+ nic->pdn_table.dft_eps_id, 0,
+ tx_complete, nic, idx,
+ nic_type);
if (ret == TX_NO_BUFFER || ret == TX_NO_SPC) {
netif_stop_queue(dev);
@@ -503,14 +489,18 @@ static int gdm_lte_event_send(struct net_device *dev, char *buf, int len)
sscanf(dev->name, "lte%d", &idx);
return netlink_send(lte_event.sock, idx, 0, buf,
- gdm_dev16_to_cpu(gdm_dev_endian(nic), hci->len) + HCI_HEADER_SIZE);
+ gdm_dev16_to_cpu(
+ nic->phy_dev->get_endian(
+ nic->phy_dev->priv_dev), hci->len)
+ + HCI_HEADER_SIZE);
}
static void gdm_lte_event_rcv(struct net_device *dev, u16 type, void *msg, int len)
{
struct nic *nic = netdev_priv(dev);
- gdm_lte_hci_send(nic, msg, len);
+ nic->phy_dev->send_hci_func(nic->phy_dev->priv_dev, msg, len, NULL,
+ NULL);
}
int gdm_lte_event_init(void)
@@ -688,8 +678,14 @@ static void gdm_lte_pdn_table(struct net_device *dev, char *buf, int len)
if (pdn_table->activate) {
nic->pdn_table.activate = pdn_table->activate;
- nic->pdn_table.dft_eps_id = gdm_dev32_to_cpu(gdm_dev_endian(nic), pdn_table->dft_eps_id);
- nic->pdn_table.nic_type = gdm_dev32_to_cpu(gdm_dev_endian(nic), pdn_table->nic_type);
+ nic->pdn_table.dft_eps_id = gdm_dev32_to_cpu(
+ nic->phy_dev->get_endian(
+ nic->phy_dev->priv_dev),
+ pdn_table->dft_eps_id);
+ nic->pdn_table.nic_type = gdm_dev32_to_cpu(
+ nic->phy_dev->get_endian(
+ nic->phy_dev->priv_dev),
+ pdn_table->nic_type);
netdev_info(dev, "pdn activated, nic_type=0x%x\n",
nic->pdn_table.nic_type);
@@ -762,7 +758,7 @@ void start_rx_proc(struct phy_dev *phy_dev)
int i;
for (i = 0; i < MAX_RX_SUBMIT_COUNT; i++)
- gdm_lte_rcv_with_cb(phy_dev, rx_complete, phy_dev, USB_COMPLETE);
+ phy_dev->rcv_func(phy_dev->priv_dev, rx_complete, phy_dev, USB_COMPLETE);
}
static struct net_device_ops gdm_netdev_ops = {
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 6216367..2fa3a5a 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -158,7 +158,6 @@ static int up_to_host(struct mux_rx *r)
unsigned int start_flag;
unsigned int payload_size;
unsigned short packet_type;
- int remain;
int dummy_cnt;
u32 packet_size_sum = r->offset;
int index;
@@ -176,8 +175,7 @@ static int up_to_host(struct mux_rx *r)
break;
}
- remain = (MUX_HEADER_SIZE + payload_size) % 4;
- dummy_cnt = remain ? (4-remain) : 0;
+ dummy_cnt = ALIGN(MUX_HEADER_SIZE + payload_size, 4) - (MUX_HEADER_SIZE + payload_size);
if (len - packet_size_sum <
MUX_HEADER_SIZE + payload_size + dummy_cnt) {
@@ -361,7 +359,6 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
struct mux_pkt_header *mux_header;
struct mux_tx *t = NULL;
static u32 seq_num = 1;
- int remain;
int dummy_cnt;
int total_len;
int ret;
@@ -375,8 +372,7 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
spin_lock_irqsave(&mux_dev->write_lock, flags);
- remain = (MUX_HEADER_SIZE + len) % 4;
- dummy_cnt = remain ? (4 - remain) : 0;
+ dummy_cnt = ALIGN(MUX_HEADER_SIZE + len, 4) - (MUX_HEADER_SIZE + len);
total_len = len + MUX_HEADER_SIZE + dummy_cnt;
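
Both gdm_mux hunks replace the open-coded modulo arithmetic: the padding needed to round MUX_HEADER_SIZE plus the payload up to a 4-byte boundary is the aligned value minus the original, which is exactly what the old "remain ? 4 - remain : 0" computed. A tiny sketch of that identity (not part of the patch):

#include <linux/kernel.h>       /* ALIGN() */

static inline int mux_pad_bytes(int header_and_payload)
{
        /* distance from x up to the next multiple of 4 */
        return ALIGN(header_and_payload, 4) - header_and_payload;
}

/*
 * mux_pad_bytes(8)  == 0
 * mux_pad_bytes(9)  == 3
 * mux_pad_bytes(11) == 1
 */
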
diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
index c0f7cd7..fe47cd3 100644
--- a/drivers/staging/gdm724x/gdm_tty.c
+++ b/drivers/staging/gdm724x/gdm_tty.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index 781134a..33458a5 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -830,24 +830,19 @@ static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id
if (bInterfaceNumber > NETWORK_INTERFACE) {
pr_info("not a network device\n");
- return -1;
+ return -ENODEV;
}
- phy_dev = kmalloc(sizeof(struct phy_dev), GFP_ATOMIC);
- if (!phy_dev) {
- ret = -ENOMEM;
- goto out;
- }
+ phy_dev = kzalloc(sizeof(struct phy_dev), GFP_KERNEL);
+ if (!phy_dev)
+ return -ENOMEM;
- udev = kmalloc(sizeof(struct lte_udev), GFP_ATOMIC);
+ udev = kzalloc(sizeof(struct lte_udev), GFP_KERNEL);
if (!udev) {
ret = -ENOMEM;
- goto out;
+ goto err_udev;
}
- memset(phy_dev, 0, sizeof(struct phy_dev));
- memset(udev, 0, sizeof(struct lte_udev));
-
phy_dev->priv_dev = (void *)udev;
phy_dev->send_hci_func = gdm_usb_hci_send;
phy_dev->send_sdu_func = gdm_usb_sdu_send;
@@ -858,7 +853,7 @@ static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id
ret = init_usb(udev);
if (ret < 0) {
pr_err("init_usb func failed\n");
- goto out;
+ goto err_init_usb;
}
udev->intf = intf;
@@ -875,23 +870,22 @@ static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id
ret = request_mac_address(udev);
if (ret < 0) {
pr_err("request Mac address failed\n");
- goto out;
+ goto err_mac_address;
}
start_rx_proc(phy_dev);
-out:
-
- if (ret < 0) {
- kfree(phy_dev);
- if (udev) {
- release_usb(udev);
- kfree(udev);
- }
- }
-
usb_get_dev(usbdev);
usb_set_intfdata(intf, phy_dev);
+ return 0;
+
+err_mac_address:
+ release_usb(udev);
+err_init_usb:
+ kfree(udev);
+err_udev:
+ kfree(phy_dev);
+
return ret;
}
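
The gdm_usb_probe() rework swaps the atomic kmalloc()+memset() pairs for kzalloc(GFP_KERNEL), since probe runs in process context, and replaces the single catch-all "out:" label with ordered error labels that unwind in reverse order of acquisition. A minimal sketch of that idiom with made-up names (thing_a, thing_b, example_hw_init()):

#include <linux/device.h>
#include <linux/slab.h>

struct thing_a { int dummy; };
struct thing_b { int dummy; };

static int example_hw_init(struct thing_b *b)   /* hypothetical hw setup */
{
        return 0;
}

static int example_probe(struct device *dev)
{
        struct thing_a *a;
        struct thing_b *b;
        int ret;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        b = kzalloc(sizeof(*b), GFP_KERNEL);
        if (!b) {
                ret = -ENOMEM;
                goto err_free_a;
        }

        ret = example_hw_init(b);
        if (ret)
                goto err_free_b;

        dev_set_drvdata(dev, a);        /* keep the allocations reachable */
        return 0;

err_free_b:                             /* labels run in reverse order   */
        kfree(b);
err_free_a:
        kfree(a);
        return ret;
}
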
diff --git a/drivers/staging/gdm72xx/gdm_qos.c b/drivers/staging/gdm72xx/gdm_qos.c
index cc36924..50d43ad 100644
--- a/drivers/staging/gdm72xx/gdm_qos.c
+++ b/drivers/staging/gdm72xx/gdm_qos.c
@@ -97,7 +97,7 @@ void gdm_qos_init(void *nic_ptr)
struct qos_cb_s *qcb = &nic->qos;
int i;
- for (i = 0 ; i < QOS_MAX; i++) {
+ for (i = 0; i < QOS_MAX; i++) {
INIT_LIST_HEAD(&qcb->qos_list[i]);
qcb->csr[i].qos_buf_count = 0;
qcb->csr[i].enabled = 0;
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c
index e0cb2ff..f8788bf 100644
--- a/drivers/staging/gdm72xx/gdm_usb.c
+++ b/drivers/staging/gdm72xx/gdm_usb.c
@@ -780,9 +780,10 @@ static int k_mode_thread(void *arg)
spin_lock_irqsave(&k_lock, flags2);
}
+ wait_event_interruptible_lock_irq(k_wait,
+ !list_empty(&k_list) || k_mode_stop,
+ k_lock);
spin_unlock_irqrestore(&k_lock, flags2);
-
- interruptible_sleep_on(&k_wait);
}
return 0;
}
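
interruptible_sleep_on() is racy because a wakeup can arrive between the caller's last check and the sleep; wait_event_interruptible_lock_irq() evaluates the condition with the spinlock held and only drops it while actually sleeping. A self-contained sketch of the consumer side, with made-up names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static LIST_HEAD(work_list);
static DEFINE_SPINLOCK(work_lock);
static DECLARE_WAIT_QUEUE_HEAD(work_wait);
static bool work_stop;

static int wait_for_work_example(void)
{
        int ret;

        spin_lock_irq(&work_lock);
        ret = wait_event_interruptible_lock_irq(work_wait,
                                !list_empty(&work_list) || work_stop,
                                work_lock);
        /* returns with work_lock held; ret is -ERESTARTSYS on a signal */
        spin_unlock_irq(&work_lock);

        return ret;
}

/* A producer adds to work_list under work_lock, then calls wake_up(&work_wait). */
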
diff --git a/drivers/staging/gdm72xx/sdio_boot.c b/drivers/staging/gdm72xx/sdio_boot.c
index 4302fcb..cbe5dcf 100644
--- a/drivers/staging/gdm72xx/sdio_boot.c
+++ b/drivers/staging/gdm72xx/sdio_boot.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c
index 81e2ad4..eca0873 100644
--- a/drivers/staging/goldfish/goldfish_nand.c
+++ b/drivers/staging/goldfish/goldfish_nand.c
@@ -22,7 +22,6 @@
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/vmalloc.h>
-#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index 4c9364b..6f38ca9 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -439,13 +439,13 @@ static int adis16220_probe(struct spi_device *spi)
indio_dev->channels = adis16220_channels;
indio_dev->num_channels = ARRAY_SIZE(adis16220_channels);
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
return ret;
ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &accel_bin);
if (ret)
- goto error_unregister_dev;
+ return ret;
ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc1_bin);
if (ret)
@@ -470,8 +470,6 @@ error_rm_adc1_bin:
sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
error_rm_accel_bin:
sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
-error_unregister_dev:
- iio_device_unregister(indio_dev);
return ret;
}
@@ -482,7 +480,6 @@ static int adis16220_remove(struct spi_device *spi)
sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
- iio_device_unregister(indio_dev);
return 0;
}
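
devm_iio_device_register() ties the unregister to the device's lifetime, which is why the error-path unregister and the unregister in .remove() disappear in the hunks above. A minimal probe sketch, assuming nothing else needs to be torn down before the unregister; the names are illustrative:

#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

static const struct iio_info example_info = {
        .driver_module = THIS_MODULE,
};

static int example_spi_probe(struct spi_device *spi)
{
        struct iio_dev *indio_dev;

        indio_dev = devm_iio_device_alloc(&spi->dev, 0);
        if (!indio_dev)
                return -ENOMEM;

        indio_dev->name = "example";
        indio_dev->info = &example_info;
        indio_dev->modes = INDIO_DIRECT_MODE;

        /* unregistered (and freed) automatically when the driver detaches */
        return devm_iio_device_register(&spi->dev, indio_dev);
}
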
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index 735c0a3..898653c 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -676,10 +676,10 @@ static const struct attribute_group lis3l02dq_attribute_group = {
static const struct iio_info lis3l02dq_info = {
.read_raw = &lis3l02dq_read_raw,
.write_raw = &lis3l02dq_write_raw,
- .read_event_value_new = &lis3l02dq_read_thresh,
- .write_event_value_new = &lis3l02dq_write_thresh,
- .write_event_config_new = &lis3l02dq_write_event_config,
- .read_event_config_new = &lis3l02dq_read_event_config,
+ .read_event_value = &lis3l02dq_read_thresh,
+ .write_event_value = &lis3l02dq_write_thresh,
+ .write_event_config = &lis3l02dq_write_event_config,
+ .read_event_config = &lis3l02dq_read_event_config,
.driver_module = THIS_MODULE,
.attrs = &lis3l02dq_attribute_group,
};
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index c49e6ef..7f6ccdf 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -1126,20 +1126,20 @@ static const struct iio_info sca3000_info = {
.attrs = &sca3000_attribute_group,
.read_raw = &sca3000_read_raw,
.event_attrs = &sca3000_event_attribute_group,
- .read_event_value_new = &sca3000_read_thresh,
- .write_event_value_new = &sca3000_write_thresh,
- .read_event_config_new = &sca3000_read_event_config,
- .write_event_config_new = &sca3000_write_event_config,
+ .read_event_value = &sca3000_read_thresh,
+ .write_event_value = &sca3000_write_thresh,
+ .read_event_config = &sca3000_read_event_config,
+ .write_event_config = &sca3000_write_event_config,
.driver_module = THIS_MODULE,
};
static const struct iio_info sca3000_info_with_temp = {
.attrs = &sca3000_attribute_group_with_temp,
.read_raw = &sca3000_read_raw,
- .read_event_value_new = &sca3000_read_thresh,
- .write_event_value_new = &sca3000_write_thresh,
- .read_event_config_new = &sca3000_read_event_config,
- .write_event_config_new = &sca3000_write_event_config,
+ .read_event_value = &sca3000_read_thresh,
+ .write_event_value = &sca3000_write_thresh,
+ .read_event_config = &sca3000_read_event_config,
+ .write_event_config = &sca3000_write_event_config,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 8209fa5..1ac11f6 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -134,6 +134,8 @@ struct ad7280_state {
unsigned char aux_threshhigh;
unsigned char aux_threshlow;
unsigned char cb_mask[AD7280A_MAX_CHAIN];
+
+ __be32 buf[2] ____cacheline_aligned;
};
static void ad7280_crc8_build_table(unsigned char *crc_tab)
@@ -189,22 +191,22 @@ static void ad7280_delay(struct ad7280_state *st)
msleep(1);
}
-static int __ad7280_read32(struct spi_device *spi, unsigned *val)
+static int __ad7280_read32(struct ad7280_state *st, unsigned *val)
{
- unsigned rx_buf, tx_buf = cpu_to_be32(AD7280A_READ_TXVAL);
int ret;
-
struct spi_transfer t = {
- .tx_buf = &tx_buf,
- .rx_buf = &rx_buf,
+ .tx_buf = &st->buf[0],
+ .rx_buf = &st->buf[1],
.len = 4,
};
- ret = spi_sync_transfer(spi, &t, 1);
+ st->buf[0] = cpu_to_be32(AD7280A_READ_TXVAL);
+
+ ret = spi_sync_transfer(st->spi, &t, 1);
if (ret)
return ret;
- *val = be32_to_cpu(rx_buf);
+ *val = be32_to_cpu(st->buf[1]);
return 0;
}
@@ -216,9 +218,9 @@ static int ad7280_write(struct ad7280_state *st, unsigned devaddr,
(val & 0xFF) << 13 | all << 12);
reg |= ad7280_calc_crc8(st->crc_tab, reg >> 11) << 3 | 0x2;
- reg = cpu_to_be32(reg);
+ st->buf[0] = cpu_to_be32(reg);
- return spi_write(st->spi, &reg, 4);
+ return spi_write(st->spi, &st->buf[0], 4);
}
static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
@@ -248,7 +250,7 @@ static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
if (ret)
return ret;
- __ad7280_read32(st->spi, &tmp);
+ __ad7280_read32(st, &tmp);
if (ad7280_check_crc(st, tmp))
return -EIO;
@@ -286,7 +288,7 @@ static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr,
ad7280_delay(st);
- __ad7280_read32(st->spi, &tmp);
+ __ad7280_read32(st, &tmp);
if (ad7280_check_crc(st, tmp))
return -EIO;
@@ -319,7 +321,7 @@ static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt,
ad7280_delay(st);
for (i = 0; i < cnt; i++) {
- __ad7280_read32(st->spi, &tmp);
+ __ad7280_read32(st, &tmp);
if (ad7280_check_crc(st, tmp))
return -EIO;
@@ -362,7 +364,7 @@ static int ad7280_chain_setup(struct ad7280_state *st)
return ret;
for (n = 0; n <= AD7280A_MAX_CHAIN; n++) {
- __ad7280_read32(st->spi, &val);
+ __ad7280_read32(st, &val);
if (val == 0)
return n - 1;
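
The ad7280a change moves the SPI transfer words off the callers' stacks into the driver state, marked ____cacheline_aligned, because buffers handed to the SPI core may be used for DMA and must not share a cache line with other state. A sketch of the pattern with a made-up command word:

#include <asm/byteorder.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

struct example_state {
        struct spi_device *spi;
        /* nothing else may share this cache line with the DMA buffers */
        __be32 tx ____cacheline_aligned;
        __be32 rx;
};

static int example_read32(struct example_state *st, u32 *val)
{
        struct spi_transfer t = {
                .tx_buf = &st->tx,
                .rx_buf = &st->rx,
                .len = 4,
        };
        int ret;

        st->tx = cpu_to_be32(0x12345678);       /* made-up command word */

        ret = spi_sync_transfer(st->spi, &t, 1);
        if (ret)
                return ret;

        *val = be32_to_cpu(st->rx);
        return 0;
}
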
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index d13f8ae..357cef2 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -452,10 +452,10 @@ static const struct iio_chan_spec ad7291_channels[] = {
static const struct iio_info ad7291_info = {
.read_raw = &ad7291_read_raw,
- .read_event_config_new = &ad7291_read_event_config,
- .write_event_config_new = &ad7291_write_event_config,
- .read_event_value_new = &ad7291_read_event_value,
- .write_event_value_new = &ad7291_write_event_value,
+ .read_event_config = &ad7291_read_event_config,
+ .write_event_config = &ad7291_write_event_config,
+ .read_event_value = &ad7291_read_event_value,
+ .write_event_value = &ad7291_write_event_value,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 2083673..f0f05f1 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -239,7 +239,12 @@ static const struct attribute_group ad7606_attribute_group_range = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
.scan_index = num, \
- .scan_type = IIO_ST('s', 16, 16, 0), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
}
static const struct iio_chan_spec ad7606_8_channels[] = {
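
The ad7606 hunk spells out the scan_type fields that the removed IIO_ST('s', 16, 16, 0) helper packed positionally. For reference, a standalone channel definition using the same explicit form (values illustrative):

#include <linux/iio/iio.h>

static const struct iio_chan_spec example_chan = {
        .type = IIO_VOLTAGE,
        .indexed = 1,
        .channel = 0,
        .scan_index = 0,
        .scan_type = {
                .sign = 's',            /* signed samples              */
                .realbits = 16,         /* significant bits            */
                .storagebits = 16,      /* bits used in the buffer     */
                .shift = 0,             /* right shift to apply        */
                .endianness = IIO_CPU,  /* layout of the stored word   */
        },
};
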
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 9f48e5c..2369cf2 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -412,7 +412,7 @@ static int ad7816_probe(struct spi_device *spi_dev)
return ret;
}
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&spi_dev->dev, indio_dev);
if (ret)
return ret;
@@ -422,15 +422,6 @@ static int ad7816_probe(struct spi_device *spi_dev)
return 0;
}
-static int ad7816_remove(struct spi_device *spi_dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(&spi_dev->dev);
-
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
static const struct spi_device_id ad7816_id[] = {
{ "ad7816", 0 },
{ "ad7817", 0 },
@@ -446,7 +437,6 @@ static struct spi_driver ad7816_driver = {
.owner = THIS_MODULE,
},
.probe = ad7816_probe,
- .remove = ad7816_remove,
.id_table = ad7816_id,
};
module_spi_driver(ad7816_driver);
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 9428be8..5ea3641 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -377,9 +377,9 @@ static const struct iio_info ad7991_info = {
static const struct iio_info ad7993_4_7_8_info = {
.read_raw = &ad799x_read_raw,
.event_attrs = &ad799x_event_attrs_group,
- .read_event_config_new = &ad799x_read_event_config,
- .read_event_value_new = &ad799x_read_event_value,
- .write_event_value_new = &ad799x_write_event_value,
+ .read_event_config = &ad799x_read_event_config,
+ .read_event_value = &ad799x_read_event_value,
+ .write_event_value = &ad799x_write_event_value,
.driver_module = THIS_MODULE,
.update_scan_mode = ad7997_8_update_scan_mode,
};
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
index ef0a21d..a876ce7 100644
--- a/drivers/staging/iio/adc/lpc32xx_adc.c
+++ b/drivers/staging/iio/adc/lpc32xx_adc.c
@@ -183,7 +183,7 @@ static int lpc32xx_adc_probe(struct platform_device *pdev)
iodev->channels = lpc32xx_adc_iio_channels;
iodev->num_channels = ARRAY_SIZE(lpc32xx_adc_iio_channels);
- retval = iio_device_register(iodev);
+ retval = devm_iio_device_register(&pdev->dev, iodev);
if (retval)
return retval;
@@ -192,15 +192,6 @@ static int lpc32xx_adc_probe(struct platform_device *pdev)
return 0;
}
-static int lpc32xx_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *iodev = platform_get_drvdata(pdev);
-
- iio_device_unregister(iodev);
-
- return 0;
-}
-
#ifdef CONFIG_OF
static const struct of_device_id lpc32xx_adc_match[] = {
{ .compatible = "nxp,lpc3220-adc" },
@@ -211,7 +202,6 @@ MODULE_DEVICE_TABLE(of, lpc32xx_adc_match);
static struct platform_driver lpc32xx_adc_driver = {
.probe = lpc32xx_adc_probe,
- .remove = lpc32xx_adc_remove,
.driver = {
.name = MOD_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index e2dd783..df71669 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -38,6 +38,7 @@
#include <linux/clk.h>
#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
@@ -111,16 +112,59 @@ static const char * const mx28_lradc_irq_names[] = {
struct mxs_lradc_of_config {
const int irq_count;
const char * const *irq_name;
+ const uint32_t *vref_mv;
+};
+
+#define VREF_MV_BASE 1850
+
+static const uint32_t mx23_vref_mv[LRADC_MAX_TOTAL_CHANS] = {
+ VREF_MV_BASE, /* CH0 */
+ VREF_MV_BASE, /* CH1 */
+ VREF_MV_BASE, /* CH2 */
+ VREF_MV_BASE, /* CH3 */
+ VREF_MV_BASE, /* CH4 */
+ VREF_MV_BASE, /* CH5 */
+ VREF_MV_BASE * 2, /* CH6 VDDIO */
+ VREF_MV_BASE * 4, /* CH7 VBATT */
+ VREF_MV_BASE, /* CH8 Temp sense 0 */
+ VREF_MV_BASE, /* CH9 Temp sense 1 */
+ VREF_MV_BASE, /* CH10 */
+ VREF_MV_BASE, /* CH11 */
+ VREF_MV_BASE, /* CH12 USB_DP */
+ VREF_MV_BASE, /* CH13 USB_DN */
+ VREF_MV_BASE, /* CH14 VBG */
+ VREF_MV_BASE * 4, /* CH15 VDD5V */
+};
+
+static const uint32_t mx28_vref_mv[LRADC_MAX_TOTAL_CHANS] = {
+ VREF_MV_BASE, /* CH0 */
+ VREF_MV_BASE, /* CH1 */
+ VREF_MV_BASE, /* CH2 */
+ VREF_MV_BASE, /* CH3 */
+ VREF_MV_BASE, /* CH4 */
+ VREF_MV_BASE, /* CH5 */
+ VREF_MV_BASE, /* CH6 */
+ VREF_MV_BASE * 4, /* CH7 VBATT */
+ VREF_MV_BASE, /* CH8 Temp sense 0 */
+ VREF_MV_BASE, /* CH9 Temp sense 1 */
+ VREF_MV_BASE * 2, /* CH10 VDDIO */
+ VREF_MV_BASE, /* CH11 VTH */
+ VREF_MV_BASE * 2, /* CH12 VDDA */
+ VREF_MV_BASE, /* CH13 VDDD */
+ VREF_MV_BASE, /* CH14 VBG */
+ VREF_MV_BASE * 4, /* CH15 VDD5V */
};
static const struct mxs_lradc_of_config mxs_lradc_of_config[] = {
[IMX23_LRADC] = {
.irq_count = ARRAY_SIZE(mx23_lradc_irq_names),
.irq_name = mx23_lradc_irq_names,
+ .vref_mv = mx23_vref_mv,
},
[IMX28_LRADC] = {
.irq_count = ARRAY_SIZE(mx28_lradc_irq_names),
.irq_name = mx28_lradc_irq_names,
+ .vref_mv = mx28_vref_mv,
},
};
@@ -141,6 +185,16 @@ enum lradc_ts_plate {
LRADC_SAMPLE_VALID,
};
+enum mxs_lradc_divbytwo {
+ MXS_LRADC_DIV_DISABLED = 0,
+ MXS_LRADC_DIV_ENABLED,
+};
+
+struct mxs_lradc_scale {
+ unsigned int integer;
+ unsigned int nano;
+};
+
struct mxs_lradc {
struct device *dev;
void __iomem *base;
@@ -155,6 +209,10 @@ struct mxs_lradc {
struct completion completion;
+ const uint32_t *vref_mv;
+ struct mxs_lradc_scale scale_avail[LRADC_MAX_TOTAL_CHANS][2];
+ unsigned long is_divided;
+
/*
* Touchscreen LRADC channels receives a private slot in the CTRL4
* register, the slot #7. Therefore only 7 slots instead of 8 in the
@@ -243,6 +301,7 @@ struct mxs_lradc {
#define LRADC_CTRL1_LRADC_IRQ_OFFSET 0
#define LRADC_CTRL2 0x20
+#define LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET 24
#define LRADC_CTRL2_TEMPSENSE_PWD (1 << 15)
#define LRADC_STATUS 0x40
@@ -759,20 +818,11 @@ static void mxs_lradc_handle_touch(struct mxs_lradc *lradc)
/*
* Raw I/O operations
*/
-static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
- const struct iio_chan_spec *chan,
- int *val, int *val2, long m)
+static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
{
struct mxs_lradc *lradc = iio_priv(iio_dev);
int ret;
- if (m != IIO_CHAN_INFO_RAW)
- return -EINVAL;
-
- /* Check for invalid channel */
- if (chan->channel > LRADC_MAX_TOTAL_CHANS)
- return -EINVAL;
-
/*
* See if there is no buffered operation in progess. If there is, simply
* bail out. This can be improved to support both buffered and raw IO at
@@ -797,7 +847,7 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
/* Clean the slot's previous content, then set new one. */
mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0), LRADC_CTRL4);
- mxs_lradc_reg_set(lradc, chan->channel, LRADC_CTRL4);
+ mxs_lradc_reg_set(lradc, chan, LRADC_CTRL4);
mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(0));
@@ -824,9 +874,206 @@ err:
return ret;
}
+static int mxs_lradc_read_temp(struct iio_dev *iio_dev, int *val)
+{
+ int ret, min, max;
+
+ ret = mxs_lradc_read_single(iio_dev, 8, &min);
+ if (ret != IIO_VAL_INT)
+ return ret;
+
+ ret = mxs_lradc_read_single(iio_dev, 9, &max);
+ if (ret != IIO_VAL_INT)
+ return ret;
+
+ *val = max - min;
+
+ return IIO_VAL_INT;
+}
+
+static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long m)
+{
+ struct mxs_lradc *lradc = iio_priv(iio_dev);
+
+ /* Check for invalid channel */
+ if (chan->channel > LRADC_MAX_TOTAL_CHANS)
+ return -EINVAL;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_TEMP)
+ return mxs_lradc_read_temp(iio_dev, val);
+
+ return mxs_lradc_read_single(iio_dev, chan->channel, val);
+
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_TEMP) {
+ /* From the datasheet, we have to multiply by 1.012 and
+ * divide by 4
+ */
+ *val = 0;
+ *val2 = 253000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ *val = lradc->vref_mv[chan->channel];
+ *val2 = chan->scan_type.realbits -
+ test_bit(chan->channel, &lradc->is_divided);
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_OFFSET:
+ if (chan->type == IIO_TEMP) {
+ /* The calculated value from the ADC is in Kelvin, we
+ * want Celsius for hwmon so the offset is
+ * -272.15 * scale
+ */
+ *val = -1075;
+ *val2 = 691699;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int mxs_lradc_write_raw(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long m)
+{
+ struct mxs_lradc *lradc = iio_priv(iio_dev);
+ struct mxs_lradc_scale *scale_avail =
+ lradc->scale_avail[chan->channel];
+ int ret;
+
+ ret = mutex_trylock(&lradc->lock);
+ if (!ret)
+ return -EBUSY;
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+ ret = -EINVAL;
+ if (val == scale_avail[MXS_LRADC_DIV_DISABLED].integer &&
+ val2 == scale_avail[MXS_LRADC_DIV_DISABLED].nano) {
+ /* divider by two disabled */
+ writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
+ lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_CLR);
+ clear_bit(chan->channel, &lradc->is_divided);
+ ret = 0;
+ } else if (val == scale_avail[MXS_LRADC_DIV_ENABLED].integer &&
+ val2 == scale_avail[MXS_LRADC_DIV_ENABLED].nano) {
+ /* divider by two enabled */
+ writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
+ lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_SET);
+ set_bit(chan->channel, &lradc->is_divided);
+ ret = 0;
+ }
+
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&lradc->lock);
+
+ return ret;
+}
+
+static int mxs_lradc_write_raw_get_fmt(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ long m)
+{
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static ssize_t mxs_lradc_show_scale_available_ch(struct device *dev,
+ struct device_attribute *attr,
+ char *buf,
+ int ch)
+{
+ struct iio_dev *iio = dev_to_iio_dev(dev);
+ struct mxs_lradc *lradc = iio_priv(iio);
+ int i, len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(lradc->scale_avail[ch]); i++)
+ len += sprintf(buf + len, "%d.%09u ",
+ lradc->scale_avail[ch][i].integer,
+ lradc->scale_avail[ch][i].nano);
+
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+static ssize_t mxs_lradc_show_scale_available(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev_attr *iio_attr = to_iio_dev_attr(attr);
+
+ return mxs_lradc_show_scale_available_ch(dev, attr, buf,
+ iio_attr->address);
+}
+
+#define SHOW_SCALE_AVAILABLE_ATTR(ch) \
+static IIO_DEVICE_ATTR(in_voltage##ch##_scale_available, S_IRUGO, \
+ mxs_lradc_show_scale_available, NULL, ch)
+
+SHOW_SCALE_AVAILABLE_ATTR(0);
+SHOW_SCALE_AVAILABLE_ATTR(1);
+SHOW_SCALE_AVAILABLE_ATTR(2);
+SHOW_SCALE_AVAILABLE_ATTR(3);
+SHOW_SCALE_AVAILABLE_ATTR(4);
+SHOW_SCALE_AVAILABLE_ATTR(5);
+SHOW_SCALE_AVAILABLE_ATTR(6);
+SHOW_SCALE_AVAILABLE_ATTR(7);
+SHOW_SCALE_AVAILABLE_ATTR(8);
+SHOW_SCALE_AVAILABLE_ATTR(9);
+SHOW_SCALE_AVAILABLE_ATTR(10);
+SHOW_SCALE_AVAILABLE_ATTR(11);
+SHOW_SCALE_AVAILABLE_ATTR(12);
+SHOW_SCALE_AVAILABLE_ATTR(13);
+SHOW_SCALE_AVAILABLE_ATTR(14);
+SHOW_SCALE_AVAILABLE_ATTR(15);
+
+static struct attribute *mxs_lradc_attributes[] = {
+ &iio_dev_attr_in_voltage0_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage1_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage2_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage3_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage4_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage5_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage6_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage7_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage8_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage9_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage10_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage11_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage12_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage13_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage14_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage15_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group mxs_lradc_attribute_group = {
+ .attrs = mxs_lradc_attributes,
+};
+
static const struct iio_info mxs_lradc_iio_info = {
.driver_module = THIS_MODULE,
.read_raw = mxs_lradc_read_raw,
+ .write_raw = mxs_lradc_write_raw,
+ .write_raw_get_fmt = mxs_lradc_write_raw_get_fmt,
+ .attrs = &mxs_lradc_attribute_group,
};
static int mxs_lradc_ts_open(struct input_dev *dev)
@@ -1133,8 +1380,10 @@ static const struct iio_buffer_setup_ops mxs_lradc_buffer_ops = {
.type = (chan_type), \
.indexed = 1, \
.scan_index = (idx), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
.channel = (idx), \
+ .address = (idx), \
.scan_type = { \
.sign = 'u', \
.realbits = LRADC_RESOLUTION, \
@@ -1151,8 +1400,17 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
MXS_ADC_CHAN(5, IIO_VOLTAGE),
MXS_ADC_CHAN(6, IIO_VOLTAGE),
MXS_ADC_CHAN(7, IIO_VOLTAGE), /* VBATT */
- MXS_ADC_CHAN(8, IIO_TEMP), /* Temp sense 0 */
- MXS_ADC_CHAN(9, IIO_TEMP), /* Temp sense 1 */
+ /* Combined Temperature sensors */
+ {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .scan_index = 8,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .channel = 8,
+ .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
+ },
MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */
MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */
MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */
@@ -1271,7 +1529,8 @@ static int mxs_lradc_probe(struct platform_device *pdev)
struct iio_dev *iio;
struct resource *iores;
int ret = 0, touch_ret;
- int i;
+ int i, s;
+ unsigned int scale_uv;
/* Allocate the IIO device. */
iio = devm_iio_device_alloc(dev, sizeof(*lradc));
@@ -1316,6 +1575,8 @@ static int mxs_lradc_probe(struct platform_device *pdev)
return ret;
}
+ lradc->vref_mv = of_cfg->vref_mv;
+
platform_set_drvdata(pdev, iio);
init_completion(&lradc->completion);
@@ -1339,6 +1600,26 @@ static int mxs_lradc_probe(struct platform_device *pdev)
if (ret)
goto err_trig;
+ /* Populate available ADC input ranges */
+ for (i = 0; i < LRADC_MAX_TOTAL_CHANS; i++) {
+ for (s = 0; s < ARRAY_SIZE(lradc->scale_avail[i]); s++) {
+ /*
+ * [s=0] = optional divider by two disabled (default)
+ * [s=1] = optional divider by two enabled
+ *
+ * The scale is calculated by doing:
+ * Vref >> (realbits - s)
+ * which multiplies by two on the second component
+ * of the array.
+ */
+ scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >>
+ (iio->channels[i].scan_type.realbits - s);
+ lradc->scale_avail[i][s].nano =
+ do_div(scale_uv, 100000000) * 10;
+ lradc->scale_avail[i][s].integer = scale_uv;
+ }
+ }
+
/* Configure the hardware. */
ret = mxs_lradc_hw_init(lradc);
if (ret)
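
In mxs_lradc_read_raw() the SCALE case returns IIO_VAL_FRACTIONAL_LOG2 with val = vref_mv and val2 = realbits (minus one when the divide-by-two option is set), i.e. scale = Vref / 2^val2 mV per LSB, and the probe loop above precomputes the same two values in integer+nano form for the *_scale_available files. A plain C model of that computation, using VREF_MV_BASE and the 12-bit LRADC resolution from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const unsigned int vref_mv = 1850;      /* VREF_MV_BASE */
        const unsigned int realbits = 12;       /* LRADC_RESOLUTION */
        unsigned int s;

        for (s = 0; s < 2; s++) {
                /* same fixed-point trick as the probe loop: scale * 1e8 */
                uint64_t scale_uv = ((uint64_t)vref_mv * 100000000) >>
                                    (realbits - s);

                printf("s=%u scale=%llu.%09llu mV/LSB\n", s,
                       (unsigned long long)(scale_uv / 100000000),
                       (unsigned long long)((scale_uv % 100000000) * 10));
        }
        return 0;       /* prints 0.451660150 and 0.903320310 */
}
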
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 0feea55..75ddd4f 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -108,11 +108,6 @@ static int adt7316_i2c_probe(struct i2c_client *client,
return adt7316_probe(&client->dev, &bus, id->name);
}
-static int adt7316_i2c_remove(struct i2c_client *client)
-{
- return adt7316_remove(&client->dev);
-}
-
static const struct i2c_device_id adt7316_i2c_id[] = {
{ "adt7316", 0 },
{ "adt7317", 0 },
@@ -132,7 +127,6 @@ static struct i2c_driver adt7316_driver = {
.owner = THIS_MODULE,
},
.probe = adt7316_i2c_probe,
- .remove = adt7316_i2c_remove,
.id_table = adt7316_i2c_id,
};
module_i2c_driver(adt7316_driver);
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
index 7f4f0a8..e480abb 100644
--- a/drivers/staging/iio/addac/adt7316-spi.c
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -116,11 +116,6 @@ static int adt7316_spi_probe(struct spi_device *spi_dev)
return adt7316_probe(&spi_dev->dev, &bus, spi_dev->modalias);
}
-static int adt7316_spi_remove(struct spi_device *spi_dev)
-{
- return adt7316_remove(&spi_dev->dev);
-}
-
static const struct spi_device_id adt7316_spi_id[] = {
{ "adt7316", 0 },
{ "adt7317", 0 },
@@ -140,7 +135,6 @@ static struct spi_driver adt7316_driver = {
.owner = THIS_MODULE,
},
.probe = adt7316_spi_probe,
- .remove = adt7316_spi_remove,
.id_table = adt7316_spi_id,
};
module_spi_driver(adt7316_driver);
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 80266e8..16a8201 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -2166,7 +2166,7 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
if (ret)
return -EIO;
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret)
return ret;
@@ -2177,16 +2177,6 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
}
EXPORT_SYMBOL(adt7316_probe);
-int adt7316_remove(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-EXPORT_SYMBOL(adt7316_remove);
-
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Analog Devices ADT7316/7/8 and ADT7516/7/9 digital"
" temperature sensor, ADC and DAC driver");
diff --git a/drivers/staging/iio/addac/adt7316.h b/drivers/staging/iio/addac/adt7316.h
index 4d3efff..2dbfb49 100644
--- a/drivers/staging/iio/addac/adt7316.h
+++ b/drivers/staging/iio/addac/adt7316.h
@@ -31,6 +31,5 @@ extern const struct dev_pm_ops adt7316_pm_ops;
#define ADT7316_PM_OPS NULL
#endif
int adt7316_probe(struct device *dev, struct adt7316_bus *bus, const char *name);
-int adt7316_remove(struct device *dev);
#endif
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index 7e7f989..047af23 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -576,10 +576,10 @@ static const struct iio_info ad7150_info = {
.event_attrs = &ad7150_event_attribute_group,
.driver_module = THIS_MODULE,
.read_raw = &ad7150_read_raw,
- .read_event_config_new = &ad7150_read_event_config,
- .write_event_config_new = &ad7150_write_event_config,
- .read_event_value_new = &ad7150_read_event_value,
- .write_event_value_new = &ad7150_write_event_value,
+ .read_event_config = &ad7150_read_event_config,
+ .write_event_config = &ad7150_write_event_config,
+ .read_event_value = &ad7150_read_event_value,
+ .write_event_value = &ad7150_write_event_value,
};
/*
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 862d68d..cbb1588 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -105,6 +105,11 @@ struct ad7746_chip_info {
u8 vt_setup;
u8 capdac[2][2];
s8 capdac_set;
+
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data ____cacheline_aligned;
};
enum ad7746_chan {
@@ -566,11 +571,6 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
int ret, delay;
u8 regval, reg;
- union {
- u32 d32;
- u8 d8[4];
- } data;
-
mutex_lock(&indio_dev->mlock);
switch (mask) {
@@ -591,12 +591,12 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
/* Now read the actual register */
ret = i2c_smbus_read_i2c_block_data(chip->client,
- chan->address >> 8, 3, &data.d8[1]);
+ chan->address >> 8, 3, &chip->data.d8[1]);
if (ret < 0)
goto out;
- *val = (be32_to_cpu(data.d32) & 0xFFFFFF) - 0x800000;
+ *val = (be32_to_cpu(chip->data.d32) & 0xFFFFFF) - 0x800000;
switch (chan->type) {
case IIO_TEMP:
diff --git a/drivers/staging/iio/frequency/ad9832.h b/drivers/staging/iio/frequency/ad9832.h
index c5b701f..386f4dc 100644
--- a/drivers/staging/iio/frequency/ad9832.h
+++ b/drivers/staging/iio/frequency/ad9832.h
@@ -92,9 +92,9 @@ struct ad9832_state {
* transfer buffers to live in their own cache lines.
*/
union {
- unsigned short freq_data[4]____cacheline_aligned;
- unsigned short phase_data[2];
- unsigned short data;
+ __be16 freq_data[4]____cacheline_aligned;
+ __be16 phase_data[2];
+ __be16 data;
};
};
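
Retyping the ad9832/ad9834 transfer buffers as __be16 records that they carry wire-order (big-endian) data, so sparse can warn when a cpu_to_be16() conversion is missing. A small sketch, with a made-up framing of the command word:

#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct example_dds_state {
        __be16 data ____cacheline_aligned;      /* DMA-safe, big-endian */
};

static void example_fill_word(struct example_dds_state *st, u16 cmd, u16 value)
{
        /* host-order value converted exactly once, at the wire boundary */
        st->data = cpu_to_be16(cmd | (value & 0x0fff));
}
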
diff --git a/drivers/staging/iio/frequency/ad9834.h b/drivers/staging/iio/frequency/ad9834.h
index ed5ed8d..8ca6e52 100644
--- a/drivers/staging/iio/frequency/ad9834.h
+++ b/drivers/staging/iio/frequency/ad9834.h
@@ -65,8 +65,8 @@ struct ad9834_state {
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
*/
- unsigned short data ____cacheline_aligned;
- unsigned short freq_data[2] ;
+ __be16 data ____cacheline_aligned;
+ __be16 freq_data[2];
};
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
index 6d3d771..d5d395c 100644
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -167,7 +167,7 @@ static int adis16060_r_probe(struct spi_device *spi)
indio_dev->channels = adis16060_channels;
indio_dev->num_channels = ARRAY_SIZE(adis16060_channels);
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
return ret;
@@ -175,13 +175,6 @@ static int adis16060_r_probe(struct spi_device *spi)
return 0;
}
-/* fixme, confirm ordering in this function */
-static int adis16060_r_remove(struct spi_device *spi)
-{
- iio_device_unregister(spi_get_drvdata(spi));
- return 0;
-}
-
static int adis16060_w_probe(struct spi_device *spi)
{
int ret;
@@ -211,7 +204,6 @@ static struct spi_driver adis16060_r_driver = {
.owner = THIS_MODULE,
},
.probe = adis16060_r_probe,
- .remove = adis16060_r_remove,
};
static struct spi_driver adis16060_w_driver = {
diff --git a/drivers/staging/iio/iio_simple_dummy.c b/drivers/staging/iio/iio_simple_dummy.c
index 1fac989..fd334a0 100644
--- a/drivers/staging/iio/iio_simple_dummy.c
+++ b/drivers/staging/iio/iio_simple_dummy.c
@@ -370,10 +370,10 @@ static const struct iio_info iio_dummy_info = {
.read_raw = &iio_dummy_read_raw,
.write_raw = &iio_dummy_write_raw,
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
- .read_event_config_new = &iio_simple_dummy_read_event_config,
- .write_event_config_new = &iio_simple_dummy_write_event_config,
- .read_event_value_new = &iio_simple_dummy_read_event_value,
- .write_event_value_new = &iio_simple_dummy_write_event_value,
+ .read_event_config = &iio_simple_dummy_read_event_config,
+ .write_event_config = &iio_simple_dummy_write_event_config,
+ .read_event_value = &iio_simple_dummy_read_event_value,
+ .write_event_value = &iio_simple_dummy_write_event_value,
#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
};
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 488e690..3660a43 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -585,7 +585,7 @@ static int isl29018_probe(struct i2c_client *client,
indio_dev->name = id->name;
indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
- err = iio_device_register(indio_dev);
+ err = devm_iio_device_register(&client->dev, indio_dev);
if (err) {
dev_err(&client->dev, "iio registration fails\n");
return err;
@@ -594,16 +594,6 @@ static int isl29018_probe(struct i2c_client *client,
return 0;
}
-static int isl29018_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- dev_dbg(&client->dev, "%s()\n", __func__);
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int isl29018_suspend(struct device *dev)
{
@@ -664,7 +654,6 @@ static struct i2c_driver isl29018_driver = {
.of_match_table = isl29018_of_match,
},
.probe = isl29018_probe,
- .remove = isl29018_remove,
.id_table = isl29018_id,
};
module_i2c_driver(isl29018_driver);
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index 1880502..1e53808 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -1672,10 +1672,10 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value_new = &tsl2x7x_read_thresh,
- .write_event_value_new = &tsl2x7x_write_thresh,
- .read_event_config_new = &tsl2x7x_read_interrupt_config,
- .write_event_config_new = &tsl2x7x_write_interrupt_config,
+ .read_event_value = &tsl2x7x_read_thresh,
+ .write_event_value = &tsl2x7x_write_thresh,
+ .read_event_config = &tsl2x7x_read_interrupt_config,
+ .write_event_config = &tsl2x7x_write_interrupt_config,
},
[PRX] = {
.attrs = &tsl2X7X_device_attr_group_tbl[PRX],
@@ -1683,10 +1683,10 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value_new = &tsl2x7x_read_thresh,
- .write_event_value_new = &tsl2x7x_write_thresh,
- .read_event_config_new = &tsl2x7x_read_interrupt_config,
- .write_event_config_new = &tsl2x7x_write_interrupt_config,
+ .read_event_value = &tsl2x7x_read_thresh,
+ .write_event_value = &tsl2x7x_write_thresh,
+ .read_event_config = &tsl2x7x_read_interrupt_config,
+ .write_event_config = &tsl2x7x_write_interrupt_config,
},
[ALSPRX] = {
.attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX],
@@ -1694,10 +1694,10 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value_new = &tsl2x7x_read_thresh,
- .write_event_value_new = &tsl2x7x_write_thresh,
- .read_event_config_new = &tsl2x7x_read_interrupt_config,
- .write_event_config_new = &tsl2x7x_write_interrupt_config,
+ .read_event_value = &tsl2x7x_read_thresh,
+ .write_event_value = &tsl2x7x_write_thresh,
+ .read_event_config = &tsl2x7x_read_interrupt_config,
+ .write_event_config = &tsl2x7x_write_interrupt_config,
},
[PRX2] = {
.attrs = &tsl2X7X_device_attr_group_tbl[PRX2],
@@ -1705,10 +1705,10 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value_new = &tsl2x7x_read_thresh,
- .write_event_value_new = &tsl2x7x_write_thresh,
- .read_event_config_new = &tsl2x7x_read_interrupt_config,
- .write_event_config_new = &tsl2x7x_write_interrupt_config,
+ .read_event_value = &tsl2x7x_read_thresh,
+ .write_event_value = &tsl2x7x_write_thresh,
+ .read_event_config = &tsl2x7x_read_interrupt_config,
+ .write_event_config = &tsl2x7x_write_interrupt_config,
},
[ALSPRX2] = {
.attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX2],
@@ -1716,10 +1716,10 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value_new = &tsl2x7x_read_thresh,
- .write_event_value_new = &tsl2x7x_write_thresh,
- .read_event_config_new = &tsl2x7x_read_interrupt_config,
- .write_event_config_new = &tsl2x7x_write_interrupt_config,
+ .read_event_value = &tsl2x7x_read_thresh,
+ .write_event_value = &tsl2x7x_write_thresh,
+ .read_event_config = &tsl2x7x_read_interrupt_config,
+ .write_event_config = &tsl2x7x_write_interrupt_config,
},
};
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index 0485d7f..d4f4dd9 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -629,10 +629,17 @@ static const struct i2c_device_id hmc5843_id[] = {
};
MODULE_DEVICE_TABLE(i2c, hmc5843_id);
+static const struct of_device_id hmc5843_of_match[] = {
+ { .compatible = "honeywell,hmc5843" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, hmc5843_of_match);
+
static struct i2c_driver hmc5843_driver = {
.driver = {
.name = "hmc5843",
.pm = HMC5843_PM_OPS,
+ .of_match_table = hmc5843_of_match,
},
.id_table = hmc5843_id,
.probe = hmc5843_probe,
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index 62d3017..36eedd8 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -131,7 +131,7 @@ static int ad2s1200_probe(struct spi_device *spi)
indio_dev->num_channels = ARRAY_SIZE(ad2s1200_channels);
indio_dev->name = spi_get_device_id(spi)->name;
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
return ret;
@@ -142,13 +142,6 @@ static int ad2s1200_probe(struct spi_device *spi)
return 0;
}
-static int ad2s1200_remove(struct spi_device *spi)
-{
- iio_device_unregister(spi_get_drvdata(spi));
-
- return 0;
-}
-
static const struct spi_device_id ad2s1200_id[] = {
{ "ad2s1200" },
{ "ad2s1205" },
@@ -162,7 +155,6 @@ static struct spi_driver ad2s1200_driver = {
.owner = THIS_MODULE,
},
.probe = ad2s1200_probe,
- .remove = ad2s1200_remove,
.id_table = ad2s1200_id,
};
module_spi_driver(ad2s1200_driver);
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 5032ff7..78319ad 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -53,3 +53,9 @@ config DRM_IMX_IPUV3
depends on DRM_IMX_IPUV3_CORE
help
Choose this if you have a i.MX5 or i.MX6 processor.
+
+config DRM_IMX_HDMI
+ tristate "Freescale i.MX DRM HDMI"
+ depends on DRM_IMX
+ help
+ Choose this if you want to use HDMI on i.MX6.
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/staging/imx-drm/Makefile
index 8742432..4677585 100644
--- a/drivers/staging/imx-drm/Makefile
+++ b/drivers/staging/imx-drm/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/
imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
+obj-$(CONFIG_DRM_IMX_HDMI) += imx-hdmi.o
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 96e4eee..09ef5fb8 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -448,6 +448,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
goto err_vblank;
}
+ platform_set_drvdata(drm->platformdev, drm);
mutex_unlock(&imxdrm->mutex);
return 0;
@@ -848,7 +849,7 @@ static int imx_drm_platform_probe(struct platform_device *pdev)
static int imx_drm_platform_remove(struct platform_device *pdev)
{
- drm_platform_exit(&imx_drm_driver, pdev);
+ drm_put_dev(platform_get_drvdata(pdev));
return 0;
}
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c
new file mode 100644
index 0000000..f3a1f5e
--- /dev/null
+++ b/drivers/staging/imx-drm/imx-hdmi.c
@@ -0,0 +1,1916 @@
+/*
+ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * SH-Mobile High-Definition Multimedia Interface (HDMI) driver
+ * for SLISHDMI13T and SLIPHDMIT IP cores
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ */
+
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/of_device.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "ipu-v3/imx-ipu-v3.h"
+#include "imx-hdmi.h"
+#include "imx-drm.h"
+
+#define HDMI_EDID_LEN 512
+
+#define RGB 0
+#define YCBCR444 1
+#define YCBCR422_16BITS 2
+#define YCBCR422_8BITS 3
+#define XVYCC444 4
+
+enum hdmi_datamap {
+ RGB444_8B = 0x01,
+ RGB444_10B = 0x03,
+ RGB444_12B = 0x05,
+ RGB444_16B = 0x07,
+ YCbCr444_8B = 0x09,
+ YCbCr444_10B = 0x0B,
+ YCbCr444_12B = 0x0D,
+ YCbCr444_16B = 0x0F,
+ YCbCr422_8B = 0x16,
+ YCbCr422_10B = 0x14,
+ YCbCr422_12B = 0x12,
+};
+
+enum hdmi_colorimetry {
+ ITU601,
+ ITU709,
+};
+
+enum imx_hdmi_devtype {
+ IMX6Q_HDMI,
+ IMX6DL_HDMI,
+};
+
+static const u16 csc_coeff_default[3][4] = {
+ { 0x2000, 0x0000, 0x0000, 0x0000 },
+ { 0x0000, 0x2000, 0x0000, 0x0000 },
+ { 0x0000, 0x0000, 0x2000, 0x0000 }
+};
+
+static const u16 csc_coeff_rgb_out_eitu601[3][4] = {
+ { 0x2000, 0x6926, 0x74fd, 0x010e },
+ { 0x2000, 0x2cdd, 0x0000, 0x7e9a },
+ { 0x2000, 0x0000, 0x38b4, 0x7e3b }
+};
+
+static const u16 csc_coeff_rgb_out_eitu709[3][4] = {
+ { 0x2000, 0x7106, 0x7a02, 0x00a7 },
+ { 0x2000, 0x3264, 0x0000, 0x7e6d },
+ { 0x2000, 0x0000, 0x3b61, 0x7e25 }
+};
+
+static const u16 csc_coeff_rgb_in_eitu601[3][4] = {
+ { 0x2591, 0x1322, 0x074b, 0x0000 },
+ { 0x6535, 0x2000, 0x7acc, 0x0200 },
+ { 0x6acd, 0x7534, 0x2000, 0x0200 }
+};
+
+static const u16 csc_coeff_rgb_in_eitu709[3][4] = {
+ { 0x2dc5, 0x0d9b, 0x049e, 0x0000 },
+ { 0x62f0, 0x2000, 0x7d11, 0x0200 },
+ { 0x6756, 0x78ab, 0x2000, 0x0200 }
+};
+
+struct hdmi_vmode {
+ bool mdvi;
+ bool mhsyncpolarity;
+ bool mvsyncpolarity;
+ bool minterlaced;
+ bool mdataenablepolarity;
+
+ unsigned int mpixelclock;
+ unsigned int mpixelrepetitioninput;
+ unsigned int mpixelrepetitionoutput;
+};
+
+struct hdmi_data_info {
+ unsigned int enc_in_format;
+ unsigned int enc_out_format;
+ unsigned int enc_color_depth;
+ unsigned int colorimetry;
+ unsigned int pix_repet_factor;
+ unsigned int hdcp_enable;
+ struct hdmi_vmode video_mode;
+};
+
+struct imx_hdmi {
+ struct drm_connector connector;
+ struct imx_drm_connector *imx_drm_connector;
+ struct drm_encoder encoder;
+ struct imx_drm_encoder *imx_drm_encoder;
+
+ enum imx_hdmi_devtype dev_type;
+ struct device *dev;
+ struct clk *isfr_clk;
+ struct clk *iahb_clk;
+
+ struct hdmi_data_info hdmi_data;
+ int vic;
+
+ u8 edid[HDMI_EDID_LEN];
+ bool cable_plugin;
+
+ bool phy_enabled;
+ struct drm_display_mode previous_mode;
+
+ struct regmap *regmap;
+ struct i2c_adapter *ddc;
+ void __iomem *regs;
+
+ unsigned long pixel_clk_rate;
+ unsigned int sample_rate;
+ int ratio;
+};
+
+static void imx_hdmi_set_ipu_di_mux(struct imx_hdmi *hdmi, int ipu_di)
+{
+ regmap_update_bits(hdmi->regmap, IOMUXC_GPR3,
+ IMX6Q_GPR3_HDMI_MUX_CTL_MASK,
+ ipu_di << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT);
+}
+
+static inline void hdmi_writeb(struct imx_hdmi *hdmi, u8 val, int offset)
+{
+ writeb(val, hdmi->regs + offset);
+}
+
+static inline u8 hdmi_readb(struct imx_hdmi *hdmi, int offset)
+{
+ return readb(hdmi->regs + offset);
+}
+
+static void hdmi_mask_writeb(struct imx_hdmi *hdmi, u8 data, unsigned int reg,
+ u8 shift, u8 mask)
+{
+ u8 value = hdmi_readb(hdmi, reg) & ~mask;
+ value |= (data << shift) & mask;
+ hdmi_writeb(hdmi, value, reg);
+}
+
+static void hdmi_set_clock_regenerator_n(struct imx_hdmi *hdmi,
+ unsigned int value)
+{
+ u8 val;
+
+ hdmi_writeb(hdmi, value & 0xff, HDMI_AUD_N1);
+ hdmi_writeb(hdmi, (value >> 8) & 0xff, HDMI_AUD_N2);
+ hdmi_writeb(hdmi, (value >> 16) & 0x0f, HDMI_AUD_N3);
+
+ /* nshift factor = 0 */
+ val = hdmi_readb(hdmi, HDMI_AUD_CTS3);
+ val &= ~HDMI_AUD_CTS3_N_SHIFT_MASK;
+ hdmi_writeb(hdmi, val, HDMI_AUD_CTS3);
+}
+
+static void hdmi_regenerate_cts(struct imx_hdmi *hdmi, unsigned int cts)
+{
+ u8 val;
+
+ /* Must be set/cleared first */
+ val = hdmi_readb(hdmi, HDMI_AUD_CTS3);
+ val &= ~HDMI_AUD_CTS3_CTS_MANUAL;
+ hdmi_writeb(hdmi, val, HDMI_AUD_CTS3);
+
+ hdmi_writeb(hdmi, cts & 0xff, HDMI_AUD_CTS1);
+ hdmi_writeb(hdmi, (cts >> 8) & 0xff, HDMI_AUD_CTS2);
+ hdmi_writeb(hdmi, ((cts >> 16) & HDMI_AUD_CTS3_AUDCTS19_16_MASK) |
+ HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
+}
+
+static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk,
+ unsigned int ratio)
+{
+ unsigned int n = (128 * freq) / 1000;
+
+ switch (freq) {
+ case 32000:
+ if (pixel_clk == 25170000)
+ n = (ratio == 150) ? 9152 : 4576;
+ else if (pixel_clk == 27020000)
+ n = (ratio == 150) ? 8192 : 4096;
+ else if (pixel_clk == 74170000 || pixel_clk == 148350000)
+ n = 11648;
+ else
+ n = 4096;
+ break;
+
+ case 44100:
+ if (pixel_clk == 25170000)
+ n = 7007;
+ else if (pixel_clk == 74170000)
+ n = 17836;
+ else if (pixel_clk == 148350000)
+ n = (ratio == 150) ? 17836 : 8918;
+ else
+ n = 6272;
+ break;
+
+ case 48000:
+ if (pixel_clk == 25170000)
+ n = (ratio == 150) ? 9152 : 6864;
+ else if (pixel_clk == 27020000)
+ n = (ratio == 150) ? 8192 : 6144;
+ else if (pixel_clk == 74170000)
+ n = 11648;
+ else if (pixel_clk == 148350000)
+ n = (ratio == 150) ? 11648 : 5824;
+ else
+ n = 6144;
+ break;
+
+ case 88200:
+ n = hdmi_compute_n(44100, pixel_clk, ratio) * 2;
+ break;
+
+ case 96000:
+ n = hdmi_compute_n(48000, pixel_clk, ratio) * 2;
+ break;
+
+ case 176400:
+ n = hdmi_compute_n(44100, pixel_clk, ratio) * 4;
+ break;
+
+ case 192000:
+ n = hdmi_compute_n(48000, pixel_clk, ratio) * 4;
+ break;
+
+ default:
+ break;
+ }
+
+ return n;
+}
+
+static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk,
+ unsigned int ratio)
+{
+ unsigned int cts = 0;
+
+ pr_debug("%s: freq: %d pixel_clk: %ld ratio: %d\n", __func__, freq,
+ pixel_clk, ratio);
+
+ switch (freq) {
+ case 32000:
+ if (pixel_clk == 297000000) {
+ cts = 222750;
+ break;
+ }
+ case 48000:
+ case 96000:
+ case 192000:
+ switch (pixel_clk) {
+ case 25200000:
+ case 27000000:
+ case 54000000:
+ case 74250000:
+ case 148500000:
+ cts = pixel_clk / 1000;
+ break;
+ case 297000000:
+ cts = 247500;
+ break;
+ /*
+ * All other TMDS clocks are not supported by
+ * DWC_hdmi_tx. The TMDS clocks divided or
+ * multiplied by 1,001 coefficients are not
+ * supported.
+ */
+ default:
+ break;
+ }
+ break;
+ case 44100:
+ case 88200:
+ case 176400:
+ switch (pixel_clk) {
+ case 25200000:
+ cts = 28000;
+ break;
+ case 27000000:
+ cts = 30000;
+ break;
+ case 54000000:
+ cts = 60000;
+ break;
+ case 74250000:
+ cts = 82500;
+ break;
+ case 148500000:
+ cts = 165000;
+ break;
+ case 297000000:
+ cts = 247500;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (ratio == 100)
+ return cts;
+ else
+ return (cts * ratio) / 100;
+}
+
+static void hdmi_get_pixel_clk(struct imx_hdmi *hdmi)
+{
+ unsigned long rate;
+
+ rate = 65000000; /* FIXME */
+
+ if (rate)
+ hdmi->pixel_clk_rate = rate;
+}
+
+static void hdmi_set_clk_regenerator(struct imx_hdmi *hdmi)
+{
+ unsigned int clk_n, clk_cts;
+
+ clk_n = hdmi_compute_n(hdmi->sample_rate, hdmi->pixel_clk_rate,
+ hdmi->ratio);
+ clk_cts = hdmi_compute_cts(hdmi->sample_rate, hdmi->pixel_clk_rate,
+ hdmi->ratio);
+
+ if (!clk_cts) {
+ dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n",
+ __func__, hdmi->pixel_clk_rate);
+ return;
+ }
+
+ dev_dbg(hdmi->dev, "%s: samplerate=%d ratio=%d pixelclk=%lu N=%d cts=%d\n",
+ __func__, hdmi->sample_rate, hdmi->ratio,
+ hdmi->pixel_clk_rate, clk_n, clk_cts);
+
+ hdmi_set_clock_regenerator_n(hdmi, clk_n);
+ hdmi_regenerate_cts(hdmi, clk_cts);
+}
+
+static void hdmi_init_clk_regenerator(struct imx_hdmi *hdmi)
+{
+ unsigned int clk_n, clk_cts;
+
+ clk_n = hdmi_compute_n(hdmi->sample_rate, hdmi->pixel_clk_rate,
+ hdmi->ratio);
+ clk_cts = hdmi_compute_cts(hdmi->sample_rate, hdmi->pixel_clk_rate,
+ hdmi->ratio);
+
+ if (!clk_cts) {
+ dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n",
+ __func__, hdmi->pixel_clk_rate);
+ return;
+ }
+
+ dev_dbg(hdmi->dev, "%s: samplerate=%d ratio=%d pixelclk=%lu N=%d cts=%d\n",
+ __func__, hdmi->sample_rate, hdmi->ratio,
+ hdmi->pixel_clk_rate, clk_n, clk_cts);
+
+ hdmi_set_clock_regenerator_n(hdmi, clk_n);
+ hdmi_regenerate_cts(hdmi, clk_cts);
+}
+
+static void hdmi_clk_regenerator_update_pixel_clock(struct imx_hdmi *hdmi)
+{
+ /* Get pixel clock from ipu */
+ hdmi_get_pixel_clk(hdmi);
+ hdmi_set_clk_regenerator(hdmi);
+}
+
+/*
+ * this submodule is responsible for the video data synchronization.
+ * for example, for RGB 4:4:4 input, the data map is defined as
+ * pin{47~40} <==> R[7:0]
+ * pin{31~24} <==> G[7:0]
+ * pin{15~8} <==> B[7:0]
+ */
+static void hdmi_video_sample(struct imx_hdmi *hdmi)
+{
+ int color_format = 0;
+ u8 val;
+
+ if (hdmi->hdmi_data.enc_in_format == RGB) {
+ if (hdmi->hdmi_data.enc_color_depth == 8)
+ color_format = 0x01;
+ else if (hdmi->hdmi_data.enc_color_depth == 10)
+ color_format = 0x03;
+ else if (hdmi->hdmi_data.enc_color_depth == 12)
+ color_format = 0x05;
+ else if (hdmi->hdmi_data.enc_color_depth == 16)
+ color_format = 0x07;
+ else
+ return;
+ } else if (hdmi->hdmi_data.enc_in_format == YCBCR444) {
+ if (hdmi->hdmi_data.enc_color_depth == 8)
+ color_format = 0x09;
+ else if (hdmi->hdmi_data.enc_color_depth == 10)
+ color_format = 0x0B;
+ else if (hdmi->hdmi_data.enc_color_depth == 12)
+ color_format = 0x0D;
+ else if (hdmi->hdmi_data.enc_color_depth == 16)
+ color_format = 0x0F;
+ else
+ return;
+ } else if (hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) {
+ if (hdmi->hdmi_data.enc_color_depth == 8)
+ color_format = 0x16;
+ else if (hdmi->hdmi_data.enc_color_depth == 10)
+ color_format = 0x14;
+ else if (hdmi->hdmi_data.enc_color_depth == 12)
+ color_format = 0x12;
+ else
+ return;
+ }
+
+ val = HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE |
+ ((color_format << HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET) &
+ HDMI_TX_INVID0_VIDEO_MAPPING_MASK);
+ hdmi_writeb(hdmi, val, HDMI_TX_INVID0);
+
+ /* Enable TX stuffing: When DE is inactive, fix the output data to 0 */
+ val = HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE |
+ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE |
+ HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE;
+ hdmi_writeb(hdmi, val, HDMI_TX_INSTUFFING);
+ hdmi_writeb(hdmi, 0x0, HDMI_TX_GYDATA0);
+ hdmi_writeb(hdmi, 0x0, HDMI_TX_GYDATA1);
+ hdmi_writeb(hdmi, 0x0, HDMI_TX_RCRDATA0);
+ hdmi_writeb(hdmi, 0x0, HDMI_TX_RCRDATA1);
+ hdmi_writeb(hdmi, 0x0, HDMI_TX_BCBDATA0);
+ hdmi_writeb(hdmi, 0x0, HDMI_TX_BCBDATA1);
+}
+
+static int is_color_space_conversion(struct imx_hdmi *hdmi)
+{
+ return (hdmi->hdmi_data.enc_in_format !=
+ hdmi->hdmi_data.enc_out_format);
+}
+
+static int is_color_space_decimation(struct imx_hdmi *hdmi)
+{
+ return ((hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS) &&
+ (hdmi->hdmi_data.enc_in_format == RGB ||
+ hdmi->hdmi_data.enc_in_format == YCBCR444));
+}
+
+static int is_color_space_interpolation(struct imx_hdmi *hdmi)
+{
+ return ((hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) &&
+ (hdmi->hdmi_data.enc_out_format == RGB ||
+ hdmi->hdmi_data.enc_out_format == YCBCR444));
+}
+
+static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi)
+{
+ const u16 (*csc_coeff)[3][4] = &csc_coeff_default;
+ u32 csc_scale = 1;
+ u8 val;
+
+ if (is_color_space_conversion(hdmi)) {
+ if (hdmi->hdmi_data.enc_out_format == RGB) {
+ if (hdmi->hdmi_data.colorimetry == ITU601)
+ csc_coeff = &csc_coeff_rgb_out_eitu601;
+ else
+ csc_coeff = &csc_coeff_rgb_out_eitu709;
+ } else if (hdmi->hdmi_data.enc_in_format == RGB) {
+ if (hdmi->hdmi_data.colorimetry == ITU601)
+ csc_coeff = &csc_coeff_rgb_in_eitu601;
+ else
+ csc_coeff = &csc_coeff_rgb_in_eitu709;
+ csc_scale = 0;
+ }
+ }
+
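+ /* Program the selected 3x4 coefficient matrix, one MSB/LSB register pair per entry */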
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][0] & 0xff), HDMI_CSC_COEF_A1_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][0] >> 8), HDMI_CSC_COEF_A1_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][1] & 0xff), HDMI_CSC_COEF_A2_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][1] >> 8), HDMI_CSC_COEF_A2_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][2] & 0xff), HDMI_CSC_COEF_A3_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][2] >> 8), HDMI_CSC_COEF_A3_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][3] & 0xff), HDMI_CSC_COEF_A4_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][3] >> 8), HDMI_CSC_COEF_A4_MSB);
+
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][0] & 0xff), HDMI_CSC_COEF_B1_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][0] >> 8), HDMI_CSC_COEF_B1_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][1] & 0xff), HDMI_CSC_COEF_B2_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][1] >> 8), HDMI_CSC_COEF_B2_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][2] & 0xff), HDMI_CSC_COEF_B3_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][2] >> 8), HDMI_CSC_COEF_B3_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][3] & 0xff), HDMI_CSC_COEF_B4_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][3] >> 8), HDMI_CSC_COEF_B4_MSB);
+
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][0] & 0xff), HDMI_CSC_COEF_C1_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][0] >> 8), HDMI_CSC_COEF_C1_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][1] & 0xff), HDMI_CSC_COEF_C2_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][1] >> 8), HDMI_CSC_COEF_C2_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][2] & 0xff), HDMI_CSC_COEF_C3_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][2] >> 8), HDMI_CSC_COEF_C3_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][3] & 0xff), HDMI_CSC_COEF_C4_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][3] >> 8), HDMI_CSC_COEF_C4_MSB);
+
+ val = hdmi_readb(hdmi, HDMI_CSC_SCALE);
+ val &= ~HDMI_CSC_SCALE_CSCSCALE_MASK;
+ val |= csc_scale & HDMI_CSC_SCALE_CSCSCALE_MASK;
+ hdmi_writeb(hdmi, val, HDMI_CSC_SCALE);
+}
+
+static void hdmi_video_csc(struct imx_hdmi *hdmi)
+{
+ int color_depth = 0;
+ int interpolation = HDMI_CSC_CFG_INTMODE_DISABLE;
+ int decimation = 0;
+ u8 val;
+
+ /* YCC422 interpolation to 444 mode */
+ if (is_color_space_interpolation(hdmi))
+ interpolation = HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1;
+ else if (is_color_space_decimation(hdmi))
+ decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3;
+
+ if (hdmi->hdmi_data.enc_color_depth == 8)
+ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP;
+ else if (hdmi->hdmi_data.enc_color_depth == 10)
+ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP;
+ else if (hdmi->hdmi_data.enc_color_depth == 12)
+ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP;
+ else if (hdmi->hdmi_data.enc_color_depth == 16)
+ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP;
+ else
+ return;
+
+ /* Configure the CSC registers */
+ hdmi_writeb(hdmi, interpolation | decimation, HDMI_CSC_CFG);
+ val = hdmi_readb(hdmi, HDMI_CSC_SCALE);
+ val &= ~HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK;
+ val |= color_depth;
+ hdmi_writeb(hdmi, val, HDMI_CSC_SCALE);
+
+ imx_hdmi_update_csc_coeffs(hdmi);
+}
+
+/*
+ * The HDMI video packetizer packs the pixel data for transmission.
+ * For example, if the input is in YCC422 mode or a pixel repeater is
+ * used, the data has to be repacked; otherwise this module can be
+ * bypassed.
+ */
+static void hdmi_video_packetize(struct imx_hdmi *hdmi)
+{
+ unsigned int color_depth = 0;
+ unsigned int remap_size = HDMI_VP_REMAP_YCC422_16bit;
+ unsigned int output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_PP;
+ struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
+ u8 val;
+
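+ /*
+ * The HDMI_VP_PR_CD color depth codes used below (4/5/6/7) appear to
+ * correspond to 24/30/36/48 bits per pixel output.
+ */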
+ if (hdmi_data->enc_out_format == RGB
+ || hdmi_data->enc_out_format == YCBCR444) {
+ if (!hdmi_data->enc_color_depth)
+ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
+ else if (hdmi_data->enc_color_depth == 8) {
+ color_depth = 4;
+ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
+ } else if (hdmi_data->enc_color_depth == 10)
+ color_depth = 5;
+ else if (hdmi_data->enc_color_depth == 12)
+ color_depth = 6;
+ else if (hdmi_data->enc_color_depth == 16)
+ color_depth = 7;
+ else
+ return;
+ } else if (hdmi_data->enc_out_format == YCBCR422_8BITS) {
+ if (!hdmi_data->enc_color_depth ||
+ hdmi_data->enc_color_depth == 8)
+ remap_size = HDMI_VP_REMAP_YCC422_16bit;
+ else if (hdmi_data->enc_color_depth == 10)
+ remap_size = HDMI_VP_REMAP_YCC422_20bit;
+ else if (hdmi_data->enc_color_depth == 12)
+ remap_size = HDMI_VP_REMAP_YCC422_24bit;
+ else
+ return;
+ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422;
+ } else
+ return;
+
+ /* set the packetizer registers */
+ val = ((color_depth << HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET) &
+ HDMI_VP_PR_CD_COLOR_DEPTH_MASK) |
+ ((hdmi_data->pix_repet_factor <<
+ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET) &
+ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK);
+ hdmi_writeb(hdmi, val, HDMI_VP_PR_CD);
+
+ val = hdmi_readb(hdmi, HDMI_VP_STUFF);
+ val &= ~HDMI_VP_STUFF_PR_STUFFING_MASK;
+ val |= HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE;
+ hdmi_writeb(hdmi, val, HDMI_VP_STUFF);
+
+ /* Data from pixel repeater block */
+ if (hdmi_data->pix_repet_factor > 1) {
+ val = hdmi_readb(hdmi, HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_PR_EN_MASK |
+ HDMI_VP_CONF_BYPASS_SELECT_MASK);
+ val |= HDMI_VP_CONF_PR_EN_ENABLE |
+ HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER;
+ hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ } else { /* data from packetizer block */
+ val = hdmi_readb(hdmi, HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_PR_EN_MASK |
+ HDMI_VP_CONF_BYPASS_SELECT_MASK);
+ val |= HDMI_VP_CONF_PR_EN_DISABLE |
+ HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER;
+ hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ }
+
+ val = hdmi_readb(hdmi, HDMI_VP_STUFF);
+ val &= ~HDMI_VP_STUFF_IDEFAULT_PHASE_MASK;
+ val |= 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET;
+ hdmi_writeb(hdmi, val, HDMI_VP_STUFF);
+
+ hdmi_writeb(hdmi, remap_size, HDMI_VP_REMAP);
+
+ if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_PP) {
+ val = hdmi_readb(hdmi, HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
+ HDMI_VP_CONF_PP_EN_ENMASK |
+ HDMI_VP_CONF_YCC422_EN_MASK);
+ val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
+ HDMI_VP_CONF_PP_EN_ENABLE |
+ HDMI_VP_CONF_YCC422_EN_DISABLE;
+ hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422) {
+ val = hdmi_readb(hdmi, HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
+ HDMI_VP_CONF_PP_EN_ENMASK |
+ HDMI_VP_CONF_YCC422_EN_MASK);
+ val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
+ HDMI_VP_CONF_PP_EN_DISABLE |
+ HDMI_VP_CONF_YCC422_EN_ENABLE;
+ hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS) {
+ val = hdmi_readb(hdmi, HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
+ HDMI_VP_CONF_PP_EN_ENMASK |
+ HDMI_VP_CONF_YCC422_EN_MASK);
+ val |= HDMI_VP_CONF_BYPASS_EN_ENABLE |
+ HDMI_VP_CONF_PP_EN_DISABLE |
+ HDMI_VP_CONF_YCC422_EN_DISABLE;
+ hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ } else {
+ return;
+ }
+
+ val = hdmi_readb(hdmi, HDMI_VP_STUFF);
+ val &= ~(HDMI_VP_STUFF_PP_STUFFING_MASK |
+ HDMI_VP_STUFF_YCC422_STUFFING_MASK);
+ val |= HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE |
+ HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE;
+ hdmi_writeb(hdmi, val, HDMI_VP_STUFF);
+
+ val = hdmi_readb(hdmi, HDMI_VP_CONF);
+ val &= ~HDMI_VP_CONF_OUTPUT_SELECTOR_MASK;
+ val |= output_select;
+ hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+}
+
+static inline void hdmi_phy_test_clear(struct imx_hdmi *hdmi,
+ unsigned char bit)
+{
+ u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0);
+ val &= ~HDMI_PHY_TST0_TSTCLR_MASK;
+ val |= (bit << HDMI_PHY_TST0_TSTCLR_OFFSET) &
+ HDMI_PHY_TST0_TSTCLR_MASK;
+ hdmi_writeb(hdmi, val, HDMI_PHY_TST0);
+}
+
+static inline void hdmi_phy_test_enable(struct imx_hdmi *hdmi,
+ unsigned char bit)
+{
+ u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0);
+ val &= ~HDMI_PHY_TST0_TSTEN_MASK;
+ val |= (bit << HDMI_PHY_TST0_TSTEN_OFFSET) &
+ HDMI_PHY_TST0_TSTEN_MASK;
+ hdmi_writeb(hdmi, val, HDMI_PHY_TST0);
+}
+
+static inline void hdmi_phy_test_clock(struct imx_hdmi *hdmi,
+ unsigned char bit)
+{
+ u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0);
+ val &= ~HDMI_PHY_TST0_TSTCLK_MASK;
+ val |= (bit << HDMI_PHY_TST0_TSTCLK_OFFSET) &
+ HDMI_PHY_TST0_TSTCLK_MASK;
+ hdmi_writeb(hdmi, val, HDMI_PHY_TST0);
+}
+
+static inline void hdmi_phy_test_din(struct imx_hdmi *hdmi,
+ unsigned char bit)
+{
+ hdmi_writeb(hdmi, bit, HDMI_PHY_TST1);
+}
+
+static inline void hdmi_phy_test_dout(struct imx_hdmi *hdmi,
+ unsigned char bit)
+{
+ hdmi_writeb(hdmi, bit, HDMI_PHY_TST2);
+}
+
+static bool hdmi_phy_wait_i2c_done(struct imx_hdmi *hdmi, int msec)
+{
+ unsigned char val;
+
+ val = hdmi_readb(hdmi, HDMI_IH_I2CMPHY_STAT0) & 0x3;
+ while (!val) {
+ udelay(1000);
+ if (msec-- == 0)
+ return false;
+ val = hdmi_readb(hdmi, HDMI_IH_I2CMPHY_STAT0) & 0x3;
+ }
+ return true;
+}
+
+static void __hdmi_phy_i2c_write(struct imx_hdmi *hdmi, unsigned short data,
+ unsigned char addr)
+{
+ hdmi_writeb(hdmi, 0xFF, HDMI_IH_I2CMPHY_STAT0);
+ hdmi_writeb(hdmi, addr, HDMI_PHY_I2CM_ADDRESS_ADDR);
+ hdmi_writeb(hdmi, (unsigned char)(data >> 8),
+ HDMI_PHY_I2CM_DATAO_1_ADDR);
+ hdmi_writeb(hdmi, (unsigned char)(data >> 0),
+ HDMI_PHY_I2CM_DATAO_0_ADDR);
+ hdmi_writeb(hdmi, HDMI_PHY_I2CM_OPERATION_ADDR_WRITE,
+ HDMI_PHY_I2CM_OPERATION_ADDR);
+ hdmi_phy_wait_i2c_done(hdmi, 1000);
+}
+
+static int hdmi_phy_i2c_write(struct imx_hdmi *hdmi, unsigned short data,
+ unsigned char addr)
+{
+ __hdmi_phy_i2c_write(hdmi, data, addr);
+ return 0;
+}
+
+static void imx_hdmi_phy_enable_power(struct imx_hdmi *hdmi, u8 enable)
+{
+ hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_PDZ_OFFSET,
+ HDMI_PHY_CONF0_PDZ_MASK);
+}
+
+static void imx_hdmi_phy_enable_tmds(struct imx_hdmi *hdmi, u8 enable)
+{
+ hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_ENTMDS_OFFSET,
+ HDMI_PHY_CONF0_ENTMDS_MASK);
+}
+
+static void imx_hdmi_phy_gen2_pddq(struct imx_hdmi *hdmi, u8 enable)
+{
+ hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET,
+ HDMI_PHY_CONF0_GEN2_PDDQ_MASK);
+}
+
+static void imx_hdmi_phy_gen2_txpwron(struct imx_hdmi *hdmi, u8 enable)
+{
+ hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET,
+ HDMI_PHY_CONF0_GEN2_TXPWRON_MASK);
+}
+
+static void imx_hdmi_phy_sel_data_en_pol(struct imx_hdmi *hdmi, u8 enable)
+{
+ hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_SELDATAENPOL_OFFSET,
+ HDMI_PHY_CONF0_SELDATAENPOL_MASK);
+}
+
+static void imx_hdmi_phy_sel_interface_control(struct imx_hdmi *hdmi, u8 enable)
+{
+ hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_SELDIPIF_OFFSET,
+ HDMI_PHY_CONF0_SELDIPIF_MASK);
+}
+
+static int hdmi_phy_configure(struct imx_hdmi *hdmi, unsigned char prep,
+ unsigned char res, int cscon)
+{
+ u8 val, msec;
+
+ /* color resolution 0 is 8 bit color depth */
+ if (!res)
+ res = 8;
+
+ if (prep)
+ return -EINVAL;
+ else if (res != 8 && res != 12)
+ return -EINVAL;
+
+ /* Enable csc path */
+ if (cscon)
+ val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH;
+ else
+ val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS;
+
+ hdmi_writeb(hdmi, val, HDMI_MC_FLOWCTRL);
+
+ /* gen2 tx power off */
+ imx_hdmi_phy_gen2_txpwron(hdmi, 0);
+
+ /* gen2 pddq */
+ imx_hdmi_phy_gen2_pddq(hdmi, 1);
+
+ /* PHY reset */
+ hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_DEASSERT, HDMI_MC_PHYRSTZ);
+ hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_ASSERT, HDMI_MC_PHYRSTZ);
+
+ hdmi_writeb(hdmi, HDMI_MC_HEACPHY_RST_ASSERT, HDMI_MC_HEACPHY_RST);
+
+ hdmi_phy_test_clear(hdmi, 1);
+ hdmi_writeb(hdmi, HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2,
+ HDMI_PHY_I2CM_SLAVE_ADDR);
+ hdmi_phy_test_clear(hdmi, 0);
+
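+ /*
+ * PLL/MPLL configuration (reg 0x06), GMP control (reg 0x15) and
+ * current control (reg 0x10) are selected below per pixel clock range
+ * and color resolution; the register values are PHY specific.
+ */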
+ if (hdmi->hdmi_data.video_mode.mpixelclock <= 45250000) {
+ switch (res) {
+ case 8:
+ /* PLL/MPLL Cfg */
+ hdmi_phy_i2c_write(hdmi, 0x01e0, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15); /* GMPCTRL */
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x21e1, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x41e2, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 92500000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x0140, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x2141, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x4142, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 148500000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x20a1, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x40a2, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x2001, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x4002, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (hdmi->hdmi_data.video_mode.mpixelclock <= 54000000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); /* CURRCTRL */
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 58400000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 72000000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 74250000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 118800000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 216000000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ dev_err(hdmi->dev,
+ "Pixel clock %d - unsupported by HDMI\n",
+ hdmi->hdmi_data.video_mode.mpixelclock);
+ return -EINVAL;
+ }
+
+ hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */
+ hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
+ /* RESISTANCE TERM 133Ohm Cfg */
+ hdmi_phy_i2c_write(hdmi, 0x0005, 0x19); /* TXTERM */
+ /* PREEMP Cgf 0.00 */
+ hdmi_phy_i2c_write(hdmi, 0x800d, 0x09); /* CKSYMTXCTRL */
+ /* TX/CK LVL 10 */
+ hdmi_phy_i2c_write(hdmi, 0x01ad, 0x0E); /* VLEVCTRL */
+ /* REMOVE CLK TERM */
+ hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */
+
+ imx_hdmi_phy_enable_power(hdmi, 1);
+
+ /* toggle TMDS enable */
+ imx_hdmi_phy_enable_tmds(hdmi, 0);
+ imx_hdmi_phy_enable_tmds(hdmi, 1);
+
+ /* gen2 tx power on */
+ imx_hdmi_phy_gen2_txpwron(hdmi, 1);
+ imx_hdmi_phy_gen2_pddq(hdmi, 0);
+
+ /* Wait for PHY PLL lock */
+ msec = 5;
+ do {
+ val = hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK;
+ if (!val)
+ break;
+
+ if (msec == 0) {
+ dev_err(hdmi->dev, "PHY PLL not locked\n");
+ return -ETIMEDOUT;
+ }
+
+ udelay(1000);
+ msec--;
+ } while (1);
+
+ return 0;
+}
+
+static int imx_hdmi_phy_init(struct imx_hdmi *hdmi)
+{
+ int i, ret;
+ bool cscon = false;
+
+ /* Check whether the CSC needs to be activated in HDMI mode */
+ cscon = (is_color_space_conversion(hdmi) &&
+ !hdmi->hdmi_data.video_mode.mdvi);
+
+ /* HDMI Phy spec says to do the phy initialization sequence twice */
+ for (i = 0; i < 2; i++) {
+ imx_hdmi_phy_sel_data_en_pol(hdmi, 1);
+ imx_hdmi_phy_sel_interface_control(hdmi, 0);
+ imx_hdmi_phy_enable_tmds(hdmi, 0);
+ imx_hdmi_phy_enable_power(hdmi, 0);
+
+ /* Enable CSC */
+ ret = hdmi_phy_configure(hdmi, 0, 8, cscon);
+ if (ret)
+ return ret;
+ }
+
+ hdmi->phy_enabled = true;
+ return 0;
+}
+
+static void hdmi_tx_hdcp_config(struct imx_hdmi *hdmi)
+{
+ u8 de, val;
+
+ if (hdmi->hdmi_data.video_mode.mdataenablepolarity)
+ de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_HIGH;
+ else
+ de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_LOW;
+
+ /* disable rx detect */
+ val = hdmi_readb(hdmi, HDMI_A_HDCPCFG0);
+ val &= ~HDMI_A_HDCPCFG0_RXDETECT_MASK;
+ val |= HDMI_A_HDCPCFG0_RXDETECT_DISABLE;
+ hdmi_writeb(hdmi, val, HDMI_A_HDCPCFG0);
+
+ val = hdmi_readb(hdmi, HDMI_A_VIDPOLCFG);
+ val &= ~HDMI_A_VIDPOLCFG_DATAENPOL_MASK;
+ val |= de;
+ hdmi_writeb(hdmi, val, HDMI_A_VIDPOLCFG);
+
+ val = hdmi_readb(hdmi, HDMI_A_HDCPCFG1);
+ val &= ~HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK;
+ val |= HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_DISABLE;
+ hdmi_writeb(hdmi, val, HDMI_A_HDCPCFG1);
+}
+
+static void hdmi_config_AVI(struct imx_hdmi *hdmi)
+{
+ u8 val, pix_fmt, under_scan;
+ u8 act_ratio, coded_ratio, colorimetry, ext_colorimetry;
+ bool aspect_16_9;
+
+ aspect_16_9 = false; /* FIXME */
+
+ /* AVI Data Byte 1 */
+ if (hdmi->hdmi_data.enc_out_format == YCBCR444)
+ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR444;
+ else if (hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS)
+ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR422;
+ else
+ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_RGB;
+
+ under_scan = HDMI_FC_AVICONF0_SCAN_INFO_NODATA;
+
+ /*
+ * Active format identification data is present in the AVI InfoFrame.
+ * Under scan info, no bar data
+ */
+ val = pix_fmt | under_scan |
+ HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT |
+ HDMI_FC_AVICONF0_BAR_DATA_NO_DATA;
+
+ hdmi_writeb(hdmi, val, HDMI_FC_AVICONF0);
+
+ /* AVI Data Byte 2 - Set the Aspect Ratio */
+ if (aspect_16_9) {
+ act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9;
+ coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9;
+ } else {
+ act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3;
+ coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3;
+ }
+
+ /* Set up colorimetry */
+ if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
+ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
+ if (hdmi->hdmi_data.colorimetry == ITU601)
+ ext_colorimetry =
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+ else /* hdmi->hdmi_data.colorimetry == ITU709 */
+ ext_colorimetry =
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
+ } else if (hdmi->hdmi_data.enc_out_format != RGB) {
+ if (hdmi->hdmi_data.colorimetry == ITU601)
+ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
+ else /* hdmi->hdmi_data.colorimetry == ITU709 */
+ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
+ ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+ } else { /* Carries no data */
+ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA;
+ ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+ }
+
+ val = colorimetry | coded_ratio | act_ratio;
+ hdmi_writeb(hdmi, val, HDMI_FC_AVICONF1);
+
+ /* AVI Data Byte 3 */
+ val = HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA | ext_colorimetry |
+ HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT |
+ HDMI_FC_AVICONF2_SCALING_NONE;
+ hdmi_writeb(hdmi, val, HDMI_FC_AVICONF2);
+
+ /* AVI Data Byte 4 */
+ hdmi_writeb(hdmi, hdmi->vic, HDMI_FC_AVIVID);
+
+ /* AVI Data Byte 5 - set up input and output pixel repetition */
+ val = (((hdmi->hdmi_data.video_mode.mpixelrepetitioninput + 1) <<
+ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_OFFSET) &
+ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK) |
+ ((hdmi->hdmi_data.video_mode.mpixelrepetitionoutput <<
+ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET) &
+ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK);
+ hdmi_writeb(hdmi, val, HDMI_FC_PRCONF);
+
+ /* IT Content and quantization range = don't care */
+ val = HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS |
+ HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED;
+ hdmi_writeb(hdmi, val, HDMI_FC_AVICONF3);
+
+ /* AVI Data Bytes 6-13 */
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB0);
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB1);
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB0);
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB1);
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB0);
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB1);
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB0);
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB1);
+}
+
+static void hdmi_av_composer(struct imx_hdmi *hdmi,
+ const struct drm_display_mode *mode)
+{
+ u8 inv_val;
+ struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
+ int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
+
+ vmode->mhsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PHSYNC);
+ vmode->mvsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PVSYNC);
+ vmode->minterlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ vmode->mpixelclock = mode->clock * 1000;
+
+ dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
+
+ /* Set up HDMI_FC_INVIDCONF */
+ inv_val = (hdmi->hdmi_data.hdcp_enable ?
+ HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
+ HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
+
+ inv_val |= (vmode->mvsyncpolarity ?
+ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH :
+ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW);
+
+ inv_val |= (vmode->mhsyncpolarity ?
+ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH :
+ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW);
+
+ inv_val |= (vmode->mdataenablepolarity ?
+ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH :
+ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_LOW);
+
+ if (hdmi->vic == 39)
+ inv_val |= HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH;
+ else
+ inv_val |= (vmode->minterlaced ?
+ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH :
+ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW);
+
+ inv_val |= (vmode->minterlaced ?
+ HDMI_FC_INVIDCONF_IN_I_P_INTERLACED :
+ HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE);
+
+ inv_val |= (vmode->mdvi ?
+ HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE :
+ HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE);
+
+ hdmi_writeb(hdmi, inv_val, HDMI_FC_INVIDCONF);
+
+ /* Set up horizontal active pixel width */
+ hdmi_writeb(hdmi, mode->hdisplay >> 8, HDMI_FC_INHACTV1);
+ hdmi_writeb(hdmi, mode->hdisplay, HDMI_FC_INHACTV0);
+
+ /* Set up vertical active lines */
+ hdmi_writeb(hdmi, mode->vdisplay >> 8, HDMI_FC_INVACTV1);
+ hdmi_writeb(hdmi, mode->vdisplay, HDMI_FC_INVACTV0);
+
+ /* Set up horizontal blanking pixel region width */
+ hblank = mode->htotal - mode->hdisplay;
+ hdmi_writeb(hdmi, hblank >> 8, HDMI_FC_INHBLANK1);
+ hdmi_writeb(hdmi, hblank, HDMI_FC_INHBLANK0);
+
+ /* Set up vertical blanking pixel region width */
+ vblank = mode->vtotal - mode->vdisplay;
+ hdmi_writeb(hdmi, vblank, HDMI_FC_INVBLANK);
+
+ /* Set up HSYNC active edge delay width (in pixel clks) */
+ h_de_hs = mode->hsync_start - mode->hdisplay;
+ hdmi_writeb(hdmi, h_de_hs >> 8, HDMI_FC_HSYNCINDELAY1);
+ hdmi_writeb(hdmi, h_de_hs, HDMI_FC_HSYNCINDELAY0);
+
+ /* Set up VSYNC active edge delay (in lines) */
+ v_de_vs = mode->vsync_start - mode->vdisplay;
+ hdmi_writeb(hdmi, v_de_vs, HDMI_FC_VSYNCINDELAY);
+
+ /* Set up HSYNC active pulse width (in pixel clks) */
+ hsync_len = mode->hsync_end - mode->hsync_start;
+ hdmi_writeb(hdmi, hsync_len >> 8, HDMI_FC_HSYNCINWIDTH1);
+ hdmi_writeb(hdmi, hsync_len, HDMI_FC_HSYNCINWIDTH0);
+
+ /* Set up VSYNC active edge delay (in lines) */
+ vsync_len = mode->vsync_end - mode->vsync_start;
+ hdmi_writeb(hdmi, vsync_len, HDMI_FC_VSYNCINWIDTH);
+}
+
+static void imx_hdmi_phy_disable(struct imx_hdmi *hdmi)
+{
+ if (!hdmi->phy_enabled)
+ return;
+
+ imx_hdmi_phy_enable_tmds(hdmi, 0);
+ imx_hdmi_phy_enable_power(hdmi, 0);
+
+ hdmi->phy_enabled = false;
+}
+
+/* HDMI Initialization Step B.4 */
+static void imx_hdmi_enable_video_path(struct imx_hdmi *hdmi)
+{
+ u8 clkdis;
+
+ /* control period minimum duration */
+ hdmi_writeb(hdmi, 12, HDMI_FC_CTRLDUR);
+ hdmi_writeb(hdmi, 32, HDMI_FC_EXCTRLDUR);
+ hdmi_writeb(hdmi, 1, HDMI_FC_EXCTRLSPAC);
+
+ /* Set to fill TMDS data channels */
+ hdmi_writeb(hdmi, 0x0B, HDMI_FC_CH0PREAM);
+ hdmi_writeb(hdmi, 0x16, HDMI_FC_CH1PREAM);
+ hdmi_writeb(hdmi, 0x21, HDMI_FC_CH2PREAM);
+
+ /* Enable pixel clock and tmds data path */
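+ /* Start from all clock domains gated (0x7F) and ungate them step by step */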
+ clkdis = 0x7F;
+ clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE;
+ hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+
+ clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
+ hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+
+ /* Enable csc path */
+ if (is_color_space_conversion(hdmi)) {
+ clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
+ hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+ }
+}
+
+static void hdmi_enable_audio_clk(struct imx_hdmi *hdmi)
+{
+ u8 clkdis;
+
+ clkdis = hdmi_readb(hdmi, HDMI_MC_CLKDIS);
+ clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE;
+ hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+}
+
+/* Workaround to clear the overflow condition */
+static void imx_hdmi_clear_overflow(struct imx_hdmi *hdmi)
+{
+ int count;
+ u8 val;
+
+ /* TMDS software reset */
+ hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ);
+
+ val = hdmi_readb(hdmi, HDMI_FC_INVIDCONF);
+ if (hdmi->dev_type == IMX6DL_HDMI) {
+ hdmi_writeb(hdmi, val, HDMI_FC_INVIDCONF);
+ return;
+ }
+
+ for (count = 0; count < 4; count++)
+ hdmi_writeb(hdmi, val, HDMI_FC_INVIDCONF);
+}
+
+static void hdmi_enable_overflow_interrupts(struct imx_hdmi *hdmi)
+{
+ hdmi_writeb(hdmi, 0, HDMI_FC_MASK2);
+ hdmi_writeb(hdmi, 0, HDMI_IH_MUTE_FC_STAT2);
+}
+
+static void hdmi_disable_overflow_interrupts(struct imx_hdmi *hdmi)
+{
+ hdmi_writeb(hdmi, HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK,
+ HDMI_IH_MUTE_FC_STAT2);
+}
+
+static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode)
+{
+ int ret;
+
+ hdmi_disable_overflow_interrupts(hdmi);
+
+ hdmi->vic = drm_match_cea_mode(mode);
+
+ if (!hdmi->vic) {
+ dev_dbg(hdmi->dev, "Non-CEA mode used in HDMI\n");
+ hdmi->hdmi_data.video_mode.mdvi = true;
+ } else {
+ dev_dbg(hdmi->dev, "CEA mode used vic=%d\n", hdmi->vic);
+ hdmi->hdmi_data.video_mode.mdvi = false;
+ }
+
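+ /* SD CEA modes (480i/480p/576i/576p) use ITU601 colorimetry, all others ITU709 */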
+ if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
+ (hdmi->vic == 21) || (hdmi->vic == 22) ||
+ (hdmi->vic == 2) || (hdmi->vic == 3) ||
+ (hdmi->vic == 17) || (hdmi->vic == 18))
+ hdmi->hdmi_data.colorimetry = ITU601;
+ else
+ hdmi->hdmi_data.colorimetry = ITU709;
+
+ if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
+ (hdmi->vic == 12) || (hdmi->vic == 13) ||
+ (hdmi->vic == 14) || (hdmi->vic == 15) ||
+ (hdmi->vic == 25) || (hdmi->vic == 26) ||
+ (hdmi->vic == 27) || (hdmi->vic == 28) ||
+ (hdmi->vic == 29) || (hdmi->vic == 30) ||
+ (hdmi->vic == 35) || (hdmi->vic == 36) ||
+ (hdmi->vic == 37) || (hdmi->vic == 38))
+ hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 1;
+ else
+ hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
+
+ hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
+
+ /* TODO: Get input format from IPU (via FB driver interface) */
+ hdmi->hdmi_data.enc_in_format = RGB;
+
+ hdmi->hdmi_data.enc_out_format = RGB;
+
+ hdmi->hdmi_data.enc_color_depth = 8;
+ hdmi->hdmi_data.pix_repet_factor = 0;
+ hdmi->hdmi_data.hdcp_enable = 0;
+ hdmi->hdmi_data.video_mode.mdataenablepolarity = true;
+
+ /* HDMI Initialization Step B.1 */
+ hdmi_av_composer(hdmi, mode);
+
+ /* HDMI Initialization Step B.2 */
+ ret = imx_hdmi_phy_init(hdmi);
+ if (ret)
+ return ret;
+
+ /* HDMI Initialization Step B.3 */
+ imx_hdmi_enable_video_path(hdmi);
+
+ /* not for DVI mode */
+ if (hdmi->hdmi_data.video_mode.mdvi) {
+ dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
+ } else {
+ dev_dbg(hdmi->dev, "%s CEA mode\n", __func__);
+
+ /* HDMI Initialization Step E - Configure audio */
+ hdmi_clk_regenerator_update_pixel_clock(hdmi);
+ hdmi_enable_audio_clk(hdmi);
+
+ /* HDMI Initialization Step F - Configure AVI InfoFrame */
+ hdmi_config_AVI(hdmi);
+ }
+
+ hdmi_video_packetize(hdmi);
+ hdmi_video_csc(hdmi);
+ hdmi_video_sample(hdmi);
+ hdmi_tx_hdcp_config(hdmi);
+
+ imx_hdmi_clear_overflow(hdmi);
+ if (hdmi->cable_plugin && !hdmi->hdmi_data.video_mode.mdvi)
+ hdmi_enable_overflow_interrupts(hdmi);
+
+ return 0;
+}
+
+/* Wait until we are registered to enable interrupts */
+static int imx_hdmi_fb_registered(struct imx_hdmi *hdmi)
+{
+ hdmi_writeb(hdmi, HDMI_PHY_I2CM_INT_ADDR_DONE_POL,
+ HDMI_PHY_I2CM_INT_ADDR);
+
+ hdmi_writeb(hdmi, HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL |
+ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL,
+ HDMI_PHY_I2CM_CTLINT_ADDR);
+
+ /* enable cable hot plug irq */
+ hdmi_writeb(hdmi, (u8)~HDMI_PHY_HPD, HDMI_PHY_MASK0);
+
+ /* Clear Hotplug interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0);
+
+ /* Unmute interrupts */
+ hdmi_writeb(hdmi, (u8)~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
+
+ return 0;
+}
+
+static void initialize_hdmi_ih_mutes(struct imx_hdmi *hdmi)
+{
+ u8 ih_mute;
+
+ /*
+ * Boot up defaults are:
+ * HDMI_IH_MUTE = 0x03 (disabled)
+ * HDMI_IH_MUTE_* = 0x00 (enabled)
+ *
+ * Disable top level interrupt bits in HDMI block
+ */
+ ih_mute = hdmi_readb(hdmi, HDMI_IH_MUTE) |
+ HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT |
+ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT;
+
+ hdmi_writeb(hdmi, ih_mute, HDMI_IH_MUTE);
+
+ /* by default mask all interrupts */
+ hdmi_writeb(hdmi, 0xff, HDMI_VP_MASK);
+ hdmi_writeb(hdmi, 0xff, HDMI_FC_MASK0);
+ hdmi_writeb(hdmi, 0xff, HDMI_FC_MASK1);
+ hdmi_writeb(hdmi, 0xff, HDMI_FC_MASK2);
+ hdmi_writeb(hdmi, 0xff, HDMI_PHY_MASK0);
+ hdmi_writeb(hdmi, 0xff, HDMI_PHY_I2CM_INT_ADDR);
+ hdmi_writeb(hdmi, 0xff, HDMI_PHY_I2CM_CTLINT_ADDR);
+ hdmi_writeb(hdmi, 0xff, HDMI_AUD_INT);
+ hdmi_writeb(hdmi, 0xff, HDMI_AUD_SPDIFINT);
+ hdmi_writeb(hdmi, 0xff, HDMI_AUD_HBR_MASK);
+ hdmi_writeb(hdmi, 0xff, HDMI_GP_MASK);
+ hdmi_writeb(hdmi, 0xff, HDMI_A_APIINTMSK);
+ hdmi_writeb(hdmi, 0xff, HDMI_CEC_MASK);
+ hdmi_writeb(hdmi, 0xff, HDMI_I2CM_INT);
+ hdmi_writeb(hdmi, 0xff, HDMI_I2CM_CTLINT);
+
+ /* Disable interrupts in the IH_MUTE_* registers */
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_FC_STAT0);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_FC_STAT1);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_FC_STAT2);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_AS_STAT0);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_PHY_STAT0);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_I2CM_STAT0);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_CEC_STAT0);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_VP_STAT0);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_I2CMPHY_STAT0);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
+
+ /* Enable top level interrupt bits in HDMI block */
+ ih_mute &= ~(HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT |
+ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT);
+ hdmi_writeb(hdmi, ih_mute, HDMI_IH_MUTE);
+}
+
+static void imx_hdmi_poweron(struct imx_hdmi *hdmi)
+{
+ imx_hdmi_setup(hdmi, &hdmi->previous_mode);
+}
+
+static void imx_hdmi_poweroff(struct imx_hdmi *hdmi)
+{
+ imx_hdmi_phy_disable(hdmi);
+}
+
+static enum drm_connector_status imx_hdmi_connector_detect(struct drm_connector
+ *connector, bool force)
+{
+ /* FIXME */
+ return connector_status_connected;
+}
+
+static void imx_hdmi_connector_destroy(struct drm_connector *connector)
+{
+}
+
+static int imx_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
+ connector);
+ struct edid *edid;
+ int ret = 0;
+
+ if (!hdmi->ddc)
+ return 0;
+
+ edid = drm_get_edid(connector, hdmi->ddc);
+ if (edid) {
+ dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n",
+ edid->width_cm, edid->height_cm);
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ } else {
+ dev_dbg(hdmi->dev, "failed to get edid\n");
+ }
+
+ return ret;
+}
+
+static int imx_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_encoder *imx_hdmi_connector_best_encoder(struct drm_connector
+ *connector)
+{
+ struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
+ connector);
+
+ return &hdmi->encoder;
+}
+
+static void imx_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+
+ imx_hdmi_setup(hdmi, mode);
+
+ /* Store the display mode for plugin/DPMS poweron events */
+ memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode));
+}
+
+static bool imx_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void imx_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+}
+
+static void imx_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+
+ if (mode)
+ imx_hdmi_poweroff(hdmi);
+ else
+ imx_hdmi_poweron(hdmi);
+}
+
+static void imx_hdmi_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+
+ imx_hdmi_poweroff(hdmi);
+ imx_drm_crtc_panel_format(encoder->crtc, DRM_MODE_ENCODER_NONE,
+ V4L2_PIX_FMT_RGB24);
+}
+
+static void imx_hdmi_encoder_commit(struct drm_encoder *encoder)
+{
+ struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+ int mux = imx_drm_encoder_get_mux_id(hdmi->imx_drm_encoder,
+ encoder->crtc);
+
+ imx_hdmi_set_ipu_di_mux(hdmi, mux);
+
+ imx_hdmi_poweron(hdmi);
+}
+
+static void imx_hdmi_encoder_destroy(struct drm_encoder *encoder)
+{
+}
+
+static struct drm_encoder_funcs imx_hdmi_encoder_funcs = {
+ .destroy = imx_hdmi_encoder_destroy,
+};
+
+static struct drm_encoder_helper_funcs imx_hdmi_encoder_helper_funcs = {
+ .dpms = imx_hdmi_encoder_dpms,
+ .prepare = imx_hdmi_encoder_prepare,
+ .commit = imx_hdmi_encoder_commit,
+ .mode_set = imx_hdmi_encoder_mode_set,
+ .mode_fixup = imx_hdmi_encoder_mode_fixup,
+ .disable = imx_hdmi_encoder_disable,
+};
+
+static struct drm_connector_funcs imx_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = imx_hdmi_connector_detect,
+ .destroy = imx_hdmi_connector_destroy,
+};
+
+static struct drm_connector_helper_funcs imx_hdmi_connector_helper_funcs = {
+ .get_modes = imx_hdmi_connector_get_modes,
+ .mode_valid = imx_hdmi_connector_mode_valid,
+ .best_encoder = imx_hdmi_connector_best_encoder,
+};
+
+static irqreturn_t imx_hdmi_irq(int irq, void *dev_id)
+{
+ struct imx_hdmi *hdmi = dev_id;
+ u8 intr_stat;
+ u8 phy_int_pol;
+ u8 val;
+
+ intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
+
+ phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0);
+
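+ /*
+ * The HPD interrupt fires on the edge selected in HDMI_PHY_POL0, so
+ * flip the polarity on every event to also catch the next
+ * plugin/plugout transition.
+ */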
+ if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
+ if (phy_int_pol & HDMI_PHY_HPD) {
+ dev_dbg(hdmi->dev, "EVENT=plugin\n");
+
+ val = hdmi_readb(hdmi, HDMI_PHY_POL0);
+ val &= ~HDMI_PHY_HPD;
+ hdmi_writeb(hdmi, val, HDMI_PHY_POL0);
+
+ imx_hdmi_poweron(hdmi);
+ } else {
+ dev_dbg(hdmi->dev, "EVENT=plugout\n");
+
+ val = hdmi_readb(hdmi, HDMI_PHY_POL0);
+ val |= HDMI_PHY_HPD;
+ hdmi_writeb(hdmi, val, HDMI_PHY_POL0);
+
+ imx_hdmi_poweroff(hdmi);
+ }
+ }
+
+ hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
+
+ return IRQ_HANDLED;
+}
+
+static int imx_hdmi_register(struct imx_hdmi *hdmi)
+{
+ int ret;
+
+ hdmi->connector.funcs = &imx_hdmi_connector_funcs;
+ hdmi->encoder.funcs = &imx_hdmi_encoder_funcs;
+
+ hdmi->encoder.encoder_type = DRM_MODE_ENCODER_TMDS;
+ hdmi->connector.connector_type = DRM_MODE_CONNECTOR_HDMIA;
+
+ drm_encoder_helper_add(&hdmi->encoder, &imx_hdmi_encoder_helper_funcs);
+ ret = imx_drm_add_encoder(&hdmi->encoder, &hdmi->imx_drm_encoder,
+ THIS_MODULE);
+ if (ret) {
+ dev_err(hdmi->dev, "adding encoder failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_connector_helper_add(&hdmi->connector,
+ &imx_hdmi_connector_helper_funcs);
+
+ ret = imx_drm_add_connector(&hdmi->connector,
+ &hdmi->imx_drm_connector, THIS_MODULE);
+ if (ret) {
+ imx_drm_remove_encoder(hdmi->imx_drm_encoder);
+ dev_err(hdmi->dev, "adding connector failed: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->connector.encoder = &hdmi->encoder;
+
+ drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
+
+ return 0;
+}
+
+static struct platform_device_id imx_hdmi_devtype[] = {
+ {
+ .name = "imx6q-hdmi",
+ .driver_data = IMX6Q_HDMI,
+ }, {
+ .name = "imx6dl-hdmi",
+ .driver_data = IMX6DL_HDMI,
+ }, { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, imx_hdmi_devtype);
+
+static const struct of_device_id imx_hdmi_dt_ids[] = {
+{ .compatible = "fsl,imx6q-hdmi", .data = &imx_hdmi_devtype[IMX6Q_HDMI], },
+{ .compatible = "fsl,imx6dl-hdmi", .data = &imx_hdmi_devtype[IMX6DL_HDMI], },
+{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_hdmi_dt_ids);
+
+static int imx_hdmi_platform_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(imx_hdmi_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *ddc_node;
+ struct imx_hdmi *hdmi;
+ struct resource *iores;
+ int ret, irq;
+
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = &pdev->dev;
+
+ if (of_id) {
+ const struct platform_device_id *device_id = of_id->data;
+ hdmi->dev_type = device_id->driver_data;
+ }
+
+ ddc_node = of_parse_phandle(np, "ddc", 0);
+ if (ddc_node) {
+ hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ if (!hdmi->ddc)
+ dev_dbg(hdmi->dev, "failed to read ddc node\n");
+
+ of_node_put(ddc_node);
+ } else {
+ dev_dbg(hdmi->dev, "no ddc property found\n");
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ ret = devm_request_irq(&pdev->dev, irq, imx_hdmi_irq, 0,
+ dev_name(&pdev->dev), hdmi);
+ if (ret)
+ return ret;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdmi->regs = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
+
+ hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
+ if (IS_ERR(hdmi->regmap))
+ return PTR_ERR(hdmi->regmap);
+
+ hdmi->isfr_clk = devm_clk_get(hdmi->dev, "isfr");
+ if (IS_ERR(hdmi->isfr_clk)) {
+ ret = PTR_ERR(hdmi->isfr_clk);
+ dev_err(hdmi->dev,
+ "Unable to get HDMI isfr clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(hdmi->isfr_clk);
+ if (ret) {
+ dev_err(hdmi->dev,
+ "Cannot enable HDMI isfr clock: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->iahb_clk = devm_clk_get(hdmi->dev, "iahb");
+ if (IS_ERR(hdmi->iahb_clk)) {
+ ret = PTR_ERR(hdmi->iahb_clk);
+ dev_err(hdmi->dev,
+ "Unable to get HDMI iahb clk: %d\n", ret);
+ goto err_isfr;
+ }
+
+ ret = clk_prepare_enable(hdmi->iahb_clk);
+ if (ret) {
+ dev_err(hdmi->dev,
+ "Cannot enable HDMI iahb clock: %d\n", ret);
+ goto err_isfr;
+ }
+
+ /* Product and revision IDs */
+ dev_info(&pdev->dev,
+ "Detected HDMI controller 0x%x:0x%x:0x%x:0x%x\n",
+ hdmi_readb(hdmi, HDMI_DESIGN_ID),
+ hdmi_readb(hdmi, HDMI_REVISION_ID),
+ hdmi_readb(hdmi, HDMI_PRODUCT_ID0),
+ hdmi_readb(hdmi, HDMI_PRODUCT_ID1));
+
+ initialize_hdmi_ih_mutes(hdmi);
+
+ /*
+ * To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator
+ * N and CTS values before enabling the PHY.
+ */
+ hdmi_init_clk_regenerator(hdmi);
+
+ /*
+ * Configure registers related to HDMI interrupt
+ * generation before registering IRQ.
+ */
+ hdmi_writeb(hdmi, HDMI_PHY_HPD, HDMI_PHY_POL0);
+
+ /* Clear Hotplug interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0);
+
+ ret = imx_hdmi_fb_registered(hdmi);
+ if (ret)
+ goto err_iahb;
+
+ ret = imx_hdmi_register(hdmi);
+ if (ret)
+ goto err_iahb;
+
+ imx_drm_encoder_add_possible_crtcs(hdmi->imx_drm_encoder, np);
+
+ platform_set_drvdata(pdev, hdmi);
+
+ return 0;
+
+err_iahb:
+ clk_disable_unprepare(hdmi->iahb_clk);
+err_isfr:
+ clk_disable_unprepare(hdmi->isfr_clk);
+
+ return ret;
+}
+
+static int imx_hdmi_platform_remove(struct platform_device *pdev)
+{
+ struct imx_hdmi *hdmi = platform_get_drvdata(pdev);
+ struct drm_connector *connector = &hdmi->connector;
+ struct drm_encoder *encoder = &hdmi->encoder;
+
+ drm_mode_connector_detach_encoder(connector, encoder);
+ imx_drm_remove_connector(hdmi->imx_drm_connector);
+ imx_drm_remove_encoder(hdmi->imx_drm_encoder);
+
+ clk_disable_unprepare(hdmi->iahb_clk);
+ clk_disable_unprepare(hdmi->isfr_clk);
+ i2c_put_adapter(hdmi->ddc);
+
+ return 0;
+}
+
+static struct platform_driver imx_hdmi_driver = {
+ .probe = imx_hdmi_platform_probe,
+ .remove = imx_hdmi_platform_remove,
+ .driver = {
+ .name = "imx-hdmi",
+ .owner = THIS_MODULE,
+ .of_match_table = imx_hdmi_dt_ids,
+ },
+};
+
+module_platform_driver(imx_hdmi_driver);
+
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("i.MX6 HDMI transmitter driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-hdmi");
diff --git a/drivers/staging/imx-drm/imx-hdmi.h b/drivers/staging/imx-drm/imx-hdmi.h
new file mode 100644
index 0000000..39b6776
--- /dev/null
+++ b/drivers/staging/imx-drm/imx-hdmi.h
@@ -0,0 +1,1032 @@
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __IMX_HDMI_H__
+#define __IMX_HDMI_H__
+
+/* Identification Registers */
+#define HDMI_DESIGN_ID 0x0000
+#define HDMI_REVISION_ID 0x0001
+#define HDMI_PRODUCT_ID0 0x0002
+#define HDMI_PRODUCT_ID1 0x0003
+#define HDMI_CONFIG0_ID 0x0004
+#define HDMI_CONFIG1_ID 0x0005
+#define HDMI_CONFIG2_ID 0x0006
+#define HDMI_CONFIG3_ID 0x0007
+
+/* Interrupt Registers */
+#define HDMI_IH_FC_STAT0 0x0100
+#define HDMI_IH_FC_STAT1 0x0101
+#define HDMI_IH_FC_STAT2 0x0102
+#define HDMI_IH_AS_STAT0 0x0103
+#define HDMI_IH_PHY_STAT0 0x0104
+#define HDMI_IH_I2CM_STAT0 0x0105
+#define HDMI_IH_CEC_STAT0 0x0106
+#define HDMI_IH_VP_STAT0 0x0107
+#define HDMI_IH_I2CMPHY_STAT0 0x0108
+#define HDMI_IH_AHBDMAAUD_STAT0 0x0109
+
+#define HDMI_IH_MUTE_FC_STAT0 0x0180
+#define HDMI_IH_MUTE_FC_STAT1 0x0181
+#define HDMI_IH_MUTE_FC_STAT2 0x0182
+#define HDMI_IH_MUTE_AS_STAT0 0x0183
+#define HDMI_IH_MUTE_PHY_STAT0 0x0184
+#define HDMI_IH_MUTE_I2CM_STAT0 0x0185
+#define HDMI_IH_MUTE_CEC_STAT0 0x0186
+#define HDMI_IH_MUTE_VP_STAT0 0x0187
+#define HDMI_IH_MUTE_I2CMPHY_STAT0 0x0188
+#define HDMI_IH_MUTE_AHBDMAAUD_STAT0 0x0189
+#define HDMI_IH_MUTE 0x01FF
+
+/* Video Sample Registers */
+#define HDMI_TX_INVID0 0x0200
+#define HDMI_TX_INSTUFFING 0x0201
+#define HDMI_TX_GYDATA0 0x0202
+#define HDMI_TX_GYDATA1 0x0203
+#define HDMI_TX_RCRDATA0 0x0204
+#define HDMI_TX_RCRDATA1 0x0205
+#define HDMI_TX_BCBDATA0 0x0206
+#define HDMI_TX_BCBDATA1 0x0207
+
+/* Video Packetizer Registers */
+#define HDMI_VP_STATUS 0x0800
+#define HDMI_VP_PR_CD 0x0801
+#define HDMI_VP_STUFF 0x0802
+#define HDMI_VP_REMAP 0x0803
+#define HDMI_VP_CONF 0x0804
+#define HDMI_VP_STAT 0x0805
+#define HDMI_VP_INT 0x0806
+#define HDMI_VP_MASK 0x0807
+#define HDMI_VP_POL 0x0808
+
+/* Frame Composer Registers */
+#define HDMI_FC_INVIDCONF 0x1000
+#define HDMI_FC_INHACTV0 0x1001
+#define HDMI_FC_INHACTV1 0x1002
+#define HDMI_FC_INHBLANK0 0x1003
+#define HDMI_FC_INHBLANK1 0x1004
+#define HDMI_FC_INVACTV0 0x1005
+#define HDMI_FC_INVACTV1 0x1006
+#define HDMI_FC_INVBLANK 0x1007
+#define HDMI_FC_HSYNCINDELAY0 0x1008
+#define HDMI_FC_HSYNCINDELAY1 0x1009
+#define HDMI_FC_HSYNCINWIDTH0 0x100A
+#define HDMI_FC_HSYNCINWIDTH1 0x100B
+#define HDMI_FC_VSYNCINDELAY 0x100C
+#define HDMI_FC_VSYNCINWIDTH 0x100D
+#define HDMI_FC_INFREQ0 0x100E
+#define HDMI_FC_INFREQ1 0x100F
+#define HDMI_FC_INFREQ2 0x1010
+#define HDMI_FC_CTRLDUR 0x1011
+#define HDMI_FC_EXCTRLDUR 0x1012
+#define HDMI_FC_EXCTRLSPAC 0x1013
+#define HDMI_FC_CH0PREAM 0x1014
+#define HDMI_FC_CH1PREAM 0x1015
+#define HDMI_FC_CH2PREAM 0x1016
+#define HDMI_FC_AVICONF3 0x1017
+#define HDMI_FC_GCP 0x1018
+#define HDMI_FC_AVICONF0 0x1019
+#define HDMI_FC_AVICONF1 0x101A
+#define HDMI_FC_AVICONF2 0x101B
+#define HDMI_FC_AVIVID 0x101C
+#define HDMI_FC_AVIETB0 0x101D
+#define HDMI_FC_AVIETB1 0x101E
+#define HDMI_FC_AVISBB0 0x101F
+#define HDMI_FC_AVISBB1 0x1020
+#define HDMI_FC_AVIELB0 0x1021
+#define HDMI_FC_AVIELB1 0x1022
+#define HDMI_FC_AVISRB0 0x1023
+#define HDMI_FC_AVISRB1 0x1024
+#define HDMI_FC_AUDICONF0 0x1025
+#define HDMI_FC_AUDICONF1 0x1026
+#define HDMI_FC_AUDICONF2 0x1027
+#define HDMI_FC_AUDICONF3 0x1028
+#define HDMI_FC_VSDIEEEID0 0x1029
+#define HDMI_FC_VSDSIZE 0x102A
+#define HDMI_FC_VSDIEEEID1 0x1030
+#define HDMI_FC_VSDIEEEID2 0x1031
+#define HDMI_FC_VSDPAYLOAD0 0x1032
+#define HDMI_FC_VSDPAYLOAD1 0x1033
+#define HDMI_FC_VSDPAYLOAD2 0x1034
+#define HDMI_FC_VSDPAYLOAD3 0x1035
+#define HDMI_FC_VSDPAYLOAD4 0x1036
+#define HDMI_FC_VSDPAYLOAD5 0x1037
+#define HDMI_FC_VSDPAYLOAD6 0x1038
+#define HDMI_FC_VSDPAYLOAD7 0x1039
+#define HDMI_FC_VSDPAYLOAD8 0x103A
+#define HDMI_FC_VSDPAYLOAD9 0x103B
+#define HDMI_FC_VSDPAYLOAD10 0x103C
+#define HDMI_FC_VSDPAYLOAD11 0x103D
+#define HDMI_FC_VSDPAYLOAD12 0x103E
+#define HDMI_FC_VSDPAYLOAD13 0x103F
+#define HDMI_FC_VSDPAYLOAD14 0x1040
+#define HDMI_FC_VSDPAYLOAD15 0x1041
+#define HDMI_FC_VSDPAYLOAD16 0x1042
+#define HDMI_FC_VSDPAYLOAD17 0x1043
+#define HDMI_FC_VSDPAYLOAD18 0x1044
+#define HDMI_FC_VSDPAYLOAD19 0x1045
+#define HDMI_FC_VSDPAYLOAD20 0x1046
+#define HDMI_FC_VSDPAYLOAD21 0x1047
+#define HDMI_FC_VSDPAYLOAD22 0x1048
+#define HDMI_FC_VSDPAYLOAD23 0x1049
+#define HDMI_FC_SPDVENDORNAME0 0x104A
+#define HDMI_FC_SPDVENDORNAME1 0x104B
+#define HDMI_FC_SPDVENDORNAME2 0x104C
+#define HDMI_FC_SPDVENDORNAME3 0x104D
+#define HDMI_FC_SPDVENDORNAME4 0x104E
+#define HDMI_FC_SPDVENDORNAME5 0x104F
+#define HDMI_FC_SPDVENDORNAME6 0x1050
+#define HDMI_FC_SPDVENDORNAME7 0x1051
+#define HDMI_FC_SDPPRODUCTNAME0 0x1052
+#define HDMI_FC_SDPPRODUCTNAME1 0x1053
+#define HDMI_FC_SDPPRODUCTNAME2 0x1054
+#define HDMI_FC_SDPPRODUCTNAME3 0x1055
+#define HDMI_FC_SDPPRODUCTNAME4 0x1056
+#define HDMI_FC_SDPPRODUCTNAME5 0x1057
+#define HDMI_FC_SDPPRODUCTNAME6 0x1058
+#define HDMI_FC_SDPPRODUCTNAME7 0x1059
+#define HDMI_FC_SDPPRODUCTNAME8 0x105A
+#define HDMI_FC_SDPPRODUCTNAME9 0x105B
+#define HDMI_FC_SDPPRODUCTNAME10 0x105C
+#define HDMI_FC_SDPPRODUCTNAME11 0x105D
+#define HDMI_FC_SDPPRODUCTNAME12 0x105E
+#define HDMI_FC_SDPPRODUCTNAME13 0x105F
+#define HDMI_FC_SDPPRODUCTNAME14 0x1060
+#define HDMI_FC_SPDPRODUCTNAME15 0x1061
+#define HDMI_FC_SPDDEVICEINF 0x1062
+#define HDMI_FC_AUDSCONF 0x1063
+#define HDMI_FC_AUDSSTAT 0x1064
+#define HDMI_FC_DATACH0FILL 0x1070
+#define HDMI_FC_DATACH1FILL 0x1071
+#define HDMI_FC_DATACH2FILL 0x1072
+#define HDMI_FC_CTRLQHIGH 0x1073
+#define HDMI_FC_CTRLQLOW 0x1074
+#define HDMI_FC_ACP0 0x1075
+#define HDMI_FC_ACP28 0x1076
+#define HDMI_FC_ACP27 0x1077
+#define HDMI_FC_ACP26 0x1078
+#define HDMI_FC_ACP25 0x1079
+#define HDMI_FC_ACP24 0x107A
+#define HDMI_FC_ACP23 0x107B
+#define HDMI_FC_ACP22 0x107C
+#define HDMI_FC_ACP21 0x107D
+#define HDMI_FC_ACP20 0x107E
+#define HDMI_FC_ACP19 0x107F
+#define HDMI_FC_ACP18 0x1080
+#define HDMI_FC_ACP17 0x1081
+#define HDMI_FC_ACP16 0x1082
+#define HDMI_FC_ACP15 0x1083
+#define HDMI_FC_ACP14 0x1084
+#define HDMI_FC_ACP13 0x1085
+#define HDMI_FC_ACP12 0x1086
+#define HDMI_FC_ACP11 0x1087
+#define HDMI_FC_ACP10 0x1088
+#define HDMI_FC_ACP9 0x1089
+#define HDMI_FC_ACP8 0x108A
+#define HDMI_FC_ACP7 0x108B
+#define HDMI_FC_ACP6 0x108C
+#define HDMI_FC_ACP5 0x108D
+#define HDMI_FC_ACP4 0x108E
+#define HDMI_FC_ACP3 0x108F
+#define HDMI_FC_ACP2 0x1090
+#define HDMI_FC_ACP1 0x1091
+#define HDMI_FC_ISCR1_0 0x1092
+#define HDMI_FC_ISCR1_16 0x1093
+#define HDMI_FC_ISCR1_15 0x1094
+#define HDMI_FC_ISCR1_14 0x1095
+#define HDMI_FC_ISCR1_13 0x1096
+#define HDMI_FC_ISCR1_12 0x1097
+#define HDMI_FC_ISCR1_11 0x1098
+#define HDMI_FC_ISCR1_10 0x1099
+#define HDMI_FC_ISCR1_9 0x109A
+#define HDMI_FC_ISCR1_8 0x109B
+#define HDMI_FC_ISCR1_7 0x109C
+#define HDMI_FC_ISCR1_6 0x109D
+#define HDMI_FC_ISCR1_5 0x109E
+#define HDMI_FC_ISCR1_4 0x109F
+#define HDMI_FC_ISCR1_3 0x10A0
+#define HDMI_FC_ISCR1_2 0x10A1
+#define HDMI_FC_ISCR1_1 0x10A2
+#define HDMI_FC_ISCR2_15 0x10A3
+#define HDMI_FC_ISCR2_14 0x10A4
+#define HDMI_FC_ISCR2_13 0x10A5
+#define HDMI_FC_ISCR2_12 0x10A6
+#define HDMI_FC_ISCR2_11 0x10A7
+#define HDMI_FC_ISCR2_10 0x10A8
+#define HDMI_FC_ISCR2_9 0x10A9
+#define HDMI_FC_ISCR2_8 0x10AA
+#define HDMI_FC_ISCR2_7 0x10AB
+#define HDMI_FC_ISCR2_6 0x10AC
+#define HDMI_FC_ISCR2_5 0x10AD
+#define HDMI_FC_ISCR2_4 0x10AE
+#define HDMI_FC_ISCR2_3 0x10AF
+#define HDMI_FC_ISCR2_2 0x10B0
+#define HDMI_FC_ISCR2_1 0x10B1
+#define HDMI_FC_ISCR2_0 0x10B2
+#define HDMI_FC_DATAUTO0 0x10B3
+#define HDMI_FC_DATAUTO1 0x10B4
+#define HDMI_FC_DATAUTO2 0x10B5
+#define HDMI_FC_DATMAN 0x10B6
+#define HDMI_FC_DATAUTO3 0x10B7
+#define HDMI_FC_RDRB0 0x10B8
+#define HDMI_FC_RDRB1 0x10B9
+#define HDMI_FC_RDRB2 0x10BA
+#define HDMI_FC_RDRB3 0x10BB
+#define HDMI_FC_RDRB4 0x10BC
+#define HDMI_FC_RDRB5 0x10BD
+#define HDMI_FC_RDRB6 0x10BE
+#define HDMI_FC_RDRB7 0x10BF
+#define HDMI_FC_STAT0 0x10D0
+#define HDMI_FC_INT0 0x10D1
+#define HDMI_FC_MASK0 0x10D2
+#define HDMI_FC_POL0 0x10D3
+#define HDMI_FC_STAT1 0x10D4
+#define HDMI_FC_INT1 0x10D5
+#define HDMI_FC_MASK1 0x10D6
+#define HDMI_FC_POL1 0x10D7
+#define HDMI_FC_STAT2 0x10D8
+#define HDMI_FC_INT2 0x10D9
+#define HDMI_FC_MASK2 0x10DA
+#define HDMI_FC_POL2 0x10DB
+#define HDMI_FC_PRCONF 0x10E0
+
+#define HDMI_FC_GMD_STAT 0x1100
+#define HDMI_FC_GMD_EN 0x1101
+#define HDMI_FC_GMD_UP 0x1102
+#define HDMI_FC_GMD_CONF 0x1103
+#define HDMI_FC_GMD_HB 0x1104
+#define HDMI_FC_GMD_PB0 0x1105
+#define HDMI_FC_GMD_PB1 0x1106
+#define HDMI_FC_GMD_PB2 0x1107
+#define HDMI_FC_GMD_PB3 0x1108
+#define HDMI_FC_GMD_PB4 0x1109
+#define HDMI_FC_GMD_PB5 0x110A
+#define HDMI_FC_GMD_PB6 0x110B
+#define HDMI_FC_GMD_PB7 0x110C
+#define HDMI_FC_GMD_PB8 0x110D
+#define HDMI_FC_GMD_PB9 0x110E
+#define HDMI_FC_GMD_PB10 0x110F
+#define HDMI_FC_GMD_PB11 0x1110
+#define HDMI_FC_GMD_PB12 0x1111
+#define HDMI_FC_GMD_PB13 0x1112
+#define HDMI_FC_GMD_PB14 0x1113
+#define HDMI_FC_GMD_PB15 0x1114
+#define HDMI_FC_GMD_PB16 0x1115
+#define HDMI_FC_GMD_PB17 0x1116
+#define HDMI_FC_GMD_PB18 0x1117
+#define HDMI_FC_GMD_PB19 0x1118
+#define HDMI_FC_GMD_PB20 0x1119
+#define HDMI_FC_GMD_PB21 0x111A
+#define HDMI_FC_GMD_PB22 0x111B
+#define HDMI_FC_GMD_PB23 0x111C
+#define HDMI_FC_GMD_PB24 0x111D
+#define HDMI_FC_GMD_PB25 0x111E
+#define HDMI_FC_GMD_PB26 0x111F
+#define HDMI_FC_GMD_PB27 0x1120
+
+#define HDMI_FC_DBGFORCE 0x1200
+#define HDMI_FC_DBGAUD0CH0 0x1201
+#define HDMI_FC_DBGAUD1CH0 0x1202
+#define HDMI_FC_DBGAUD2CH0 0x1203
+#define HDMI_FC_DBGAUD0CH1 0x1204
+#define HDMI_FC_DBGAUD1CH1 0x1205
+#define HDMI_FC_DBGAUD2CH1 0x1206
+#define HDMI_FC_DBGAUD0CH2 0x1207
+#define HDMI_FC_DBGAUD1CH2 0x1208
+#define HDMI_FC_DBGAUD2CH2 0x1209
+#define HDMI_FC_DBGAUD0CH3 0x120A
+#define HDMI_FC_DBGAUD1CH3 0x120B
+#define HDMI_FC_DBGAUD2CH3 0x120C
+#define HDMI_FC_DBGAUD0CH4 0x120D
+#define HDMI_FC_DBGAUD1CH4 0x120E
+#define HDMI_FC_DBGAUD2CH4 0x120F
+#define HDMI_FC_DBGAUD0CH5 0x1210
+#define HDMI_FC_DBGAUD1CH5 0x1211
+#define HDMI_FC_DBGAUD2CH5 0x1212
+#define HDMI_FC_DBGAUD0CH6 0x1213
+#define HDMI_FC_DBGAUD1CH6 0x1214
+#define HDMI_FC_DBGAUD2CH6 0x1215
+#define HDMI_FC_DBGAUD0CH7 0x1216
+#define HDMI_FC_DBGAUD1CH7 0x1217
+#define HDMI_FC_DBGAUD2CH7 0x1218
+#define HDMI_FC_DBGTMDS0 0x1219
+#define HDMI_FC_DBGTMDS1 0x121A
+#define HDMI_FC_DBGTMDS2 0x121B
+
+/* HDMI Source PHY Registers */
+#define HDMI_PHY_CONF0 0x3000
+#define HDMI_PHY_TST0 0x3001
+#define HDMI_PHY_TST1 0x3002
+#define HDMI_PHY_TST2 0x3003
+#define HDMI_PHY_STAT0 0x3004
+#define HDMI_PHY_INT0 0x3005
+#define HDMI_PHY_MASK0 0x3006
+#define HDMI_PHY_POL0 0x3007
+
+/* HDMI Master PHY Registers */
+#define HDMI_PHY_I2CM_SLAVE_ADDR 0x3020
+#define HDMI_PHY_I2CM_ADDRESS_ADDR 0x3021
+#define HDMI_PHY_I2CM_DATAO_1_ADDR 0x3022
+#define HDMI_PHY_I2CM_DATAO_0_ADDR 0x3023
+#define HDMI_PHY_I2CM_DATAI_1_ADDR 0x3024
+#define HDMI_PHY_I2CM_DATAI_0_ADDR 0x3025
+#define HDMI_PHY_I2CM_OPERATION_ADDR 0x3026
+#define HDMI_PHY_I2CM_INT_ADDR 0x3027
+#define HDMI_PHY_I2CM_CTLINT_ADDR 0x3028
+#define HDMI_PHY_I2CM_DIV_ADDR 0x3029
+#define HDMI_PHY_I2CM_SOFTRSTZ_ADDR 0x302a
+#define HDMI_PHY_I2CM_SS_SCL_HCNT_1_ADDR 0x302b
+#define HDMI_PHY_I2CM_SS_SCL_HCNT_0_ADDR 0x302c
+#define HDMI_PHY_I2CM_SS_SCL_LCNT_1_ADDR 0x302d
+#define HDMI_PHY_I2CM_SS_SCL_LCNT_0_ADDR 0x302e
+#define HDMI_PHY_I2CM_FS_SCL_HCNT_1_ADDR 0x302f
+#define HDMI_PHY_I2CM_FS_SCL_HCNT_0_ADDR 0x3030
+#define HDMI_PHY_I2CM_FS_SCL_LCNT_1_ADDR 0x3031
+#define HDMI_PHY_I2CM_FS_SCL_LCNT_0_ADDR 0x3032
+
+/* Audio Sampler Registers */
+#define HDMI_AUD_CONF0 0x3100
+#define HDMI_AUD_CONF1 0x3101
+#define HDMI_AUD_INT 0x3102
+#define HDMI_AUD_CONF2 0x3103
+#define HDMI_AUD_N1 0x3200
+#define HDMI_AUD_N2 0x3201
+#define HDMI_AUD_N3 0x3202
+#define HDMI_AUD_CTS1 0x3203
+#define HDMI_AUD_CTS2 0x3204
+#define HDMI_AUD_CTS3 0x3205
+#define HDMI_AUD_INPUTCLKFS 0x3206
+#define HDMI_AUD_SPDIFINT 0x3302
+#define HDMI_AUD_CONF0_HBR 0x3400
+#define HDMI_AUD_HBR_STATUS 0x3401
+#define HDMI_AUD_HBR_INT 0x3402
+#define HDMI_AUD_HBR_POL 0x3403
+#define HDMI_AUD_HBR_MASK 0x3404
+
+/*
+ * Generic Parallel Audio Interface Registers
+ * Not used as GPAUD interface is not enabled in hw
+ */
+#define HDMI_GP_CONF0 0x3500
+#define HDMI_GP_CONF1 0x3501
+#define HDMI_GP_CONF2 0x3502
+#define HDMI_GP_STAT 0x3503
+#define HDMI_GP_INT 0x3504
+#define HDMI_GP_MASK 0x3505
+#define HDMI_GP_POL 0x3506
+
+/* Audio DMA Registers */
+#define HDMI_AHB_DMA_CONF0 0x3600
+#define HDMI_AHB_DMA_START 0x3601
+#define HDMI_AHB_DMA_STOP 0x3602
+#define HDMI_AHB_DMA_THRSLD 0x3603
+#define HDMI_AHB_DMA_STRADDR0 0x3604
+#define HDMI_AHB_DMA_STRADDR1 0x3605
+#define HDMI_AHB_DMA_STRADDR2 0x3606
+#define HDMI_AHB_DMA_STRADDR3 0x3607
+#define HDMI_AHB_DMA_STPADDR0 0x3608
+#define HDMI_AHB_DMA_STPADDR1 0x3609
+#define HDMI_AHB_DMA_STPADDR2 0x360a
+#define HDMI_AHB_DMA_STPADDR3 0x360b
+#define HDMI_AHB_DMA_BSTADDR0 0x360c
+#define HDMI_AHB_DMA_BSTADDR1 0x360d
+#define HDMI_AHB_DMA_BSTADDR2 0x360e
+#define HDMI_AHB_DMA_BSTADDR3 0x360f
+#define HDMI_AHB_DMA_MBLENGTH0 0x3610
+#define HDMI_AHB_DMA_MBLENGTH1 0x3611
+#define HDMI_AHB_DMA_STAT 0x3612
+#define HDMI_AHB_DMA_INT 0x3613
+#define HDMI_AHB_DMA_MASK 0x3614
+#define HDMI_AHB_DMA_POL 0x3615
+#define HDMI_AHB_DMA_CONF1 0x3616
+#define HDMI_AHB_DMA_BUFFSTAT 0x3617
+#define HDMI_AHB_DMA_BUFFINT 0x3618
+#define HDMI_AHB_DMA_BUFFMASK 0x3619
+#define HDMI_AHB_DMA_BUFFPOL 0x361a
+
+/* Main Controller Registers */
+#define HDMI_MC_SFRDIV 0x4000
+#define HDMI_MC_CLKDIS 0x4001
+#define HDMI_MC_SWRSTZ 0x4002
+#define HDMI_MC_OPCTRL 0x4003
+#define HDMI_MC_FLOWCTRL 0x4004
+#define HDMI_MC_PHYRSTZ 0x4005
+#define HDMI_MC_LOCKONCLOCK 0x4006
+#define HDMI_MC_HEACPHY_RST 0x4007
+
+/* Color Space Converter Registers */
+#define HDMI_CSC_CFG 0x4100
+#define HDMI_CSC_SCALE 0x4101
+#define HDMI_CSC_COEF_A1_MSB 0x4102
+#define HDMI_CSC_COEF_A1_LSB 0x4103
+#define HDMI_CSC_COEF_A2_MSB 0x4104
+#define HDMI_CSC_COEF_A2_LSB 0x4105
+#define HDMI_CSC_COEF_A3_MSB 0x4106
+#define HDMI_CSC_COEF_A3_LSB 0x4107
+#define HDMI_CSC_COEF_A4_MSB 0x4108
+#define HDMI_CSC_COEF_A4_LSB 0x4109
+#define HDMI_CSC_COEF_B1_MSB 0x410A
+#define HDMI_CSC_COEF_B1_LSB 0x410B
+#define HDMI_CSC_COEF_B2_MSB 0x410C
+#define HDMI_CSC_COEF_B2_LSB 0x410D
+#define HDMI_CSC_COEF_B3_MSB 0x410E
+#define HDMI_CSC_COEF_B3_LSB 0x410F
+#define HDMI_CSC_COEF_B4_MSB 0x4110
+#define HDMI_CSC_COEF_B4_LSB 0x4111
+#define HDMI_CSC_COEF_C1_MSB 0x4112
+#define HDMI_CSC_COEF_C1_LSB 0x4113
+#define HDMI_CSC_COEF_C2_MSB 0x4114
+#define HDMI_CSC_COEF_C2_LSB 0x4115
+#define HDMI_CSC_COEF_C3_MSB 0x4116
+#define HDMI_CSC_COEF_C3_LSB 0x4117
+#define HDMI_CSC_COEF_C4_MSB 0x4118
+#define HDMI_CSC_COEF_C4_LSB 0x4119
+
+/* HDCP Encryption Engine Registers */
+#define HDMI_A_HDCPCFG0 0x5000
+#define HDMI_A_HDCPCFG1 0x5001
+#define HDMI_A_HDCPOBS0 0x5002
+#define HDMI_A_HDCPOBS1 0x5003
+#define HDMI_A_HDCPOBS2 0x5004
+#define HDMI_A_HDCPOBS3 0x5005
+#define HDMI_A_APIINTCLR 0x5006
+#define HDMI_A_APIINTSTAT 0x5007
+#define HDMI_A_APIINTMSK 0x5008
+#define HDMI_A_VIDPOLCFG 0x5009
+#define HDMI_A_OESSWCFG 0x500A
+#define HDMI_A_TIMER1SETUP0 0x500B
+#define HDMI_A_TIMER1SETUP1 0x500C
+#define HDMI_A_TIMER2SETUP0 0x500D
+#define HDMI_A_TIMER2SETUP1 0x500E
+#define HDMI_A_100MSCFG 0x500F
+#define HDMI_A_2SCFG0 0x5010
+#define HDMI_A_2SCFG1 0x5011
+#define HDMI_A_5SCFG0 0x5012
+#define HDMI_A_5SCFG1 0x5013
+#define HDMI_A_SRMVERLSB 0x5014
+#define HDMI_A_SRMVERMSB 0x5015
+#define HDMI_A_SRMCTRL 0x5016
+#define HDMI_A_SFRSETUP 0x5017
+#define HDMI_A_I2CHSETUP 0x5018
+#define HDMI_A_INTSETUP 0x5019
+#define HDMI_A_PRESETUP 0x501A
+#define HDMI_A_SRM_BASE 0x5020
+
+/* CEC Engine Registers */
+#define HDMI_CEC_CTRL 0x7D00
+#define HDMI_CEC_STAT 0x7D01
+#define HDMI_CEC_MASK 0x7D02
+#define HDMI_CEC_POLARITY 0x7D03
+#define HDMI_CEC_INT 0x7D04
+#define HDMI_CEC_ADDR_L 0x7D05
+#define HDMI_CEC_ADDR_H 0x7D06
+#define HDMI_CEC_TX_CNT 0x7D07
+#define HDMI_CEC_RX_CNT 0x7D08
+#define HDMI_CEC_TX_DATA0 0x7D10
+#define HDMI_CEC_TX_DATA1 0x7D11
+#define HDMI_CEC_TX_DATA2 0x7D12
+#define HDMI_CEC_TX_DATA3 0x7D13
+#define HDMI_CEC_TX_DATA4 0x7D14
+#define HDMI_CEC_TX_DATA5 0x7D15
+#define HDMI_CEC_TX_DATA6 0x7D16
+#define HDMI_CEC_TX_DATA7 0x7D17
+#define HDMI_CEC_TX_DATA8 0x7D18
+#define HDMI_CEC_TX_DATA9 0x7D19
+#define HDMI_CEC_TX_DATA10 0x7D1a
+#define HDMI_CEC_TX_DATA11 0x7D1b
+#define HDMI_CEC_TX_DATA12 0x7D1c
+#define HDMI_CEC_TX_DATA13 0x7D1d
+#define HDMI_CEC_TX_DATA14 0x7D1e
+#define HDMI_CEC_TX_DATA15 0x7D1f
+#define HDMI_CEC_RX_DATA0 0x7D20
+#define HDMI_CEC_RX_DATA1 0x7D21
+#define HDMI_CEC_RX_DATA2 0x7D22
+#define HDMI_CEC_RX_DATA3 0x7D23
+#define HDMI_CEC_RX_DATA4 0x7D24
+#define HDMI_CEC_RX_DATA5 0x7D25
+#define HDMI_CEC_RX_DATA6 0x7D26
+#define HDMI_CEC_RX_DATA7 0x7D27
+#define HDMI_CEC_RX_DATA8 0x7D28
+#define HDMI_CEC_RX_DATA9 0x7D29
+#define HDMI_CEC_RX_DATA10 0x7D2a
+#define HDMI_CEC_RX_DATA11 0x7D2b
+#define HDMI_CEC_RX_DATA12 0x7D2c
+#define HDMI_CEC_RX_DATA13 0x7D2d
+#define HDMI_CEC_RX_DATA14 0x7D2e
+#define HDMI_CEC_RX_DATA15 0x7D2f
+#define HDMI_CEC_LOCK 0x7D30
+#define HDMI_CEC_WKUPCTRL 0x7D31
+
+/* I2C Master Registers (E-DDC) */
+#define HDMI_I2CM_SLAVE 0x7E00
+#define HDMI_I2CMESS 0x7E01
+#define HDMI_I2CM_DATAO 0x7E02
+#define HDMI_I2CM_DATAI 0x7E03
+#define HDMI_I2CM_OPERATION 0x7E04
+#define HDMI_I2CM_INT 0x7E05
+#define HDMI_I2CM_CTLINT 0x7E06
+#define HDMI_I2CM_DIV 0x7E07
+#define HDMI_I2CM_SEGADDR 0x7E08
+#define HDMI_I2CM_SOFTRSTZ 0x7E09
+#define HDMI_I2CM_SEGPTR 0x7E0A
+#define HDMI_I2CM_SS_SCL_HCNT_1_ADDR 0x7E0B
+#define HDMI_I2CM_SS_SCL_HCNT_0_ADDR 0x7E0C
+#define HDMI_I2CM_SS_SCL_LCNT_1_ADDR 0x7E0D
+#define HDMI_I2CM_SS_SCL_LCNT_0_ADDR 0x7E0E
+#define HDMI_I2CM_FS_SCL_HCNT_1_ADDR 0x7E0F
+#define HDMI_I2CM_FS_SCL_HCNT_0_ADDR 0x7E10
+#define HDMI_I2CM_FS_SCL_LCNT_1_ADDR 0x7E11
+#define HDMI_I2CM_FS_SCL_LCNT_0_ADDR 0x7E12
+
+enum {
+/* IH_FC_INT2 field values */
+ HDMI_IH_FC_INT2_OVERFLOW_MASK = 0x03,
+ HDMI_IH_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02,
+ HDMI_IH_FC_INT2_HIGH_PRIORITY_OVERFLOW = 0x01,
+
+/* IH_FC_STAT2 field values */
+ HDMI_IH_FC_STAT2_OVERFLOW_MASK = 0x03,
+ HDMI_IH_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
+ HDMI_IH_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
+
+/* IH_PHY_STAT0 field values */
+ HDMI_IH_PHY_STAT0_RX_SENSE3 = 0x20,
+ HDMI_IH_PHY_STAT0_RX_SENSE2 = 0x10,
+ HDMI_IH_PHY_STAT0_RX_SENSE1 = 0x8,
+ HDMI_IH_PHY_STAT0_RX_SENSE0 = 0x4,
+ HDMI_IH_PHY_STAT0_TX_PHY_LOCK = 0x2,
+ HDMI_IH_PHY_STAT0_HPD = 0x1,
+
+/* IH_MUTE_I2CMPHY_STAT0 field values */
+ HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYDONE = 0x2,
+ HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYERROR = 0x1,
+
+/* IH_AHBDMAAUD_STAT0 field values */
+ HDMI_IH_AHBDMAAUD_STAT0_ERROR = 0x20,
+ HDMI_IH_AHBDMAAUD_STAT0_LOST = 0x10,
+ HDMI_IH_AHBDMAAUD_STAT0_RETRY = 0x08,
+ HDMI_IH_AHBDMAAUD_STAT0_DONE = 0x04,
+ HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL = 0x02,
+ HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY = 0x01,
+
+/* IH_MUTE_FC_STAT2 field values */
+ HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK = 0x03,
+ HDMI_IH_MUTE_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
+ HDMI_IH_MUTE_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
+
+/* IH_MUTE_AHBDMAAUD_STAT0 field values */
+ HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR = 0x20,
+ HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST = 0x10,
+ HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY = 0x08,
+ HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE = 0x04,
+ HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL = 0x02,
+ HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY = 0x01,
+
+/* IH_MUTE field values */
+ HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT = 0x2,
+ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT = 0x1,
+
+/* TX_INVID0 field values */
+ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_MASK = 0x80,
+ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_ENABLE = 0x80,
+ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE = 0x00,
+ HDMI_TX_INVID0_VIDEO_MAPPING_MASK = 0x1F,
+ HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET = 0,
+
+/* TX_INSTUFFING field values */
+ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_MASK = 0x4,
+ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE = 0x4,
+ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_DISABLE = 0x0,
+ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_MASK = 0x2,
+ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE = 0x2,
+ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_DISABLE = 0x0,
+ HDMI_TX_INSTUFFING_GYDATA_STUFFING_MASK = 0x1,
+ HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE = 0x1,
+ HDMI_TX_INSTUFFING_GYDATA_STUFFING_DISABLE = 0x0,
+
+/* VP_PR_CD field values */
+ HDMI_VP_PR_CD_COLOR_DEPTH_MASK = 0xF0,
+ HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET = 4,
+ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK = 0x0F,
+ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET = 0,
+
+/* VP_STUFF field values */
+ HDMI_VP_STUFF_IDEFAULT_PHASE_MASK = 0x20,
+ HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET = 5,
+ HDMI_VP_STUFF_IFIX_PP_TO_LAST_MASK = 0x10,
+ HDMI_VP_STUFF_IFIX_PP_TO_LAST_OFFSET = 4,
+ HDMI_VP_STUFF_ICX_GOTO_P0_ST_MASK = 0x8,
+ HDMI_VP_STUFF_ICX_GOTO_P0_ST_OFFSET = 3,
+ HDMI_VP_STUFF_YCC422_STUFFING_MASK = 0x4,
+ HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE = 0x4,
+ HDMI_VP_STUFF_YCC422_STUFFING_DIRECT_MODE = 0x0,
+ HDMI_VP_STUFF_PP_STUFFING_MASK = 0x2,
+ HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE = 0x2,
+ HDMI_VP_STUFF_PP_STUFFING_DIRECT_MODE = 0x0,
+ HDMI_VP_STUFF_PR_STUFFING_MASK = 0x1,
+ HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE = 0x1,
+ HDMI_VP_STUFF_PR_STUFFING_DIRECT_MODE = 0x0,
+
+/* VP_CONF field values */
+ HDMI_VP_CONF_BYPASS_EN_MASK = 0x40,
+ HDMI_VP_CONF_BYPASS_EN_ENABLE = 0x40,
+ HDMI_VP_CONF_BYPASS_EN_DISABLE = 0x00,
+ HDMI_VP_CONF_PP_EN_ENMASK = 0x20,
+ HDMI_VP_CONF_PP_EN_ENABLE = 0x20,
+ HDMI_VP_CONF_PP_EN_DISABLE = 0x00,
+ HDMI_VP_CONF_PR_EN_MASK = 0x10,
+ HDMI_VP_CONF_PR_EN_ENABLE = 0x10,
+ HDMI_VP_CONF_PR_EN_DISABLE = 0x00,
+ HDMI_VP_CONF_YCC422_EN_MASK = 0x8,
+ HDMI_VP_CONF_YCC422_EN_ENABLE = 0x8,
+ HDMI_VP_CONF_YCC422_EN_DISABLE = 0x0,
+ HDMI_VP_CONF_BYPASS_SELECT_MASK = 0x4,
+ HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER = 0x4,
+ HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER = 0x0,
+ HDMI_VP_CONF_OUTPUT_SELECTOR_MASK = 0x3,
+ HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS = 0x3,
+ HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422 = 0x1,
+ HDMI_VP_CONF_OUTPUT_SELECTOR_PP = 0x0,
+
+/* VP_REMAP field values */
+ HDMI_VP_REMAP_MASK = 0x3,
+ HDMI_VP_REMAP_YCC422_24bit = 0x2,
+ HDMI_VP_REMAP_YCC422_20bit = 0x1,
+ HDMI_VP_REMAP_YCC422_16bit = 0x0,
+
+/* FC_INVIDCONF field values */
+ HDMI_FC_INVIDCONF_HDCP_KEEPOUT_MASK = 0x80,
+ HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE = 0x80,
+ HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE = 0x00,
+ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_MASK = 0x40,
+ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH = 0x40,
+ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW = 0x00,
+ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_MASK = 0x20,
+ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH = 0x20,
+ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW = 0x00,
+ HDMI_FC_INVIDCONF_DE_IN_POLARITY_MASK = 0x10,
+ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH = 0x10,
+ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_LOW = 0x00,
+ HDMI_FC_INVIDCONF_DVI_MODEZ_MASK = 0x8,
+ HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE = 0x8,
+ HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE = 0x0,
+ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_MASK = 0x2,
+ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH = 0x2,
+ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW = 0x0,
+ HDMI_FC_INVIDCONF_IN_I_P_MASK = 0x1,
+ HDMI_FC_INVIDCONF_IN_I_P_INTERLACED = 0x1,
+ HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE = 0x0,
+
+/* FC_AUDICONF0 field values */
+ HDMI_FC_AUDICONF0_CC_OFFSET = 4,
+ HDMI_FC_AUDICONF0_CC_MASK = 0x70,
+ HDMI_FC_AUDICONF0_CT_OFFSET = 0,
+ HDMI_FC_AUDICONF0_CT_MASK = 0xF,
+
+/* FC_AUDICONF1 field values */
+ HDMI_FC_AUDICONF1_SS_OFFSET = 3,
+ HDMI_FC_AUDICONF1_SS_MASK = 0x18,
+ HDMI_FC_AUDICONF1_SF_OFFSET = 0,
+ HDMI_FC_AUDICONF1_SF_MASK = 0x7,
+
+/* FC_AUDICONF3 field values */
+ HDMI_FC_AUDICONF3_LFEPBL_OFFSET = 5,
+ HDMI_FC_AUDICONF3_LFEPBL_MASK = 0x60,
+ HDMI_FC_AUDICONF3_DM_INH_OFFSET = 4,
+ HDMI_FC_AUDICONF3_DM_INH_MASK = 0x10,
+ HDMI_FC_AUDICONF3_LSV_OFFSET = 0,
+ HDMI_FC_AUDICONF3_LSV_MASK = 0xF,
+
+/* FC_AUDSCHNLS0 field values */
+ HDMI_FC_AUDSCHNLS0_CGMSA_OFFSET = 4,
+ HDMI_FC_AUDSCHNLS0_CGMSA_MASK = 0x30,
+ HDMI_FC_AUDSCHNLS0_COPYRIGHT_OFFSET = 0,
+ HDMI_FC_AUDSCHNLS0_COPYRIGHT_MASK = 0x01,
+
+/* FC_AUDSCHNLS3-6 field values */
+ HDMI_FC_AUDSCHNLS3_OIEC_CH0_OFFSET = 0,
+ HDMI_FC_AUDSCHNLS3_OIEC_CH0_MASK = 0x0f,
+ HDMI_FC_AUDSCHNLS3_OIEC_CH1_OFFSET = 4,
+ HDMI_FC_AUDSCHNLS3_OIEC_CH1_MASK = 0xf0,
+ HDMI_FC_AUDSCHNLS4_OIEC_CH2_OFFSET = 0,
+ HDMI_FC_AUDSCHNLS4_OIEC_CH2_MASK = 0x0f,
+ HDMI_FC_AUDSCHNLS4_OIEC_CH3_OFFSET = 4,
+ HDMI_FC_AUDSCHNLS4_OIEC_CH3_MASK = 0xf0,
+
+ HDMI_FC_AUDSCHNLS5_OIEC_CH0_OFFSET = 0,
+ HDMI_FC_AUDSCHNLS5_OIEC_CH0_MASK = 0x0f,
+ HDMI_FC_AUDSCHNLS5_OIEC_CH1_OFFSET = 4,
+ HDMI_FC_AUDSCHNLS5_OIEC_CH1_MASK = 0xf0,
+ HDMI_FC_AUDSCHNLS6_OIEC_CH2_OFFSET = 0,
+ HDMI_FC_AUDSCHNLS6_OIEC_CH2_MASK = 0x0f,
+ HDMI_FC_AUDSCHNLS6_OIEC_CH3_OFFSET = 4,
+ HDMI_FC_AUDSCHNLS6_OIEC_CH3_MASK = 0xf0,
+
+/* HDMI_FC_AUDSCHNLS7 field values */
+ HDMI_FC_AUDSCHNLS7_ACCURACY_OFFSET = 4,
+ HDMI_FC_AUDSCHNLS7_ACCURACY_MASK = 0x30,
+
+/* HDMI_FC_AUDSCHNLS8 field values */
+ HDMI_FC_AUDSCHNLS8_ORIGSAMPFREQ_MASK = 0xf0,
+ HDMI_FC_AUDSCHNLS8_ORIGSAMPFREQ_OFFSET = 4,
+ HDMI_FC_AUDSCHNLS8_WORDLEGNTH_MASK = 0x0f,
+ HDMI_FC_AUDSCHNLS8_WORDLEGNTH_OFFSET = 0,
+
+/* FC_AUDSCONF field values */
+ HDMI_FC_AUDSCONF_AUD_PACKET_SAMPFIT_MASK = 0xF0,
+ HDMI_FC_AUDSCONF_AUD_PACKET_SAMPFIT_OFFSET = 4,
+ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_MASK = 0x1,
+ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_OFFSET = 0,
+ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT1 = 0x1,
+ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT0 = 0x0,
+
+/* FC_STAT2 field values */
+ HDMI_FC_STAT2_OVERFLOW_MASK = 0x03,
+ HDMI_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
+ HDMI_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
+
+/* FC_INT2 field values */
+ HDMI_FC_INT2_OVERFLOW_MASK = 0x03,
+ HDMI_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02,
+ HDMI_FC_INT2_HIGH_PRIORITY_OVERFLOW = 0x01,
+
+/* FC_MASK2 field values */
+ HDMI_FC_MASK2_OVERFLOW_MASK = 0x03,
+ HDMI_FC_MASK2_LOW_PRIORITY_OVERFLOW = 0x02,
+ HDMI_FC_MASK2_HIGH_PRIORITY_OVERFLOW = 0x01,
+
+/* FC_PRCONF field values */
+ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK = 0xF0,
+ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_OFFSET = 4,
+ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK = 0x0F,
+ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET = 0,
+
+/* FC_AVICONF0-FC_AVICONF3 field values */
+ HDMI_FC_AVICONF0_PIX_FMT_MASK = 0x03,
+ HDMI_FC_AVICONF0_PIX_FMT_RGB = 0x00,
+ HDMI_FC_AVICONF0_PIX_FMT_YCBCR422 = 0x01,
+ HDMI_FC_AVICONF0_PIX_FMT_YCBCR444 = 0x02,
+ HDMI_FC_AVICONF0_ACTIVE_FMT_MASK = 0x40,
+ HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT = 0x40,
+ HDMI_FC_AVICONF0_ACTIVE_FMT_NO_INFO = 0x00,
+ HDMI_FC_AVICONF0_BAR_DATA_MASK = 0x0C,
+ HDMI_FC_AVICONF0_BAR_DATA_NO_DATA = 0x00,
+ HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR = 0x04,
+ HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR = 0x08,
+ HDMI_FC_AVICONF0_BAR_DATA_VERT_HORIZ_BAR = 0x0C,
+ HDMI_FC_AVICONF0_SCAN_INFO_MASK = 0x30,
+ HDMI_FC_AVICONF0_SCAN_INFO_OVERSCAN = 0x10,
+ HDMI_FC_AVICONF0_SCAN_INFO_UNDERSCAN = 0x20,
+ HDMI_FC_AVICONF0_SCAN_INFO_NODATA = 0x00,
+
+ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_MASK = 0x0F,
+ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_USE_CODED = 0x08,
+ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3 = 0x09,
+ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9 = 0x0A,
+ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_14_9 = 0x0B,
+ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_MASK = 0x30,
+ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_NO_DATA = 0x00,
+ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3 = 0x10,
+ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9 = 0x20,
+ HDMI_FC_AVICONF1_COLORIMETRY_MASK = 0xC0,
+ HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA = 0x00,
+ HDMI_FC_AVICONF1_COLORIMETRY_SMPTE = 0x40,
+ HDMI_FC_AVICONF1_COLORIMETRY_ITUR = 0x80,
+ HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO = 0xC0,
+
+ HDMI_FC_AVICONF2_SCALING_MASK = 0x03,
+ HDMI_FC_AVICONF2_SCALING_NONE = 0x00,
+ HDMI_FC_AVICONF2_SCALING_HORIZ = 0x01,
+ HDMI_FC_AVICONF2_SCALING_VERT = 0x02,
+ HDMI_FC_AVICONF2_SCALING_HORIZ_VERT = 0x03,
+ HDMI_FC_AVICONF2_RGB_QUANT_MASK = 0x0C,
+ HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT = 0x00,
+ HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE = 0x04,
+ HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE = 0x08,
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_MASK = 0x70,
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601 = 0x00,
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709 = 0x10,
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_SYCC601 = 0x20,
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_ADOBE_YCC601 = 0x30,
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_ADOBE_RGB = 0x40,
+ HDMI_FC_AVICONF2_IT_CONTENT_MASK = 0x80,
+ HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA = 0x00,
+ HDMI_FC_AVICONF2_IT_CONTENT_VALID = 0x80,
+
+ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_MASK = 0x03,
+ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS = 0x00,
+ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_PHOTO = 0x01,
+ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_CINEMA = 0x02,
+ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GAME = 0x03,
+ HDMI_FC_AVICONF3_QUANT_RANGE_MASK = 0x0C,
+ HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED = 0x00,
+ HDMI_FC_AVICONF3_QUANT_RANGE_FULL = 0x04,
+
+/* FC_DBGFORCE field values */
+ HDMI_FC_DBGFORCE_FORCEAUDIO = 0x10,
+ HDMI_FC_DBGFORCE_FORCEVIDEO = 0x1,
+
+/* PHY_CONF0 field values */
+ HDMI_PHY_CONF0_PDZ_MASK = 0x80,
+ HDMI_PHY_CONF0_PDZ_OFFSET = 7,
+ HDMI_PHY_CONF0_ENTMDS_MASK = 0x40,
+ HDMI_PHY_CONF0_ENTMDS_OFFSET = 6,
+ HDMI_PHY_CONF0_SPARECTRL = 0x20,
+ HDMI_PHY_CONF0_GEN2_PDDQ_MASK = 0x10,
+ HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET = 4,
+ HDMI_PHY_CONF0_GEN2_TXPWRON_MASK = 0x8,
+ HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET = 3,
+ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_MASK = 0x4,
+ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_OFFSET = 2,
+ HDMI_PHY_CONF0_SELDATAENPOL_MASK = 0x2,
+ HDMI_PHY_CONF0_SELDATAENPOL_OFFSET = 1,
+ HDMI_PHY_CONF0_SELDIPIF_MASK = 0x1,
+ HDMI_PHY_CONF0_SELDIPIF_OFFSET = 0,
+
+/* PHY_TST0 field values */
+ HDMI_PHY_TST0_TSTCLR_MASK = 0x20,
+ HDMI_PHY_TST0_TSTCLR_OFFSET = 5,
+ HDMI_PHY_TST0_TSTEN_MASK = 0x10,
+ HDMI_PHY_TST0_TSTEN_OFFSET = 4,
+ HDMI_PHY_TST0_TSTCLK_MASK = 0x1,
+ HDMI_PHY_TST0_TSTCLK_OFFSET = 0,
+
+/* PHY_STAT0 field values */
+ HDMI_PHY_RX_SENSE3 = 0x80,
+ HDMI_PHY_RX_SENSE2 = 0x40,
+ HDMI_PHY_RX_SENSE1 = 0x20,
+ HDMI_PHY_RX_SENSE0 = 0x10,
+ HDMI_PHY_HPD = 0x02,
+ HDMI_PHY_TX_PHY_LOCK = 0x01,
+
+/* PHY_I2CM_SLAVE_ADDR field values */
+ HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2 = 0x69,
+ HDMI_PHY_I2CM_SLAVE_ADDR_HEAC_PHY = 0x49,
+
+/* PHY_I2CM_OPERATION_ADDR field values */
+ HDMI_PHY_I2CM_OPERATION_ADDR_WRITE = 0x10,
+ HDMI_PHY_I2CM_OPERATION_ADDR_READ = 0x1,
+
+/* HDMI_PHY_I2CM_INT_ADDR */
+ HDMI_PHY_I2CM_INT_ADDR_DONE_POL = 0x08,
+ HDMI_PHY_I2CM_INT_ADDR_DONE_MASK = 0x04,
+
+/* HDMI_PHY_I2CM_CTLINT_ADDR */
+ HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL = 0x80,
+ HDMI_PHY_I2CM_CTLINT_ADDR_NAC_MASK = 0x40,
+ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL = 0x08,
+ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_MASK = 0x04,
+
+/* AUD_CTS3 field values */
+ HDMI_AUD_CTS3_N_SHIFT_OFFSET = 5,
+ HDMI_AUD_CTS3_N_SHIFT_MASK = 0xe0,
+ HDMI_AUD_CTS3_N_SHIFT_1 = 0,
+ HDMI_AUD_CTS3_N_SHIFT_16 = 0x20,
+ HDMI_AUD_CTS3_N_SHIFT_32 = 0x40,
+ HDMI_AUD_CTS3_N_SHIFT_64 = 0x60,
+ HDMI_AUD_CTS3_N_SHIFT_128 = 0x80,
+ HDMI_AUD_CTS3_N_SHIFT_256 = 0xa0,
+ /*
+  * Note that the CTS3 MANUAL bit has been removed from our part.
+  * It cannot be set and will read as 0.
+  */
+ HDMI_AUD_CTS3_CTS_MANUAL = 0x10,
+ HDMI_AUD_CTS3_AUDCTS19_16_MASK = 0x0f,
+
+/* AHB_DMA_CONF0 field values */
+ HDMI_AHB_DMA_CONF0_SW_FIFO_RST_OFFSET = 7,
+ HDMI_AHB_DMA_CONF0_SW_FIFO_RST_MASK = 0x80,
+ HDMI_AHB_DMA_CONF0_HBR = 0x10,
+ HDMI_AHB_DMA_CONF0_EN_HLOCK_OFFSET = 3,
+ HDMI_AHB_DMA_CONF0_EN_HLOCK_MASK = 0x08,
+ HDMI_AHB_DMA_CONF0_INCR_TYPE_OFFSET = 1,
+ HDMI_AHB_DMA_CONF0_INCR_TYPE_MASK = 0x06,
+ HDMI_AHB_DMA_CONF0_INCR4 = 0x0,
+ HDMI_AHB_DMA_CONF0_INCR8 = 0x2,
+ HDMI_AHB_DMA_CONF0_INCR16 = 0x4,
+ HDMI_AHB_DMA_CONF0_BURST_MODE = 0x1,
+
+/* HDMI_AHB_DMA_START field values */
+ HDMI_AHB_DMA_START_START_OFFSET = 0,
+ HDMI_AHB_DMA_START_START_MASK = 0x01,
+
+/* HDMI_AHB_DMA_STOP field values */
+ HDMI_AHB_DMA_STOP_STOP_OFFSET = 0,
+ HDMI_AHB_DMA_STOP_STOP_MASK = 0x01,
+
+/* AHB_DMA_STAT, AHB_DMA_INT, AHB_DMA_MASK, AHB_DMA_POL field values */
+ HDMI_AHB_DMA_DONE = 0x80,
+ HDMI_AHB_DMA_RETRY_SPLIT = 0x40,
+ HDMI_AHB_DMA_LOSTOWNERSHIP = 0x20,
+ HDMI_AHB_DMA_ERROR = 0x10,
+ HDMI_AHB_DMA_FIFO_THREMPTY = 0x04,
+ HDMI_AHB_DMA_FIFO_FULL = 0x02,
+ HDMI_AHB_DMA_FIFO_EMPTY = 0x01,
+
+/* AHB_DMA_BUFFSTAT, AHB_DMA_BUFFINT, AHB_DMA_BUFFMASK, AHB_DMA_BUFFPOL
+ * field values */
+ HDMI_AHB_DMA_BUFFSTAT_FULL = 0x02,
+ HDMI_AHB_DMA_BUFFSTAT_EMPTY = 0x01,
+
+/* MC_CLKDIS field values */
+ HDMI_MC_CLKDIS_HDCPCLK_DISABLE = 0x40,
+ HDMI_MC_CLKDIS_CECCLK_DISABLE = 0x20,
+ HDMI_MC_CLKDIS_CSCCLK_DISABLE = 0x10,
+ HDMI_MC_CLKDIS_AUDCLK_DISABLE = 0x8,
+ HDMI_MC_CLKDIS_PREPCLK_DISABLE = 0x4,
+ HDMI_MC_CLKDIS_TMDSCLK_DISABLE = 0x2,
+ HDMI_MC_CLKDIS_PIXELCLK_DISABLE = 0x1,
+
+/* MC_SWRSTZ field values */
+ HDMI_MC_SWRSTZ_TMDSSWRST_REQ = 0x02,
+
+/* MC_FLOWCTRL field values */
+ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_MASK = 0x1,
+ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH = 0x1,
+ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS = 0x0,
+
+/* MC_PHYRSTZ field values */
+ HDMI_MC_PHYRSTZ_ASSERT = 0x0,
+ HDMI_MC_PHYRSTZ_DEASSERT = 0x1,
+
+/* MC_HEACPHY_RST field values */
+ HDMI_MC_HEACPHY_RST_ASSERT = 0x1,
+ HDMI_MC_HEACPHY_RST_DEASSERT = 0x0,
+
+/* CSC_CFG field values */
+ HDMI_CSC_CFG_INTMODE_MASK = 0x30,
+ HDMI_CSC_CFG_INTMODE_OFFSET = 4,
+ HDMI_CSC_CFG_INTMODE_DISABLE = 0x00,
+ HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1 = 0x10,
+ HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA2 = 0x20,
+ HDMI_CSC_CFG_DECMODE_MASK = 0x3,
+ HDMI_CSC_CFG_DECMODE_OFFSET = 0,
+ HDMI_CSC_CFG_DECMODE_DISABLE = 0x0,
+ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA1 = 0x1,
+ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA2 = 0x2,
+ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3 = 0x3,
+
+/* CSC_SCALE field values */
+ HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK = 0xF0,
+ HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP = 0x00,
+ HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP = 0x50,
+ HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP = 0x60,
+ HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP = 0x70,
+ HDMI_CSC_SCALE_CSCSCALE_MASK = 0x03,
+
+/* A_HDCPCFG0 field values */
+ HDMI_A_HDCPCFG0_ELVENA_MASK = 0x80,
+ HDMI_A_HDCPCFG0_ELVENA_ENABLE = 0x80,
+ HDMI_A_HDCPCFG0_ELVENA_DISABLE = 0x00,
+ HDMI_A_HDCPCFG0_I2CFASTMODE_MASK = 0x40,
+ HDMI_A_HDCPCFG0_I2CFASTMODE_ENABLE = 0x40,
+ HDMI_A_HDCPCFG0_I2CFASTMODE_DISABLE = 0x00,
+ HDMI_A_HDCPCFG0_BYPENCRYPTION_MASK = 0x20,
+ HDMI_A_HDCPCFG0_BYPENCRYPTION_ENABLE = 0x20,
+ HDMI_A_HDCPCFG0_BYPENCRYPTION_DISABLE = 0x00,
+ HDMI_A_HDCPCFG0_SYNCRICHECK_MASK = 0x10,
+ HDMI_A_HDCPCFG0_SYNCRICHECK_ENABLE = 0x10,
+ HDMI_A_HDCPCFG0_SYNCRICHECK_DISABLE = 0x00,
+ HDMI_A_HDCPCFG0_AVMUTE_MASK = 0x8,
+ HDMI_A_HDCPCFG0_AVMUTE_ENABLE = 0x8,
+ HDMI_A_HDCPCFG0_AVMUTE_DISABLE = 0x0,
+ HDMI_A_HDCPCFG0_RXDETECT_MASK = 0x4,
+ HDMI_A_HDCPCFG0_RXDETECT_ENABLE = 0x4,
+ HDMI_A_HDCPCFG0_RXDETECT_DISABLE = 0x0,
+ HDMI_A_HDCPCFG0_EN11FEATURE_MASK = 0x2,
+ HDMI_A_HDCPCFG0_EN11FEATURE_ENABLE = 0x2,
+ HDMI_A_HDCPCFG0_EN11FEATURE_DISABLE = 0x0,
+ HDMI_A_HDCPCFG0_HDMIDVI_MASK = 0x1,
+ HDMI_A_HDCPCFG0_HDMIDVI_HDMI = 0x1,
+ HDMI_A_HDCPCFG0_HDMIDVI_DVI = 0x0,
+
+/* A_HDCPCFG1 field values */
+ HDMI_A_HDCPCFG1_DISSHA1CHECK_MASK = 0x8,
+ HDMI_A_HDCPCFG1_DISSHA1CHECK_DISABLE = 0x8,
+ HDMI_A_HDCPCFG1_DISSHA1CHECK_ENABLE = 0x0,
+ HDMI_A_HDCPCFG1_PH2UPSHFTENC_MASK = 0x4,
+ HDMI_A_HDCPCFG1_PH2UPSHFTENC_ENABLE = 0x4,
+ HDMI_A_HDCPCFG1_PH2UPSHFTENC_DISABLE = 0x0,
+ HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK = 0x2,
+ HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_DISABLE = 0x2,
+ HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_ENABLE = 0x0,
+ HDMI_A_HDCPCFG1_SWRESET_MASK = 0x1,
+ HDMI_A_HDCPCFG1_SWRESET_ASSERT = 0x0,
+
+/* A_VIDPOLCFG field values */
+ HDMI_A_VIDPOLCFG_UNENCRYPTCONF_MASK = 0x60,
+ HDMI_A_VIDPOLCFG_UNENCRYPTCONF_OFFSET = 5,
+ HDMI_A_VIDPOLCFG_DATAENPOL_MASK = 0x10,
+ HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_HIGH = 0x10,
+ HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_LOW = 0x0,
+ HDMI_A_VIDPOLCFG_VSYNCPOL_MASK = 0x8,
+ HDMI_A_VIDPOLCFG_VSYNCPOL_ACTIVE_HIGH = 0x8,
+ HDMI_A_VIDPOLCFG_VSYNCPOL_ACTIVE_LOW = 0x0,
+ HDMI_A_VIDPOLCFG_HSYNCPOL_MASK = 0x2,
+ HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_HIGH = 0x2,
+ HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW = 0x0,
+};
+#endif /* __IMX_HDMI_H__ */
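
The *_MASK/*_OFFSET pairs defined above follow the usual read-modify-write pattern. A minimal sketch of that pattern, assuming a hypothetical struct imx_hdmi and 8-bit accessors hdmi_readb()/hdmi_writeb() that are not part of this header:

    /* Sketch only: program the desired pixel-repetition factor in VP_PR_CD. */
    static void hdmi_set_pr_factor(struct imx_hdmi *hdmi, u8 factor)
    {
    	u8 val = hdmi_readb(hdmi, HDMI_VP_PR_CD);	/* assumed accessor */

    	val &= ~HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK;
    	val |= (factor << HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET) &
    	       HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK;
    	hdmi_writeb(hdmi, HDMI_VP_PR_CD, val);		/* assumed accessor */
    }
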
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/staging/imx-drm/imx-ldb.c
index 654bf03..7e59329 100644
--- a/drivers/staging/imx-drm/imx-ldb.c
+++ b/drivers/staging/imx-drm/imx-ldb.c
@@ -167,9 +167,8 @@ static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
/* set display clock mux to LDB input clock */
ret = clk_set_parent(ldb->clk_sel[mux], ldb->clk[chno]);
- if (ret) {
+ if (ret)
dev_err(ldb->dev, "unable to set di%d parent clock to ldb_di%d\n", mux, chno);
- }
}
static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
@@ -414,7 +413,7 @@ enum {
LVDS_BIT_MAP_JEIDA
};
-static const char *imx_ldb_bit_mappings[] = {
+static const char * const imx_ldb_bit_mappings[] = {
[LVDS_BIT_MAP_SPWG] = "spwg",
[LVDS_BIT_MAP_JEIDA] = "jeida",
};
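
For context on the const-correctness change above (illustrative only, not part of the patch): with const char *, only the strings are read-only and the pointer slots can still be reassigned; const char * const also makes the pointers themselves immutable, so the whole table can be placed in read-only data.

    static const char *writable_slots[]  = { "spwg", "jeida" }; /* pointers may be reassigned */
    static const char * const ro_table[] = { "spwg", "jeida" }; /* pointers are read-only too  */
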
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 2c44fef..9abc7ca 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -560,7 +560,7 @@ static const char *imx_tve_modes[] = {
[TVE_MODE_VGA] = "vga",
};
-const int of_get_tve_mode(struct device_node *np)
+static int of_get_tve_mode(struct device_node *np)
{
const char *bm;
int ret, i;
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index 97ca692..ca85d3d 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/export.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/err.h>
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index ce6ba98..22be104 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -218,7 +218,8 @@ static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
if (ipu_crtc->newfb) {
ipu_crtc->newfb = NULL;
- ipu_plane_set_base(ipu_crtc->plane[0], ipu_crtc->base.fb, 0, 0);
+ ipu_plane_set_base(ipu_crtc->plane[0], ipu_crtc->base.fb,
+ ipu_crtc->plane[0]->x, ipu_crtc->plane[0]->y);
ipu_crtc_handle_pageflip(ipu_crtc);
}
diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/staging/imx-drm/ipuv3-plane.c
index d97454a..34b642a 100644
--- a/drivers/staging/imx-drm/ipuv3-plane.c
+++ b/drivers/staging/imx-drm/ipuv3-plane.c
@@ -64,6 +64,7 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
{
struct ipu_ch_param __iomem *cpmem;
struct drm_gem_cma_object *cma_obj;
+ unsigned long eba;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
if (!cma_obj) {
@@ -76,8 +77,15 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
cpmem = ipu_get_cpmem(ipu_plane->ipu_ch);
ipu_cpmem_set_stride(cpmem, fb->pitches[0]);
- ipu_cpmem_set_buffer(cpmem, 0, cma_obj->paddr + fb->offsets[0] +
- fb->pitches[0] * y + x);
+
+ eba = cma_obj->paddr + fb->offsets[0] +
+ fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
+ ipu_cpmem_set_buffer(cpmem, 0, eba);
+ ipu_cpmem_set_buffer(cpmem, 1, eba);
+
+ /* cache offsets for subsequent pageflips */
+ ipu_plane->x = x;
+ ipu_plane->y = y;
return 0;
}
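
A quick check of the new EBA arithmetic with made-up numbers (hypothetical values, only to show why the (bits_per_pixel >> 3) * x term matters for a horizontal pan):

    static unsigned long example_eba(void)
    {
    	/* Hypothetical 1920x1080 XRGB8888 framebuffer panned to (x, y). */
    	unsigned long paddr = 0x18000000;	/* assumed CMA buffer base */
    	unsigned int pitch = 1920 * 4;		/* bytes per scanline */
    	unsigned int bpp = 32, x = 16, y = 10;

    	/* 0x18000000 + 76800 + 64 == 0x18012c40; the old code added only
    	 * x (16 bytes) instead of 16 pixels (64 bytes). */
    	return paddr + pitch * y + (bpp >> 3) * x;
    }
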
diff --git a/drivers/staging/imx-drm/parallel-display.c b/drivers/staging/imx-drm/parallel-display.c
index 24aa9be..351d61d 100644
--- a/drivers/staging/imx-drm/parallel-display.c
+++ b/drivers/staging/imx-drm/parallel-display.c
@@ -23,6 +23,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/videodev2.h>
+#include <video/of_display_timing.h>
#include "imx-drm.h"
@@ -74,7 +75,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
if (np) {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
- of_get_drm_display_mode(np, &imxpd->mode, 0);
+ of_get_drm_display_mode(np, &imxpd->mode, OF_USE_NATIVE_MODE);
drm_mode_copy(mode, &imxpd->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
drm_mode_probed_add(connector, mode);
diff --git a/drivers/staging/keucr/smcommon.h b/drivers/staging/keucr/smcommon.h
index 4d57203..1d2752a 100644
--- a/drivers/staging/keucr/smcommon.h
+++ b/drivers/staging/keucr/smcommon.h
@@ -4,7 +4,7 @@
/***************************************************************************
-Define Difinetion
+Define Definition
***************************************************************************/
#define SMSUCCESS 0x0000 /* SUCCESS */
#define ERROR 0xFFFF /* ERROR */
diff --git a/drivers/staging/keucr/smil.h b/drivers/staging/keucr/smil.h
index 1538d7b..9136e94 100644
--- a/drivers/staging/keucr/smil.h
+++ b/drivers/staging/keucr/smil.h
@@ -45,7 +45,7 @@ Retry Counter Definition
Hardware ECC Definition
***************************************************************************/
#define HW_ECC_SUPPORTED 1 /* Hardware ECC Supported */
-/* No difinition for Software ECC */
+/* No definition for Software ECC */
/***************************************************************************
SmartMedia Command & Status Definition
@@ -189,12 +189,6 @@ struct keucr_media_area {
WORD PhyBlock; /* Physical Block Number on Zone 0 */
};
-
-extern BYTE IsSSFDCCompliance;
-extern BYTE IsXDCompliance;
-
-extern DWORD ErrXDCode;
-extern DWORD ErrCode;
extern WORD ReadBlock;
extern WORD WriteBlock;
extern DWORD MediaChange;
diff --git a/drivers/staging/keucr/smilecc.c b/drivers/staging/keucr/smilecc.c
index 3085f1d..6b8f7d7 100644
--- a/drivers/staging/keucr/smilecc.c
+++ b/drivers/staging/keucr/smilecc.c
@@ -139,7 +139,7 @@ BYTE correct_data(BYTE *data, BYTE *eccdata, BYTE ecc1, BYTE ecc2, BYTE ecc3)
BYTE bit; /* Bit address of cor. DATA */
d1 = ecc1^eccdata[1]; d2 = ecc2^eccdata[0]; /* Compare LP's */
- d3 = ecc3^eccdata[2]; /* Comapre CP's */
+ d3 = ecc3^eccdata[2]; /* Compare CP's */
d = ((DWORD)d1<<16) /* Result of comparison */
+((DWORD)d2<<8)
+(DWORD)d3;
diff --git a/drivers/staging/keucr/smilmain.c b/drivers/staging/keucr/smilmain.c
index 2786808..09d07e0 100644
--- a/drivers/staging/keucr/smilmain.c
+++ b/drivers/staging/keucr/smilmain.c
@@ -4,49 +4,28 @@
#include "smcommon.h"
#include "smil.h"
-int Check_D_LogCHS(WORD *, BYTE *, BYTE *);
-void Initialize_D_Media(void);
-void PowerOff_D_Media(void);
-int Check_D_MediaPower(void);
-int Check_D_MediaExist(void);
-int Check_D_MediaWP(void);
-int Check_D_MediaFmt(struct us_data *);
-int Check_D_MediaFmtForEraseAll(struct us_data *);
-int Conv_D_MediaAddr(struct us_data *, DWORD);
-int Inc_D_MediaAddr(struct us_data *);
-int Check_D_FirstSect(void);
-int Check_D_LastSect(void);
-int Media_D_ReadOneSect(struct us_data *, WORD, BYTE *);
-int Media_D_WriteOneSect(struct us_data *, WORD, BYTE *);
-int Media_D_CopyBlockHead(struct us_data *);
-int Media_D_CopyBlockTail(struct us_data *);
-int Media_D_EraseOneBlock(void);
-int Media_D_EraseAllBlock(void);
-
-int Copy_D_BlockAll(struct us_data *, DWORD);
-int Copy_D_BlockHead(struct us_data *);
-int Copy_D_BlockTail(struct us_data *);
-int Reassign_D_BlockHead(struct us_data *);
-
-int Assign_D_WriteBlock(void);
-int Release_D_ReadBlock(struct us_data *);
-int Release_D_WriteBlock(struct us_data *);
-int Release_D_CopySector(struct us_data *);
-
-int Copy_D_PhyOneSect(struct us_data *);
-int Read_D_PhyOneSect(struct us_data *, WORD, BYTE *);
-int Write_D_PhyOneSect(struct us_data *, WORD, BYTE *);
-int Erase_D_PhyOneBlock(struct us_data *);
-
-int Set_D_PhyFmtValue(struct us_data *);
-int Search_D_CIS(struct us_data *);
-int Make_D_LogTable(struct us_data *);
-void Check_D_BlockIsFull(void);
-
-int MarkFail_D_PhyOneBlock(struct us_data *);
-
-DWORD ErrXDCode;
-DWORD ErrCode;
+static int Conv_D_MediaAddr(struct us_data *, DWORD);
+static int Inc_D_MediaAddr(struct us_data *);
+static int Media_D_ReadOneSect(struct us_data *, WORD, BYTE *);
+
+static int Copy_D_BlockAll(struct us_data *, DWORD);
+
+static int Assign_D_WriteBlock(void);
+static int Release_D_ReadBlock(struct us_data *);
+static int Release_D_WriteBlock(struct us_data *);
+static int Release_D_CopySector(struct us_data *);
+
+static int Copy_D_PhyOneSect(struct us_data *);
+static int Read_D_PhyOneSect(struct us_data *, WORD, BYTE *);
+static int Erase_D_PhyOneBlock(struct us_data *);
+
+static int Set_D_PhyFmtValue(struct us_data *);
+static int Search_D_CIS(struct us_data *);
+static int Make_D_LogTable(struct us_data *);
+
+static int MarkFail_D_PhyOneBlock(struct us_data *);
+
+static DWORD ErrCode;
static BYTE WorkBuf[SECTSIZE];
static BYTE Redundant[REDTSIZE];
static BYTE WorkRedund[REDTSIZE];
@@ -65,10 +44,6 @@ static BYTE BitData[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
#define Clr_D_Bit(a, b) (a[(BYTE)((b) / 8)] &= ~BitData[(b) % 8])
#define Chk_D_Bit(a, b) (a[(BYTE)((b) / 8)] & BitData[(b) % 8])
-BYTE IsSSFDCCompliance;
-BYTE IsXDCompliance;
-
-
/* ----- SM_FreeMem() ------------------------------------------------- */
int SM_FreeMem(void)
{
@@ -167,7 +142,7 @@ int Media_D_CopySector(struct us_data *us, DWORD start, WORD count, BYTE *buf)
}
/* ----- Release_D_CopySector() ------------------------------------------ */
-int Release_D_CopySector(struct us_data *us)
+static int Release_D_CopySector(struct us_data *us)
{
Log2Phy[Media.Zone][Media.LogBlock] = WriteBlock;
Media.PhyBlock = ReadBlock;
@@ -211,7 +186,7 @@ int Check_D_MediaFmt(struct us_data *us)
/* SmartMedia Physical Address Control Subroutine */
/* ----- Conv_D_MediaAddr() --------------------------------------------- */
-int Conv_D_MediaAddr(struct us_data *us, DWORD addr)
+static int Conv_D_MediaAddr(struct us_data *us, DWORD addr)
{
DWORD temp;
@@ -240,7 +215,7 @@ int Conv_D_MediaAddr(struct us_data *us, DWORD addr)
}
/* ----- Inc_D_MediaAddr() ---------------------------------------------- */
-int Inc_D_MediaAddr(struct us_data *us)
+static int Inc_D_MediaAddr(struct us_data *us)
{
WORD LogBlock = Media.LogBlock;
@@ -290,7 +265,7 @@ int Inc_D_MediaAddr(struct us_data *us)
/* SmartMedia Read/Write Subroutine with Retry */
/* ----- Media_D_ReadOneSect() ------------------------------------------ */
-int Media_D_ReadOneSect(struct us_data *us, WORD count, BYTE *buf)
+static int Media_D_ReadOneSect(struct us_data *us, WORD count, BYTE *buf)
{
DWORD err, retry;
@@ -334,7 +309,7 @@ int Media_D_ReadOneSect(struct us_data *us, WORD count, BYTE *buf)
/* SmartMedia Physical Sector Data Copy Subroutine */
/* ----- Copy_D_BlockAll() ---------------------------------------------- */
-int Copy_D_BlockAll(struct us_data *us, DWORD mode)
+static int Copy_D_BlockAll(struct us_data *us, DWORD mode)
{
BYTE sect;
@@ -371,7 +346,7 @@ int Copy_D_BlockAll(struct us_data *us, DWORD mode)
/* SmartMedia Physical Block Assign/Release Subroutine */
/* ----- Assign_D_WriteBlock() ------------------------------------------ */
-int Assign_D_WriteBlock(void)
+static int Assign_D_WriteBlock(void)
{
ReadBlock = Media.PhyBlock;
@@ -404,7 +379,7 @@ int Assign_D_WriteBlock(void)
}
/* ----- Release_D_ReadBlock() ------------------------------------------ */
-int Release_D_ReadBlock(struct us_data *us)
+static int Release_D_ReadBlock(struct us_data *us)
{
DWORD mode;
@@ -438,7 +413,7 @@ int Release_D_ReadBlock(struct us_data *us)
}
/* ----- Release_D_WriteBlock() ----------------------------------------- */
-int Release_D_WriteBlock(struct us_data *us)
+static int Release_D_WriteBlock(struct us_data *us)
{
SectCopyMode = COMPLETED;
Media.PhyBlock = WriteBlock;
@@ -452,12 +427,12 @@ int Release_D_WriteBlock(struct us_data *us)
/* SmartMedia Physical Sector Data Copy Subroutine */
/* ----- Copy_D_PhyOneSect() -------------------------------------------- */
-int Copy_D_PhyOneSect(struct us_data *us)
+static int Copy_D_PhyOneSect(struct us_data *us)
{
int i;
DWORD err, retry;
- /* pr_info("Copy_D_PhyOneSect --- Secotr = %x\n", Media.Sector); */
+ /* pr_info("Copy_D_PhyOneSect --- Sector = %x\n", Media.Sector); */
if (ReadBlock != NO_ASSIGN) {
Media.PhyBlock = ReadBlock;
for (retry = 0; retry < 2; retry++) {
@@ -529,7 +504,7 @@ int Copy_D_PhyOneSect(struct us_data *us)
/* SmartMedia Physical Sector Read/Write/Erase Subroutine */
/* ----- Read_D_PhyOneSect() -------------------------------------------- */
-int Read_D_PhyOneSect(struct us_data *us, WORD count, BYTE *buf)
+static int Read_D_PhyOneSect(struct us_data *us, WORD count, BYTE *buf)
{
int i;
DWORD retry;
@@ -580,7 +555,7 @@ int Read_D_PhyOneSect(struct us_data *us, WORD count, BYTE *buf)
}
/* ----- Erase_D_PhyOneBlock() ------------------------------------------ */
-int Erase_D_PhyOneBlock(struct us_data *us)
+static int Erase_D_PhyOneBlock(struct us_data *us)
{
if (Ssfdc_D_EraseBlock(us)) {
ErrCode = ERR_HwError;
@@ -597,7 +572,7 @@ int Erase_D_PhyOneBlock(struct us_data *us)
/* SmartMedia Physical Format Check Local Subroutine */
/* ----- Set_D_PhyFmtValue() -------------------------------------------- */
-int Set_D_PhyFmtValue(struct us_data *us)
+static int Set_D_PhyFmtValue(struct us_data *us)
{
if (Set_D_SsfdcModel(us->SM_DeviceID))
return ERROR;
@@ -606,7 +581,7 @@ int Set_D_PhyFmtValue(struct us_data *us)
}
/* ----- Search_D_CIS() ------------------------------------------------- */
-int Search_D_CIS(struct us_data *us)
+static int Search_D_CIS(struct us_data *us)
{
Media.Zone = 0;
Media.Sector = 0;
@@ -660,7 +635,7 @@ int Search_D_CIS(struct us_data *us)
}
/* ----- Make_D_LogTable() ---------------------------------------------- */
-int Make_D_LogTable(struct us_data *us)
+static int Make_D_LogTable(struct us_data *us)
{
WORD phyblock, logblock;
@@ -761,7 +736,7 @@ int Make_D_LogTable(struct us_data *us)
}
/* ----- MarkFail_D_PhyOneBlock() --------------------------------------- */
-int MarkFail_D_PhyOneBlock(struct us_data *us)
+static int MarkFail_D_PhyOneBlock(struct us_data *us)
{
BYTE sect;
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c
index 346c570..16da9a9 100644
--- a/drivers/staging/keucr/smilsub.c
+++ b/drivers/staging/keucr/smilsub.c
@@ -6,45 +6,16 @@
#include "smcommon.h"
#include "smil.h"
-void _Set_D_SsfdcRdCmd(BYTE);
-void _Set_D_SsfdcRdAddr(BYTE);
-void _Set_D_SsfdcRdChip(void);
-void _Set_D_SsfdcRdStandby(void);
-void _Start_D_SsfdcRdHwECC(void);
-void _Stop_D_SsfdcRdHwECC(void);
-void _Load_D_SsfdcRdHwECC(BYTE);
-void _Set_D_SsfdcWrCmd(BYTE);
-void _Set_D_SsfdcWrAddr(BYTE);
-void _Set_D_SsfdcWrBlock(void);
-void _Set_D_SsfdcWrStandby(void);
-void _Start_D_SsfdcWrHwECC(void);
-void _Load_D_SsfdcWrHwECC(BYTE);
-int _Check_D_SsfdcBusy(WORD);
-int _Check_D_SsfdcStatus(void);
-void _Reset_D_SsfdcErr(void);
-void _Read_D_SsfdcBuf(BYTE *);
-void _Write_D_SsfdcBuf(BYTE *);
-void _Read_D_SsfdcByte(BYTE *);
-void _ReadRedt_D_SsfdcBuf(BYTE *);
-void _WriteRedt_D_SsfdcBuf(BYTE *);
-BYTE _Check_D_DevCode(BYTE);
-
-void _Set_D_ECCdata(BYTE, BYTE *);
-void _Calc_D_ECCdata(BYTE *);
-
+static BYTE _Check_D_DevCode(BYTE);
+static DWORD ErrXDCode;
+static BYTE IsSSFDCCompliance;
+static BYTE IsXDCompliance;
struct keucr_media_info Ssfdc;
struct keucr_media_address Media;
struct keucr_media_area CisArea;
static BYTE EccBuf[6];
-extern PBYTE SMHostAddr;
-extern DWORD ErrXDCode;
-
-extern WORD ReadBlock;
-extern WORD WriteBlock;
-
-
#define EVEN 0 /* Even Page for 256byte/page */
#define ODD 1 /* Odd Page for 256byte/page */
diff --git a/drivers/staging/keucr/smscsi.c b/drivers/staging/keucr/smscsi.c
index 572d648..5c03eca 100644
--- a/drivers/staging/keucr/smscsi.c
+++ b/drivers/staging/keucr/smscsi.c
@@ -11,16 +11,12 @@
#include "transport.h"
#include "smil.h"
-int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Start_Stop(struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb);
-
-extern PBYTE SMHostAddr;
-extern DWORD ErrXDCode;
+static int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb);
+static int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb);
+static int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb);
+static int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb);
+static int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb);
+static int SM_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb);
/* ----- SM_SCSIIrp() -------------------------------------------------- */
int SM_SCSIIrp(struct us_data *us, struct scsi_cmnd *srb)
@@ -57,7 +53,7 @@ int SM_SCSIIrp(struct us_data *us, struct scsi_cmnd *srb)
}
/* ----- SM_SCSI_Test_Unit_Ready() ------------------------------------- */
-int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb)
+static int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb)
{
if (us->SM_Status.Insert && us->SM_Status.Ready)
return USB_STOR_TRANSPORT_GOOD;
@@ -70,7 +66,7 @@ int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb)
}
/* ----- SM_SCSI_Inquiry() --------------------------------------------- */
-int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb)
+static int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb)
{
BYTE data_ptr[36] = {0x00, 0x80, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00,
0x55, 0x53, 0x42, 0x32, 0x2E, 0x30, 0x20,
@@ -84,7 +80,7 @@ int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb)
/* ----- SM_SCSI_Mode_Sense() ------------------------------------------ */
-int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb)
+static int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb)
{
BYTE mediaNoWP[12] = {0x0b, 0x00, 0x00, 0x08, 0x00, 0x00,
0x71, 0xc0, 0x00, 0x00, 0x02, 0x00};
@@ -101,7 +97,7 @@ int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb)
}
/* ----- SM_SCSI_Read_Capacity() --------------------------------------- */
-int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
+static int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
{
unsigned int offset = 0;
struct scatterlist *sg = NULL;
@@ -133,7 +129,7 @@ int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
}
/* ----- SM_SCSI_Read() -------------------------------------------------- */
-int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
+static int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
{
int result = 0;
PBYTE Cdb = srb->cmnd;
@@ -165,7 +161,7 @@ int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
}
/* ----- SM_SCSI_Write() -------------------------------------------------- */
-int SM_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
+static int SM_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
{
int result = 0;
PBYTE Cdb = srb->cmnd;
diff --git a/drivers/staging/keucr/usb.c b/drivers/staging/keucr/usb.c
index a84ee63..3e3ca63 100644
--- a/drivers/staging/keucr/usb.c
+++ b/drivers/staging/keucr/usb.c
@@ -2,7 +2,6 @@
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index cc5d62d..7a6d85e 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -38,6 +38,7 @@ static const struct usb_device_id line6_id_table[] = {
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_GUITARPORT)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_POCKETPOD)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD300)},
+ {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD400)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD500)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_GX)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_UX1)},
@@ -64,6 +65,7 @@ static struct line6_properties line6_properties_table[] = {
{ LINE6_BIT_GUITARPORT, "GuitarPort", "GuitarPort", LINE6_BIT_PCM },
{ LINE6_BIT_POCKETPOD, "PocketPOD", "Pocket POD", LINE6_BIT_CONTROL },
{ LINE6_BIT_PODHD300, "PODHD300", "POD HD300", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_PODHD400, "PODHD400", "POD HD400", LINE6_BIT_CONTROL_PCM_HWMON },
{ LINE6_BIT_PODHD500, "PODHD500", "POD HD500", LINE6_BIT_CONTROL_PCM_HWMON },
{ LINE6_BIT_PODSTUDIO_GX, "PODStudioGX", "POD Studio GX", LINE6_BIT_PCM },
{ LINE6_BIT_PODSTUDIO_UX1, "PODStudioUX1", "POD Studio UX1", LINE6_BIT_PCM },
@@ -352,6 +354,7 @@ static void line6_data_received(struct urb *urb)
break;
case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD400:
case LINE6_DEVID_PODHD500:
break; /* let userspace handle MIDI */
@@ -684,6 +687,7 @@ static int line6_probe(struct usb_interface *interface,
case LINE6_DEVID_PODXT:
case LINE6_DEVID_PODXTPRO:
case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD400:
alternate = 5;
break;
@@ -738,6 +742,7 @@ static int line6_probe(struct usb_interface *interface,
break;
case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD400:
size = sizeof(struct usb_line6_podhd);
ep_read = 0x84;
ep_write = 0x03;
@@ -896,6 +901,7 @@ static int line6_probe(struct usb_interface *interface,
break;
case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD400:
case LINE6_DEVID_PODHD500:
ret = line6_podhd_init(interface,
(struct usb_line6_podhd *)line6);
@@ -1023,6 +1029,7 @@ static void line6_disconnect(struct usb_interface *interface)
break;
case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD400:
case LINE6_DEVID_PODHD500:
line6_podhd_disconnect(interface);
break;
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index 6a0648c..df8331b 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -436,6 +436,7 @@ int line6_init_pcm(struct usb_line6 *line6,
case LINE6_DEVID_PODXTLIVE:
case LINE6_DEVID_PODXTPRO:
case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD400:
ep_read = 0x82;
ep_write = 0x01;
break;
diff --git a/drivers/staging/line6/usbdefs.h b/drivers/staging/line6/usbdefs.h
index 43eb540..90caddd 100644
--- a/drivers/staging/line6/usbdefs.h
+++ b/drivers/staging/line6/usbdefs.h
@@ -25,6 +25,7 @@
#define LINE6_DEVID_GUITARPORT 0x4750
#define LINE6_DEVID_POCKETPOD 0x5051
#define LINE6_DEVID_PODHD300 0x5057
+#define LINE6_DEVID_PODHD400 0x5058
#define LINE6_DEVID_PODHD500 0x414D
#define LINE6_DEVID_PODSTUDIO_GX 0x4153
#define LINE6_DEVID_PODSTUDIO_UX1 0x4150
@@ -48,6 +49,7 @@ enum {
LINE6_INDEX_GUITARPORT,
LINE6_INDEX_POCKETPOD,
LINE6_INDEX_PODHD300,
+ LINE6_INDEX_PODHD400,
LINE6_INDEX_PODHD500,
LINE6_INDEX_PODSTUDIO_GX,
LINE6_INDEX_PODSTUDIO_UX1,
@@ -68,6 +70,7 @@ enum {
LINE6_BIT(GUITARPORT),
LINE6_BIT(POCKETPOD),
LINE6_BIT(PODHD300),
+ LINE6_BIT(PODHD400),
LINE6_BIT(PODHD500),
LINE6_BIT(PODSTUDIO_GX),
LINE6_BIT(PODSTUDIO_UX1),
@@ -88,7 +91,9 @@ enum {
LINE6_BITS_PODXTALL = LINE6_BIT_PODXT | LINE6_BIT_PODXTLIVE |
LINE6_BIT_PODXTPRO,
LINE6_BITS_PODX3ALL = LINE6_BIT_PODX3 | LINE6_BIT_PODX3LIVE,
- LINE6_BITS_PODHDALL = LINE6_BIT_PODHD300 | LINE6_BIT_PODHD500,
+ LINE6_BITS_PODHDALL = LINE6_BIT_PODHD300 |
+ LINE6_BIT_PODHD400 |
+ LINE6_BIT_PODHD500,
LINE6_BITS_BASSPODXTALL = LINE6_BIT_BASSPODXT |
LINE6_BIT_BASSPODXTLIVE |
LINE6_BIT_BASSPODXTPRO
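
Because the new LINE6_BIT_PODHD400 is folded into LINE6_BITS_PODHDALL, family-wide checks pick the HD400 up without further changes. A hedged sketch of that pattern (the device_bit field name is assumed from the surrounding driver, not shown in this hunk):

    /* Assumed: props->device_bit holds exactly one LINE6_BIT_* value. */
    static int is_pod_hd(const struct line6_properties *props)
    {
    	return (props->device_bit & LINE6_BITS_PODHDALL) != 0;
    }
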
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
index de8e35b..507d16b 100644
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h
@@ -61,7 +61,6 @@ int cfs_curproc_groups_nr(void);
*/
/* check if task is running in compat mode.*/
-int current_is_32bit(void);
#define current_pid() (current->pid)
#define current_comm() (current->comm)
int cfs_get_environ(const char *key, char *value, int *val_len);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 687dbab..4a6c7da 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -181,8 +181,6 @@ static inline void *__container_of(void *ptr, unsigned long shift)
#define container_of0(ptr, type, member) \
((type *)__container_of((void *)(ptr), offsetof(type, member)))
-#define SET_BUT_UNUSED(a) do { } while(sizeof(a) - sizeof(a))
-
#define _LIBCFS_H
#endif /* _LIBCFS_H */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index 40282b7..2bd4885 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -82,76 +82,75 @@ struct ptldebug_header {
__u32 ph_line_num;
} __attribute__((packed));
-
#define PH_FLAG_FIRST_RECORD 1
/* Debugging subsystems (32 bits, non-overlapping) */
/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
-#define S_UNDEFINED 0x00000001
-#define S_MDC 0x00000002
-#define S_MDS 0x00000004
-#define S_OSC 0x00000008
-#define S_OST 0x00000010
-#define S_CLASS 0x00000020
-#define S_LOG 0x00000040
-#define S_LLITE 0x00000080
-#define S_RPC 0x00000100
-#define S_MGMT 0x00000200
-#define S_LNET 0x00000400
-#define S_LND 0x00000800 /* ALL LNDs */
-#define S_PINGER 0x00001000
-#define S_FILTER 0x00002000
+#define S_UNDEFINED 0x00000001
+#define S_MDC 0x00000002
+#define S_MDS 0x00000004
+#define S_OSC 0x00000008
+#define S_OST 0x00000010
+#define S_CLASS 0x00000020
+#define S_LOG 0x00000040
+#define S_LLITE 0x00000080
+#define S_RPC 0x00000100
+#define S_MGMT 0x00000200
+#define S_LNET 0x00000400
+#define S_LND 0x00000800 /* ALL LNDs */
+#define S_PINGER 0x00001000
+#define S_FILTER 0x00002000
/* unused */
-#define S_ECHO 0x00008000
-#define S_LDLM 0x00010000
-#define S_LOV 0x00020000
-#define S_LQUOTA 0x00040000
+#define S_ECHO 0x00008000
+#define S_LDLM 0x00010000
+#define S_LOV 0x00020000
+#define S_LQUOTA 0x00040000
#define S_OSD 0x00080000
/* unused */
/* unused */
/* unused */
-#define S_LMV 0x00800000 /* b_new_cmd */
+#define S_LMV 0x00800000 /* b_new_cmd */
/* unused */
-#define S_SEC 0x02000000 /* upcall cache */
-#define S_GSS 0x04000000 /* b_new_cmd */
+#define S_SEC 0x02000000 /* upcall cache */
+#define S_GSS 0x04000000 /* b_new_cmd */
/* unused */
-#define S_MGC 0x10000000
-#define S_MGS 0x20000000
-#define S_FID 0x40000000 /* b_new_cmd */
-#define S_FLD 0x80000000 /* b_new_cmd */
+#define S_MGC 0x10000000
+#define S_MGS 0x20000000
+#define S_FID 0x40000000 /* b_new_cmd */
+#define S_FLD 0x80000000 /* b_new_cmd */
/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
/* Debugging masks (32 bits, non-overlapping) */
/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
-#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */
-#define D_INODE 0x00000002
-#define D_SUPER 0x00000004
-#define D_EXT2 0x00000008 /* anything from ext2_debug */
-#define D_MALLOC 0x00000010 /* print malloc, free information */
-#define D_CACHE 0x00000020 /* cache-related items */
-#define D_INFO 0x00000040 /* general information */
-#define D_IOCTL 0x00000080 /* ioctl related information */
-#define D_NETERROR 0x00000100 /* network errors */
-#define D_NET 0x00000200 /* network communications */
-#define D_WARNING 0x00000400 /* CWARN(...) == CDEBUG (D_WARNING, ...) */
-#define D_BUFFS 0x00000800
-#define D_OTHER 0x00001000
-#define D_DENTRY 0x00002000
-#define D_NETTRACE 0x00004000
-#define D_PAGE 0x00008000 /* bulk page handling */
-#define D_DLMTRACE 0x00010000
-#define D_ERROR 0x00020000 /* CERROR(...) == CDEBUG (D_ERROR, ...) */
-#define D_EMERG 0x00040000 /* CEMERG(...) == CDEBUG (D_EMERG, ...) */
-#define D_HA 0x00080000 /* recovery and failover */
-#define D_RPCTRACE 0x00100000 /* for distributed debugging */
-#define D_VFSTRACE 0x00200000
-#define D_READA 0x00400000 /* read-ahead */
-#define D_MMAP 0x00800000
-#define D_CONFIG 0x01000000
-#define D_CONSOLE 0x02000000
-#define D_QUOTA 0x04000000
-#define D_SEC 0x08000000
-#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */
+#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */
+#define D_INODE 0x00000002
+#define D_SUPER 0x00000004
+#define D_EXT2 0x00000008 /* anything from ext2_debug */
+#define D_MALLOC 0x00000010 /* print malloc, free information */
+#define D_CACHE 0x00000020 /* cache-related items */
+#define D_INFO 0x00000040 /* general information */
+#define D_IOCTL 0x00000080 /* ioctl related information */
+#define D_NETERROR 0x00000100 /* network errors */
+#define D_NET 0x00000200 /* network communications */
+#define D_WARNING 0x00000400 /* CWARN(...) == CDEBUG (D_WARNING, ...) */
+#define D_BUFFS 0x00000800
+#define D_OTHER 0x00001000
+#define D_DENTRY 0x00002000
+#define D_NETTRACE 0x00004000
+#define D_PAGE 0x00008000 /* bulk page handling */
+#define D_DLMTRACE 0x00010000
+#define D_ERROR 0x00020000 /* CERROR(...) == CDEBUG (D_ERROR, ...) */
+#define D_EMERG 0x00040000 /* CEMERG(...) == CDEBUG (D_EMERG, ...) */
+#define D_HA 0x00080000 /* recovery and failover */
+#define D_RPCTRACE 0x00100000 /* for distributed debugging */
+#define D_VFSTRACE 0x00200000
+#define D_READA 0x00400000 /* read-ahead */
+#define D_MMAP 0x00800000
+#define D_CONFIG 0x01000000
+#define D_CONSOLE 0x02000000
+#define D_QUOTA 0x04000000
+#define D_SEC 0x08000000
+#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */
/* keep these in sync with lnet/{utils,libcfs}/debug.c */
#define D_HSM D_TRACE
@@ -166,41 +165,39 @@ struct ptldebug_header {
#define CDEBUG_DEFAULT_MIN_DELAY ((cfs_time_seconds(1) + 1) / 2) /* jiffies */
#define CDEBUG_DEFAULT_BACKOFF 2
struct cfs_debug_limit_state {
- cfs_time_t cdls_next;
- unsigned int cdls_delay;
+ cfs_time_t cdls_next;
+ unsigned int cdls_delay;
int cdls_count;
};
struct libcfs_debug_msg_data {
- const char *msg_file;
- const char *msg_fn;
- int msg_subsys;
- int msg_line;
- int msg_mask;
- struct cfs_debug_limit_state *msg_cdls;
+ const char *msg_file;
+ const char *msg_fn;
+ int msg_subsys;
+ int msg_line;
+ int msg_mask;
+ struct cfs_debug_limit_state *msg_cdls;
};
-#define LIBCFS_DEBUG_MSG_DATA_INIT(data, mask, cdls) \
-do { \
- (data)->msg_subsys = DEBUG_SUBSYSTEM; \
- (data)->msg_file = __FILE__; \
- (data)->msg_fn = __FUNCTION__; \
- (data)->msg_line = __LINE__; \
- (data)->msg_cdls = (cdls); \
- (data)->msg_mask = (mask); \
+#define LIBCFS_DEBUG_MSG_DATA_INIT(data, mask, cdls) \
+do { \
+ (data)->msg_subsys = DEBUG_SUBSYSTEM; \
+ (data)->msg_file = __FILE__; \
+ (data)->msg_fn = __FUNCTION__; \
+ (data)->msg_line = __LINE__; \
+ (data)->msg_cdls = (cdls); \
+ (data)->msg_mask = (mask); \
} while (0)
-#define LIBCFS_DEBUG_MSG_DATA_DECL(dataname, mask, cdls) \
- static struct libcfs_debug_msg_data dataname = { \
- .msg_subsys = DEBUG_SUBSYSTEM, \
- .msg_file = __FILE__, \
- .msg_fn = __FUNCTION__, \
- .msg_line = __LINE__, \
- .msg_cdls = (cdls) }; \
+#define LIBCFS_DEBUG_MSG_DATA_DECL(dataname, mask, cdls) \
+ static struct libcfs_debug_msg_data dataname = { \
+ .msg_subsys = DEBUG_SUBSYSTEM, \
+ .msg_file = __FILE__, \
+ .msg_fn = __FUNCTION__, \
+ .msg_line = __LINE__, \
+ .msg_cdls = (cdls) }; \
dataname.msg_mask = (mask);
-
-
/**
* Filters out logging messages based on mask and subsystem.
*/
@@ -210,34 +207,31 @@ static inline int cfs_cdebug_show(unsigned int mask, unsigned int subsystem)
((libcfs_debug & mask) && (libcfs_subsystem_debug & subsystem));
}
-#define __CDEBUG(cdls, mask, format, ...) \
-do { \
- static struct libcfs_debug_msg_data msgdata; \
+#define __CDEBUG(cdls, mask, format, ...) \
+do { \
+ static struct libcfs_debug_msg_data msgdata; \
\
- CFS_CHECK_STACK(&msgdata, mask, cdls); \
+ CFS_CHECK_STACK(&msgdata, mask, cdls); \
\
- if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_INIT(&msgdata, mask, cdls); \
- libcfs_debug_msg(&msgdata, format, ## __VA_ARGS__); \
- } \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_INIT(&msgdata, mask, cdls); \
+ libcfs_debug_msg(&msgdata, format, ## __VA_ARGS__); \
+ } \
} while (0)
#define CDEBUG(mask, format, ...) __CDEBUG(NULL, mask, format, ## __VA_ARGS__)
-#define CDEBUG_LIMIT(mask, format, ...) \
-do { \
- static struct cfs_debug_limit_state cdls; \
- \
- __CDEBUG(&cdls, mask, format, ## __VA_ARGS__);\
+#define CDEBUG_LIMIT(mask, format, ...) \
+do { \
+ static struct cfs_debug_limit_state cdls; \
+ \
+ __CDEBUG(&cdls, mask, format, ## __VA_ARGS__); \
} while (0)
-
-
-
-#define CWARN(format, ...) CDEBUG_LIMIT(D_WARNING, format, ## __VA_ARGS__)
-#define CERROR(format, ...) CDEBUG_LIMIT(D_ERROR, format, ## __VA_ARGS__)
-#define CNETERR(format, a...) CDEBUG_LIMIT(D_NETERROR, format, ## a)
-#define CEMERG(format, ...) CDEBUG_LIMIT(D_EMERG, format, ## __VA_ARGS__)
+#define CWARN(format, ...) CDEBUG_LIMIT(D_WARNING, format, ## __VA_ARGS__)
+#define CERROR(format, ...) CDEBUG_LIMIT(D_ERROR, format, ## __VA_ARGS__)
+#define CNETERR(format, a...) CDEBUG_LIMIT(D_NETERROR, format, ## a)
+#define CEMERG(format, ...) CDEBUG_LIMIT(D_EMERG, format, ## __VA_ARGS__)
#define LCONSOLE(mask, format, ...) CDEBUG(D_CONSOLE | (mask), format, ## __VA_ARGS__)
#define LCONSOLE_INFO(format, ...) CDEBUG_LIMIT(D_CONSOLE, format, ## __VA_ARGS__)
@@ -248,20 +242,18 @@ do { \
#define LCONSOLE_EMERG(format, ...) CDEBUG(D_CONSOLE | D_EMERG, format, ## __VA_ARGS__)
-
void libcfs_log_goto(struct libcfs_debug_msg_data *, const char *, long_ptr_t);
-#define GOTO(label, rc) \
-do { \
+#define GOTO(label, rc) \
+do { \
if (cfs_cdebug_show(D_TRACE, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_TRACE, NULL); \
- libcfs_log_goto(&msgdata, #label, (long_ptr_t)(rc)); \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_TRACE, NULL); \
+ libcfs_log_goto(&msgdata, #label, (long_ptr_t)(rc)); \
} else { \
- (void)(rc); \
- } \
- goto label; \
+ (void)(rc); \
+ } \
+ goto label; \
} while (0)
-
extern int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
const char *format1, ...)
__attribute__ ((format (printf, 2, 3)));
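
The realigned CDEBUG()/CDEBUG_LIMIT() macros are printf-style and only emit when the mask passes cfs_cdebug_show(); DEBUG_SUBSYSTEM must be defined before the libcfs headers are included. A minimal usage sketch, with message text and arguments invented purely for illustration:

    #define DEBUG_SUBSYSTEM S_LNET	/* must precede the libcfs includes */

    static void log_send(int len, const char *peer)
    {
    	CDEBUG(D_NET, "sending %d bytes to %s\n", len, peer);
    	/* rate-limited variant for noisy error paths */
    	CDEBUG_LIMIT(D_NETERROR, "dropping %d bytes to %s\n", len, peer);
    }
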
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index 5be3679..74dda57 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -69,6 +69,7 @@ struct libcfs_ioctl_data {
char ioc_bulk[0];
};
+#define ioc_priority ioc_u32[0]
struct libcfs_ioctl_hdr {
__u32 ioc_len;
@@ -110,41 +111,38 @@ struct libcfs_ioctl_handler {
#define IOC_LIBCFS_TYPE 'e'
#define IOC_LIBCFS_MIN_NR 30
/* libcfs ioctls */
-#define IOC_LIBCFS_PANIC _IOWR('e', 30, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_LWT_CONTROL _IOWR('e', 33, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_LWT_SNAPSHOT _IOWR('e', 34, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_LWT_LOOKUP_STRING _IOWR('e', 35, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_PING_TEST _IOWR('e', 37, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_PANIC _IOWR('e', 30, long)
+#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long)
+#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long)
+#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long)
+#define IOC_LIBCFS_PING_TEST _IOWR('e', 37, long)
/* lnet ioctls */
-#define IOC_LIBCFS_GET_NI _IOWR('e', 50, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_ADD_ROUTE _IOWR('e', 52, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_DEL_ROUTE _IOWR('e', 53, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_GET_ROUTE _IOWR('e', 54, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_PING _IOWR('e', 61, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_LNETST _IOWR('e', 63, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_NI _IOWR('e', 50, long)
+#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long)
+#define IOC_LIBCFS_ADD_ROUTE _IOWR('e', 52, long)
+#define IOC_LIBCFS_DEL_ROUTE _IOWR('e', 53, long)
+#define IOC_LIBCFS_GET_ROUTE _IOWR('e', 54, long)
+#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, long)
+#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, long)
+#define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, long)
+#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, long)
+#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, long)
+#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, long)
+#define IOC_LIBCFS_PING _IOWR('e', 61, long)
+#define IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, long)
+#define IOC_LIBCFS_LNETST _IOWR('e', 63, long)
/* lnd ioctls */
-#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_GET_TXDESC _IOWR('e', 77, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, long)
+#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, long)
+#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, long)
+#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, long)
+#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, long)
+#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, long)
+#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, long)
+#define IOC_LIBCFS_GET_TXDESC _IOWR('e', 77, long)
+#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, long)
+#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, long)
+#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, long)
#define IOC_LIBCFS_MAX_NR 80
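
The size argument of these ioctl numbers changes from IOCTL_LIBCFS_TYPE to a literal long; the kp30.h removed below defined IOCTL_LIBCFS_TYPE as long, so every encoded command value stays the same. The new ioc_priority macro aliases the first ioc_u32[] slot of struct libcfs_ioctl_data, so a route priority can ride in the existing payload. A minimal sketch of both points, assuming the ioc_u32[] member that the alias implies:

	#include <linux/ioctl.h>

	#define IOCTL_LIBCFS_TYPE long			/* as in the removed kp30.h */
	#define IOC_LIBCFS_ADD_ROUTE_OLD _IOWR('e', 52, IOCTL_LIBCFS_TYPE)
	#define IOC_LIBCFS_ADD_ROUTE_NEW _IOWR('e', 52, long)	/* identical value */

	/* Hypothetical helper: fill the priority slot before issuing
	 * IOC_LIBCFS_ADD_ROUTE (ioc_priority expands to ioc_u32[0]). */
	static inline void libcfs_ioc_set_priority(struct libcfs_ioctl_data *data,
						   unsigned int prio)
	{
		data->ioc_priority = prio;
	}
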
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h b/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h
index c204b67..a09fed3 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h
@@ -42,7 +42,6 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
@@ -63,142 +62,15 @@
#include <linux/smp.h>
#include <linux/ctype.h>
#include <linux/compiler.h>
-#ifdef HAVE_MM_INLINE
-# include <linux/mm_inline.h>
-#endif
+#include <linux/mm_inline.h>
#include <linux/kallsyms.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/libcfs/linux/portals_compat25.h>
-
-/******************************************************************************/
-/* Module parameter support */
-#define CFS_MODULE_PARM(name, t, type, perm, desc) \
- module_param(name, type, perm);\
- MODULE_PARM_DESC(name, desc)
-
-#define CFS_SYSFS_MODULE_PARM 1 /* module parameters accessible via sysfs */
-
-/******************************************************************************/
-/* Light-weight trace
- * Support for temporary event tracing with minimal Heisenberg effect. */
-#define LWT_SUPPORT 0
-
-#define LWT_MEMORY (16<<20)
-
-#ifndef KLWT_SUPPORT
-# if !defined(BITS_PER_LONG)
-# error "BITS_PER_LONG not defined"
-# endif
-
-/* kernel hasn't defined this? */
-typedef struct {
- long long lwte_when;
- char *lwte_where;
- void *lwte_task;
- long lwte_p1;
- long lwte_p2;
- long lwte_p3;
- long lwte_p4;
-# if BITS_PER_LONG > 32
- long lwte_pad;
-# endif
-} lwt_event_t;
-#endif /* !KLWT_SUPPORT */
-
-#if LWT_SUPPORT
-# if !KLWT_SUPPORT
-
-typedef struct _lwt_page {
- struct list_head lwtp_list;
- struct page *lwtp_page;
- lwt_event_t *lwtp_events;
-} lwt_page_t;
-
-typedef struct {
- int lwtc_current_index;
- lwt_page_t *lwtc_current_page;
-} lwt_cpu_t;
-
-extern int lwt_enabled;
-extern lwt_cpu_t lwt_cpus[];
-
-/* Note that we _don't_ define LWT_EVENT at all if LWT_SUPPORT isn't set.
- * This stuff is meant for finding specific problems; it never stays in
- * production code... */
-
-#define LWTSTR(n) #n
-#define LWTWHERE(f,l) f ":" LWTSTR(l)
-#define LWT_EVENTS_PER_PAGE (PAGE_CACHE_SIZE / sizeof (lwt_event_t))
-
-#define LWT_EVENT(p1, p2, p3, p4) \
-do { \
- unsigned long flags; \
- lwt_cpu_t *cpu; \
- lwt_page_t *p; \
- lwt_event_t *e; \
- \
- if (lwt_enabled) { \
- local_irq_save (flags); \
- \
- cpu = &lwt_cpus[smp_processor_id()]; \
- p = cpu->lwtc_current_page; \
- e = &p->lwtp_events[cpu->lwtc_current_index++]; \
- \
- if (cpu->lwtc_current_index >= LWT_EVENTS_PER_PAGE) { \
- cpu->lwtc_current_page = \
- list_entry (p->lwtp_list.next, \
- lwt_page_t, lwtp_list); \
- cpu->lwtc_current_index = 0; \
- } \
- \
- e->lwte_when = get_cycles(); \
- e->lwte_where = LWTWHERE(__FILE__,__LINE__); \
- e->lwte_task = current; \
- e->lwte_p1 = (long)(p1); \
- e->lwte_p2 = (long)(p2); \
- e->lwte_p3 = (long)(p3); \
- e->lwte_p4 = (long)(p4); \
- \
- local_irq_restore (flags); \
- } \
-} while (0)
-
-#endif /* !KLWT_SUPPORT */
-
-extern int lwt_init (void);
-extern void lwt_fini (void);
-extern int lwt_lookup_string (int *size, char *knlptr,
- char *usrptr, int usrsize);
-extern int lwt_control (int enable, int clear);
-extern int lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
- void *user_ptr, int user_size);
-#endif /* LWT_SUPPORT */
-
-/* ------------------------------------------------------------------ */
-
-#define IOCTL_LIBCFS_TYPE long
-
-#ifdef __CYGWIN__
-# ifndef BITS_PER_LONG
-# define BITS_PER_LONG 64
-# endif
-#endif
-
-# define LI_POISON 0x5a5a5a5a
-#if BITS_PER_LONG > 32
-# define LL_POISON 0x5a5a5a5a5a5a5a5aL
-#else
-# define LL_POISON 0x5a5a5a5aL
-#endif
-# define LP_POISON ((void *)LL_POISON)
-
/* this is a bit chunky */
-#define _LWORDSIZE BITS_PER_LONG
-
# define LPU64 "%llu"
# define LPD64 "%lld"
# define LPX64 "%#llx"
@@ -218,24 +90,4 @@ extern int lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
*/
# define LPPID "%d"
-
-#undef _LWORDSIZE
-
-/* compat macroses */
-
-
-#ifndef get_cpu
-# ifdef CONFIG_PREEMPT
-# define get_cpu() ({ preempt_disable(); smp_processor_id(); })
-# define put_cpu() preempt_enable()
-# else
-# define get_cpu() smp_processor_id()
-# define put_cpu()
-# endif
-#else
-#endif /* get_cpu & put_cpu */
-
-#define INIT_CTL_NAME(a)
-#define INIT_STRATEGY(a)
-
#endif
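
The CFS_MODULE_PARM() wrapper deleted here was a thin macro over the standard kernel helpers (its string-type argument, "i"/"s", was not even used in the expansion), which is why the per-module conversions later in this patch are mechanical. A before/after sketch using one of the o2iblnd tunables:

	static int timeout = 50;

	#if 0	/* old form, via the removed wrapper */
	CFS_MODULE_PARM(timeout, "i", int, 0644, "timeout (seconds)");
	#else	/* new form: exactly what the wrapper expanded to */
	module_param(timeout, int, 0644);
	MODULE_PARM_DESC(timeout, "timeout (seconds)");
	#endif
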
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
index 60ecaf6..a7bca40 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -49,7 +49,6 @@
#include <linux/libcfs/linux/linux-mem.h>
#include <linux/libcfs/linux/linux-prim.h>
#include <linux/libcfs/linux/linux-lock.h>
-#include <linux/libcfs/linux/linux-fs.h>
#include <linux/libcfs/linux/linux-tcpip.h>
#include <linux/libcfs/linux/linux-bitops.h>
#include <linux/libcfs/linux/linux-types.h>
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-fs.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-fs.h
deleted file mode 100644
index eebf138..0000000
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-fs.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/linux/linux-fs.h
- *
- * Basic library routines.
- */
-
-#ifndef __LIBCFS_LINUX_CFS_FS_H__
-#define __LIBCFS_LINUX_CFS_FS_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
-
-
-#include <linux/fs.h>
-#include <linux/stat.h>
-#include <linux/mount.h>
-#include <linux/backing-dev.h>
-#include <linux/posix_acl_xattr.h>
-
-#define filp_size(f) \
- (i_size_read((f)->f_dentry->d_inode))
-#define filp_poff(f) \
- (&(f)->f_pos)
-
-# define do_fsync(fp, flag) \
- ((fp)->f_op->fsync(fp, 0, LLONG_MAX, flag))
-
-#define filp_read(fp, buf, size, pos) \
- ((fp)->f_op->read((fp), (buf), (size), pos))
-
-#define filp_write(fp, buf, size, pos) \
- ((fp)->f_op->write((fp), (buf), (size), pos))
-
-#define filp_fsync(fp) \
- do_fsync(fp, 1)
-
-#define flock_type(fl) ((fl)->fl_type)
-#define flock_set_type(fl, type) do { (fl)->fl_type = (type); } while (0)
-#define flock_pid(fl) ((fl)->fl_pid)
-#define flock_set_pid(fl, pid) do { (fl)->fl_pid = (pid); } while (0)
-#define flock_start(fl) ((fl)->fl_start)
-#define flock_set_start(fl, st) do { (fl)->fl_start = (st); } while (0)
-#define flock_end(fl) ((fl)->fl_end)
-#define flock_set_end(fl, end) do { (fl)->fl_end = (end); } while (0)
-
-#ifndef IFSHIFT
-#define IFSHIFT 12
-#endif
-
-#ifndef IFTODT
-#define IFTODT(type) (((type) & S_IFMT) >> IFSHIFT)
-#endif
-#ifndef DTTOIF
-#define DTTOIF(dirtype) ((dirtype) << IFSHIFT)
-#endif
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h
index 1ec4ca1..2aeff27 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h
@@ -47,7 +47,6 @@
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index bf30104..3ac2bb5 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -650,12 +650,13 @@ extern lnet_ni_t *lnet_net2ni(__u32 net);
int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, cfs_time_t when);
void lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, cfs_time_t when);
-int lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway_nid);
+int lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway_nid,
+ unsigned int priority);
int lnet_check_routes(void);
int lnet_del_route(__u32 net, lnet_nid_t gw_nid);
void lnet_destroy_routes(void);
int lnet_get_route(int idx, __u32 *net, __u32 *hops,
- lnet_nid_t *gateway, __u32 *alive);
+ lnet_nid_t *gateway, __u32 *alive, __u32 *priority);
void lnet_proc_init(void);
void lnet_proc_fini(void);
int lnet_rtrpools_alloc(int im_a_router);
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index e579e7e..dd8edcf 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -478,7 +478,6 @@ typedef struct lnet_peer {
lnet_rc_data_t *lp_rcd; /* router checker state */
} lnet_peer_t;
-
/* peer hash size */
#define LNET_PEER_HASH_BITS 9
#define LNET_PEER_HASH_SIZE (1 << LNET_PEER_HASH_BITS)
@@ -504,6 +503,7 @@ typedef struct {
int lr_seq; /* sequence for round-robin */
unsigned int lr_downis; /* number of down NIs */
unsigned int lr_hops; /* how far I am */
+ unsigned int lr_priority; /* route priority */
} lnet_route_t;
#define LNET_REMOTE_NETS_HASH_DEFAULT (1U << 7)
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 4f63b7a..c833ce8 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -383,14 +383,6 @@ typedef enum {
typedef unsigned LNET_SEQ_BASETYPE lnet_seq_t;
#define LNET_SEQ_GT(a,b) (((signed LNET_SEQ_BASETYPE)((a) - (b))) > 0)
-/* XXX
- * cygwin need the pragma line, not clear if it's needed in other places.
- * checking!!!
- */
-#ifdef __CYGWIN__
-#pragma pack(push, 4)
-#endif
-
/**
* Information about an event on a MD.
*/
@@ -462,9 +454,6 @@ typedef struct {
*/
volatile lnet_seq_t sequence;
} lnet_event_t;
-#ifdef __CYGWIN__
-#pragma pop
-#endif
/**
* Event queue handler function type.
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 86397f9..644a000 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -3230,7 +3230,6 @@ void __exit
kiblnd_module_fini (void)
{
lnet_unregister_lnd(&the_o2iblnd);
- kiblnd_tunables_fini();
}
int __init
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index 938df0c..ce05d55 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -50,7 +50,6 @@
#include <asm/uaccess.h>
#include <asm/io.h>
-#include <linux/init.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/list.h>
@@ -106,9 +105,6 @@ typedef struct
int *kib_fmr_pool_size; /* # FMRs in pool */
int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
int *kib_fmr_cache; /* enable FMR pool cache? */
-#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
- ctl_table_header_t *kib_sysctl; /* sysctl interface */
-#endif
int *kib_require_priv_port;/* accept only privileged ports */
int *kib_use_priv_port; /* use privileged port for active connect */
/* # threads on each CPT */
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 26b49a2..9364863 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -529,8 +529,7 @@ kiblnd_kvaddr_to_page (unsigned long vaddr)
{
struct page *page;
- if (vaddr >= VMALLOC_START &&
- vaddr < VMALLOC_END) {
+ if (is_vmalloc_addr(vaddr)) {
page = vmalloc_to_page ((void *)vaddr);
LASSERT (page != NULL);
return page;
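
is_vmalloc_addr() from include/linux/mm.h replaces the open-coded VMALLOC_START/VMALLOC_END range check. It takes a const void *, so the unsigned long vaddr would normally be cast at the call site. A self-contained sketch of the converted check (the lowmem fallback is illustrative, not the full original function):

	#include <linux/mm.h>		/* is_vmalloc_addr(), virt_to_page() */
	#include <linux/vmalloc.h>	/* vmalloc_to_page() */

	static struct page *kvaddr_to_page_sketch(unsigned long vaddr)
	{
		if (is_vmalloc_addr((void *)vaddr))
			return vmalloc_to_page((void *)vaddr);
		return virt_to_page(vaddr);	/* directly-mapped kernel memory */
	}
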
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index 92dc567..cefdfb6 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -41,95 +41,95 @@
#include "o2iblnd.h"
static int service = 987;
-CFS_MODULE_PARM(service, "i", int, 0444,
- "service number (within RDMA_PS_TCP)");
+module_param(service, int, 0444);
+MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)");
static int cksum = 0;
-CFS_MODULE_PARM(cksum, "i", int, 0644,
- "set non-zero to enable message (not RDMA) checksums");
+module_param(cksum, int, 0644);
+MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums");
static int timeout = 50;
-CFS_MODULE_PARM(timeout, "i", int, 0644,
- "timeout (seconds)");
+module_param(timeout, int, 0644);
+MODULE_PARM_DESC(timeout, "timeout (seconds)");
/* Number of threads in each scheduler pool which is percpt,
* we will estimate reasonable value based on CPUs if it's set to zero. */
static int nscheds;
-CFS_MODULE_PARM(nscheds, "i", int, 0444,
- "number of threads in each scheduler pool");
+module_param(nscheds, int, 0444);
+MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
/* NB: this value is shared by all CPTs, it can grow at runtime */
static int ntx = 512;
-CFS_MODULE_PARM(ntx, "i", int, 0444,
- "# of message descriptors allocated for each pool");
+module_param(ntx, int, 0444);
+MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");
/* NB: this value is shared by all CPTs */
static int credits = 256;
-CFS_MODULE_PARM(credits, "i", int, 0444,
- "# concurrent sends");
+module_param(credits, int, 0444);
+MODULE_PARM_DESC(credits, "# concurrent sends");
static int peer_credits = 8;
-CFS_MODULE_PARM(peer_credits, "i", int, 0444,
- "# concurrent sends to 1 peer");
+module_param(peer_credits, int, 0444);
+MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
static int peer_credits_hiw = 0;
-CFS_MODULE_PARM(peer_credits_hiw, "i", int, 0444,
- "when eagerly to return credits");
+module_param(peer_credits_hiw, int, 0444);
+MODULE_PARM_DESC(peer_credits_hiw, "when eagerly to return credits");
static int peer_buffer_credits = 0;
-CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
- "# per-peer router buffer credits");
+module_param(peer_buffer_credits, int, 0444);
+MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");
static int peer_timeout = 180;
-CFS_MODULE_PARM(peer_timeout, "i", int, 0444,
- "Seconds without aliveness news to declare peer dead (<=0 to disable)");
+module_param(peer_timeout, int, 0444);
+MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
static char *ipif_name = "ib0";
-CFS_MODULE_PARM(ipif_name, "s", charp, 0444,
- "IPoIB interface name");
+module_param(ipif_name, charp, 0444);
+MODULE_PARM_DESC(ipif_name, "IPoIB interface name");
static int retry_count = 5;
-CFS_MODULE_PARM(retry_count, "i", int, 0644,
- "Retransmissions when no ACK received");
+module_param(retry_count, int, 0644);
+MODULE_PARM_DESC(retry_count, "Retransmissions when no ACK received");
static int rnr_retry_count = 6;
-CFS_MODULE_PARM(rnr_retry_count, "i", int, 0644,
- "RNR retransmissions");
+module_param(rnr_retry_count, int, 0644);
+MODULE_PARM_DESC(rnr_retry_count, "RNR retransmissions");
static int keepalive = 100;
-CFS_MODULE_PARM(keepalive, "i", int, 0644,
- "Idle time in seconds before sending a keepalive");
+module_param(keepalive, int, 0644);
+MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive");
static int ib_mtu = 0;
-CFS_MODULE_PARM(ib_mtu, "i", int, 0444,
- "IB MTU 256/512/1024/2048/4096");
+module_param(ib_mtu, int, 0444);
+MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096");
static int concurrent_sends = 0;
-CFS_MODULE_PARM(concurrent_sends, "i", int, 0444,
- "send work-queue sizing");
+module_param(concurrent_sends, int, 0444);
+MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");
static int map_on_demand = 0;
-CFS_MODULE_PARM(map_on_demand, "i", int, 0444,
- "map on demand");
+module_param(map_on_demand, int, 0444);
+MODULE_PARM_DESC(map_on_demand, "map on demand");
/* NB: this value is shared by all CPTs, it can grow at runtime */
static int fmr_pool_size = 512;
-CFS_MODULE_PARM(fmr_pool_size, "i", int, 0444,
- "size of fmr pool on each CPT (>= ntx / 4)");
+module_param(fmr_pool_size, int, 0444);
+MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)");
/* NB: this value is shared by all CPTs, it can grow at runtime */
static int fmr_flush_trigger = 384;
-CFS_MODULE_PARM(fmr_flush_trigger, "i", int, 0444,
- "# dirty FMRs that triggers pool flush");
+module_param(fmr_flush_trigger, int, 0444);
+MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush");
static int fmr_cache = 1;
-CFS_MODULE_PARM(fmr_cache, "i", int, 0444,
- "non-zero to enable FMR caching");
+module_param(fmr_cache, int, 0444);
+MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching");
/* NB: this value is shared by all CPTs, it can grow at runtime */
static int pmr_pool_size = 512;
-CFS_MODULE_PARM(pmr_pool_size, "i", int, 0444,
- "size of MR cache pmr pool on each CPT");
+module_param(pmr_pool_size, int, 0444);
+MODULE_PARM_DESC(pmr_pool_size, "size of MR cache pmr pool on each CPT");
/*
* 0: disable failover
@@ -137,17 +137,17 @@ CFS_MODULE_PARM(pmr_pool_size, "i", int, 0444,
* 2: force to failover (for debug)
*/
static int dev_failover = 0;
-CFS_MODULE_PARM(dev_failover, "i", int, 0444,
- "HCA failover for bonding (0 off, 1 on, other values reserved)");
+module_param(dev_failover, int, 0444);
+MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)");
static int require_privileged_port = 0;
-CFS_MODULE_PARM(require_privileged_port, "i", int, 0644,
- "require privileged port when accepting connection");
+module_param(require_privileged_port, int, 0644);
+MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection");
static int use_privileged_port = 1;
-CFS_MODULE_PARM(use_privileged_port, "i", int, 0644,
- "use privileged port when initiating connection");
+module_param(use_privileged_port, int, 0644);
+MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");
kib_tunables_t kiblnd_tunables = {
.kib_dev_failover = &dev_failover,
@@ -176,261 +176,6 @@ kib_tunables_t kiblnd_tunables = {
.kib_nscheds = &nscheds
};
-#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
-
-static char ipif_basename_space[32];
-
-
-enum {
- O2IBLND_SERVICE = 1,
- O2IBLND_CKSUM,
- O2IBLND_TIMEOUT,
- O2IBLND_NTX,
- O2IBLND_CREDITS,
- O2IBLND_PEER_TXCREDITS,
- O2IBLND_PEER_CREDITS_HIW,
- O2IBLND_PEER_RTRCREDITS,
- O2IBLND_PEER_TIMEOUT,
- O2IBLND_IPIF_BASENAME,
- O2IBLND_RETRY_COUNT,
- O2IBLND_RNR_RETRY_COUNT,
- O2IBLND_KEEPALIVE,
- O2IBLND_CONCURRENT_SENDS,
- O2IBLND_IB_MTU,
- O2IBLND_MAP_ON_DEMAND,
- O2IBLND_FMR_POOL_SIZE,
- O2IBLND_FMR_FLUSH_TRIGGER,
- O2IBLND_FMR_CACHE,
- O2IBLND_PMR_POOL_SIZE,
- O2IBLND_DEV_FAILOVER
-};
-
-static ctl_table_t kiblnd_ctl_table[] = {
- {
- .ctl_name = O2IBLND_SERVICE,
- .procname = "service",
- .data = &service,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_CKSUM,
- .procname = "cksum",
- .data = &cksum,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_TIMEOUT,
- .procname = "timeout",
- .data = &timeout,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_NTX,
- .procname = "ntx",
- .data = &ntx,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_CREDITS,
- .procname = "credits",
- .data = &credits,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_PEER_TXCREDITS,
- .procname = "peer_credits",
- .data = &peer_credits,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_PEER_CREDITS_HIW,
- .procname = "peer_credits_hiw",
- .data = &peer_credits_hiw,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_PEER_RTRCREDITS,
- .procname = "peer_buffer_credits",
- .data = &peer_buffer_credits,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_PEER_TIMEOUT,
- .procname = "peer_timeout",
- .data = &peer_timeout,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_IPIF_BASENAME,
- .procname = "ipif_name",
- .data = ipif_basename_space,
- .maxlen = sizeof(ipif_basename_space),
- .mode = 0444,
- .proc_handler = &proc_dostring
- },
- {
- .ctl_name = O2IBLND_RETRY_COUNT,
- .procname = "retry_count",
- .data = &retry_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_RNR_RETRY_COUNT,
- .procname = "rnr_retry_count",
- .data = &rnr_retry_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_KEEPALIVE,
- .procname = "keepalive",
- .data = &keepalive,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_CONCURRENT_SENDS,
- .procname = "concurrent_sends",
- .data = &concurrent_sends,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_IB_MTU,
- .procname = "ib_mtu",
- .data = &ib_mtu,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_MAP_ON_DEMAND,
- .procname = "map_on_demand",
- .data = &map_on_demand,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
-
- {
- .ctl_name = O2IBLND_FMR_POOL_SIZE,
- .procname = "fmr_pool_size",
- .data = &fmr_pool_size,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_FMR_FLUSH_TRIGGER,
- .procname = "fmr_flush_trigger",
- .data = &fmr_flush_trigger,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_FMR_CACHE,
- .procname = "fmr_cache",
- .data = &fmr_cache,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_PMR_POOL_SIZE,
- .procname = "pmr_pool_size",
- .data = &pmr_pool_size,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_DEV_FAILOVER,
- .procname = "dev_failover",
- .data = &dev_failover,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {0}
-};
-
-static ctl_table_t kiblnd_top_ctl_table[] = {
- {
- .ctl_name = CTL_O2IBLND,
- .procname = "o2iblnd",
- .data = NULL,
- .maxlen = 0,
- .mode = 0555,
- .child = kiblnd_ctl_table
- },
- {0}
-};
-
-void
-kiblnd_initstrtunable(char *space, char *str, int size)
-{
- strncpy(space, str, size);
- space[size-1] = 0;
-}
-
-void
-kiblnd_sysctl_init (void)
-{
- kiblnd_initstrtunable(ipif_basename_space, ipif_name,
- sizeof(ipif_basename_space));
-
- kiblnd_tunables.kib_sysctl =
- register_sysctl_table(kiblnd_top_ctl_table);
-
- if (kiblnd_tunables.kib_sysctl == NULL)
- CWARN("Can't setup /proc tunables\n");
-}
-
-void
-kiblnd_sysctl_fini (void)
-{
- if (kiblnd_tunables.kib_sysctl != NULL)
- unregister_sysctl_table(kiblnd_tunables.kib_sysctl);
-}
-
-#else
-
-void
-kiblnd_sysctl_init (void)
-{
-}
-
-void
-kiblnd_sysctl_fini (void)
-{
-}
-
-#endif
-
int
kiblnd_tunables_init (void)
{
@@ -482,12 +227,5 @@ kiblnd_tunables_init (void)
*kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
}
- kiblnd_sysctl_init();
return 0;
}
-
-void
-kiblnd_tunables_fini (void)
-{
- kiblnd_sysctl_fini();
-}
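
Every ctl_table removed from this file was dead code: kp30.h (removed earlier in this patch) unconditionally set CFS_SYSFS_MODULE_PARM to 1, so the guard around the sysctl support could never be true and only the empty kiblnd_sysctl_init()/kiblnd_sysctl_fini() stubs were ever built. With the stubs gone, these tunables remain reachable as module parameters (under /sys/module/, given the 0444/0644 permissions above). Sketch of how the guard evaluated in this tree:

	#define CFS_SYSFS_MODULE_PARM 1		/* from the removed kp30.h */

	#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM	/* ... && !1: never taken */
	/* ctl_table definitions, kiblnd_sysctl_init(), kiblnd_sysctl_fini() */
	#endif
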
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 2ddc3aa..8f74d0b 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -2866,7 +2866,6 @@ void __exit
ksocknal_module_fini (void)
{
lnet_unregister_lnd(&the_ksocklnd);
- ksocknal_tunables_fini();
}
int __init
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index b483e0c..df2be7a 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -124,9 +124,6 @@ typedef struct
unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload size */
int *ksnd_zc_recv; /* enable ZC receive (for Chelsio TOE) */
int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to enable ZC receive */
-#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
- ctl_table_header_t *ksnd_sysctl; /* sysctl interface */
-#endif
} ksock_tunables_t;
typedef struct
@@ -592,9 +589,6 @@ extern int ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem,
int *rxmem, int *nagle);
extern int ksocknal_tunables_init(void);
-extern void ksocknal_tunables_fini(void);
-extern int ksocknal_lib_tunables_init(void);
-extern void ksocknal_lib_tunables_fini(void);
extern void ksocknal_lib_csum_tx(ksock_tx_t *tx);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
index a1c6a51..80141aa 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
@@ -36,313 +36,6 @@
#include "socklnd.h"
-# if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
-
-
-enum {
- SOCKLND_TIMEOUT = 1,
- SOCKLND_CREDITS,
- SOCKLND_PEER_TXCREDITS,
- SOCKLND_PEER_RTRCREDITS,
- SOCKLND_PEER_TIMEOUT,
- SOCKLND_NCONNDS,
- SOCKLND_RECONNECTS_MIN,
- SOCKLND_RECONNECTS_MAX,
- SOCKLND_EAGER_ACK,
- SOCKLND_ZERO_COPY,
- SOCKLND_TYPED,
- SOCKLND_BULK_MIN,
- SOCKLND_RX_BUFFER_SIZE,
- SOCKLND_TX_BUFFER_SIZE,
- SOCKLND_NAGLE,
- SOCKLND_IRQ_AFFINITY,
- SOCKLND_ROUND_ROBIN,
- SOCKLND_KEEPALIVE,
- SOCKLND_KEEPALIVE_IDLE,
- SOCKLND_KEEPALIVE_COUNT,
- SOCKLND_KEEPALIVE_INTVL,
- SOCKLND_BACKOFF_INIT,
- SOCKLND_BACKOFF_MAX,
- SOCKLND_PROTOCOL,
- SOCKLND_ZERO_COPY_RECV,
- SOCKLND_ZERO_COPY_RECV_MIN_NFRAGS
-};
-
-static ctl_table_t ksocknal_ctl_table[] = {
- {
- .ctl_name = SOCKLND_TIMEOUT,
- .procname = "timeout",
- .data = &ksocknal_tunables.ksnd_timeout,
- .maxlen = sizeof (int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_CREDITS,
- .procname = "credits",
- .data = &ksocknal_tunables.ksnd_credits,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_PEER_TXCREDITS,
- .procname = "peer_credits",
- .data = &ksocknal_tunables.ksnd_peertxcredits,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_PEER_RTRCREDITS,
- .procname = "peer_buffer_credits",
- .data = &ksocknal_tunables.ksnd_peerrtrcredits,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_PEER_TIMEOUT,
- .procname = "peer_timeout",
- .data = &ksocknal_tunables.ksnd_peertimeout,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_NCONNDS,
- .procname = "nconnds",
- .data = &ksocknal_tunables.ksnd_nconnds,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_RECONNECTS_MIN,
- .procname = "min_reconnectms",
- .data = &ksocknal_tunables.ksnd_min_reconnectms,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_RECONNECTS_MAX,
- .procname = "max_reconnectms",
- .data = &ksocknal_tunables.ksnd_max_reconnectms,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_EAGER_ACK,
- .procname = "eager_ack",
- .data = &ksocknal_tunables.ksnd_eager_ack,
- .maxlen = sizeof (int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_ZERO_COPY,
- .procname = "zero_copy",
- .data = &ksocknal_tunables.ksnd_zc_min_payload,
- .maxlen = sizeof (int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_ZERO_COPY_RECV,
- .procname = "zero_copy_recv",
- .data = &ksocknal_tunables.ksnd_zc_recv,
- .maxlen = sizeof (int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
-
- {
- .ctl_name = SOCKLND_ZERO_COPY_RECV_MIN_NFRAGS,
- .procname = "zero_copy_recv",
- .data = &ksocknal_tunables.ksnd_zc_recv_min_nfrags,
- .maxlen = sizeof (int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_TYPED,
- .procname = "typed",
- .data = &ksocknal_tunables.ksnd_typed_conns,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_BULK_MIN,
- .procname = "min_bulk",
- .data = &ksocknal_tunables.ksnd_min_bulk,
- .maxlen = sizeof (int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_RX_BUFFER_SIZE,
- .procname = "rx_buffer_size",
- .data = &ksocknal_tunables.ksnd_rx_buffer_size,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_TX_BUFFER_SIZE,
- .procname = "tx_buffer_size",
- .data = &ksocknal_tunables.ksnd_tx_buffer_size,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_NAGLE,
- .procname = "nagle",
- .data = &ksocknal_tunables.ksnd_nagle,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_ROUND_ROBIN,
- .procname = "round_robin",
- .data = &ksocknal_tunables.ksnd_round_robin,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_KEEPALIVE,
- .procname = "keepalive",
- .data = &ksocknal_tunables.ksnd_keepalive,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_KEEPALIVE_IDLE,
- .procname = "keepalive_idle",
- .data = &ksocknal_tunables.ksnd_keepalive_idle,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_KEEPALIVE_COUNT,
- .procname = "keepalive_count",
- .data = &ksocknal_tunables.ksnd_keepalive_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_KEEPALIVE_INTVL,
- .procname = "keepalive_intvl",
- .data = &ksocknal_tunables.ksnd_keepalive_intvl,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
-#if SOCKNAL_VERSION_DEBUG
- {
- .ctl_name = SOCKLND_PROTOCOL,
- .procname = "protocol",
- .data = &ksocknal_tunables.ksnd_protocol,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
-#endif
- {0}
-};
-
-
-ctl_table_t ksocknal_top_ctl_table[] = {
- {
- .ctl_name = CTL_SOCKLND,
- .procname = "socknal",
- .data = NULL,
- .maxlen = 0,
- .mode = 0555,
- .child = ksocknal_ctl_table
- },
- { 0 }
-};
-
-int
-ksocknal_lib_tunables_init ()
-{
- if (!*ksocknal_tunables.ksnd_typed_conns) {
- int rc = -EINVAL;
-#if SOCKNAL_VERSION_DEBUG
- if (*ksocknal_tunables.ksnd_protocol < 3)
- rc = 0;
-#endif
- if (rc != 0) {
- CERROR("Protocol V3.x MUST have typed connections\n");
- return rc;
- }
- }
-
- if (*ksocknal_tunables.ksnd_zc_recv_min_nfrags < 2)
- *ksocknal_tunables.ksnd_zc_recv_min_nfrags = 2;
- if (*ksocknal_tunables.ksnd_zc_recv_min_nfrags > LNET_MAX_IOV)
- *ksocknal_tunables.ksnd_zc_recv_min_nfrags = LNET_MAX_IOV;
-
- ksocknal_tunables.ksnd_sysctl =
- register_sysctl_table(ksocknal_top_ctl_table);
-
- if (ksocknal_tunables.ksnd_sysctl == NULL)
- CWARN("Can't setup /proc tunables\n");
-
- return 0;
-}
-
-void
-ksocknal_lib_tunables_fini(void)
-{
- if (ksocknal_tunables.ksnd_sysctl != NULL)
- unregister_sysctl_table(ksocknal_tunables.ksnd_sysctl);
-}
-#else
-int
-ksocknal_lib_tunables_init(void)
-{
- return 0;
-}
-
-void
-ksocknal_lib_tunables_fini(void)
-{
-}
-#endif /* # if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM */
-
int
ksocknal_lib_get_conn_addrs (ksock_conn_t *conn)
{
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
index 1cfc1b1..025cb65 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
@@ -54,7 +54,6 @@
#include <asm/uaccess.h>
#include <asm/irq.h>
-#include <linux/init.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/list.h>
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
index 8a474f6..54c0019 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
@@ -22,123 +22,123 @@
#include "socklnd.h"
static int sock_timeout = 50;
-CFS_MODULE_PARM(sock_timeout, "i", int, 0644,
- "dead socket timeout (seconds)");
+module_param(sock_timeout, int, 0644);
+MODULE_PARM_DESC(sock_timeout, "dead socket timeout (seconds)");
static int credits = 256;
-CFS_MODULE_PARM(credits, "i", int, 0444,
- "# concurrent sends");
+module_param(credits, int, 0444);
+MODULE_PARM_DESC(credits, "# concurrent sends");
static int peer_credits = 8;
-CFS_MODULE_PARM(peer_credits, "i", int, 0444,
- "# concurrent sends to 1 peer");
+module_param(peer_credits, int, 0444);
+MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
static int peer_buffer_credits = 0;
-CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
- "# per-peer router buffer credits");
+module_param(peer_buffer_credits, int, 0444);
+MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");
static int peer_timeout = 180;
-CFS_MODULE_PARM(peer_timeout, "i", int, 0444,
- "Seconds without aliveness news to declare peer dead (<=0 to disable)");
+module_param(peer_timeout, int, 0444);
+MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
/* Number of daemons in each thread pool which is percpt,
* we will estimate reasonable value based on CPUs if it's not set. */
static unsigned int nscheds;
-CFS_MODULE_PARM(nscheds, "i", int, 0444,
- "# scheduler daemons in each pool while starting");
+module_param(nscheds, int, 0444);
+MODULE_PARM_DESC(nscheds, "# scheduler daemons in each pool while starting");
static int nconnds = 4;
-CFS_MODULE_PARM(nconnds, "i", int, 0444,
- "# connection daemons while starting");
+module_param(nconnds, int, 0444);
+MODULE_PARM_DESC(nconnds, "# connection daemons while starting");
static int nconnds_max = 64;
-CFS_MODULE_PARM(nconnds_max, "i", int, 0444,
- "max # connection daemons");
+module_param(nconnds_max, int, 0444);
+MODULE_PARM_DESC(nconnds_max, "max # connection daemons");
static int min_reconnectms = 1000;
-CFS_MODULE_PARM(min_reconnectms, "i", int, 0644,
- "min connection retry interval (mS)");
+module_param(min_reconnectms, int, 0644);
+MODULE_PARM_DESC(min_reconnectms, "min connection retry interval (mS)");
static int max_reconnectms = 60000;
-CFS_MODULE_PARM(max_reconnectms, "i", int, 0644,
- "max connection retry interval (mS)");
+module_param(max_reconnectms, int, 0644);
+MODULE_PARM_DESC(max_reconnectms, "max connection retry interval (mS)");
# define DEFAULT_EAGER_ACK 0
static int eager_ack = DEFAULT_EAGER_ACK;
-CFS_MODULE_PARM(eager_ack, "i", int, 0644,
- "send tcp ack packets eagerly");
+module_param(eager_ack, int, 0644);
+MODULE_PARM_DESC(eager_ack, "send tcp ack packets eagerly");
static int typed_conns = 1;
-CFS_MODULE_PARM(typed_conns, "i", int, 0444,
- "use different sockets for bulk");
+module_param(typed_conns, int, 0444);
+MODULE_PARM_DESC(typed_conns, "use different sockets for bulk");
static int min_bulk = (1<<10);
-CFS_MODULE_PARM(min_bulk, "i", int, 0644,
- "smallest 'large' message");
+module_param(min_bulk, int, 0644);
+MODULE_PARM_DESC(min_bulk, "smallest 'large' message");
# define DEFAULT_BUFFER_SIZE 0
static int tx_buffer_size = DEFAULT_BUFFER_SIZE;
-CFS_MODULE_PARM(tx_buffer_size, "i", int, 0644,
- "socket tx buffer size (0 for system default)");
+module_param(tx_buffer_size, int, 0644);
+MODULE_PARM_DESC(tx_buffer_size, "socket tx buffer size (0 for system default)");
static int rx_buffer_size = DEFAULT_BUFFER_SIZE;
-CFS_MODULE_PARM(rx_buffer_size, "i", int, 0644,
- "socket rx buffer size (0 for system default)");
+module_param(rx_buffer_size, int, 0644);
+MODULE_PARM_DESC(rx_buffer_size, "socket rx buffer size (0 for system default)");
static int nagle = 0;
-CFS_MODULE_PARM(nagle, "i", int, 0644,
- "enable NAGLE?");
+module_param(nagle, int, 0644);
+MODULE_PARM_DESC(nagle, "enable NAGLE?");
static int round_robin = 1;
-CFS_MODULE_PARM(round_robin, "i", int, 0644,
- "Round robin for multiple interfaces");
+module_param(round_robin, int, 0644);
+MODULE_PARM_DESC(round_robin, "Round robin for multiple interfaces");
static int keepalive = 30;
-CFS_MODULE_PARM(keepalive, "i", int, 0644,
- "# seconds before send keepalive");
+module_param(keepalive, int, 0644);
+MODULE_PARM_DESC(keepalive, "# seconds before send keepalive");
static int keepalive_idle = 30;
-CFS_MODULE_PARM(keepalive_idle, "i", int, 0644,
- "# idle seconds before probe");
+module_param(keepalive_idle, int, 0644);
+MODULE_PARM_DESC(keepalive_idle, "# idle seconds before probe");
#define DEFAULT_KEEPALIVE_COUNT 5
static int keepalive_count = DEFAULT_KEEPALIVE_COUNT;
-CFS_MODULE_PARM(keepalive_count, "i", int, 0644,
- "# missed probes == dead");
+module_param(keepalive_count, int, 0644);
+MODULE_PARM_DESC(keepalive_count, "# missed probes == dead");
static int keepalive_intvl = 5;
-CFS_MODULE_PARM(keepalive_intvl, "i", int, 0644,
- "seconds between probes");
+module_param(keepalive_intvl, int, 0644);
+MODULE_PARM_DESC(keepalive_intvl, "seconds between probes");
static int enable_csum = 0;
-CFS_MODULE_PARM(enable_csum, "i", int, 0644,
- "enable check sum");
+module_param(enable_csum, int, 0644);
+MODULE_PARM_DESC(enable_csum, "enable check sum");
static int inject_csum_error = 0;
-CFS_MODULE_PARM(inject_csum_error, "i", int, 0644,
- "set non-zero to inject a checksum error");
+module_param(inject_csum_error, int, 0644);
+MODULE_PARM_DESC(inject_csum_error, "set non-zero to inject a checksum error");
static int nonblk_zcack = 1;
-CFS_MODULE_PARM(nonblk_zcack, "i", int, 0644,
- "always send ZC-ACK on non-blocking connection");
+module_param(nonblk_zcack, int, 0644);
+MODULE_PARM_DESC(nonblk_zcack, "always send ZC-ACK on non-blocking connection");
static unsigned int zc_min_payload = (16 << 10);
-CFS_MODULE_PARM(zc_min_payload, "i", int, 0644,
- "minimum payload size to zero copy");
+module_param(zc_min_payload, int, 0644);
+MODULE_PARM_DESC(zc_min_payload, "minimum payload size to zero copy");
static unsigned int zc_recv = 0;
-CFS_MODULE_PARM(zc_recv, "i", int, 0644,
- "enable ZC recv for Chelsio driver");
+module_param(zc_recv, int, 0644);
+MODULE_PARM_DESC(zc_recv, "enable ZC recv for Chelsio driver");
static unsigned int zc_recv_min_nfrags = 16;
-CFS_MODULE_PARM(zc_recv_min_nfrags, "i", int, 0644,
- "minimum # of fragments to enable ZC recv");
+module_param(zc_recv_min_nfrags, int, 0644);
+MODULE_PARM_DESC(zc_recv_min_nfrags, "minimum # of fragments to enable ZC recv");
#if SOCKNAL_VERSION_DEBUG
static int protocol = 3;
-CFS_MODULE_PARM(protocol, "i", int, 0644,
- "protocol version");
+module_param(protocol, int, 0644);
+MODULE_PARM_DESC(protocol, "protocol version");
#endif
ksock_tunables_t ksocknal_tunables;
@@ -181,18 +181,8 @@ int ksocknal_tunables_init(void)
ksocknal_tunables.ksnd_protocol = &protocol;
#endif
-#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
- ksocknal_tunables.ksnd_sysctl = NULL;
-#endif
-
if (*ksocknal_tunables.ksnd_zc_min_payload < (2 << 10))
*ksocknal_tunables.ksnd_zc_min_payload = (2 << 10);
- /* initialize platform-sepcific tunables */
- return ksocknal_lib_tunables_init();
+ return 0;
};
-
-void ksocknal_tunables_fini(void)
-{
- ksocknal_lib_tunables_fini();
-}
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index 92c60a7..cb2ecd7 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -64,14 +64,14 @@ lnet_accept_magic(__u32 magic, __u32 constant)
static char *accept = "secure";
-CFS_MODULE_PARM(accept, "s", charp, 0444,
- "Accept connections (secure|all|none)");
-CFS_MODULE_PARM(accept_port, "i", int, 0444,
- "Acceptor's port (same on all nodes)");
-CFS_MODULE_PARM(accept_backlog, "i", int, 0444,
- "Acceptor's listen backlog");
-CFS_MODULE_PARM(accept_timeout, "i", int, 0644,
- "Acceptor's timeout (seconds)");
+module_param(accept, charp, 0444);
+MODULE_PARM_DESC(accept, "Accept connections (secure|all|none)");
+module_param(accept_port, int, 0444);
+MODULE_PARM_DESC(accept_port, "Acceptor's port (same on all nodes)");
+module_param(accept_backlog, int, 0444);
+MODULE_PARM_DESC(accept_backlog, "Acceptor's listen backlog");
+module_param(accept_timeout, int, 0644);
+MODULE_PARM_DESC(accept_timeout, "Acceptor's timeout (seconds)");
static char *accept_type;
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 160a429..c562ff3 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -45,20 +45,20 @@ EXPORT_SYMBOL(the_lnet);
static char *ip2nets = "";
-CFS_MODULE_PARM(ip2nets, "s", charp, 0444,
- "LNET network <- IP table");
+module_param(ip2nets, charp, 0444);
+MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
static char *networks = "";
-CFS_MODULE_PARM(networks, "s", charp, 0444,
- "local networks");
+module_param(networks, charp, 0444);
+MODULE_PARM_DESC(networks, "local networks");
static char *routes = "";
-CFS_MODULE_PARM(routes, "s", charp, 0444,
- "routes to non-local networks");
+module_param(routes, charp, 0444);
+MODULE_PARM_DESC(routes, "routes to non-local networks");
static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
-CFS_MODULE_PARM(rnet_htable_size, "i", int, 0444,
- "size of remote network hash table");
+module_param(rnet_htable_size, int, 0444);
+MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
char *
lnet_get_routes(void)
@@ -1436,7 +1436,7 @@ LNetCtl(unsigned int cmd, void *arg)
case IOC_LIBCFS_ADD_ROUTE:
rc = lnet_add_route(data->ioc_net, data->ioc_count,
- data->ioc_nid);
+ data->ioc_nid, data->ioc_priority);
return (rc != 0) ? rc : lnet_check_routes();
case IOC_LIBCFS_DEL_ROUTE:
@@ -1445,7 +1445,8 @@ LNetCtl(unsigned int cmd, void *arg)
case IOC_LIBCFS_GET_ROUTE:
return lnet_get_route(data->ioc_count,
&data->ioc_net, &data->ioc_count,
- &data->ioc_nid, &data->ioc_flags);
+ &data->ioc_nid, &data->ioc_flags,
+ &data->ioc_priority);
case IOC_LIBCFS_NOTIFY_ROUTER:
return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
cfs_time_current() -
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index de323f7..6a07b0a 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -603,6 +603,37 @@ lnet_parse_hops(char *str, unsigned int *hops)
*hops > 0 && *hops < 256);
}
+#define LNET_PRIORITY_SEPARATOR (':')
+
+int
+lnet_parse_priority(char *str, unsigned int *priority, char **token)
+{
+ int nob;
+ char *sep;
+ int len;
+
+ sep = strchr(str, LNET_PRIORITY_SEPARATOR);
+ if (sep == NULL) {
+ *priority = 0;
+ return 0;
+ }
+ len = strlen(sep + 1);
+
+ if ((sscanf((sep+1), "%u%n", priority, &nob) < 1) || (len != nob)) {
+ /* Update the caller's token pointer so it treats the found
+ priority as the token to report in the error message. */
+ *token += sep - str + 1;
+ return -1;
+ }
+
+ CDEBUG(D_NET, "gateway %s, priority %d, nob %d\n", str, *priority, nob);
+
+ /*
+ * Change priority separator to \0 to be able to parse NID
+ */
+ *sep = '\0';
+ return 0;
+}
int
lnet_parse_route(char *str, int *im_a_router)
@@ -624,6 +655,7 @@ lnet_parse_route(char *str, int *im_a_router)
int myrc = -1;
unsigned int hops;
int got_hops = 0;
+ unsigned int priority = 0;
INIT_LIST_HEAD(&gateways);
INIT_LIST_HEAD(&nets);
@@ -691,6 +723,11 @@ lnet_parse_route(char *str, int *im_a_router)
LNET_NETTYP(net) == LOLND)
goto token_error;
} else {
+ rc = lnet_parse_priority(ltb->ltb_text,
+ &priority, &token);
+ if (rc < 0)
+ goto token_error;
+
nid = libcfs_str2nid(ltb->ltb_text);
if (nid == LNET_NID_ANY ||
LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
@@ -720,7 +757,7 @@ lnet_parse_route(char *str, int *im_a_router)
continue;
}
- rc = lnet_add_route(net, hops, nid);
+ rc = lnet_add_route(net, hops, nid, priority);
if (rc != 0) {
CERROR("Can't create route to %s via %s\n",
libcfs_net2str(net),
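
The new lnet_parse_priority() above lets a gateway token in the "routes" string carry an optional ":<priority>" suffix, which it strips before libcfs_str2nid() sees the NID; no suffix means priority 0, and a malformed number is reported via the adjusted token pointer. A stand-alone sketch of that parsing rule, using the same ':' separator:

	#include <stdio.h>
	#include <string.h>

	/* Parse "<nid>[:<priority>]" in place: strip the suffix, return the
	 * priority (0 if absent), or -1 if the suffix is not a clean number. */
	static int parse_priority(char *str, unsigned int *priority)
	{
		int nob = 0;
		char *sep = strchr(str, ':');

		if (sep == NULL) {
			*priority = 0;
			return 0;
		}
		if (sscanf(sep + 1, "%u%n", priority, &nob) < 1 ||
		    nob != (int)strlen(sep + 1))
			return -1;

		*sep = '\0';	/* leave a clean NID for the next parsing step */
		return 0;
	}
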
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index b6f8ad3..bbf43ae 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -43,8 +43,8 @@
#include <linux/lnet/lib-lnet.h>
static int local_nid_dist_zero = 1;
-CFS_MODULE_PARM(local_nid_dist_zero, "i", int, 0444,
- "Reserved");
+module_param(local_nid_dist_zero, int, 0444);
+MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
@@ -1074,6 +1074,12 @@ lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
lnet_peer_t *p1 = r1->lr_gateway;
lnet_peer_t *p2 = r2->lr_gateway;
+ if (r1->lr_priority < r2->lr_priority)
+ return 1;
+
+ if (r1->lr_priority > r2->lr_priority)
+ return -1;
+
if (r1->lr_hops < r2->lr_hops)
return 1;
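
lnet_compare_routes() now prefers the route with the numerically lower lr_priority, and only compares hop counts (and then the remaining gateway-based checks further down in the function) when the priorities tie. A reduced sketch of the new ordering:

	/* >0 means route 1 wins, <0 means route 2 wins, 0 means fall through
	 * to the remaining comparison in the real function. */
	static int compare_routes_sketch(unsigned int pri1, unsigned int hops1,
					 unsigned int pri2, unsigned int hops2)
	{
		if (pri1 != pri2)
			return pri1 < pri2 ? 1 : -1;
		if (hops1 != hops2)
			return hops1 < hops2 ? 1 : -1;
		return 0;
	}
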
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index 61ae88b..761f1e1 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -43,7 +43,7 @@
#include <linux/lnet/lib-lnet.h>
void
-lnet_build_unlink_event (lnet_libmd_t *md, lnet_event_t *ev)
+lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev)
{
memset(ev, 0, sizeof(*ev));
@@ -362,7 +362,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
int rc;
int status = msg->msg_ev.status;
- LASSERT (msg->msg_onactivelist);
+ LASSERT(msg->msg_onactivelist);
if (status == 0 && msg->msg_ack) {
/* Only send an ACK if the PUT completed successfully */
@@ -432,7 +432,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
}
void
-lnet_finalize (lnet_ni_t *ni, lnet_msg_t *msg, int status)
+lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
{
struct lnet_msg_container *container;
int my_slot;
@@ -440,7 +440,7 @@ lnet_finalize (lnet_ni_t *ni, lnet_msg_t *msg, int status)
int rc;
int i;
- LASSERT (!in_interrupt ());
+ LASSERT(!in_interrupt());
if (msg == NULL)
return;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 9b9e7d31..6fffd5e 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -40,8 +40,8 @@
/* NB: add /proc interfaces in upcoming patches */
int portal_rotor = LNET_PTL_ROTOR_HASH_RT;
-CFS_MODULE_PARM(portal_rotor, "i", int, 0644,
- "redirect PUTs to different cpu-partitions");
+module_param(portal_rotor, int, 0644);
+MODULE_PARM_DESC(portal_rotor, "redirect PUTs to different cpu-partitions");
static int
lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index 6db8774..3bd42a4 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -38,8 +38,8 @@
#include <linux/lnet/lib-lnet.h>
static int config_on_load;
-CFS_MODULE_PARM(config_on_load, "i", int, 0444,
- "configure network at module load");
+module_param(config_on_load, int, 0444);
+MODULE_PARM_DESC(config_on_load, "configure network at module load");
static struct mutex lnet_config_mutex;
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index a326ce0..d1ee442 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -34,25 +34,25 @@
#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
static char *forwarding = "";
-CFS_MODULE_PARM(forwarding, "s", charp, 0444,
- "Explicitly enable/disable forwarding between networks");
+module_param(forwarding, charp, 0444);
+MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");
static int tiny_router_buffers;
-CFS_MODULE_PARM(tiny_router_buffers, "i", int, 0444,
- "# of 0 payload messages to buffer in the router");
+module_param(tiny_router_buffers, int, 0444);
+MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
static int small_router_buffers;
-CFS_MODULE_PARM(small_router_buffers, "i", int, 0444,
- "# of small (1 page) messages to buffer in the router");
+module_param(small_router_buffers, int, 0444);
+MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
static int large_router_buffers;
-CFS_MODULE_PARM(large_router_buffers, "i", int, 0444,
- "# of large messages to buffer in the router");
+module_param(large_router_buffers, int, 0444);
+MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
static int peer_buffer_credits = 0;
-CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
- "# router buffer credits per peer");
+module_param(peer_buffer_credits, int, 0444);
+MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");
static int auto_down = 1;
-CFS_MODULE_PARM(auto_down, "i", int, 0444,
- "Automatically mark peers down on comms error");
+module_param(auto_down, int, 0444);
+MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
int
lnet_peer_buffer_credits(lnet_ni_t *ni)
@@ -81,24 +81,24 @@ lnet_peer_buffer_credits(lnet_ni_t *ni)
#endif
static int check_routers_before_use = 0;
-CFS_MODULE_PARM(check_routers_before_use, "i", int, 0444,
- "Assume routers are down and ping them before use");
+module_param(check_routers_before_use, int, 0444);
+MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");
static int avoid_asym_router_failure = 1;
-CFS_MODULE_PARM(avoid_asym_router_failure, "i", int, 0644,
- "Avoid asymmetrical router failures (0 to disable)");
+module_param(avoid_asym_router_failure, int, 0644);
+MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");
static int dead_router_check_interval = 60;
-CFS_MODULE_PARM(dead_router_check_interval, "i", int, 0644,
- "Seconds between dead router health checks (<= 0 to disable)");
+module_param(dead_router_check_interval, int, 0644);
+MODULE_PARM_DESC(dead_router_check_interval, "Seconds between dead router health checks (<= 0 to disable)");
static int live_router_check_interval = 60;
-CFS_MODULE_PARM(live_router_check_interval, "i", int, 0644,
- "Seconds between live router health checks (<= 0 to disable)");
+module_param(live_router_check_interval, int, 0644);
+MODULE_PARM_DESC(live_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");
static int router_ping_timeout = 50;
-CFS_MODULE_PARM(router_ping_timeout, "i", int, 0644,
- "Seconds to wait for the reply to a router health query");
+module_param(router_ping_timeout, int, 0644);
+MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");
int
lnet_peers_start_down(void)
@@ -301,7 +301,8 @@ lnet_add_route_to_rnet (lnet_remotenet_t *rnet, lnet_route_t *route)
}
int
-lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
+lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
+ unsigned int priority)
{
struct list_head *e;
lnet_remotenet_t *rnet;
@@ -311,8 +312,8 @@ lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
int add_route;
int rc;
- CDEBUG(D_NET, "Add route: net %s hops %u gw %s\n",
- libcfs_net2str(net), hops, libcfs_nid2str(gateway));
+ CDEBUG(D_NET, "Add route: net %s hops %u priority %u gw %s\n",
+ libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
if (gateway == LNET_NID_ANY ||
LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
@@ -342,6 +343,7 @@ lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
rnet->lrn_net = net;
route->lr_hops = hops;
route->lr_net = net;
+ route->lr_priority = priority;
lnet_net_lock(LNET_LOCK_EX);
@@ -552,7 +554,7 @@ lnet_destroy_routes (void)
int
lnet_get_route(int idx, __u32 *net, __u32 *hops,
- lnet_nid_t *gateway, __u32 *alive)
+ lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
{
struct list_head *e1;
struct list_head *e2;
@@ -574,10 +576,11 @@ lnet_get_route(int idx, __u32 *net, __u32 *hops,
lr_list);
if (idx-- == 0) {
- *net = rnet->lrn_net;
- *hops = route->lr_hops;
- *gateway = route->lr_gateway->lp_nid;
- *alive = route->lr_gateway->lp_alive;
+ *net = rnet->lrn_net;
+ *hops = route->lr_hops;
+ *priority = route->lr_priority;
+ *gateway = route->lr_gateway->lp_nid;
+ *alive = route->lr_gateway->lp_alive;
lnet_net_unlock(cpt);
return 0;
}
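
The hunks above swap the libcfs CFS_MODULE_PARM() wrapper for the stock module_param()/MODULE_PARM_DESC() pair. For reference only (not part of the patch; the parameter name below is invented), a converted tunable looks like this:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* hypothetical read-only tunable, visible under /sys/module/<mod>/parameters/ */
static int example_credits = 8;
module_param(example_credits, int, 0444);
MODULE_PARM_DESC(example_credits, "# of example buffer credits (8 by default)");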
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index 5e47de3..20d53e0 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -174,8 +174,8 @@ int LL_PROC_PROTO(proc_lnet_routes)
the_lnet.ln_routing ? "enabled" : "disabled");
LASSERT(tmpstr + tmpsiz - s > 0);
- s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4s %7s %s\n",
- "net", "hops", "state", "router");
+ s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4s %8s %7s %s\n",
+ "net", "hops", "priority", "state", "router");
LASSERT(tmpstr + tmpsiz - s > 0);
lnet_net_lock(0);
@@ -229,14 +229,16 @@ int LL_PROC_PROTO(proc_lnet_routes)
}
if (route != NULL) {
- __u32 net = rnet->lrn_net;
- unsigned int hops = route->lr_hops;
- lnet_nid_t nid = route->lr_gateway->lp_nid;
- int alive = route->lr_gateway->lp_alive;
+ __u32 net = rnet->lrn_net;
+ unsigned int hops = route->lr_hops;
+ unsigned int priority = route->lr_priority;
+ lnet_nid_t nid = route->lr_gateway->lp_nid;
+ int alive = route->lr_gateway->lp_alive;
s += snprintf(s, tmpstr + tmpsiz - s,
- "%-8s %4u %7s %s\n",
+ "%-8s %4u %8u %7s %s\n",
libcfs_net2str(net), hops,
+ priority,
alive ? "up" : "down",
libcfs_nid2str(nid));
LASSERT(tmpstr + tmpsiz - s > 0);
@@ -855,55 +857,46 @@ static ctl_table_t lnet_table[] = {
* to go via /proc for portability.
*/
{
- INIT_CTL_NAME(PSDEV_LNET_STATS)
.procname = "stats",
.mode = 0644,
.proc_handler = &proc_lnet_stats,
},
{
- INIT_CTL_NAME(PSDEV_LNET_ROUTES)
.procname = "routes",
.mode = 0444,
.proc_handler = &proc_lnet_routes,
},
{
- INIT_CTL_NAME(PSDEV_LNET_ROUTERS)
.procname = "routers",
.mode = 0444,
.proc_handler = &proc_lnet_routers,
},
{
- INIT_CTL_NAME(PSDEV_LNET_PEERS)
.procname = "peers",
.mode = 0444,
.proc_handler = &proc_lnet_peers,
},
{
- INIT_CTL_NAME(PSDEV_LNET_PEERS)
.procname = "buffers",
.mode = 0444,
.proc_handler = &proc_lnet_buffers,
},
{
- INIT_CTL_NAME(PSDEV_LNET_NIS)
.procname = "nis",
.mode = 0444,
.proc_handler = &proc_lnet_nis,
},
{
- INIT_CTL_NAME(PSDEV_LNET_PTL_ROTOR)
.procname = "portal_rotor",
.mode = 0644,
.proc_handler = &proc_lnet_portal_rotor,
},
{
- INIT_CTL_NAME(0)
}
};
static ctl_table_t top_table[] = {
{
- INIT_CTL_NAME(CTL_LNET)
.procname = "lnet",
.mode = 0555,
.data = NULL,
@@ -911,28 +904,23 @@ static ctl_table_t top_table[] = {
.child = lnet_table,
},
{
- INIT_CTL_NAME(0)
}
};
void
lnet_proc_init(void)
{
-#ifdef CONFIG_SYSCTL
if (lnet_table_header == NULL)
lnet_table_header = register_sysctl_table(top_table);
-#endif
}
void
lnet_proc_fini(void)
{
-#ifdef CONFIG_SYSCTL
if (lnet_table_header != NULL)
unregister_sysctl_table(lnet_table_header);
lnet_table_header = NULL;
-#endif
}
#else
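
The router_proc.c hunks drop the legacy INIT_CTL_NAME() initializers (the binary-sysctl ctl_name field is long gone) and the CONFIG_SYSCTL guards around registration, since linux/sysctl.h already provides no-op stubs for register_sysctl_table()/unregister_sysctl_table() when sysctl support is disabled. A minimal sketch of the resulting pattern, with invented names and a stock handler rather than the driver's own:

#include <linux/sysctl.h>

static int example_stat;                        /* hypothetical tunable */
static struct ctl_table_header *example_header;

static struct ctl_table example_table[] = {
        {
                .procname     = "stat",
                .data         = &example_stat,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = proc_dointvec,
        },
        { }     /* zero-filled sentinel terminates the table */
};

static struct ctl_table example_root[] = {
        {
                .procname = "example",
                .mode     = 0555,
                .child    = example_table,
        },
        { }
};

static void example_sysctl_init(void)
{
        /* becomes a harmless no-op when CONFIG_SYSCTL is off */
        if (example_header == NULL)
                example_header = register_sysctl_table(example_root);
}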
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index b7613c8..3f8020c 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -41,11 +41,12 @@
#include "selftest.h"
static int brw_srv_workitems = SFW_TEST_WI_MAX;
-CFS_MODULE_PARM(brw_srv_workitems, "i", int, 0644, "# BRW server workitems");
+module_param(brw_srv_workitems, int, 0644);
+MODULE_PARM_DESC(brw_srv_workitems, "# BRW server workitems");
static int brw_inject_errors;
-CFS_MODULE_PARM(brw_inject_errors, "i", int, 0644,
- "# data errors to inject randomly, zero by default");
+module_param(brw_inject_errors, int, 0644);
+MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
static void
brw_client_fini(sfw_test_instance_t *tsi)
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index bce3d3b..68e1a17 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -96,11 +96,11 @@ lst_session_info_ioctl(lstio_session_info_args_t *args)
{
/* no checking of key */
- if (args->lstio_ses_idp == NULL || /* address for ouput sid */
- args->lstio_ses_keyp == NULL || /* address for ouput key */
- args->lstio_ses_featp == NULL || /* address for ouput features */
+ if (args->lstio_ses_idp == NULL || /* address for output sid */
+ args->lstio_ses_keyp == NULL || /* address for output key */
+ args->lstio_ses_featp == NULL || /* address for output features */
args->lstio_ses_ndinfo == NULL || /* address for output ndinfo */
- args->lstio_ses_namep == NULL || /* address for ouput name */
+ args->lstio_ses_namep == NULL || /* address for output name */
args->lstio_ses_nmlen <= 0 ||
args->lstio_ses_nmlen > LST_NAME_SIZE)
return -EINVAL;
@@ -723,12 +723,12 @@ lst_stat_query_ioctl(lstio_stat_args_t *args)
int lst_test_add_ioctl(lstio_test_args_t *args)
{
- char *name;
- char *srcgrp = NULL;
- char *dstgrp = NULL;
- void *param = NULL;
- int ret = 0;
- int rc = -ENOMEM;
+ char *batch_name;
+ char *src_name = NULL;
+ char *dst_name = NULL;
+ void *param = NULL;
+ int ret = 0;
+ int rc = -ENOMEM;
if (args->lstio_tes_resultp == NULL ||
args->lstio_tes_retp == NULL ||
@@ -755,16 +755,16 @@ int lst_test_add_ioctl(lstio_test_args_t *args)
args->lstio_tes_param_len > PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
return -EINVAL;
- LIBCFS_ALLOC(name, args->lstio_tes_bat_nmlen + 1);
- if (name == NULL)
+ LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
+ if (batch_name == NULL)
return rc;
- LIBCFS_ALLOC(srcgrp, args->lstio_tes_sgrp_nmlen + 1);
- if (srcgrp == NULL)
+ LIBCFS_ALLOC(src_name, args->lstio_tes_sgrp_nmlen + 1);
+ if (src_name == NULL)
goto out;
- LIBCFS_ALLOC(dstgrp, args->lstio_tes_dgrp_nmlen + 1);
- if (dstgrp == NULL)
+ LIBCFS_ALLOC(dst_name, args->lstio_tes_dgrp_nmlen + 1);
+ if (dst_name == NULL)
goto out;
if (args->lstio_tes_param != NULL) {
@@ -774,39 +774,37 @@ int lst_test_add_ioctl(lstio_test_args_t *args)
}
rc = -EFAULT;
- if (copy_from_user(name,
- args->lstio_tes_bat_name,
- args->lstio_tes_bat_nmlen) ||
- copy_from_user(srcgrp,
- args->lstio_tes_sgrp_name,
- args->lstio_tes_sgrp_nmlen) ||
- copy_from_user(dstgrp,
- args->lstio_tes_dgrp_name,
- args->lstio_tes_dgrp_nmlen) ||
+ if (copy_from_user(batch_name, args->lstio_tes_bat_name,
+ args->lstio_tes_bat_nmlen) ||
+ copy_from_user(src_name, args->lstio_tes_sgrp_name,
+ args->lstio_tes_sgrp_nmlen) ||
+ copy_from_user(dst_name, args->lstio_tes_dgrp_name,
+ args->lstio_tes_dgrp_nmlen) ||
copy_from_user(param, args->lstio_tes_param,
args->lstio_tes_param_len))
goto out;
- rc = lstcon_test_add(name,
+ rc = lstcon_test_add(batch_name,
args->lstio_tes_type,
args->lstio_tes_loop,
args->lstio_tes_concur,
args->lstio_tes_dist, args->lstio_tes_span,
- srcgrp, dstgrp, param, args->lstio_tes_param_len,
+ src_name, dst_name, param,
+ args->lstio_tes_param_len,
&ret, args->lstio_tes_resultp);
if (ret != 0)
rc = (copy_to_user(args->lstio_tes_retp, &ret,
sizeof(ret))) ? -EFAULT : 0;
out:
- if (name != NULL)
- LIBCFS_FREE(name, args->lstio_tes_bat_nmlen + 1);
+ if (batch_name != NULL)
+ LIBCFS_FREE(batch_name, args->lstio_tes_bat_nmlen + 1);
- if (srcgrp != NULL)
- LIBCFS_FREE(srcgrp, args->lstio_tes_sgrp_nmlen + 1);
+ if (src_name != NULL)
+ LIBCFS_FREE(src_name, args->lstio_tes_sgrp_nmlen + 1);
- if (dstgrp != NULL)
- LIBCFS_FREE(dstgrp, args->lstio_tes_dgrp_nmlen + 1);
+ if (dst_name != NULL)
+ LIBCFS_FREE(dst_name, args->lstio_tes_dgrp_nmlen + 1);
if (param != NULL)
LIBCFS_FREE(param, args->lstio_tes_param_len);
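
The lst_test_add_ioctl() changes above are mostly renames, but they preserve the usual "allocate, copy_from_user, bail out through one cleanup label" shape. A generic sketch of that shape with stock helpers (kzalloc/kfree stand in for the driver's LIBCFS_ALLOC/LIBCFS_FREE; the function itself is invented):

#include <linux/slab.h>
#include <linux/uaccess.h>

static int example_copy_name(const char __user *uname, size_t len, char **out)
{
        char *name;
        int rc = -ENOMEM;

        name = kzalloc(len + 1, GFP_KERNEL);    /* +1 keeps the copy NUL-terminated */
        if (name == NULL)
                return rc;

        rc = -EFAULT;
        if (copy_from_user(name, uname, len))   /* returns bytes NOT copied */
                goto out_free;

        *out = name;
        return 0;

out_free:
        kfree(name);
        return rc;
}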
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 9a52f25..53d5892 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -311,7 +311,7 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
sfw_abort_rpc(rpc);
- if (error != ETIMEDOUT)
+ if (error != ETIMEDOUT)
continue;
nd = crpc->crp_node;
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index f1152e4..2a8eddc 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -265,7 +265,7 @@ lstcon_group_decref(lstcon_group_t *grp)
}
static int
-lstcon_group_find(char *name, lstcon_group_t **grpp)
+lstcon_group_find(const char *name, lstcon_group_t **grpp)
{
lstcon_group_t *grp;
@@ -614,7 +614,7 @@ lstcon_group_del(char *name)
lstcon_group_put(grp);
/* -ref for session, it's destroyed,
- * status can't be rolled back, destroy group anway */
+ * status can't be rolled back, destroy group anyway */
lstcon_group_put(grp);
return rc;
@@ -831,7 +831,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
}
int
-lstcon_batch_find(char *name, lstcon_batch_t **batpp)
+lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
{
lstcon_batch_t *bat;
@@ -1237,41 +1237,77 @@ again:
goto again;
}
-int
-lstcon_test_add(char *name, int type, int loop, int concur,
- int dist, int span, char *src_name, char * dst_name,
- void *param, int paramlen, int *retp,
- struct list_head *result_up)
+static int
+lstcon_verify_batch(const char *name, lstcon_batch_t **batch)
{
- lstcon_group_t *src_grp = NULL;
- lstcon_group_t *dst_grp = NULL;
- lstcon_test_t *test = NULL;
- lstcon_batch_t *batch;
- int rc;
+ int rc;
- rc = lstcon_batch_find(name, &batch);
+ rc = lstcon_batch_find(name, batch);
if (rc != 0) {
CDEBUG(D_NET, "Can't find batch %s\n", name);
return rc;
}
- if (batch->bat_state != LST_BATCH_IDLE) {
+ if ((*batch)->bat_state != LST_BATCH_IDLE) {
CDEBUG(D_NET, "Can't change running batch %s\n", name);
- return rc;
+ return -EINVAL;
}
- rc = lstcon_group_find(src_name, &src_grp);
+ return 0;
+}
+
+static int
+lstcon_verify_group(const char *name, lstcon_group_t **grp)
+{
+ int rc;
+ lstcon_ndlink_t *ndl;
+
+ rc = lstcon_group_find(name, grp);
if (rc != 0) {
- CDEBUG(D_NET, "Can't find group %s\n", src_name);
- goto out;
+ CDEBUG(D_NET, "can't find group %s\n", name);
+ return rc;
}
- rc = lstcon_group_find(dst_name, &dst_grp);
- if (rc != 0) {
- CDEBUG(D_NET, "Can't find group %s\n", dst_name);
- goto out;
+ list_for_each_entry(ndl, &(*grp)->grp_ndl_list, ndl_link) {
+ if (ndl->ndl_node->nd_state == LST_NODE_ACTIVE)
+ return 0;
}
+ CDEBUG(D_NET, "Group %s has no ACTIVE nodes\n", name);
+
+ return -EINVAL;
+}
+
+int
+lstcon_test_add(char *batch_name, int type, int loop,
+ int concur, int dist, int span,
+ char *src_name, char *dst_name,
+ void *param, int paramlen, int *retp,
+ struct list_head *result_up)
+{
+ lstcon_test_t *test = NULL;
+ int rc;
+ lstcon_group_t *src_grp = NULL;
+ lstcon_group_t *dst_grp = NULL;
+ lstcon_batch_t *batch = NULL;
+
+ /*
+ * verify that a batch of the given name exists, and the groups
+ * that will be part of the batch exist and have at least one
+ * active node
+ */
+ rc = lstcon_verify_batch(batch_name, &batch);
+ if (rc != 0)
+ goto out;
+
+ rc = lstcon_verify_group(src_name, &src_grp);
+ if (rc != 0)
+ goto out;
+
+ rc = lstcon_verify_group(dst_name, &dst_grp);
+ if (rc != 0)
+ goto out;
+
if (dst_grp->grp_userland)
*retp = 1;
@@ -1284,18 +1320,18 @@ lstcon_test_add(char *name, int type, int loop, int concur,
}
memset(test, 0, offsetof(lstcon_test_t, tes_param[paramlen]));
- test->tes_hdr.tsb_id = batch->bat_hdr.tsb_id;
- test->tes_batch = batch;
- test->tes_type = type;
- test->tes_oneside = 0; /* TODO */
- test->tes_loop = loop;
+ test->tes_hdr.tsb_id = batch->bat_hdr.tsb_id;
+ test->tes_batch = batch;
+ test->tes_type = type;
+ test->tes_oneside = 0; /* TODO */
+ test->tes_loop = loop;
test->tes_concur = concur;
- test->tes_stop_onerr = 1; /* TODO */
- test->tes_span = span;
- test->tes_dist = dist;
+ test->tes_stop_onerr = 1; /* TODO */
+ test->tes_span = span;
+ test->tes_dist = dist;
test->tes_cliidx = 0; /* just used for creating RPC */
- test->tes_src_grp = src_grp;
- test->tes_dst_grp = dst_grp;
+ test->tes_src_grp = src_grp;
+ test->tes_dst_grp = dst_grp;
INIT_LIST_HEAD(&test->tes_trans_list);
if (param != NULL) {
@@ -1310,7 +1346,8 @@ lstcon_test_add(char *name, int type, int loop, int concur,
if (lstcon_trans_stat()->trs_rpc_errno != 0 ||
lstcon_trans_stat()->trs_fwk_errno != 0)
- CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type, name);
+ CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type,
+ batch_name);
/* add to test list anyway, so user can check what's going on */
list_add_tail(&test->tes_link, &batch->bat_test_list);
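
The console.c rework above pulls the batch and group validation out of lstcon_test_add() into lstcon_verify_batch()/lstcon_verify_group(), each returning 0 or a negative errno and handing the object back through an out pointer. A self-contained sketch of that lookup-and-validate shape (types, table and names are all invented):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>

struct example_thing {
        const char *name;
        bool active;
};

static struct example_thing example_things[] = {
        { .name = "src", .active = true },
        { .name = "dst", .active = false },
};

/* return 0 and fill *out on success, -ENOENT/-EINVAL otherwise */
static int example_verify_thing(const char *name, struct example_thing **out)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(example_things); i++) {
                if (strcmp(example_things[i].name, name) != 0)
                        continue;
                if (!example_things[i].active)
                        return -EINVAL;         /* found, but unusable */
                *out = &example_things[i];
                return 0;
        }
        return -ENOENT;                         /* not found */
}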
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index e61b266..393dc0f 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -100,7 +100,7 @@ typedef struct {
struct list_head *bat_cli_hash; /* hash table of client nodes */
struct list_head bat_srv_list; /* list head of server nodes */
struct list_head *bat_srv_hash; /* hash table of server nodes */
-} lstcon_batch_t; /*** (tests ) batch descritptor */
+} lstcon_batch_t; /*** (tests ) batch descriptor */
typedef struct lstcon_test {
lstcon_tsb_hdr_t tes_hdr; /* test batch header */
@@ -224,9 +224,9 @@ extern int lstcon_group_stat(char *grp_name, int timeout,
struct list_head *result_up);
extern int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
int timeout, struct list_head *result_up);
-extern int lstcon_test_add(char *name, int type, int loop, int concur,
- int dist, int span, char *src_name, char * dst_name,
+extern int lstcon_test_add(char *batch_name, int type, int loop,
+ int concur, int dist, int span,
+ char *src_name, char *dst_name,
void *param, int paramlen, int *retp,
struct list_head *result_up);
-
#endif
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 483c785..050723a 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -46,12 +46,12 @@
lst_sid_t LST_INVALID_SID = {LNET_NID_ANY, -1};
static int session_timeout = 100;
-CFS_MODULE_PARM(session_timeout, "i", int, 0444,
- "test session timeout in seconds (100 by default, 0 == never)");
+module_param(session_timeout, int, 0444);
+MODULE_PARM_DESC(session_timeout, "test session timeout in seconds (100 by default, 0 == never)");
static int rpc_timeout = 64;
-CFS_MODULE_PARM(rpc_timeout, "i", int, 0644,
- "rpc timeout in seconds (64 by default, 0 == never)");
+module_param(rpc_timeout, int, 0644);
+MODULE_PARM_DESC(rpc_timeout, "rpc timeout in seconds (64 by default, 0 == never)");
#define sfw_unpack_id(id) \
do { \
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index f0f9194..750cac4 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -45,7 +45,8 @@
#define LST_PING_TEST_MAGIC 0xbabeface
int ping_srv_workitems = SFW_TEST_WI_MAX;
-CFS_MODULE_PARM(ping_srv_workitems, "i", int, 0644, "# PING server workitems");
+module_param(ping_srv_workitems, int, 0644);
+MODULE_PARM_DESC(ping_srv_workitems, "# PING server workitems");
typedef struct {
spinlock_t pnd_lock; /* serialize */
@@ -189,7 +190,7 @@ ping_server_handle(struct srpc_server_rpc *rpc)
LASSERT (reqstmsg->msg_type == srpc_service2request(sv->sv_id));
if (req->pnr_magic != LST_PING_TEST_MAGIC) {
- CERROR ("Unexpect magic %08x from %s\n",
+ CERROR ("Unexpected magic %08x from %s\n",
req->pnr_magic, libcfs_id2str(rpc->srpc_peer));
return -EINVAL;
}
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 7659a26..d838985 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -124,7 +124,6 @@ srpc_bulk_t *
srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
{
srpc_bulk_t *bk;
- struct page **pages;
int i;
LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
@@ -140,7 +139,6 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
bk->bk_sink = sink;
bk->bk_len = bulk_len;
bk->bk_niov = bulk_npg;
- UNUSED(pages);
for (i = 0; i < bulk_npg; i++) {
struct page *pg;
@@ -718,7 +716,7 @@ srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
if (scd->scd_buf_adjust < 0 &&
scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
CDEBUG(D_INFO,
- "Try to recyle %d buffers but nothing left\n",
+ "Try to recycle %d buffers but nothing left\n",
scd->scd_buf_adjust);
scd->scd_buf_adjust = 0;
}
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 8053b05..228927e 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -350,7 +350,7 @@ typedef struct {
} sfw_batch_t;
typedef struct {
- int (*tso_init)(struct sfw_test_instance *tsi); /* intialize test client */
+ int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test client */
void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test client */
int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
lnet_process_id_t dest,
@@ -572,9 +572,6 @@ swi_state2str (int state)
#undef STATE2STR
}
-#define UNUSED(x) ( (void)(x) )
-
-
#define selftest_wait_events() cfs_pause(cfs_time_seconds(1) / 10)
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index 82fd363..b8e50ef 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -171,19 +171,14 @@ stt_check_timers(cfs_time_t *last)
int
stt_timer_main(void *arg)
{
- int rc = 0;
- UNUSED(arg);
-
- SET_BUT_UNUSED(rc);
-
cfs_block_allsigs();
while (!stt_data.stt_shuttingdown) {
stt_check_timers(&stt_data.stt_prev_slot);
- rc = wait_event_timeout(stt_data.stt_waitq,
- stt_data.stt_shuttingdown,
- cfs_time_seconds(STTIMER_SLOTTIME));
+ wait_event_timeout(stt_data.stt_waitq,
+ stt_data.stt_shuttingdown,
+ cfs_time_seconds(STTIMER_SLOTTIME));
}
spin_lock(&stt_data.stt_lock);
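
The timer.c hunk deletes the dummy rc/UNUSED() bookkeeping around wait_event_timeout(); its return value can simply be ignored here because the enclosing loop re-tests the shutdown flag itself. A minimal sketch of that loop shape (waitqueue and flag are invented):

#include <linux/wait.h>
#include <linux/jiffies.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static bool example_shutdown;

static void example_poll_loop(void)
{
        while (!example_shutdown) {
                /* periodic work goes here */

                /* timeout and wake-up both just loop back to the test above */
                wait_event_timeout(example_waitq, example_shutdown, HZ);
        }
}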
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
index 93d59b6..209e4c7 100644
--- a/drivers/staging/lustre/lustre/Kconfig
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -55,6 +55,6 @@ config LUSTRE_TRANSLATE_ERRNOS
default y
config LUSTRE_LLITE_LLOOP
- bool "Lustre virtual block device"
+ tristate "Lustre virtual block device"
depends on LUSTRE_FS && BLOCK
default m
diff --git a/drivers/staging/lustre/lustre/fid/Makefile b/drivers/staging/lustre/lustre/fid/Makefile
index ed21bea..d24f2df 100644
--- a/drivers/staging/lustre/lustre/fid/Makefile
+++ b/drivers/staging/lustre/lustre/fid/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_LUSTRE_FS) += fid.o
-fid-y := fid_request.o lproc_fid.o fid_lib.o
+fid-y := fid_request.o fid_lib.o
+fid-$(CONFIG_PROC_FS) += lproc_fid.o
ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index 294070d..ddd813c 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -54,7 +54,6 @@
#include <lustre_fid.h>
#include "fid_internal.h"
-#ifdef LPROCFS
/*
* Note: this function is only used for testing, it is no safe for production
* use.
@@ -209,4 +208,3 @@ struct lprocfs_vars seq_client_proc_list[] = {
{ "fid", &lprocfs_fid_fid_fops },
{ NULL }
};
-#endif
diff --git a/drivers/staging/lustre/lustre/fld/Makefile b/drivers/staging/lustre/lustre/fld/Makefile
index 90d46d8..640fba4 100644
--- a/drivers/staging/lustre/lustre/fld/Makefile
+++ b/drivers/staging/lustre/lustre/fld/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_LUSTRE_FS) += fld.o
-fld-y := fld_request.o fld_cache.o lproc_fld.o
+fld-y := fld_request.o fld_cache.o
+fld-$(CONFIG_PROC_FS) += lproc_fld.o
ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 4531510..6c37930 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -307,7 +307,7 @@ static void fld_cache_overlap_handle(struct fld_cache *cache,
const mdsno_t mdt = range->lsr_index;
/* this is overlap case, these case are checking overlapping with
- * prev range only. fixup will handle overlaping with next range. */
+ * prev range only. fixup will handle overlapping with next range. */
if (f_curr->fce_range.lsr_index == mdt) {
f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 56686b1..5f3935c 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -190,5 +190,4 @@ fld_target_name(struct lu_fld_target *tar)
return (const char *)tar->ft_exp->exp_obd->obd_name;
}
-extern struct proc_dir_entry *fld_type_proc_dir;
#endif /* __FLD_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index e47fd50..896f9fe 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -274,9 +274,9 @@ int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
}
EXPORT_SYMBOL(fld_client_del_target);
-#ifdef LPROCFS
struct proc_dir_entry *fld_type_proc_dir = NULL;
+#ifdef LPROCFS
static int fld_client_proc_init(struct lu_client_fld *fld)
{
int rc;
@@ -504,10 +504,7 @@ static int __init fld_mod_init(void)
fld_type_proc_dir = lprocfs_register(LUSTRE_FLD_NAME,
proc_lustre_root,
NULL, NULL);
- if (IS_ERR(fld_type_proc_dir))
- return PTR_ERR(fld_type_proc_dir);
-
- return 0;
+ return PTR_ERR_OR_ZERO(fld_type_proc_dir);
}
static void __exit fld_mod_exit(void)
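
The fld_mod_init() hunk above collapses the IS_ERR()/PTR_ERR()/return 0 tail into PTR_ERR_OR_ZERO(), which yields the encoded errno for an error pointer and 0 for a valid one. The equivalence, shown in isolation with invented function names:

#include <linux/err.h>

/* before: open-coded error-pointer check */
static int example_check_open_coded(void *p)
{
        if (IS_ERR(p))
                return PTR_ERR(p);
        return 0;
}

/* after: identical behaviour in one line */
static int example_check(void *p)
{
        return PTR_ERR_OR_ZERO(p);
}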
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
index 052f7d5..530adde 100644
--- a/drivers/staging/lustre/lustre/fld/lproc_fld.c
+++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c
@@ -56,7 +56,6 @@
#include <lustre_fid.h>
#include "fld_internal.h"
-#ifdef LPROCFS
static int
fld_proc_targets_seq_show(struct seq_file *m, void *unused)
{
@@ -162,5 +161,3 @@ struct lprocfs_vars fld_client_proc_list[] = {
{ "hash", &fld_proc_hash_fops },
{ "cache_flush", &fld_proc_cache_flush_fops },
{ NULL }};
-
-#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index c485206..4d692dc 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -2388,7 +2388,11 @@ struct cl_io {
* Right now, only two opertaions need to verify layout: glimpse
* and setattr.
*/
- ci_verify_layout:1;
+ ci_verify_layout:1,
+ /**
+ * file is released, restore has to to be triggered by vvp layer
+ */
+ ci_restore_needed:1;
/**
* Number of pages owned by this IO. For invariant checking.
*/
diff --git a/drivers/staging/lustre/lustre/include/dt_object.h b/drivers/staging/lustre/lustre/include/dt_object.h
index e116bb2..9304c26 100644
--- a/drivers/staging/lustre/lustre/include/dt_object.h
+++ b/drivers/staging/lustre/lustre/include/dt_object.h
@@ -692,7 +692,7 @@ struct local_oid_storage {
struct dt_object *los_obj;
/* data used to generate new fids */
- struct mutex los_id_lock;
+ struct mutex los_id_lock;
__u64 los_seq;
__u32 los_last_oid;
};
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_acl.h b/drivers/staging/lustre/lustre/include/linux/lustre_acl.h
index ff4fc4f..778b123 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_acl.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_acl.h
@@ -47,17 +47,17 @@
#error Shoud not include direectly. use #include <lustre_acl.h> instead
#endif
-# include <linux/fs.h>
-# include <linux/dcache.h>
-# ifdef CONFIG_FS_POSIX_ACL
-# include <linux/posix_acl_xattr.h>
-# define LUSTRE_POSIX_ACL_MAX_ENTRIES 32
-# define LUSTRE_POSIX_ACL_MAX_SIZE \
+#include <linux/fs.h>
+#include <linux/dcache.h>
+
+#include <linux/posix_acl_xattr.h>
+#define LUSTRE_POSIX_ACL_MAX_ENTRIES 32
+#define LUSTRE_POSIX_ACL_MAX_SIZE \
(sizeof(posix_acl_xattr_header) + \
LUSTRE_POSIX_ACL_MAX_ENTRIES * sizeof(posix_acl_xattr_entry))
-# endif /* CONFIG_FS_POSIX_ACL */
-# include <linux/lustre_intent.h>
-# include <linux/xattr.h> /* XATTR_{REPLACE,CREATE} */
+
+#include <linux/lustre_intent.h>
+#include <linux/xattr.h> /* XATTR_{REPLACE,CREATE} */
#ifndef LUSTRE_POSIX_ACL_MAX_SIZE
# define LUSTRE_POSIX_ACL_MAX_SIZE 0
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_debug.h b/drivers/staging/lustre/lustre/include/linux/lustre_debug.h
deleted file mode 100644
index 11deac7..0000000
--- a/drivers/staging/lustre/lustre/include/linux/lustre_debug.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LINUX_LUSTRE_DEBUG_H
-#define _LINUX_LUSTRE_DEBUG_H
-
-#ifndef _LUSTRE_DEBUG_H
-#error Do not #include this file directly. #include <lprocfs_status.h> instead
-#endif
-
-#define LL_CDEBUG_PAGE(mask, page, fmt, arg...) \
- CDEBUG(mask, "page %p map %p index %lu flags %lx count %u priv %0lx: "\
- fmt, page, page->mapping, page->index, (long)page->flags, \
- page_count(page), page_private(page), ## arg)
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_intent.h b/drivers/staging/lustre/lustre/include/linux/lustre_intent.h
index b10ddfa..c491d52 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_intent.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_intent.h
@@ -52,8 +52,8 @@ struct lustre_intent_data {
struct lookup_intent {
int it_op;
- int it_flags;
int it_create_mode;
+ __u64 it_flags;
union {
struct lustre_intent_data lustre;
} d;
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h b/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
index 9e5df8d..df93912 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
@@ -88,6 +88,7 @@ enum {
LPROC_LL_ALLOC_INODE,
LPROC_LL_SETXATTR,
LPROC_LL_GETXATTR,
+ LPROC_LL_GETXATTR_HITS,
LPROC_LL_LISTXATTR,
LPROC_LL_REMOVEXATTR,
LPROC_LL_INODE_PERM,
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index 56b0572..428e3e4 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -370,6 +370,10 @@ static inline void s2dhms(struct dhms *ts, time_t secs)
#define JOBSTATS_DISABLE "disable"
#define JOBSTATS_PROCNAME_UID "procname_uid"
+extern int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
+ int *val, int mult);
+extern int lprocfs_read_frac_helper(char *buffer, unsigned long count,
+ long val, int mult);
#ifdef LPROCFS
extern int lprocfs_stats_alloc_one(struct lprocfs_stats *stats,
@@ -641,11 +645,7 @@ extern int lprocfs_rd_filesfree(struct seq_file *m, void *data);
extern int lprocfs_write_helper(const char *buffer, unsigned long count,
int *val);
-extern int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
- int *val, int mult);
extern int lprocfs_seq_read_frac_helper(struct seq_file *m, long val, int mult);
-extern int lprocfs_read_frac_helper(char *buffer, unsigned long count,
- long val, int mult);
extern int lprocfs_write_u64_helper(const char *buffer, unsigned long count,
__u64 *val);
extern int lprocfs_write_frac_u64_helper(const char *buffer,
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index d5b8225..6773bca 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -398,17 +398,6 @@ static inline int lu_device_is_md(const struct lu_device *d)
}
/**
- * Flags for the object layers.
- */
-enum lu_object_flags {
- /**
- * this flags is set if lu_object_operations::loo_object_init() has
- * been called for this layer. Used by lu_object_alloc().
- */
- LU_OBJECT_ALLOCATED = (1 << 0)
-};
-
-/**
* Common object attributes.
*/
struct lu_attr {
@@ -486,14 +475,6 @@ struct lu_object {
*/
struct list_head lo_linkage;
/**
- * Depth. Top level layer depth is 0.
- */
- int lo_depth;
- /**
- * Flags from enum lu_object_flags.
- */
- __u32 lo_flags;
- /**
* Link to the device, for debugging.
*/
struct lu_ref_link lo_dev_ref;
diff --git a/drivers/staging/lustre/lustre/include/lu_target.h b/drivers/staging/lustre/lustre/include/lu_target.h
deleted file mode 100644
index 8d48cf4..0000000
--- a/drivers/staging/lustre/lustre/include/lu_target.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LUSTRE_LU_TARGET_H
-#define _LUSTRE_LU_TARGET_H
-
-#include <dt_object.h>
-#include <lustre_disk.h>
-
-struct lu_target {
- struct obd_device *lut_obd;
- struct dt_device *lut_bottom;
- /** last_rcvd file */
- struct dt_object *lut_last_rcvd;
- /* transaction callbacks */
- struct dt_txn_callback lut_txn_cb;
- /** server data in last_rcvd file */
- struct lr_server_data lut_lsd;
- /** Server last transaction number */
- __u64 lut_last_transno;
- /** Lock protecting last transaction number */
- spinlock_t lut_translock;
- /** Lock protecting client bitmap */
- spinlock_t lut_client_bitmap_lock;
- /** Bitmap of known clients */
- unsigned long *lut_client_bitmap;
-};
-
-typedef void (*tgt_cb_t)(struct lu_target *lut, __u64 transno,
- void *data, int err);
-struct tgt_commit_cb {
- tgt_cb_t tgt_cb_func;
- void *tgt_cb_data;
-};
-
-void tgt_boot_epoch_update(struct lu_target *lut);
-int tgt_last_commit_cb_add(struct thandle *th, struct lu_target *lut,
- struct obd_export *exp, __u64 transno);
-int tgt_new_client_cb_add(struct thandle *th, struct obd_export *exp);
-int tgt_init(const struct lu_env *env, struct lu_target *lut,
- struct obd_device *obd, struct dt_device *dt);
-void tgt_fini(const struct lu_env *env, struct lu_target *lut);
-int tgt_client_alloc(struct obd_export *exp);
-void tgt_client_free(struct obd_export *exp);
-int tgt_client_del(const struct lu_env *env, struct obd_export *exp);
-int tgt_client_add(const struct lu_env *env, struct obd_export *exp, int);
-int tgt_client_new(const struct lu_env *env, struct obd_export *exp);
-int tgt_client_data_read(const struct lu_env *env, struct lu_target *tg,
- struct lsd_client_data *lcd, loff_t *off, int index);
-int tgt_client_data_write(const struct lu_env *env, struct lu_target *tg,
- struct lsd_client_data *lcd, loff_t *off, struct thandle *th);
-int tgt_server_data_read(const struct lu_env *env, struct lu_target *tg);
-int tgt_server_data_write(const struct lu_env *env, struct lu_target *tg,
- struct thandle *th);
-int tgt_server_data_update(const struct lu_env *env, struct lu_target *tg, int sync);
-int tgt_truncate_last_rcvd(const struct lu_env *env, struct lu_target *tg, loff_t off);
-
-#endif /* __LUSTRE_LU_TARGET_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre/liblustreapi.h b/drivers/staging/lustre/lustre/include/lustre/liblustreapi.h
deleted file mode 100644
index 707eb74..0000000
--- a/drivers/staging/lustre/lustre/include/lustre/liblustreapi.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/*
- * NOTE: This file is DEPRECATED! Please include lustreapi.h directly
- * instead of this file. This file will be removed from a future version
- * of lustre!
- */
-
-#ifndef _LIBLUSTREAPI_H_
-#define _LIBLUSTREAPI_H_
-
-#include <lustre/lustreapi.h>
-#warning "Including liblustreapi.h is deprecated. Include lustreapi.h directly."
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 5ca18d0..5da31c5 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -321,8 +321,11 @@ static inline int range_compare_loc(const struct lu_seq_range *r1,
* xattr.
*/
enum lma_compat {
- LMAC_HSM = 0x00000001,
- LMAC_SOM = 0x00000002,
+ LMAC_HSM = 0x00000001,
+ LMAC_SOM = 0x00000002,
+ LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */
+ LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
+ * under /O/<seq>/d<x>. */
};
/**
@@ -331,10 +334,10 @@ enum lma_compat {
* This information is stored in lustre_mdt_attrs::lma_incompat.
*/
enum lma_incompat {
- LMAI_RELEASED = 0x0000001, /* file is released */
- LMAI_AGENT = 0x00000002, /* agent inode */
- LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object
- is on the remote MDT */
+ LMAI_RELEASED = 0x00000001, /* file is released */
+ LMAI_AGENT = 0x00000002, /* agent inode */
+ LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object
+ is on the remote MDT */
};
#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT)
@@ -1025,6 +1028,18 @@ struct luda_type {
__u16 lt_type;
};
+#ifndef IFSHIFT
+#define IFSHIFT 12
+#endif
+
+#ifndef IFTODT
+#define IFTODT(type) (((type) & S_IFMT) >> IFSHIFT)
+#endif
+#ifndef DTTOIF
+#define DTTOIF(dirtype) ((dirtype) << IFSHIFT)
+#endif
+
+
struct lu_dirpage {
__u64 ldp_hash_start;
__u64 ldp_hash_end;
@@ -1353,7 +1368,7 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
OBD_CONNECT_EINPROGRESS | \
OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
- OBD_CONNECT_PINGLESS)
+ OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE)
#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
@@ -1725,10 +1740,7 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */
#define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */
#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
-
-/* OBD_MD_MDTIDX is used to get MDT index, but it is never been used overwire,
- * and it is already obsolete since 2.3 */
-/* #define OBD_MD_MDTIDX (0x0000000800000000ULL) */
+#define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */
#define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */
#define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
@@ -1740,7 +1752,9 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
#define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
#define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */
#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
- * under lock */
+ * under lock; for xattr
+ * requests means the
+ * client holds the lock */
#define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
@@ -1749,6 +1763,7 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
#define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */
#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
+#define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */
#define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \
@@ -1756,6 +1771,9 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)
+#define OBD_MD_FLXATTRLOCKED OBD_MD_FLGETATTRLOCK
+#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
+
/* don't forget obdo_fid which is way down at the bottom so it can
* come after the definition of llog_cookie */
@@ -2120,6 +2138,7 @@ extern void lustre_swab_generic_32s (__u32 *val);
#define DISP_ENQ_OPEN_REF 0x00800000
#define DISP_ENQ_CREATE_REF 0x01000000
#define DISP_OPEN_LOCK 0x02000000
+#define DISP_OPEN_LEASE 0x04000000
/* INODE LOCK PARTS */
#define MDS_INODELOCK_LOOKUP 0x000001 /* dentry, mode, owner, group */
@@ -2127,8 +2146,9 @@ extern void lustre_swab_generic_32s (__u32 *val);
#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
#define MDS_INODELOCK_PERM 0x000010 /* for permission */
+#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
-#define MDS_INODELOCK_MAXSHIFT 4
+#define MDS_INODELOCK_MAXSHIFT 5
/* This FULL lock is useful to take on unlink sort of operations */
#define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
@@ -2207,6 +2227,11 @@ static inline int ll_inode_to_ext_flags(int iflags)
((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
}
+/* 64 possible states */
+enum md_transient_state {
+ MS_RESTORE = (1 << 0), /* restore is running */
+};
+
struct mdt_body {
struct lu_fid fid1;
struct lu_fid fid2;
@@ -2218,7 +2243,9 @@ struct mdt_body {
obd_time ctime;
__u64 blocks; /* XID, in the case of MDS_READPAGE */
__u64 ioepoch;
- __u64 unused1; /* was "ino" until 2.4.0 */
+ __u64 t_state; /* transient file state defined in
+ * enum md_transient_state
+ * was "ino" until 2.4.0 */
__u32 fsuid;
__u32 fsgid;
__u32 capability;
@@ -2373,6 +2400,11 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);
* hsm restore) */
#define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created
unlinked */
+#define MDS_OPEN_LEASE 01000000000000ULL /* Open the file and grant lease
+ * delegation, succeed if it's not
+ * being opened with conflict mode.
+ */
+#define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */
/* permission for create non-directory file */
#define MAY_CREATE (1 << 7)
@@ -2391,7 +2423,7 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);
/* lfs rgetfacl permission check */
#define MAY_RGETFACL (1 << 14)
-enum {
+enum mds_op_bias {
MDS_CHECK_SPLIT = 1 << 0,
MDS_CROSS_REF = 1 << 1,
MDS_VTX_BYPASS = 1 << 2,
@@ -2404,6 +2436,7 @@ enum {
MDS_DATA_MODIFIED = 1 << 9,
MDS_CREATE_VOLATILE = 1 << 10,
MDS_OWNEROVERRIDE = 1 << 11,
+ MDS_HSM_RELEASE = 1 << 12,
};
/* instance of mdt_reint_rec */
@@ -3727,5 +3760,14 @@ struct mdc_swap_layouts {
void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
+struct close_data {
+ struct lustre_handle cd_handle;
+ struct lu_fid cd_fid;
+ __u64 cd_data_version;
+ __u64 cd_reserved[8];
+};
+
+void lustre_swab_close_data(struct close_data *data);
+
#endif
/** @} lustreidl */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index c7bd447..6b6c0240 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -244,6 +244,9 @@ struct ost_id {
#define LL_IOC_LMV_SETSTRIPE _IOWR('f', 240, struct lmv_user_md)
#define LL_IOC_LMV_GETSTRIPE _IOWR('f', 241, struct lmv_user_md)
#define LL_IOC_REMOVE_ENTRY _IOWR('f', 242, __u64)
+#define LL_IOC_SET_LEASE _IOWR('f', 243, long)
+#define LL_IOC_GET_LEASE _IO('f', 244)
+#define LL_IOC_HSM_IMPORT _IOWR('f', 245, struct hsm_user_import)
#define LL_STATFS_LMV 1
#define LL_STATFS_LOV 2
@@ -425,8 +428,8 @@ struct obd_uuid {
char uuid[UUID_MAX];
};
-static inline int obd_uuid_equals(const struct obd_uuid *u1,
- const struct obd_uuid *u2)
+static inline bool obd_uuid_equals(const struct obd_uuid *u1,
+ const struct obd_uuid *u2)
{
return strcmp((char *)u1->uuid, (char *)u2->uuid) == 0;
}
@@ -443,7 +446,7 @@ static inline void obd_str2uuid(struct obd_uuid *uuid, const char *tmp)
}
/* For printf's only, make sure uuid is terminated */
-static inline char *obd_uuid2str(struct obd_uuid *uuid)
+static inline char *obd_uuid2str(const struct obd_uuid *uuid)
{
if (uuid->uuid[sizeof(*uuid) - 1] != '\0') {
/* Obviously not safe, but for printfs, no real harm done...
@@ -620,10 +623,13 @@ struct if_quotactl {
};
/* swap layout flags */
-#define SWAP_LAYOUTS_CHECK_DV1 (1 << 0)
-#define SWAP_LAYOUTS_CHECK_DV2 (1 << 1)
-#define SWAP_LAYOUTS_KEEP_MTIME (1 << 2)
-#define SWAP_LAYOUTS_KEEP_ATIME (1 << 3)
+#define SWAP_LAYOUTS_CHECK_DV1 (1 << 0)
+#define SWAP_LAYOUTS_CHECK_DV2 (1 << 1)
+#define SWAP_LAYOUTS_KEEP_MTIME (1 << 2)
+#define SWAP_LAYOUTS_KEEP_ATIME (1 << 3)
+
+/* Swap XATTR_NAME_HSM as well, only on the MDT so far */
+#define SWAP_LAYOUTS_MDS_HSM (1 << 31)
struct lustre_swap_layouts {
__u64 sl_flags;
__u32 sl_fd;
@@ -1118,13 +1124,27 @@ static inline int hal_size(struct hsm_action_list *hal)
sz = sizeof(*hal) + cfs_size_round(strlen(hal->hal_fsname));
hai = hai_zero(hal);
- for (i = 0 ; i < hal->hal_count ; i++) {
+ for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai))
sz += cfs_size_round(hai->hai_len);
- hai = hai_next(hai);
- }
- return(sz);
+
+ return sz;
}
+/* HSM file import
+ * describe the attributes to be set on imported file
+ */
+struct hsm_user_import {
+ __u64 hui_size;
+ __u64 hui_atime;
+ __u64 hui_mtime;
+ __u32 hui_atime_ns;
+ __u32 hui_mtime_ns;
+ __u32 hui_uid;
+ __u32 hui_gid;
+ __u32 hui_mode;
+ __u32 hui_archive_id;
+};
+
/* Copytool progress reporting */
#define HP_FLAG_COMPLETED 0x01
#define HP_FLAG_RETRY 0x02
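
The hal_size() hunk above folds the hai_next() advance into the for statement and drops the function-style return(sz). A self-contained sketch of the same walk over variable-length records (the record layout and names are invented, not the HSM structures themselves):

#include <linux/types.h>

struct example_rec {
        __u32 er_len;           /* total length of this record in bytes */
        /* payload follows */
};

static inline struct example_rec *example_next(struct example_rec *rec)
{
        return (struct example_rec *)((char *)rec + rec->er_len);
}

static size_t example_total_size(struct example_rec *first, int count)
{
        struct example_rec *rec = first;
        size_t sz = 0;
        int i;

        for (i = 0; i < count; i++, rec = example_next(rec))
                sz += rec->er_len;

        return sz;
}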
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustreapi.h b/drivers/staging/lustre/lustre/include/lustre/lustreapi.h
deleted file mode 100644
index 63da665..0000000
--- a/drivers/staging/lustre/lustre/include/lustre/lustreapi.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LUSTREAPI_H_
-#define _LUSTREAPI_H_
-
-/** \defgroup llapi llapi
- *
- * @{
- */
-
-#include <lustre/lustre_user.h>
-
-typedef void (*llapi_cb_t)(char *obd_type_name, char *obd_name, char *obd_uuid, void *args);
-
-/* lustreapi message severity level */
-enum llapi_message_level {
- LLAPI_MSG_OFF = 0,
- LLAPI_MSG_FATAL = 1,
- LLAPI_MSG_ERROR = 2,
- LLAPI_MSG_WARN = 3,
- LLAPI_MSG_NORMAL = 4,
- LLAPI_MSG_INFO = 5,
- LLAPI_MSG_DEBUG = 6,
- LLAPI_MSG_MAX
-};
-
-/* the bottom three bits reserved for llapi_message_level */
-#define LLAPI_MSG_MASK 0x00000007
-#define LLAPI_MSG_NO_ERRNO 0x00000010
-
-extern void llapi_msg_set_level(int level);
-extern void llapi_error(int level, int rc, char *fmt, ...);
-#define llapi_err_noerrno(level, fmt, a...) \
- llapi_error((level) | LLAPI_MSG_NO_ERRNO, 0, fmt, ## a)
-extern void llapi_printf(int level, char *fmt, ...);
-extern int llapi_file_create(const char *name, unsigned long long stripe_size,
- int stripe_offset, int stripe_count,
- int stripe_pattern);
-extern int llapi_file_open(const char *name, int flags, int mode,
- unsigned long long stripe_size, int stripe_offset,
- int stripe_count, int stripe_pattern);
-extern int llapi_file_create_pool(const char *name,
- unsigned long long stripe_size,
- int stripe_offset, int stripe_count,
- int stripe_pattern, char *pool_name);
-extern int llapi_file_open_pool(const char *name, int flags, int mode,
- unsigned long long stripe_size,
- int stripe_offset, int stripe_count,
- int stripe_pattern, char *pool_name);
-extern int llapi_poollist(const char *name);
-extern int llapi_get_poollist(const char *name, char **poollist, int list_size,
- char *buffer, int buffer_size);
-extern int llapi_get_poolmembers(const char *poolname, char **members,
- int list_size, char *buffer, int buffer_size);
-extern int llapi_file_get_stripe(const char *path, struct lov_user_md *lum);
-#define HAVE_LLAPI_FILE_LOOKUP
-extern int llapi_file_lookup(int dirfd, const char *name);
-
-#define VERBOSE_COUNT 0x1
-#define VERBOSE_SIZE 0x2
-#define VERBOSE_OFFSET 0x4
-#define VERBOSE_POOL 0x8
-#define VERBOSE_DETAIL 0x10
-#define VERBOSE_OBJID 0x20
-#define VERBOSE_GENERATION 0x40
-#define VERBOSE_MDTINDEX 0x80
-#define VERBOSE_ALL (VERBOSE_COUNT | VERBOSE_SIZE | VERBOSE_OFFSET | \
- VERBOSE_POOL | VERBOSE_OBJID | VERBOSE_GENERATION)
-
-struct find_param {
- unsigned int maxdepth;
- time_t atime;
- time_t mtime;
- time_t ctime;
- int asign; /* cannot be bitfields due to using pointers to */
- int csign; /* access them during argument parsing. */
- int msign;
- int type;
- int size_sign:2, /* these need to be signed values */
- stripesize_sign:2,
- stripecount_sign:2;
- unsigned long long size;
- unsigned long long size_units;
- uid_t uid;
- gid_t gid;
-
- unsigned long zeroend:1,
- recursive:1,
- exclude_pattern:1,
- exclude_type:1,
- exclude_obd:1,
- exclude_mdt:1,
- exclude_gid:1,
- exclude_uid:1,
- check_gid:1, /* group ID */
- check_uid:1, /* user ID */
- check_pool:1, /* LOV pool name */
- check_size:1, /* file size */
- exclude_pool:1,
- exclude_size:1,
- exclude_atime:1,
- exclude_mtime:1,
- exclude_ctime:1,
- get_lmv:1, /* get MDT list from LMV */
- raw:1, /* do not fill in defaults */
- check_stripesize:1, /* LOV stripe size */
- exclude_stripesize:1,
- check_stripecount:1, /* LOV stripe count */
- exclude_stripecount:1;
-
- int verbose;
- int quiet;
-
- /* regular expression */
- char *pattern;
-
- char *print_fmt;
-
- struct obd_uuid *obduuid;
- int num_obds;
- int num_alloc_obds;
- int obdindex;
- int *obdindexes;
-
- struct obd_uuid *mdtuuid;
- int num_mdts;
- int num_alloc_mdts;
- int mdtindex;
- int *mdtindexes;
- int file_mdtindex;
-
- int lumlen;
- struct lov_user_mds_data *lmd;
-
- char poolname[LOV_MAXPOOLNAME + 1];
-
- int fp_lmv_count;
- struct lmv_user_md *fp_lmv_md;
-
- unsigned long long stripesize;
- unsigned long long stripesize_units;
- unsigned long long stripecount;
-
- /* In-process parameters. */
- unsigned long got_uuids:1,
- obds_printed:1,
- have_fileinfo:1; /* file attrs and LOV xattr */
- unsigned int depth;
- dev_t st_dev;
-};
-
-extern int llapi_ostlist(char *path, struct find_param *param);
-extern int llapi_uuid_match(char *real_uuid, char *search_uuid);
-extern int llapi_getstripe(char *path, struct find_param *param);
-extern int llapi_find(char *path, struct find_param *param);
-
-extern int llapi_file_fget_mdtidx(int fd, int *mdtidx);
-extern int llapi_dir_create_pool(const char *name, int flags, int stripe_offset,
- int stripe_count, int stripe_pattern,
- char *poolname);
-int llapi_direntry_remove(char *dname);
-extern int llapi_obd_statfs(char *path, __u32 type, __u32 index,
- struct obd_statfs *stat_buf,
- struct obd_uuid *uuid_buf);
-extern int llapi_ping(char *obd_type, char *obd_name);
-extern int llapi_target_check(int num_types, char **obd_types, char *dir);
-extern int llapi_file_get_lov_uuid(const char *path, struct obd_uuid *lov_uuid);
-extern int llapi_file_get_lmv_uuid(const char *path, struct obd_uuid *lmv_uuid);
-extern int llapi_file_fget_lov_uuid(int fd, struct obd_uuid *lov_uuid);
-extern int llapi_lov_get_uuids(int fd, struct obd_uuid *uuidp, int *ost_count);
-extern int llapi_lmv_get_uuids(int fd, struct obd_uuid *uuidp, int *mdt_count);
-extern int llapi_is_lustre_mnttype(const char *type);
-extern int llapi_search_ost(char *fsname, char *poolname, char *ostname);
-extern int llapi_get_obd_count(char *mnt, int *count, int is_mdt);
-extern int parse_size(char *optarg, unsigned long long *size,
- unsigned long long *size_units, int bytes_spec);
-extern int llapi_search_mounts(const char *pathname, int index,
- char *mntdir, char *fsname);
-extern int llapi_search_fsname(const char *pathname, char *fsname);
-extern int llapi_getname(const char *path, char *buf, size_t size);
-
-extern void llapi_ping_target(char *obd_type, char *obd_name,
- char *obd_uuid, void *args);
-
-extern int llapi_search_rootpath(char *pathname, const char *fsname);
-
-struct mntent;
-#define HAVE_LLAPI_IS_LUSTRE_MNT
-extern int llapi_is_lustre_mnt(struct mntent *mnt);
-extern int llapi_quotachown(char *path, int flag);
-extern int llapi_quotacheck(char *mnt, int check_type);
-extern int llapi_poll_quotacheck(char *mnt, struct if_quotacheck *qchk);
-extern int llapi_quotactl(char *mnt, struct if_quotactl *qctl);
-extern int llapi_target_iterate(int type_num, char **obd_type, void *args,
- llapi_cb_t cb);
-extern int llapi_get_connect_flags(const char *mnt, __u64 *flags);
-extern int llapi_lsetfacl(int argc, char *argv[]);
-extern int llapi_lgetfacl(int argc, char *argv[]);
-extern int llapi_rsetfacl(int argc, char *argv[]);
-extern int llapi_rgetfacl(int argc, char *argv[]);
-extern int llapi_cp(int argc, char *argv[]);
-extern int llapi_ls(int argc, char *argv[]);
-extern int llapi_fid2path(const char *device, const char *fidstr, char *path,
- int pathlen, long long *recno, int *linkno);
-extern int llapi_path2fid(const char *path, lustre_fid *fid);
-extern int llapi_fd2fid(const int fd, lustre_fid *fid);
-
-extern int llapi_get_version(char *buffer, int buffer_size, char **version);
-extern int llapi_get_data_version(int fd, __u64 *data_version, __u64 flags);
-extern int llapi_hsm_state_get(const char *path, struct hsm_user_state *hus);
-extern int llapi_hsm_state_set(const char *path, __u64 setmask, __u64 clearmask,
- __u32 archive_id);
-
-extern int llapi_create_volatile_idx(char *directory, int idx, int mode);
-static inline int llapi_create_volatile(char *directory, int mode)
-{
- return llapi_create_volatile_idx(directory, -1, mode);
-}
-
-
-extern int llapi_fswap_layouts(const int fd1, const int fd2,
- __u64 dv1, __u64 dv2, __u64 flags);
-extern int llapi_swap_layouts(const char *path1, const char *path2,
- __u64 dv1, __u64 dv2, __u64 flags);
-
-/* Changelog interface. priv is private state, managed internally
- by these functions */
-#define CHANGELOG_FLAG_FOLLOW 0x01 /* Not yet implemented */
-#define CHANGELOG_FLAG_BLOCK 0x02 /* Blocking IO makes sense in case of
- slow user parsing of the records, but it also prevents us from cleaning
- up if the records are not consumed. */
-
-/* Records received are in extentded format now, though most of them are still
- * written in disk in changelog_rec format (to save space and time), it's
- * converted to extented format in the lustre api to ease changelog analysis. */
-#define HAVE_CHANGELOG_EXTEND_REC 1
-
-extern int llapi_changelog_start(void **priv, int flags, const char *mdtname,
- long long startrec);
-extern int llapi_changelog_fini(void **priv);
-extern int llapi_changelog_recv(void *priv, struct changelog_ext_rec **rech);
-extern int llapi_changelog_free(struct changelog_ext_rec **rech);
-/* Allow records up to endrec to be destroyed; requires registered id. */
-extern int llapi_changelog_clear(const char *mdtname, const char *idstr,
- long long endrec);
-
-/* HSM copytool interface.
- * priv is private state, managed internally by these functions
- */
-struct hsm_copytool_private;
-extern int llapi_hsm_copytool_start(struct hsm_copytool_private **priv,
- char *fsname, int flags,
- int archive_count, int *archives);
-extern int llapi_hsm_copytool_fini(struct hsm_copytool_private **priv);
-extern int llapi_hsm_copytool_recv(struct hsm_copytool_private *priv,
- struct hsm_action_list **hal, int *msgsize);
-extern int llapi_hsm_copytool_free(struct hsm_action_list **hal);
-extern int llapi_hsm_copy_start(char *mnt, struct hsm_copy *copy,
- const struct hsm_action_item *hai);
-extern int llapi_hsm_copy_end(char *mnt, struct hsm_copy *copy,
- const struct hsm_progress *hp);
-extern int llapi_hsm_progress(char *mnt, struct hsm_progress *hp);
-extern int llapi_hsm_import(const char *dst, int archive, struct stat *st,
- unsigned long long stripe_size, int stripe_offset,
- int stripe_count, int stripe_pattern,
- char *pool_name, lustre_fid *newfid);
-
-/* HSM user interface */
-extern struct hsm_user_request *llapi_hsm_user_request_alloc(int itemcount,
- int data_len);
-extern int llapi_hsm_request(char *mnt, struct hsm_user_request *request);
-extern int llapi_hsm_current_action(const char *path,
- struct hsm_current_action *hca);
-/** @} llapi */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_debug.h b/drivers/staging/lustre/lustre/include/lustre_debug.h
index 3d9e446..7ec91ed 100644
--- a/drivers/staging/lustre/lustre/include/lustre_debug.h
+++ b/drivers/staging/lustre/lustre/include/lustre_debug.h
@@ -45,25 +45,6 @@
#include <lustre_net.h>
#include <obd.h>
-#include <linux/lustre_debug.h>
-
-#define ASSERT_MAX_SIZE_MB 60000ULL
-#define ASSERT_PAGE_INDEX(index, OP) \
-do { if (index > ASSERT_MAX_SIZE_MB << (20 - PAGE_CACHE_SHIFT)) { \
- CERROR("bad page index %lu > %llu\n", index, \
- ASSERT_MAX_SIZE_MB << (20 - PAGE_CACHE_SHIFT)); \
- libcfs_debug = ~0UL; \
- OP; \
-}} while(0)
-
-#define ASSERT_FILE_OFFSET(offset, OP) \
-do { if (offset > ASSERT_MAX_SIZE_MB << 20) { \
- CERROR("bad file offset %llu > %llu\n", offset, \
- ASSERT_MAX_SIZE_MB << 20); \
- libcfs_debug = ~0UL; \
- OP; \
-}} while(0)
-
/* lib/debug.c */
void dump_lniobuf(struct niobuf_local *lnb);
int dump_req(struct ptlrpc_request *req);
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 9228b16..1de9a8b 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -50,6 +50,7 @@
#include <linux/libcfs/libcfs.h>
#include <linux/lnet/types.h>
+#include <linux/backing-dev.h>
/****************** on-disk files *********************/
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index bc2b82f..ec4bb5e 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -1285,10 +1285,11 @@ void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_get(struct ldlm_namespace *ns);
void ldlm_namespace_put(struct ldlm_namespace *ns);
-int ldlm_proc_setup(void);
#ifdef LPROCFS
+int ldlm_proc_setup(void);
void ldlm_proc_cleanup(void);
#else
+static inline int ldlm_proc_setup(void) { return 0; }
static inline void ldlm_proc_cleanup(void) {}
#endif
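Note on the hunk above: ldlm_proc_setup() moves inside the LPROCFS guard and gains an inline no-op fallback, so callers (e.g. ldlm_setup() in the ldlm_lockd.c hunk later in this series) can drop their own #ifdef LPROCFS. A minimal sketch of the same pattern, using placeholder names rather than the real Lustre ones:

#ifdef LPROCFS
int example_proc_setup(void);                   /* real version lives in a .c file */
void example_proc_cleanup(void);
#else
/* stubs keep every call site free of #ifdef clutter; they compile away */
static inline int example_proc_setup(void) { return 0; }
static inline void example_proc_cleanup(void) {}
#endif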
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
index 8c34d9d..75716f1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -35,7 +35,7 @@
#ifndef LDLM_ALL_FLAGS_MASK
/** l_flags bits marked as "all_flags" bits */
-#define LDLM_FL_ALL_FLAGS_MASK 0x007FFFFFC08F132FULL
+#define LDLM_FL_ALL_FLAGS_MASK 0x00FFFFFFC08F132FULL
/** l_flags bits marked as "ast" bits */
#define LDLM_FL_AST_MASK 0x0000000080000000ULL
@@ -53,7 +53,7 @@
#define LDLM_FL_INHERIT_MASK 0x0000000000800000ULL
/** l_flags bits marked as "local_only" bits */
-#define LDLM_FL_LOCAL_ONLY_MASK 0x007FFFFF00000000ULL
+#define LDLM_FL_LOCAL_ONLY_MASK 0x00FFFFFF00000000ULL
/** l_flags bits marked as "on_wire" bits */
#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F132FULL
@@ -358,6 +358,12 @@
#define ldlm_set_ns_srv(_l) LDLM_SET_FLAG(( _l), 1ULL << 54)
#define ldlm_clear_ns_srv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 54)
+/** Flag whether this lock can be reused. Used by exclusive open. */
+#define LDLM_FL_EXCL 0x0080000000000000ULL /* bit 55 */
+#define ldlm_is_excl(_l) LDLM_TEST_FLAG((_l), 1ULL << 55)
+#define ldlm_set_excl(_l) LDLM_SET_FLAG((_l), 1ULL << 55)
+#define ldlm_clear_excl(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 55)
+
/** test for ldlm_lock flag bit set */
#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
@@ -414,47 +420,49 @@ static int hf_lustre_ldlm_fl_server_lock = -1;
static int hf_lustre_ldlm_fl_res_locked = -1;
static int hf_lustre_ldlm_fl_waited = -1;
static int hf_lustre_ldlm_fl_ns_srv = -1;
+static int hf_lustre_ldlm_fl_excl = -1;
const value_string lustre_ldlm_flags_vals[] = {
- {LDLM_FL_LOCK_CHANGED, "LDLM_FL_LOCK_CHANGED"},
- {LDLM_FL_BLOCK_GRANTED, "LDLM_FL_BLOCK_GRANTED"},
- {LDLM_FL_BLOCK_CONV, "LDLM_FL_BLOCK_CONV"},
- {LDLM_FL_BLOCK_WAIT, "LDLM_FL_BLOCK_WAIT"},
- {LDLM_FL_AST_SENT, "LDLM_FL_AST_SENT"},
- {LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
- {LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
- {LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
- {LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
- {LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
- {LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
- {LDLM_FL_TEST_LOCK, "LDLM_FL_TEST_LOCK"},
- {LDLM_FL_CANCEL_ON_BLOCK, "LDLM_FL_CANCEL_ON_BLOCK"},
- {LDLM_FL_DENY_ON_CONTENTION, "LDLM_FL_DENY_ON_CONTENTION"},
- {LDLM_FL_AST_DISCARD_DATA, "LDLM_FL_AST_DISCARD_DATA"},
- {LDLM_FL_FAIL_LOC, "LDLM_FL_FAIL_LOC"},
- {LDLM_FL_SKIPPED, "LDLM_FL_SKIPPED"},
- {LDLM_FL_CBPENDING, "LDLM_FL_CBPENDING"},
- {LDLM_FL_WAIT_NOREPROC, "LDLM_FL_WAIT_NOREPROC"},
- {LDLM_FL_CANCEL, "LDLM_FL_CANCEL"},
- {LDLM_FL_LOCAL_ONLY, "LDLM_FL_LOCAL_ONLY"},
- {LDLM_FL_FAILED, "LDLM_FL_FAILED"},
- {LDLM_FL_CANCELING, "LDLM_FL_CANCELING"},
- {LDLM_FL_LOCAL, "LDLM_FL_LOCAL"},
- {LDLM_FL_LVB_READY, "LDLM_FL_LVB_READY"},
- {LDLM_FL_KMS_IGNORE, "LDLM_FL_KMS_IGNORE"},
- {LDLM_FL_CP_REQD, "LDLM_FL_CP_REQD"},
- {LDLM_FL_CLEANED, "LDLM_FL_CLEANED"},
- {LDLM_FL_ATOMIC_CB, "LDLM_FL_ATOMIC_CB"},
- {LDLM_FL_BL_AST, "LDLM_FL_BL_AST"},
- {LDLM_FL_BL_DONE, "LDLM_FL_BL_DONE"},
- {LDLM_FL_NO_LRU, "LDLM_FL_NO_LRU"},
- {LDLM_FL_FAIL_NOTIFIED, "LDLM_FL_FAIL_NOTIFIED"},
- {LDLM_FL_DESTROYED, "LDLM_FL_DESTROYED"},
- {LDLM_FL_SERVER_LOCK, "LDLM_FL_SERVER_LOCK"},
- {LDLM_FL_RES_LOCKED, "LDLM_FL_RES_LOCKED"},
- {LDLM_FL_WAITED, "LDLM_FL_WAITED"},
- {LDLM_FL_NS_SRV, "LDLM_FL_NS_SRV"},
- { 0, NULL }
+ {LDLM_FL_LOCK_CHANGED, "LDLM_FL_LOCK_CHANGED"},
+ {LDLM_FL_BLOCK_GRANTED, "LDLM_FL_BLOCK_GRANTED"},
+ {LDLM_FL_BLOCK_CONV, "LDLM_FL_BLOCK_CONV"},
+ {LDLM_FL_BLOCK_WAIT, "LDLM_FL_BLOCK_WAIT"},
+ {LDLM_FL_AST_SENT, "LDLM_FL_AST_SENT"},
+ {LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
+ {LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
+ {LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
+ {LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
+ {LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
+ {LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
+ {LDLM_FL_TEST_LOCK, "LDLM_FL_TEST_LOCK"},
+ {LDLM_FL_CANCEL_ON_BLOCK, "LDLM_FL_CANCEL_ON_BLOCK"},
+ {LDLM_FL_DENY_ON_CONTENTION, "LDLM_FL_DENY_ON_CONTENTION"},
+ {LDLM_FL_AST_DISCARD_DATA, "LDLM_FL_AST_DISCARD_DATA"},
+ {LDLM_FL_FAIL_LOC, "LDLM_FL_FAIL_LOC"},
+ {LDLM_FL_SKIPPED, "LDLM_FL_SKIPPED"},
+ {LDLM_FL_CBPENDING, "LDLM_FL_CBPENDING"},
+ {LDLM_FL_WAIT_NOREPROC, "LDLM_FL_WAIT_NOREPROC"},
+ {LDLM_FL_CANCEL, "LDLM_FL_CANCEL"},
+ {LDLM_FL_LOCAL_ONLY, "LDLM_FL_LOCAL_ONLY"},
+ {LDLM_FL_FAILED, "LDLM_FL_FAILED"},
+ {LDLM_FL_CANCELING, "LDLM_FL_CANCELING"},
+ {LDLM_FL_LOCAL, "LDLM_FL_LOCAL"},
+ {LDLM_FL_LVB_READY, "LDLM_FL_LVB_READY"},
+ {LDLM_FL_KMS_IGNORE, "LDLM_FL_KMS_IGNORE"},
+ {LDLM_FL_CP_REQD, "LDLM_FL_CP_REQD"},
+ {LDLM_FL_CLEANED, "LDLM_FL_CLEANED"},
+ {LDLM_FL_ATOMIC_CB, "LDLM_FL_ATOMIC_CB"},
+ {LDLM_FL_BL_AST, "LDLM_FL_BL_AST"},
+ {LDLM_FL_BL_DONE, "LDLM_FL_BL_DONE"},
+ {LDLM_FL_NO_LRU, "LDLM_FL_NO_LRU"},
+ {LDLM_FL_FAIL_NOTIFIED, "LDLM_FL_FAIL_NOTIFIED"},
+ {LDLM_FL_DESTROYED, "LDLM_FL_DESTROYED"},
+ {LDLM_FL_SERVER_LOCK, "LDLM_FL_SERVER_LOCK"},
+ {LDLM_FL_RES_LOCKED, "LDLM_FL_RES_LOCKED"},
+ {LDLM_FL_WAITED, "LDLM_FL_WAITED"},
+ {LDLM_FL_NS_SRV, "LDLM_FL_NS_SRV"},
+ {LDLM_FL_EXCL, "LDLM_FL_EXCL"},
+ { 0, NULL }
};
#endif /* WIRESHARK_COMPILE */
#endif /* LDLM_ALL_FLAGS_MASK */
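The new LDLM_FL_EXCL flag sits in bit 55, which is exactly why LDLM_FL_ALL_FLAGS_MASK and LDLM_FL_LOCAL_ONLY_MASK grow from a 0x007F... to a 0x00FF... prefix in the hunks above. A standalone, hedged check of that arithmetic (plain userspace C, not part of the header):

#include <assert.h>

#define EXCL_BIT        0x0080000000000000ULL   /* 1ULL << 55, the new LDLM_FL_EXCL */
#define ALL_FLAGS_OLD   0x007FFFFFC08F132FULL
#define ALL_FLAGS_NEW   0x00FFFFFFC08F132FULL

int main(void)
{
        assert((ALL_FLAGS_OLD & EXCL_BIT) == 0);              /* bit 55 was not covered */
        assert((ALL_FLAGS_NEW & EXCL_BIT) == EXCL_BIT);       /* now it is */
        assert((ALL_FLAGS_OLD ^ ALL_FLAGS_NEW) == EXCL_BIT);  /* and nothing else changed */
        return 0;
}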
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index ff11953..84a897e 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -431,12 +431,6 @@ struct lu_server_seq {
struct seq_server_site *lss_site;
};
-struct com_thread_info;
-int seq_query(struct com_thread_info *info);
-
-struct ptlrpc_request;
-int seq_handle(struct ptlrpc_request *req);
-
/* Server methods */
int seq_server_init(struct lu_server_seq *seq,
diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h
index 105f6d6..f3ae02b 100644
--- a/drivers/staging/lustre/lustre/include/lustre_ha.h
+++ b/drivers/staging/lustre/lustre/include/lustre_ha.h
@@ -58,9 +58,6 @@ void ptlrpc_activate_import(struct obd_import *imp);
void ptlrpc_deactivate_import(struct obd_import *imp);
void ptlrpc_invalidate_import(struct obd_import *imp);
void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt);
-int ptlrpc_check_suspend(void);
-void ptlrpc_activate_timeouts(struct obd_import *imp);
-void ptlrpc_deactivate_timeouts(struct obd_import *imp);
/** @} ha */
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 5e11107..609a090 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -81,11 +81,12 @@ struct client_obd *client_conn2cli(struct lustre_handle *conn);
struct md_open_data;
struct obd_client_handle {
- struct lustre_handle och_fh;
- struct lu_fid och_fid;
- struct md_open_data *och_mod;
- __u32 och_magic;
- int och_flags;
+ struct lustre_handle och_fh;
+ struct lu_fid och_fid;
+ struct md_open_data *och_mod;
+ struct lustre_handle och_lease_handle; /* open lock for lease */
+ __u32 och_magic;
+ fmode_t och_flags;
};
#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
index 721aa05..896c757 100644
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -136,7 +136,11 @@ int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt,
struct llog_handle **lgh, struct llog_logid *logid,
char *name, enum llog_open_param open_param);
int llog_close(const struct lu_env *env, struct llog_handle *cathandle);
-int llog_get_size(struct llog_handle *loghandle);
+int llog_is_empty(const struct lu_env *env, struct llog_ctxt *ctxt,
+ char *name);
+int llog_backup(const struct lu_env *env, struct obd_device *obd,
+ struct llog_ctxt *ctxt, struct llog_ctxt *bak_ctxt,
+ char *name, char *backup);
/* llog_process flags */
#define LLOG_FLAG_NODEAMON 0x0001
@@ -382,6 +386,13 @@ static inline int llog_data_len(int len)
return cfs_size_round(len);
}
+static inline int llog_get_size(struct llog_handle *loghandle)
+{
+ if (loghandle && loghandle->lgh_hdr)
+ return loghandle->lgh_hdr->llh_count;
+ return 0;
+}
+
static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt)
{
atomic_inc(&ctxt->loc_refcount);
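llog_get_size() becomes a trivial header inline that reads llh_count from the cached log header and returns 0 for a NULL handle or header, so it doubles as a cheap emptiness test. A hedged fragment, assuming loghandle is an open struct llog_handle:

/* a missing header simply reads as zero records */
if (llog_get_size(loghandle) == 0)
        CDEBUG(D_INFO, "log is empty, nothing to process\n");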
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index 1900025..c1e0270 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -48,12 +48,9 @@
* @{
*/
-# include <linux/fs.h>
-# include <linux/dcache.h>
-# ifdef CONFIG_FS_POSIX_ACL
-# include <linux/posix_acl_xattr.h>
-# endif /* CONFIG_FS_POSIX_ACL */
-# include <linux/lustre_intent.h>
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/lustre_intent.h>
#include <lustre_handles.h>
#include <linux/libcfs/libcfs.h>
#include <obd_class.h>
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 72edf01..d8d0880 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -264,241 +264,7 @@
#define LDLM_MAXREQSIZE (5 * 1024)
#define LDLM_MAXREPSIZE (1024)
- /*
- * MDS threads constants:
- *
- * Please see examples in "Thread Constants", MDS threads number will be at
- * the comparable level of old versions, unless the server has many cores.
- */
-#ifndef MDS_MAX_THREADS
-#define MDS_MAX_THREADS 1024
-#define MDS_MAX_OTHR_THREADS 256
-
-#else /* MDS_MAX_THREADS */
-#if MDS_MAX_THREADS < PTLRPC_NTHRS_INIT
-#undef MDS_MAX_THREADS
-#define MDS_MAX_THREADS PTLRPC_NTHRS_INIT
-#endif
-#define MDS_MAX_OTHR_THREADS max(PTLRPC_NTHRS_INIT, MDS_MAX_THREADS / 2)
-#endif
-
-/* default service */
-#define MDS_THR_FACTOR 8
-#define MDS_NTHRS_INIT PTLRPC_NTHRS_INIT
-#define MDS_NTHRS_MAX MDS_MAX_THREADS
-#define MDS_NTHRS_BASE min(64, MDS_NTHRS_MAX)
-
-/* read-page service */
-#define MDS_RDPG_THR_FACTOR 4
-#define MDS_RDPG_NTHRS_INIT PTLRPC_NTHRS_INIT
-#define MDS_RDPG_NTHRS_MAX MDS_MAX_OTHR_THREADS
-#define MDS_RDPG_NTHRS_BASE min(48, MDS_RDPG_NTHRS_MAX)
-
-/* these should be removed when we remove setattr service in the future */
-#define MDS_SETA_THR_FACTOR 4
-#define MDS_SETA_NTHRS_INIT PTLRPC_NTHRS_INIT
-#define MDS_SETA_NTHRS_MAX MDS_MAX_OTHR_THREADS
-#define MDS_SETA_NTHRS_BASE min(48, MDS_SETA_NTHRS_MAX)
-
-/* non-affinity threads */
-#define MDS_OTHR_NTHRS_INIT PTLRPC_NTHRS_INIT
-#define MDS_OTHR_NTHRS_MAX MDS_MAX_OTHR_THREADS
-
-#define MDS_NBUFS 64
-
-/**
- * Assume file name length = FNAME_MAX = 256 (true for ext3).
- * path name length = PATH_MAX = 4096
- * LOV MD size max = EA_MAX = 24 * 2000
- * (NB: 24 is size of lov_ost_data)
- * LOV LOGCOOKIE size max = 32 * 2000
- * (NB: 32 is size of llog_cookie)
- * symlink: FNAME_MAX + PATH_MAX <- largest
- * link: FNAME_MAX + PATH_MAX (mds_rec_link < mds_rec_create)
- * rename: FNAME_MAX + FNAME_MAX
- * open: FNAME_MAX + EA_MAX
- *
- * MDS_MAXREQSIZE ~= 4736 bytes =
- * lustre_msg + ldlm_request + mdt_body + mds_rec_create + FNAME_MAX + PATH_MAX
- * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
- *
- * Realistic size is about 512 bytes (20 character name + 128 char symlink),
- * except in the open case where there are a large number of OSTs in a LOV.
- */
-#define MDS_MAXREQSIZE (5 * 1024) /* >= 4736 */
-#define MDS_MAXREPSIZE (9 * 1024) /* >= 8300 */
-
-/**
- * MDS incoming request with LOV EA
- * 24 = sizeof(struct lov_ost_data), i.e: replay of opencreate
- */
-#define MDS_LOV_MAXREQSIZE max(MDS_MAXREQSIZE, \
- 362 + LOV_MAX_STRIPE_COUNT * 24)
-/**
- * MDS outgoing reply with LOV EA
- *
- * NB: max reply size Lustre 2.4+ client can get from old MDS is:
- * LOV_MAX_STRIPE_COUNT * (llog_cookie + lov_ost_data) + extra bytes
- *
- * but 2.4 or later MDS will never send reply with llog_cookie to any
- * version client. This macro is defined for server side reply buffer size.
- */
-#define MDS_LOV_MAXREPSIZE MDS_LOV_MAXREQSIZE
-
-/**
- * This is the size of a maximum REINT_SETXATTR request:
- *
- * lustre_msg 56 (32 + 4 x 5 + 4)
- * ptlrpc_body 184
- * mdt_rec_setxattr 136
- * lustre_capa 120
- * name 256 (XATTR_NAME_MAX)
- * value 65536 (XATTR_SIZE_MAX)
- */
-#define MDS_EA_MAXREQSIZE 66288
-
-/**
- * These are the maximum request and reply sizes (rounded up to 1 KB
- * boundaries) for the "regular" MDS_REQUEST_PORTAL and MDS_REPLY_PORTAL.
- */
-#define MDS_REG_MAXREQSIZE (((max(MDS_EA_MAXREQSIZE, \
- MDS_LOV_MAXREQSIZE) + 1023) >> 10) << 10)
-#define MDS_REG_MAXREPSIZE MDS_REG_MAXREQSIZE
-
-/**
- * The update request includes all of updates from the create, which might
- * include linkea (4K maxim), together with other updates, we set it to 9K:
- * lustre_msg + ptlrpc_body + UPDATE_BUF_SIZE (8K)
- */
-#define MDS_OUT_MAXREQSIZE (9 * 1024)
-#define MDS_OUT_MAXREPSIZE MDS_MAXREPSIZE
-
-/** MDS_BUFSIZE = max_reqsize (w/o LOV EA) + max sptlrpc payload size */
-#define MDS_BUFSIZE max(MDS_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
- 8 * 1024)
-
-/**
- * MDS_REG_BUFSIZE should at least be MDS_REG_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD.
- * However, we need to allocate a much larger buffer for it because LNet
- * requires each MD(rqbd) has at least MDS_REQ_MAXREQSIZE bytes left to avoid
- * dropping of maximum-sized incoming request. So if MDS_REG_BUFSIZE is only a
- * little larger than MDS_REG_MAXREQSIZE, then it can only fit in one request
- * even there are about MDS_REG_MAX_REQSIZE bytes left in a rqbd, and memory
- * utilization is very low.
- *
- * In the meanwhile, size of rqbd can't be too large, because rqbd can't be
- * reused until all requests fit in it have been processed and released,
- * which means one long blocked request can prevent the rqbd be reused.
- * Now we set request buffer size to 160 KB, so even each rqbd is unlinked
- * from LNet with unused 65 KB, buffer utilization will be about 59%.
- * Please check LU-2432 for details.
- */
-#define MDS_REG_BUFSIZE max(MDS_REG_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
- 160 * 1024)
-
-/**
- * MDS_OUT_BUFSIZE = max_out_reqsize + max sptlrpc payload (~1K) which is
- * about 10K, for the same reason as MDS_REG_BUFSIZE, we also give some
- * extra bytes to each request buffer to improve buffer utilization rate.
- */
-#define MDS_OUT_BUFSIZE max(MDS_OUT_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
- 24 * 1024)
-
-/** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc */
-#define FLD_MAXREQSIZE (160)
-
-/** FLD_MAXREPSIZE == lustre_msg + ptlrpc_body */
-#define FLD_MAXREPSIZE (152)
-#define FLD_BUFSIZE (1 << 12)
-
-/**
- * SEQ_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + lu_range +
- * __u32 padding */
-#define SEQ_MAXREQSIZE (160)
-
-/** SEQ_MAXREPSIZE == lustre_msg + ptlrpc_body + lu_range */
-#define SEQ_MAXREPSIZE (152)
-#define SEQ_BUFSIZE (1 << 12)
-
-/** MGS threads must be >= 3, see bug 22458 comment #28 */
-#define MGS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
-#define MGS_NTHRS_MAX 32
-
-#define MGS_NBUFS 64
-#define MGS_BUFSIZE (8 * 1024)
-#define MGS_MAXREQSIZE (7 * 1024)
-#define MGS_MAXREPSIZE (9 * 1024)
-
- /*
- * OSS threads constants:
- *
- * Given 8 as factor and 64 as base threads number
- *
- * example 1):
- * On 8-core server configured to 2 partitions, we will have
- * 64 + 8 * 4 = 96 threads for each partition, 192 total threads.
- *
- * example 2):
- * On 32-core machine configured to 4 partitions, we will have
- * 64 + 8 * 8 = 112 threads for each partition, so total threads number
- * will be 112 * 4 = 448.
- *
- * example 3):
- * On 64-core machine configured to 4 partitions, we will have
- * 64 + 16 * 8 = 192 threads for each partition, so total threads number
- * will be 192 * 4 = 768 which is above limit OSS_NTHRS_MAX(512), so we
- * cut off the value to OSS_NTHRS_MAX(512) / 4 which is 128 threads
- * for each partition.
- *
- * So we can see that with these constants, threads number wil be at the
- * similar level of old versions, unless the server has many cores.
- */
- /* depress threads factor for VM with small memory size */
-#define OSS_THR_FACTOR min_t(int, 8, \
- NUM_CACHEPAGES >> (28 - PAGE_CACHE_SHIFT))
-#define OSS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
-#define OSS_NTHRS_BASE 64
-#define OSS_NTHRS_MAX 512
-
-/* threads for handling "create" request */
-#define OSS_CR_THR_FACTOR 1
-#define OSS_CR_NTHRS_INIT PTLRPC_NTHRS_INIT
-#define OSS_CR_NTHRS_BASE 8
-#define OSS_CR_NTHRS_MAX 64
-
-/**
- * OST_IO_MAXREQSIZE ~=
- * lustre_msg + ptlrpc_body + obdo + obd_ioobj +
- * DT_MAX_BRW_PAGES * niobuf_remote
- *
- * - single object with 16 pages is 512 bytes
- * - OST_IO_MAXREQSIZE must be at least 1 page of cookies plus some spillover
- * - Must be a multiple of 1024
- * - actual size is about 18K
- */
-#define _OST_MAXREQSIZE_SUM (sizeof(struct lustre_msg) + \
- sizeof(struct ptlrpc_body) + \
- sizeof(struct obdo) + \
- sizeof(struct obd_ioobj) + \
- sizeof(struct niobuf_remote) * DT_MAX_BRW_PAGES)
-/**
- * FIEMAP request can be 4K+ for now
- */
#define OST_MAXREQSIZE (5 * 1024)
-#define OST_IO_MAXREQSIZE max_t(int, OST_MAXREQSIZE, \
- (((_OST_MAXREQSIZE_SUM - 1) | (1024 - 1)) + 1))
-
-#define OST_MAXREPSIZE (9 * 1024)
-#define OST_IO_MAXREPSIZE OST_MAXREPSIZE
-
-#define OST_NBUFS 64
-/** OST_BUFSIZE = max_reqsize + max sptlrpc payload size */
-#define OST_BUFSIZE max_t(int, OST_MAXREQSIZE + 1024, 16 * 1024)
-/**
- * OST_IO_MAXREQSIZE is 18K, giving extra 46K can increase buffer utilization
- * rate of request buffer, please check comment of MDS_LOV_BUFSIZE for details.
- */
-#define OST_IO_BUFSIZE max_t(int, OST_IO_MAXREQSIZE + 1024, 64 * 1024)
/* Macro to hide a typecast. */
#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
@@ -3403,10 +3169,8 @@ int ptlrpc_del_timeout_client(struct list_head *obd_list,
enum timeout_event event);
struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
int ptlrpc_obd_ping(struct obd_device *obd);
-cfs_time_t ptlrpc_suspend_wakeup_time(void);
void ping_evictor_start(void);
void ping_evictor_stop(void);
-int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req);
void ptlrpc_pinger_ir_up(void);
void ptlrpc_pinger_ir_down(void);
/** @} */
@@ -3470,15 +3234,6 @@ static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {}
#endif
/** @} */
-/* ptlrpc/llog_server.c */
-int llog_origin_handle_open(struct ptlrpc_request *req);
-int llog_origin_handle_destroy(struct ptlrpc_request *req);
-int llog_origin_handle_prev_block(struct ptlrpc_request *req);
-int llog_origin_handle_next_block(struct ptlrpc_request *req);
-int llog_origin_handle_read_header(struct ptlrpc_request *req);
-int llog_origin_handle_close(struct ptlrpc_request *req);
-int llog_origin_handle_cancel(struct ptlrpc_request *req);
-
/* ptlrpc/llog_client.c */
extern struct llog_operations llog_client_ops;
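Among the constants removed above, MDS_REG_MAXREQSIZE rounds up to the next 1 KB boundary with ((x + 1023) >> 10) << 10 while OST_IO_MAXREQSIZE uses (((x - 1) | 1023) + 1); the two idioms are equivalent for positive sizes. A small hedged check, independent of the Lustre headers:

#include <assert.h>

static unsigned int round_up_shift(unsigned int x) { return ((x + 1023) >> 10) << 10; }
static unsigned int round_up_or(unsigned int x)    { return ((x - 1) | 1023) + 1; }

int main(void)
{
        unsigned int x;

        for (x = 1; x < 200000; x++)
                assert(round_up_shift(x) == round_up_or(x));
        /* e.g. the removed MDS_EA_MAXREQSIZE of 66288 bytes rounds up to 66560 */
        assert(round_up_shift(66288) == 66560);
        return 0;
}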
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index f4d3820..a83db61 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -164,6 +164,7 @@ extern struct req_format RQF_UPDATE_OBJ;
*/
extern struct req_format RQF_MDS_GETATTR_NAME;
extern struct req_format RQF_MDS_CLOSE;
+extern struct req_format RQF_MDS_RELEASE_CLOSE;
extern struct req_format RQF_MDS_PIN;
extern struct req_format RQF_MDS_UNPIN;
extern struct req_format RQF_MDS_CONNECT;
@@ -229,6 +230,7 @@ extern struct req_format RQF_LDLM_INTENT_GETATTR;
extern struct req_format RQF_LDLM_INTENT_OPEN;
extern struct req_format RQF_LDLM_INTENT_CREATE;
extern struct req_format RQF_LDLM_INTENT_UNLINK;
+extern struct req_format RQF_LDLM_INTENT_GETXATTR;
extern struct req_format RQF_LDLM_INTENT_QUOTA;
extern struct req_format RQF_LDLM_CANCEL;
extern struct req_format RQF_LDLM_CALLBACK;
@@ -245,6 +247,8 @@ extern struct req_format RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK;
extern struct req_format RQF_LLOG_ORIGIN_HANDLE_READ_HEADER;
extern struct req_format RQF_LLOG_ORIGIN_CONNECT;
+extern struct req_format RQF_CONNECT;
+
extern struct req_msg_field RMF_GENERIC_DATA;
extern struct req_msg_field RMF_PTLRPC_BODY;
extern struct req_msg_field RMF_MDT_BODY;
@@ -260,6 +264,7 @@ extern struct req_msg_field RMF_GETINFO_VAL;
extern struct req_msg_field RMF_GETINFO_VALLEN;
extern struct req_msg_field RMF_GETINFO_KEY;
extern struct req_msg_field RMF_IDX_INFO;
+extern struct req_msg_field RMF_CLOSE_DATA;
/*
* connection handle received in MDS_CONNECT request.
@@ -275,6 +280,8 @@ extern struct req_msg_field RMF_LAYOUT_INTENT;
extern struct req_msg_field RMF_MDT_MD;
extern struct req_msg_field RMF_REC_REINT;
extern struct req_msg_field RMF_EADATA;
+extern struct req_msg_field RMF_EAVALS;
+extern struct req_msg_field RMF_EAVALS_LENS;
extern struct req_msg_field RMF_ACL;
extern struct req_msg_field RMF_LOGCOOKIES;
extern struct req_msg_field RMF_CAPA1;
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index 70b8b13..885247d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -903,12 +903,6 @@ struct ptlrpc_bulk_sec_desc {
/*
- * lprocfs
- */
-struct proc_dir_entry;
-extern struct proc_dir_entry *sptlrpc_proc_root;
-
-/*
* round size up to next power of 2, for slab allocation.
* @size must be sane (can't overflow after round up)
*/
@@ -1067,7 +1061,18 @@ void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx);
/* misc */
const char * sec2target_str(struct ptlrpc_sec *sec);
+/*
+ * lprocfs
+ */
+#ifdef LPROCFS
+struct proc_dir_entry;
+extern struct proc_dir_entry *sptlrpc_proc_root;
int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev);
+#else
+#define sptlrpc_proc_root NULL
+static inline int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev)
+{ return 0; }
+#endif
/*
* server side
diff --git a/drivers/staging/lustre/lustre/include/md_object.h b/drivers/staging/lustre/lustre/include/md_object.h
index daf93af..7b45b47 100644
--- a/drivers/staging/lustre/lustre/include/md_object.h
+++ b/drivers/staging/lustre/lustre/include/md_object.h
@@ -352,8 +352,8 @@ struct md_device_operations {
int (*mdo_root_get)(const struct lu_env *env, struct md_device *m,
struct lu_fid *f);
- int (*mdo_maxsize_get)(const struct lu_env *env, struct md_device *m,
- int *md_size, int *cookie_size);
+ int (*mdo_maxeasize_get)(const struct lu_env *env, struct md_device *m,
+ int *easize);
int (*mdo_statfs)(const struct lu_env *env, struct md_device *m,
struct obd_statfs *sfs);
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index d0aea15..c3470ce 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -399,8 +399,8 @@ struct client_obd {
/* mgc datastruct */
struct semaphore cl_mgc_sem;
- struct vfsmount *cl_mgc_vfsmnt;
- struct dentry *cl_mgc_configs_dir;
+ struct local_oid_storage *cl_mgc_los;
+ struct dt_object *cl_mgc_configs_dir;
atomic_t cl_mgc_refcount;
struct obd_export *cl_mgc_mgsexp;
@@ -1022,6 +1022,7 @@ struct lu_context;
#define IT_LAYOUT (1 << 10)
#define IT_QUOTA_DQACQ (1 << 11)
#define IT_QUOTA_CONN (1 << 12)
+#define IT_SETXATTR (1 << 13)
static inline int it_to_lock_mode(struct lookup_intent *it)
{
@@ -1031,6 +1032,10 @@ static inline int it_to_lock_mode(struct lookup_intent *it)
else if (it->it_op & (IT_READDIR | IT_GETATTR | IT_OPEN | IT_LOOKUP |
IT_LAYOUT))
return LCK_CR;
+ else if (it->it_op & IT_GETXATTR)
+ return LCK_PR;
+ else if (it->it_op & IT_SETXATTR)
+ return LCK_PW;
LASSERTF(0, "Invalid it_op: %d\n", it->it_op);
return -EINVAL;
@@ -1070,7 +1075,7 @@ struct md_op_data {
struct obd_capa *op_capa2;
/* Various operation flags. */
- __u32 op_bias;
+ enum mds_op_bias op_bias;
/* Operation type */
__u32 op_opc;
@@ -1084,6 +1089,10 @@ struct md_op_data {
/* used to transfer info between the stacks of MD client
* see enum op_cli_flags */
__u32 op_cli_flags;
+
+ /* File object data version for HSM release, on client */
+ __u64 op_data_version;
+ struct lustre_handle op_lease_handle;
};
enum op_cli_flags {
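With IT_SETXATTR added as bit 13 above, it_to_lock_mode() now maps xattr intents onto read/write lock modes: IT_GETXATTR takes LCK_PR, IT_SETXATTR takes LCK_PW. A hedged usage sketch, assuming the surrounding obd.h definitions (a real caller would fill in the other lookup_intent fields):

/* pick the DLM mode for an xattr operation before enqueueing the intent lock */
static int xattr_lock_mode(bool set)
{
        struct lookup_intent it = { .it_op = set ? IT_SETXATTR : IT_GETXATTR };

        return it_to_lock_mode(&it);    /* LCK_PW for setxattr, LCK_PR for getxattr */
}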
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 9697e7f..977bc23 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -256,6 +256,7 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
#define OBD_FAIL_OSD_SCRUB_FATAL 0x192
#define OBD_FAIL_OSD_FID_MAPPING 0x193
#define OBD_FAIL_OSD_LMA_INCOMPAT 0x194
+#define OBD_FAIL_OSD_COMPAT_INVALID_ENTRY 0x195
#define OBD_FAIL_OST 0x200
#define OBD_FAIL_OST_CONNECT_NET 0x201
@@ -416,6 +417,13 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
#define OBD_FAIL_MGC_PAUSE_PROCESS_LOG 0x903
#define OBD_FAIL_MGS_PAUSE_REQ 0x904
#define OBD_FAIL_MGS_PAUSE_TARGET_REG 0x905
+#define OBD_FAIL_MGS_CONNECT_NET 0x906
+#define OBD_FAIL_MGS_DISCONNECT_NET 0x907
+#define OBD_FAIL_MGS_SET_INFO_NET 0x908
+#define OBD_FAIL_MGS_EXCEPTION_NET 0x909
+#define OBD_FAIL_MGS_TARGET_REG_NET 0x90a
+#define OBD_FAIL_MGS_TARGET_DEL_NET 0x90b
+#define OBD_FAIL_MGS_CONFIG_READ_NET 0x90c
#define OBD_FAIL_QUOTA_DQACQ_NET 0xA01
#define OBD_FAIL_QUOTA_EDQUOT 0xA02
@@ -457,6 +465,7 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
#define OBD_FAIL_LOCK_STATE_WAIT_INTR 0x1402
#define OBD_FAIL_LOV_INIT 0x1403
#define OBD_FAIL_GLIMPSE_DELAY 0x1404
+#define OBD_FAIL_LLITE_XATTR_ENOMEM 0x1405
#define OBD_FAIL_FID_INDIR 0x1501
#define OBD_FAIL_FID_INLMA 0x1502
@@ -498,6 +507,8 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
extern atomic_t libcfs_kmemory;
+extern void obd_update_maxusage(void);
+
#ifdef LPROCFS
#define obd_memory_add(size) \
lprocfs_counter_add(obd_memory, OBD_MEMORY_STAT, (long)(size))
@@ -516,7 +527,6 @@ extern atomic_t libcfs_kmemory;
lprocfs_stats_collector(obd_memory, OBD_MEMORY_PAGES_STAT, \
LPROCFS_FIELDS_FLAGS_SUM)
-extern void obd_update_maxusage(void);
extern __u64 obd_memory_max(void);
extern __u64 obd_pages_max(void);
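The new OBD_FAIL_MGS_*_NET and OBD_FAIL_LLITE_XATTR_ENOMEM codes above are fault-injection points consumed through OBD_FAIL_CHECK(), just as the ldlm_lockd.c handler below checks OBD_FAIL_OBD_QC_CALLBACK_NET. A hedged sketch of how such a point is typically wired into a handler (the function name is a placeholder, not from this patch):

static int example_mgs_connect_handler(struct ptlrpc_request *req)
{
        /* when the fail point is armed through the fail_loc tunable, drop the
         * request silently to simulate a lost message on the wire */
        if (OBD_FAIL_CHECK(OBD_FAIL_MGS_CONNECT_NET))
                return 0;

        /* ... normal connect processing would follow ... */
        return 0;
}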
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index e60c04d..94b1641 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -79,27 +79,27 @@ static struct lu_kmem_descr ccc_caches[] = {
{
.ckd_cache = &ccc_lock_kmem,
.ckd_name = "ccc_lock_kmem",
- .ckd_size = sizeof (struct ccc_lock)
+ .ckd_size = sizeof(struct ccc_lock)
},
{
.ckd_cache = &ccc_object_kmem,
.ckd_name = "ccc_object_kmem",
- .ckd_size = sizeof (struct ccc_object)
+ .ckd_size = sizeof(struct ccc_object)
},
{
.ckd_cache = &ccc_thread_kmem,
.ckd_name = "ccc_thread_kmem",
- .ckd_size = sizeof (struct ccc_thread_info),
+ .ckd_size = sizeof(struct ccc_thread_info),
},
{
.ckd_cache = &ccc_session_kmem,
.ckd_name = "ccc_session_kmem",
- .ckd_size = sizeof (struct ccc_session)
+ .ckd_size = sizeof(struct ccc_session)
},
{
.ckd_cache = &ccc_req_kmem,
.ckd_name = "ccc_req_kmem",
- .ckd_size = sizeof (struct ccc_req)
+ .ckd_size = sizeof(struct ccc_req)
},
{
.ckd_cache = NULL
@@ -162,7 +162,7 @@ struct lu_context_key ccc_session_key = {
/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
-// LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key);
+/* LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key); */
int ccc_device_init(const struct lu_env *env, struct lu_device *d,
const char *name, struct lu_device *next)
@@ -1006,6 +1006,12 @@ again:
cl_io_fini(env, io);
if (unlikely(io->ci_need_restart))
goto again;
+ /* HSM import case: the file is released and cannot be restored;
+ * no need to fail unless restore registration failed
+ * with -ENODATA */
+ if (result == -ENODATA && io->ci_restore_needed &&
+ io->ci_result != -ENODATA)
+ result = 0;
cl_env_put(env, &refcheck);
return result;
}
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
index 2b4dbee..e04c2d3 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
@@ -140,7 +140,9 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
if (rc) {
- LASSERT(rc < 0);
+ /* Does not make sense to take GL for released layout */
+ if (rc > 0)
+ rc = -ENOTSUPP;
cl_env_put(env, &refcheck);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index 39fcdac..c9aae13 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -95,20 +95,12 @@ ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
lock->l_policy_data.l_flock.start));
}
-static inline int ldlm_flock_blocking_link(struct ldlm_lock *req,
- struct ldlm_lock *lock)
+static inline void ldlm_flock_blocking_link(struct ldlm_lock *req,
+ struct ldlm_lock *lock)
{
- int rc = 0;
-
/* For server only */
if (req->l_export == NULL)
- return 0;
-
- if (unlikely(req->l_export->exp_flock_hash == NULL)) {
- rc = ldlm_init_flock_export(req->l_export);
- if (rc)
- goto error;
- }
+ return;
LASSERT(hlist_unhashed(&req->l_exp_flock_hash));
@@ -121,8 +113,6 @@ static inline int ldlm_flock_blocking_link(struct ldlm_lock *req,
cfs_hash_add(req->l_export->exp_flock_hash,
&req->l_policy_data.l_flock.owner,
&req->l_exp_flock_hash);
-error:
- return rc;
}
static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
@@ -250,7 +240,6 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
int overlaps = 0;
int splitted = 0;
const struct ldlm_callback_suite null_cbs = { NULL };
- int rc;
CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
LPU64" end "LPU64"\n", *flags,
@@ -328,12 +317,8 @@ reprocess:
/* add lock to blocking list before deadlock
* check to prevent race */
- rc = ldlm_flock_blocking_link(req, lock);
- if (rc) {
- ldlm_flock_destroy(req, mode, *flags);
- *err = rc;
- return LDLM_ITER_STOP;
- }
+ ldlm_flock_blocking_link(req, lock);
+
if (ldlm_flock_deadlock(req, lock)) {
ldlm_flock_blocking_unlink(req);
ldlm_flock_destroy(req, mode, *flags);
@@ -665,23 +650,20 @@ granted:
/* fcntl(F_GETLK) request */
/* The old mode was saved in getlk->fl_type so that if the mode
* in the lock changes we can decref the appropriate refcount.*/
- ldlm_flock_destroy(lock, flock_type(getlk),
- LDLM_FL_WAIT_NOREPROC);
+ ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
switch (lock->l_granted_mode) {
case LCK_PR:
- flock_set_type(getlk, F_RDLCK);
+ getlk->fl_type = F_RDLCK;
break;
case LCK_PW:
- flock_set_type(getlk, F_WRLCK);
+ getlk->fl_type = F_WRLCK;
break;
default:
- flock_set_type(getlk, F_UNLCK);
+ getlk->fl_type = F_UNLCK;
}
- flock_set_pid(getlk, (pid_t)lock->l_policy_data.l_flock.pid);
- flock_set_start(getlk,
- (loff_t)lock->l_policy_data.l_flock.start);
- flock_set_end(getlk,
- (loff_t)lock->l_policy_data.l_flock.end);
+ getlk->fl_pid = (pid_t)lock->l_policy_data.l_flock.pid;
+ getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start;
+ getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end;
} else {
__u64 noreproc = LDLM_FL_WAIT_NOREPROC;
@@ -816,6 +798,9 @@ static cfs_hash_ops_t ldlm_export_flock_ops = {
int ldlm_init_flock_export(struct obd_export *exp)
{
+ if (strcmp(exp->exp_obd->obd_type->typ_name, LUSTRE_MDT_NAME) != 0)
+ return 0;
+
exp->exp_flock_hash =
cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
HASH_EXP_LOCK_CUR_BITS,
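The F_GETLK branch above now fills struct file_lock directly (fl_type, fl_pid, fl_start, fl_end) instead of going through the removed flock_set_*() accessors; the only translation it performs is granted LDLM mode to fcntl lock type. A hedged standalone restatement (the helper name is illustrative, not in the patch):

static int ldlm_mode_to_flock_type(int granted_mode)
{
        switch (granted_mode) {
        case LCK_PR:
                return F_RDLCK;         /* shared (read) lock */
        case LCK_PW:
                return F_WRLCK;         /* exclusive (write) lock */
        default:
                return F_UNLCK;         /* nothing conflicting to report */
        }
}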
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 3900a69..692623b 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -145,6 +145,8 @@ char *ldlm_it2str(int it)
return "getxattr";
case IT_LAYOUT:
return "layout";
+ case IT_SETXATTR:
+ return "setxattr";
default:
CERROR("Unknown intent %d\n", it);
return "UNKNOWN";
@@ -799,7 +801,7 @@ void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
* Removes reader/writer reference for LDLM lock \a lock.
* Assumes LDLM lock is already locked.
* only called in ldlm_flock_destroy and for local locks.
- * Does NOT add lock to LRU if no r/w references left to accomodate flock locks
+ * Does NOT add lock to LRU if no r/w references left to accommodate flock locks
* that cannot be placed in LRU.
*/
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
@@ -1129,6 +1131,11 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
if (lock == old_lock)
break;
+ /* Check if this lock can be matched.
+ * Used by LU-2919 (exclusive open) for the open lease lock */
+ if (ldlm_is_excl(lock))
+ continue;
+
/* llite sometimes wants to match locks that will be
* canceled when their users drop, but we allow it to match
* if it passes in CBPENDING and the lock still has users.
@@ -1247,7 +1254,7 @@ EXPORT_SYMBOL(ldlm_lock_allow_match);
* list will be considered
* If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
* to be canceled can still be matched as long as they still have reader
- * or writer refernces
+ * or writer references
* If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
* just tell us if we would have matched.
*
@@ -2090,8 +2097,8 @@ void ldlm_cancel_locks_for_export(struct obd_export *exp)
/**
* Downgrade an exclusive lock.
*
- * A fast variant of ldlm_lock_convert for convertion of exclusive
- * locks. The convertion is always successful.
+ * A fast variant of ldlm_lock_convert for conversion of exclusive
+ * locks. The conversion is always successful.
* Used by Commit on Sharing (COS) code.
*
* \param lock A lock to convert
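The search_queue() change above makes ldlm_lock_match() skip any lock carrying LDLM_FL_EXCL, which keeps the exclusive-open (lease) lock private to its opener; on the client enqueue side the flag is simply propagated from the caller's flags, as the ldlm_request.c hunk below shows. A hedged fragment, assuming lock is a struct ldlm_lock pointer:

ldlm_set_excl(lock);            /* sets bit 55, per the lustre_dlm_flags.h hunk */
if (ldlm_is_excl(lock))
        CDEBUG(D_INFO, "exclusive lease lock, ldlm_lock_match() will skip it\n");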
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index fde9bcd..3ed020e 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -49,12 +49,12 @@
#include "ldlm_internal.h"
static int ldlm_num_threads;
-CFS_MODULE_PARM(ldlm_num_threads, "i", int, 0444,
- "number of DLM service threads to start");
+module_param(ldlm_num_threads, int, 0444);
+MODULE_PARM_DESC(ldlm_num_threads, "number of DLM service threads to start");
static char *ldlm_cpts;
-CFS_MODULE_PARM(ldlm_cpts, "s", charp, 0444,
- "CPU partitions ldlm threads should run on");
+module_param(ldlm_cpts, charp, 0444);
+MODULE_PARM_DESC(ldlm_cpts, "CPU partitions ldlm threads should run on");
extern struct kmem_cache *ldlm_resource_slab;
extern struct kmem_cache *ldlm_lock_slab;
@@ -597,45 +597,6 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
rc = ldlm_handle_setinfo(req);
ldlm_callback_reply(req, rc);
return 0;
- case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
- CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
- req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
- return 0;
- rc = llog_origin_handle_cancel(req);
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
- return 0;
- ldlm_callback_reply(req, rc);
- return 0;
- case LLOG_ORIGIN_HANDLE_CREATE:
- req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
- return 0;
- rc = llog_origin_handle_open(req);
- ldlm_callback_reply(req, rc);
- return 0;
- case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
- req_capsule_set(&req->rq_pill,
- &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
- return 0;
- rc = llog_origin_handle_next_block(req);
- ldlm_callback_reply(req, rc);
- return 0;
- case LLOG_ORIGIN_HANDLE_READ_HEADER:
- req_capsule_set(&req->rq_pill,
- &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
- return 0;
- rc = llog_origin_handle_read_header(req);
- ldlm_callback_reply(req, rc);
- return 0;
- case LLOG_ORIGIN_HANDLE_CLOSE:
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
- return 0;
- rc = llog_origin_handle_close(req);
- ldlm_callback_reply(req, rc);
- return 0;
case OBD_QC_CALLBACK:
req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
@@ -1003,6 +964,7 @@ static cfs_hash_ops_t ldlm_export_lock_ops = {
int ldlm_init_export(struct obd_export *exp)
{
+ int rc;
exp->exp_lock_hash =
cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
HASH_EXP_LOCK_CUR_BITS,
@@ -1016,7 +978,14 @@ int ldlm_init_export(struct obd_export *exp)
if (!exp->exp_lock_hash)
return -ENOMEM;
+ rc = ldlm_init_flock_export(exp);
+ if (rc)
+ GOTO(err, rc);
+
return 0;
+err:
+ ldlm_destroy_export(exp);
+ return rc;
}
EXPORT_SYMBOL(ldlm_init_export);
@@ -1043,11 +1012,9 @@ static int ldlm_setup(void)
if (ldlm_state == NULL)
return -ENOMEM;
-#ifdef LPROCFS
rc = ldlm_proc_setup();
if (rc != 0)
GOTO(out, rc);
-#endif
memset(&conf, 0, sizeof(conf));
conf = (typeof(conf)) {
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 0025ee6..6758646 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -638,6 +638,7 @@ int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
}
EXPORT_SYMBOL(ldlm_pool_setup);
+#ifdef LPROCFS
static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
int granted, grant_rate, cancel_rate, grant_step;
@@ -822,6 +823,14 @@ static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
pl->pl_proc_dir = NULL;
}
}
+#else /* !LPROCFS */
+static int ldlm_pool_proc_init(struct ldlm_pool *pl)
+{
+ return 0;
+}
+
+static void ldlm_pool_proc_fini(struct ldlm_pool *pl) {}
+#endif /* LPROCFS */
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
int idx, ldlm_side_t client)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index dcc2784..c0e54ae 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -68,8 +68,8 @@
#include "ldlm_internal.h"
int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
-CFS_MODULE_PARM(ldlm_enqueue_min, "i", int, 0644,
- "lock enqueue timeout minimum");
+module_param(ldlm_enqueue_min, int, 0644);
+MODULE_PARM_DESC(ldlm_enqueue_min, "lock enqueue timeout minimum");
/* in client side, whether the cached locks will be canceled before replay */
unsigned int ldlm_cancel_unused_locks_before_replay = 1;
@@ -97,9 +97,6 @@ int ldlm_expired_completion_wait(void *data)
if (lock->l_conn_export == NULL) {
static cfs_time_t next_dump = 0, last_dump = 0;
- if (ptlrpc_check_suspend())
- return 0;
-
LCONSOLE_WARN("lock timed out (enqueued at "CFS_TIME_T", "
CFS_DURATION_T"s ago)\n",
lock->l_last_activity,
@@ -610,18 +607,12 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
lock->l_req_mode = newmode;
}
- if (memcmp(reply->lock_desc.l_resource.lr_name.name,
- lock->l_resource->lr_name.name,
- sizeof(struct ldlm_res_id))) {
- CDEBUG(D_INFO, "remote intent success, locking "
- "(%ld,%ld,%ld) instead of "
- "(%ld,%ld,%ld)\n",
- (long)reply->lock_desc.l_resource.lr_name.name[0],
- (long)reply->lock_desc.l_resource.lr_name.name[1],
- (long)reply->lock_desc.l_resource.lr_name.name[2],
- (long)lock->l_resource->lr_name.name[0],
- (long)lock->l_resource->lr_name.name[1],
- (long)lock->l_resource->lr_name.name[2]);
+ if (!ldlm_res_eq(&reply->lock_desc.l_resource.lr_name,
+ &lock->l_resource->lr_name)) {
+ CDEBUG(D_INFO, "remote intent success, locking "DLDLMRES
+ " instead of "DLDLMRES"\n",
+ PLDLMRES(&reply->lock_desc.l_resource),
+ PLDLMRES(lock->l_resource));
rc = ldlm_lock_change_resource(ns, lock,
&reply->lock_desc.l_resource.lr_name);
@@ -790,7 +781,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
dlm = req_capsule_client_get(pill, &RMF_DLM_REQ);
LASSERT(dlm);
/* Skip first lock handler in ldlm_request_pack(),
- * this method will incrment @lock_count according
+ * this method will increment @lock_count according
* to the lock handle amount actually written to
* the buffer. */
dlm->lock_count = canceloff;
@@ -910,7 +901,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
lock->l_conn_export = exp;
lock->l_export = NULL;
lock->l_blocking_ast = einfo->ei_cb_bl;
- lock->l_flags |= (*flags & LDLM_FL_NO_LRU);
+ lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL));
/* lock not sent to server yet */
@@ -1333,7 +1324,7 @@ int ldlm_cli_cancel(struct lustre_handle *lockh,
}
rc = ldlm_cli_cancel_local(lock);
- if (rc == LDLM_FL_LOCAL_ONLY) {
+ if (rc == LDLM_FL_LOCAL_ONLY || cancel_flags & LCF_LOCAL) {
LDLM_LOCK_RELEASE(lock);
return 0;
}
@@ -1593,7 +1584,7 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
* the beginning of LRU list);
*
* flags & LDLM_CANCEL_SHRINK - cancel not more than \a count locks according to
- * memory pressre policy function;
+ * memory pressure policy function;
*
* flags & LDLM_CANCEL_AGED - cancel \a count locks according to "aged policy".
*
@@ -1912,7 +1903,8 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
0, flags | LCF_BL_AST, opaque);
rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
if (rc != ELDLM_OK)
- CERROR("ldlm_cli_cancel_unused_resource: %d\n", rc);
+ CERROR("canceling unused lock "DLDLMRES": rc = %d\n",
+ PLDLMRES(res), rc);
LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
@@ -1930,15 +1922,10 @@ static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs, struct cfs_hash_bd *
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
struct ldlm_cli_cancel_arg *lc = arg;
- int rc;
- rc = ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name,
- NULL, LCK_MINMODE,
- lc->lc_flags, lc->lc_opaque);
- if (rc != 0) {
- CERROR("ldlm_cli_cancel_unused ("LPU64"): %d\n",
- res->lr_name.name[0], rc);
- }
+ ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name,
+ NULL, LCK_MINMODE,
+ lc->lc_flags, lc->lc_opaque);
/* must return 0 for hash iteration */
return 0;
}
@@ -2089,7 +2076,7 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
lock, &lock->l_pending_chain.next,&lock->l_pending_chain.prev);
/* bug 9573: don't replay locks left after eviction, or
* bug 17614: locks being actively cancelled. Get a reference
- * on a lock so that it does not disapear under us (e.g. due to cancel)
+ * on a lock so that it does not disappear under us (e.g. due to cancel)
*/
if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
list_add(&lock->l_pending_chain, list);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 77e022b..5f89864 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -162,7 +162,7 @@ static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
struct cfs_hash_bd bd;
int i;
- /* result is not strictly consistant */
+ /* result is not strictly consistent */
cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
res += cfs_hash_bd_count_get(&bd);
return lprocfs_rd_u64(m, &res);
@@ -762,16 +762,9 @@ static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
lock_res(res);
- CERROR("Namespace %s resource refcount nonzero "
- "(%d) after lock cleanup; forcing "
- "cleanup.\n",
- ldlm_ns_name(ldlm_res_to_ns(res)),
- atomic_read(&res->lr_refcount) - 1);
-
- CERROR("Resource: %p ("LPU64"/"LPU64"/"LPU64"/"
- LPU64") (rc: %d)\n", res,
- res->lr_name.name[0], res->lr_name.name[1],
- res->lr_name.name[2], res->lr_name.name[3],
+ CERROR("%s: namespace resource "DLDLMRES
+ " (%p) refcount nonzero (%d) after lock cleanup; forcing cleanup.\n",
+ ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
atomic_read(&res->lr_refcount) - 1);
ldlm_resource_dump(D_ERROR, res);
@@ -881,7 +874,7 @@ void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
/*
* With all requests dropped and the import inactive
- * we are gaurenteed all reference will be dropped.
+ * we are guaranteed all reference will be dropped.
*/
rc = __ldlm_namespace_free(ns, 1);
LASSERT(rc == 0);
@@ -1403,10 +1396,8 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
if (!((libcfs_debug | D_ERROR) & level))
return;
- CDEBUG(level, "--- Resource: %p ("LPU64"/"LPU64"/"LPU64"/"LPU64
- ") (rc: %d)\n", res, res->lr_name.name[0], res->lr_name.name[1],
- res->lr_name.name[2], res->lr_name.name[3],
- atomic_read(&res->lr_refcount));
+ CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
+ PLDLMRES(res), res, atomic_read(&res->lr_refcount));
if (!list_empty(&res->lr_granted)) {
CDEBUG(level, "Granted locks (in reverse order):\n");
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c
index 9b9c451..f30c84f 100644
--- a/drivers/staging/lustre/lustre/libcfs/debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/debug.c
@@ -47,44 +47,44 @@
static char debug_file_name[1024];
unsigned int libcfs_subsystem_debug = ~0;
-CFS_MODULE_PARM(libcfs_subsystem_debug, "i", int, 0644,
- "Lustre kernel debug subsystem mask");
+module_param(libcfs_subsystem_debug, int, 0644);
+MODULE_PARM_DESC(libcfs_subsystem_debug, "Lustre kernel debug subsystem mask");
EXPORT_SYMBOL(libcfs_subsystem_debug);
unsigned int libcfs_debug = (D_CANTMASK |
D_NETERROR | D_HA | D_CONFIG | D_IOCTL);
-CFS_MODULE_PARM(libcfs_debug, "i", int, 0644,
- "Lustre kernel debug mask");
+module_param(libcfs_debug, int, 0644);
+MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask");
EXPORT_SYMBOL(libcfs_debug);
unsigned int libcfs_debug_mb = 0;
-CFS_MODULE_PARM(libcfs_debug_mb, "i", uint, 0644,
- "Total debug buffer size.");
+module_param(libcfs_debug_mb, uint, 0644);
+MODULE_PARM_DESC(libcfs_debug_mb, "Total debug buffer size.");
EXPORT_SYMBOL(libcfs_debug_mb);
unsigned int libcfs_printk = D_CANTMASK;
-CFS_MODULE_PARM(libcfs_printk, "i", uint, 0644,
- "Lustre kernel debug console mask");
+module_param(libcfs_printk, uint, 0644);
+MODULE_PARM_DESC(libcfs_printk, "Lustre kernel debug console mask");
EXPORT_SYMBOL(libcfs_printk);
unsigned int libcfs_console_ratelimit = 1;
-CFS_MODULE_PARM(libcfs_console_ratelimit, "i", uint, 0644,
- "Lustre kernel debug console ratelimit (0 to disable)");
+module_param(libcfs_console_ratelimit, uint, 0644);
+MODULE_PARM_DESC(libcfs_console_ratelimit, "Lustre kernel debug console ratelimit (0 to disable)");
EXPORT_SYMBOL(libcfs_console_ratelimit);
unsigned int libcfs_console_max_delay;
-CFS_MODULE_PARM(libcfs_console_max_delay, "l", uint, 0644,
- "Lustre kernel debug console max delay (jiffies)");
+module_param(libcfs_console_max_delay, uint, 0644);
+MODULE_PARM_DESC(libcfs_console_max_delay, "Lustre kernel debug console max delay (jiffies)");
EXPORT_SYMBOL(libcfs_console_max_delay);
unsigned int libcfs_console_min_delay;
-CFS_MODULE_PARM(libcfs_console_min_delay, "l", uint, 0644,
- "Lustre kernel debug console min delay (jiffies)");
+module_param(libcfs_console_min_delay, uint, 0644);
+MODULE_PARM_DESC(libcfs_console_min_delay, "Lustre kernel debug console min delay (jiffies)");
EXPORT_SYMBOL(libcfs_console_min_delay);
unsigned int libcfs_console_backoff = CDEBUG_DEFAULT_BACKOFF;
-CFS_MODULE_PARM(libcfs_console_backoff, "i", uint, 0644,
- "Lustre kernel debug console backoff factor");
+module_param(libcfs_console_backoff, uint, 0644);
+MODULE_PARM_DESC(libcfs_console_backoff, "Lustre kernel debug console backoff factor");
EXPORT_SYMBOL(libcfs_console_backoff);
unsigned int libcfs_debug_binary = 1;
@@ -103,8 +103,8 @@ unsigned int libcfs_watchdog_ratelimit = 300;
EXPORT_SYMBOL(libcfs_watchdog_ratelimit);
unsigned int libcfs_panic_on_lbug = 1;
-CFS_MODULE_PARM(libcfs_panic_on_lbug, "i", uint, 0644,
- "Lustre kernel panic on LBUG");
+module_param(libcfs_panic_on_lbug, uint, 0644);
+MODULE_PARM_DESC(libcfs_panic_on_lbug, "Lustre kernel panic on LBUG");
EXPORT_SYMBOL(libcfs_panic_on_lbug);
atomic_t libcfs_kmemory = ATOMIC_INIT(0);
@@ -116,9 +116,9 @@ char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;
/* We need to pass a pointer here, but elsewhere this must be a const */
char *libcfs_debug_file_path;
-CFS_MODULE_PARM(libcfs_debug_file_path, "s", charp, 0644,
- "Path for dumping debug logs, "
- "set 'NONE' to prevent log dumping");
+module_param(libcfs_debug_file_path, charp, 0644);
+MODULE_PARM_DESC(libcfs_debug_file_path,
+ "Path for dumping debug logs, set 'NONE' to prevent log dumping");
int libcfs_panic_in_progress;
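This file, like ldlm_lockd.c, ldlm_request.c, hash.c and linux-cpu.c elsewhere in this series, swaps the private CFS_MODULE_PARM() wrapper for the stock module_param()/MODULE_PARM_DESC() pair; permissions and descriptions carry over unchanged, and the separate "i"/"s"/"l" format string disappears because module_param() takes the type name directly. A minimal hedged sketch of the converted form (the parameter name is a placeholder):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* exposed as /sys/module/<module>/parameters/example_limit */
static unsigned int example_limit = 64;
module_param(example_limit, uint, 0644);
MODULE_PARM_DESC(example_limit, "illustrative tunable (placeholder, not from this patch)");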
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c
index e3e0578..6d2b455 100644
--- a/drivers/staging/lustre/lustre/libcfs/hash.c
+++ b/drivers/staging/lustre/lustre/libcfs/hash.c
@@ -51,11 +51,11 @@
* - move all stuff to libcfs
* - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
* - ignore hs_rwlock if without CFS_HASH_REHASH setting
- * - buckets are allocated one by one(intead of contiguous memory),
+ * - buckets are allocated one by one(instead of contiguous memory),
* to avoid unnecessary cacheline conflict
*
* 2010-03-01: Liang Zhen <zhen.liang@sun.com>
- * - "bucket" is a group of hlist_head now, user can speicify bucket size
+ * - "bucket" is a group of hlist_head now, user can specify bucket size
* by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
* one lock for reducing memory overhead.
*
@@ -112,8 +112,8 @@
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
-CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
- "warning when hash depth is high.");
+module_param(warn_on_depth, uint, 0644);
+MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
#endif
struct cfs_wi_sched *cfs_sched_rehash;
@@ -1386,7 +1386,7 @@ cfs_hash_for_each_enter(struct cfs_hash *hs)
/*
* NB: it's race on cfs_has_t::hs_iterating, but doesn't matter
* because it's just an unreliable signal to rehash-thread,
- * rehash-thread will try to finsih rehash ASAP when seeing this.
+ * rehash-thread will try to finish rehash ASAP when seeing this.
*/
hs->hs_iterating = 1;
@@ -1394,7 +1394,7 @@ cfs_hash_for_each_enter(struct cfs_hash *hs)
hs->hs_iterators++;
/* NB: iteration is mostly called by service thread,
- * we tend to cancel pending rehash-requst, instead of
+ * we tend to cancel pending rehash-request, instead of
* blocking service thread, we will relaunch rehash request
* after iteration */
if (cfs_hash_is_rehashing(hs))
diff --git a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
index 74a0db5..7b2c315 100644
--- a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
+++ b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
@@ -193,7 +193,7 @@ EXPORT_SYMBOL(libcfs_kkuc_msg_put);
/* Broadcast groups are global across all mounted filesystems;
* i.e. registering for a group on 1 fs will get messages for that
* group from any fs */
-/** A single group reigstration has a uid and a file pointer */
+/** A single group registration has a uid and a file pointer */
struct kkuc_reg {
struct list_head kr_chain;
int kr_uid;
@@ -206,7 +206,7 @@ static DECLARE_RWSEM(kg_sem);
/** Add a receiver to a broadcast group
* @param filp pipe to write into
- * @param uid identidier for this receiver
+ * @param uid identifier for this receiver
* @param group group number
*/
int libcfs_kkuc_group_add(struct file *filp, int uid, int group, __u32 data)
@@ -330,9 +330,8 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
down_read(&kg_sem);
list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
- if (reg->kr_fp != NULL) {
+ if (reg->kr_fp != NULL)
rc = cb_func(reg->kr_data, cb_arg);
- }
}
up_read(&kg_sem);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
index 00ab8fd..58bb256 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
@@ -47,7 +47,8 @@
* >1 : specify number of partitions
*/
static int cpu_npartitions;
-CFS_MODULE_PARM(cpu_npartitions, "i", int, 0444, "# of CPU partitions");
+module_param(cpu_npartitions, int, 0444);
+MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
/**
* modparam for setting CPU partitions patterns:
@@ -61,7 +62,8 @@ CFS_MODULE_PARM(cpu_npartitions, "i", int, 0444, "# of CPU partitions");
* NB: If user specified cpu_pattern, cpu_npartitions will be ignored
*/
static char *cpu_pattern = "";
-CFS_MODULE_PARM(cpu_pattern, "s", charp, 0444, "CPU partitions pattern");
+module_param(cpu_pattern, charp, 0444);
+MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern");
struct cfs_cpt_data {
/* serialize hotplug etc */
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
index 0bf8e5d..a2ef64c 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
@@ -140,18 +140,6 @@ int cfs_capable(cfs_cap_t cap)
return capable(cfs_cap_unpack(cap));
}
-/* Check if task is running in 32-bit API mode, for the purpose of
- * userspace binary interfaces. On 32-bit Linux this is (unfortunately)
- * always true, even if the application is using LARGEFILE64 and 64-bit
- * APIs, because Linux provides no way for the filesystem to know if it
- * is called via 32-bit or 64-bit APIs. Other clients may vary. On
- * 64-bit systems, this will only be true if the binary is calling a
- * 32-bit system call. */
-int current_is_32bit(void)
-{
- return is_compat_task();
-}
-
static int cfs_access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, int write)
{
@@ -311,7 +299,6 @@ EXPORT_SYMBOL(cfs_cap_raised);
EXPORT_SYMBOL(cfs_curproc_cap_pack);
EXPORT_SYMBOL(cfs_curproc_cap_unpack);
EXPORT_SYMBOL(cfs_capable);
-EXPORT_SYMBOL(current_is_32bit);
/*
* Local variables:
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
index cc9829f..c7bc7fc 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
@@ -46,13 +46,10 @@
#include <asm/kgdb.h>
#endif
-#define LINUX_WAITQ(w) ((wait_queue_t *) w)
-#define LINUX_WAITQ_HEAD(w) ((wait_queue_head_t *) w)
-
void
init_waitqueue_entry_current(wait_queue_t *link)
{
- init_waitqueue_entry(LINUX_WAITQ(link), current);
+ init_waitqueue_entry(link, current);
}
EXPORT_SYMBOL(init_waitqueue_entry_current);
@@ -74,9 +71,9 @@ add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link)
{
unsigned long flags;
- spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
- __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
- spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
+ spin_lock_irqsave(&waitq->lock, flags);
+ __add_wait_queue_exclusive(waitq, link);
+ spin_unlock_irqrestore(&waitq->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive_head);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
index fc6c977..e947b91 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
@@ -65,9 +65,7 @@
#include <asm/div64.h>
#include "tracefile.h"
-#ifdef CONFIG_SYSCTL
static ctl_table_header_t *lnet_table_header = NULL;
-#endif
extern char lnet_upcall[1024];
/**
* The path of debug log dump upcall script.
@@ -371,7 +369,6 @@ static ctl_table_t lnet_table[] = {
* to go via /proc for portability.
*/
{
- INIT_CTL_NAME(PSDEV_DEBUG)
.procname = "debug",
.data = &libcfs_debug,
.maxlen = sizeof(int),
@@ -379,7 +376,6 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_dobitmasks,
},
{
- INIT_CTL_NAME(PSDEV_SUBSYSTEM_DEBUG)
.procname = "subsystem_debug",
.data = &libcfs_subsystem_debug,
.maxlen = sizeof(int),
@@ -387,7 +383,6 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_dobitmasks,
},
{
- INIT_CTL_NAME(PSDEV_PRINTK)
.procname = "printk",
.data = &libcfs_printk,
.maxlen = sizeof(int),
@@ -395,7 +390,6 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_dobitmasks,
},
{
- INIT_CTL_NAME(PSDEV_CONSOLE_RATELIMIT)
.procname = "console_ratelimit",
.data = &libcfs_console_ratelimit,
.maxlen = sizeof(int),
@@ -403,21 +397,18 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_dointvec
},
{
- INIT_CTL_NAME(PSDEV_CONSOLE_MAX_DELAY_CS)
.procname = "console_max_delay_centisecs",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_console_max_delay_cs
},
{
- INIT_CTL_NAME(PSDEV_CONSOLE_MIN_DELAY_CS)
.procname = "console_min_delay_centisecs",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_console_min_delay_cs
},
{
- INIT_CTL_NAME(PSDEV_CONSOLE_BACKOFF)
.procname = "console_backoff",
.maxlen = sizeof(int),
.mode = 0644,
@@ -425,7 +416,6 @@ static ctl_table_t lnet_table[] = {
},
{
- INIT_CTL_NAME(PSDEV_DEBUG_PATH)
.procname = "debug_path",
.data = libcfs_debug_file_path_arr,
.maxlen = sizeof(libcfs_debug_file_path_arr),
@@ -434,7 +424,6 @@ static ctl_table_t lnet_table[] = {
},
{
- INIT_CTL_NAME(PSDEV_CPT_TABLE)
.procname = "cpu_partition_table",
.maxlen = 128,
.mode = 0444,
@@ -442,7 +431,6 @@ static ctl_table_t lnet_table[] = {
},
{
- INIT_CTL_NAME(PSDEV_LNET_UPCALL)
.procname = "upcall",
.data = lnet_upcall,
.maxlen = sizeof(lnet_upcall),
@@ -450,7 +438,6 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_dostring,
},
{
- INIT_CTL_NAME(PSDEV_LNET_DEBUG_LOG_UPCALL)
.procname = "debug_log_upcall",
.data = lnet_debug_log_upcall,
.maxlen = sizeof(lnet_debug_log_upcall),
@@ -458,54 +445,44 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_dostring,
},
{
- INIT_CTL_NAME(PSDEV_LNET_MEMUSED)
.procname = "lnet_memused",
.data = (int *)&libcfs_kmemory.counter,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = &proc_dointvec,
- INIT_STRATEGY(&sysctl_intvec)
},
{
- INIT_CTL_NAME(PSDEV_LNET_CATASTROPHE)
.procname = "catastrophe",
.data = &libcfs_catastrophe,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = &proc_dointvec,
- INIT_STRATEGY(&sysctl_intvec)
},
{
- INIT_CTL_NAME(PSDEV_LNET_PANIC_ON_LBUG)
.procname = "panic_on_lbug",
.data = &libcfs_panic_on_lbug,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
- INIT_STRATEGY(&sysctl_intvec)
},
{
- INIT_CTL_NAME(PSDEV_LNET_DUMP_KERNEL)
.procname = "dump_kernel",
.maxlen = 256,
.mode = 0200,
.proc_handler = &proc_dump_kernel,
},
{
- INIT_CTL_NAME(PSDEV_LNET_DAEMON_FILE)
.procname = "daemon_file",
.mode = 0644,
.maxlen = 256,
.proc_handler = &proc_daemon_file,
},
{
- INIT_CTL_NAME(PSDEV_LNET_DEBUG_MB)
.procname = "debug_mb",
.mode = 0644,
.proc_handler = &proc_debug_mb,
},
{
- INIT_CTL_NAME(PSDEV_LNET_WATCHDOG_RATELIMIT)
.procname = "watchdog_ratelimit",
.data = &libcfs_watchdog_ratelimit,
.maxlen = sizeof(int),
@@ -514,7 +491,7 @@ static ctl_table_t lnet_table[] = {
.extra1 = &min_watchdog_ratelimit,
.extra2 = &max_watchdog_ratelimit,
},
- { INIT_CTL_NAME(PSDEV_LNET_FORCE_LBUG)
+ {
.procname = "force_lbug",
.data = NULL,
.maxlen = 0,
@@ -522,7 +499,6 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &libcfs_force_lbug
},
{
- INIT_CTL_NAME(PSDEV_LNET_FAIL_LOC)
.procname = "fail_loc",
.data = &cfs_fail_loc,
.maxlen = sizeof(cfs_fail_loc),
@@ -530,7 +506,6 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_fail_loc
},
{
- INIT_CTL_NAME(PSDEV_LNET_FAIL_VAL)
.procname = "fail_val",
.data = &cfs_fail_val,
.maxlen = sizeof(int),
@@ -538,14 +513,11 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_dointvec
},
{
- INIT_CTL_NAME(0)
}
};
-#ifdef CONFIG_SYSCTL
static ctl_table_t top_table[] = {
{
- INIT_CTL_NAME(CTL_LNET)
.procname = "lnet",
.mode = 0555,
.data = NULL,
@@ -553,26 +525,20 @@ static ctl_table_t top_table[] = {
.child = lnet_table,
},
{
- INIT_CTL_NAME(0)
}
};
-#endif
int insert_proc(void)
{
-#ifdef CONFIG_SYSCTL
if (lnet_table_header == NULL)
lnet_table_header = register_sysctl_table(top_table);
-#endif
return 0;
}
void remove_proc(void)
{
-#ifdef CONFIG_SYSCTL
if (lnet_table_header != NULL)
unregister_sysctl_table(lnet_table_header);
lnet_table_header = NULL;
-#endif
}
diff --git a/drivers/staging/lustre/lustre/libcfs/lwt.c b/drivers/staging/lustre/lustre/libcfs/lwt.c
deleted file mode 100644
index b631f7d..0000000
--- a/drivers/staging/lustre/lustre/libcfs/lwt.c
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/lwt.c
- *
- * Author: Eric Barton <eeb@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/libcfs/libcfs.h>
-
-#if LWT_SUPPORT
-
-#if !KLWT_SUPPORT
-int lwt_enabled;
-lwt_cpu_t lwt_cpus[NR_CPUS];
-#endif
-
-int lwt_pages_per_cpu;
-
-/* NB only root is allowed to retrieve LWT info; it's an open door into the
- * kernel... */
-
-int
-lwt_lookup_string (int *size, char *knl_ptr,
- char *user_ptr, int user_size)
-{
- int maxsize = 128;
-
- /* knl_ptr was retrieved from an LWT snapshot and the caller wants to
- * turn it into a string. NB we can crash with an access violation
- * trying to determine the string length, so we're trusting our
- * caller... */
-
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- return (-EPERM);
-
- if (user_size > 0 &&
- maxsize > user_size)
- maxsize = user_size;
-
- *size = strnlen (knl_ptr, maxsize - 1) + 1;
-
- if (user_ptr != NULL) {
- if (user_size < 4)
- return (-EINVAL);
-
- if (copy_to_user (user_ptr, knl_ptr, *size))
- return (-EFAULT);
-
- /* Did I truncate the string? */
- if (knl_ptr[*size - 1] != 0)
- copy_to_user (user_ptr + *size - 4, "...", 4);
- }
-
- return (0);
-}
-
-int
-lwt_control (int enable, int clear)
-{
- lwt_page_t *p;
- int i;
- int j;
-
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- return (-EPERM);
-
- if (!enable) {
- LWT_EVENT(0,0,0,0);
- lwt_enabled = 0;
- mb();
- /* give people some time to stop adding traces */
- schedule_timeout(10);
- }
-
- for (i = 0; i < num_online_cpus(); i++) {
- p = lwt_cpus[i].lwtc_current_page;
-
- if (p == NULL)
- return (-ENODATA);
-
- if (!clear)
- continue;
-
- for (j = 0; j < lwt_pages_per_cpu; j++) {
- memset (p->lwtp_events, 0, PAGE_CACHE_SIZE);
-
- p = list_entry (p->lwtp_list.next,
- lwt_page_t, lwtp_list);
- }
- }
-
- if (enable) {
- lwt_enabled = 1;
- mb();
- LWT_EVENT(0,0,0,0);
- }
-
- return (0);
-}
-
-int
-lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
- void *user_ptr, int user_size)
-{
- const int events_per_page = PAGE_CACHE_SIZE / sizeof(lwt_event_t);
- const int bytes_per_page = events_per_page * sizeof(lwt_event_t);
- lwt_page_t *p;
- int i;
- int j;
-
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- return (-EPERM);
-
- *ncpu = num_online_cpus();
- *total_size = num_online_cpus() * lwt_pages_per_cpu *
- bytes_per_page;
- *now = get_cycles();
-
- if (user_ptr == NULL)
- return (0);
-
- for (i = 0; i < num_online_cpus(); i++) {
- p = lwt_cpus[i].lwtc_current_page;
-
- if (p == NULL)
- return (-ENODATA);
-
- for (j = 0; j < lwt_pages_per_cpu; j++) {
- if (copy_to_user(user_ptr, p->lwtp_events,
- bytes_per_page))
- return (-EFAULT);
-
- user_ptr = ((char *)user_ptr) + bytes_per_page;
- p = list_entry(p->lwtp_list.next,
- lwt_page_t, lwtp_list);
- }
- }
-
- return (0);
-}
-
-int
-lwt_init ()
-{
- int i;
- int j;
-
- for (i = 0; i < num_online_cpus(); i++)
- if (lwt_cpus[i].lwtc_current_page != NULL)
- return (-EALREADY);
-
- LASSERT (!lwt_enabled);
-
- /* NULL pointers, zero scalars */
- memset (lwt_cpus, 0, sizeof (lwt_cpus));
- lwt_pages_per_cpu =
- LWT_MEMORY / (num_online_cpus() * PAGE_CACHE_SIZE);
-
- for (i = 0; i < num_online_cpus(); i++)
- for (j = 0; j < lwt_pages_per_cpu; j++) {
- struct page *page = alloc_page (GFP_KERNEL);
- lwt_page_t *lwtp;
-
- if (page == NULL) {
- CERROR ("Can't allocate page\n");
- lwt_fini ();
- return (-ENOMEM);
- }
-
- LIBCFS_ALLOC(lwtp, sizeof (*lwtp));
- if (lwtp == NULL) {
- CERROR ("Can't allocate lwtp\n");
- __free_page(page);
- lwt_fini ();
- return (-ENOMEM);
- }
-
- lwtp->lwtp_page = page;
- lwtp->lwtp_events = page_address(page);
- memset (lwtp->lwtp_events, 0, PAGE_CACHE_SIZE);
-
- if (j == 0) {
- INIT_LIST_HEAD (&lwtp->lwtp_list);
- lwt_cpus[i].lwtc_current_page = lwtp;
- } else {
- list_add (&lwtp->lwtp_list,
- &lwt_cpus[i].lwtc_current_page->lwtp_list);
- }
- }
-
- lwt_enabled = 1;
- mb();
-
- LWT_EVENT(0,0,0,0);
-
- return (0);
-}
-
-void
-lwt_fini ()
-{
- int i;
-
- lwt_control(0, 0);
-
- for (i = 0; i < num_online_cpus(); i++)
- while (lwt_cpus[i].lwtc_current_page != NULL) {
- lwt_page_t *lwtp = lwt_cpus[i].lwtc_current_page;
-
- if (list_empty (&lwtp->lwtp_list)) {
- lwt_cpus[i].lwtc_current_page = NULL;
- } else {
- lwt_cpus[i].lwtc_current_page =
- list_entry (lwtp->lwtp_list.next,
- lwt_page_t, lwtp_list);
-
- list_del (&lwtp->lwtp_list);
- }
-
- __free_page (lwtp->lwtp_page);
- LIBCFS_FREE (lwtp, sizeof (*lwtp));
- }
-}
-
-EXPORT_SYMBOL(lwt_enabled);
-EXPORT_SYMBOL(lwt_cpus);
-
-EXPORT_SYMBOL(lwt_init);
-EXPORT_SYMBOL(lwt_fini);
-EXPORT_SYMBOL(lwt_lookup_string);
-EXPORT_SYMBOL(lwt_control);
-EXPORT_SYMBOL(lwt_snapshot);
-#endif
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
index f3108c7..24ae26d 100644
--- a/drivers/staging/lustre/lustre/libcfs/module.c
+++ b/drivers/staging/lustre/lustre/libcfs/module.c
@@ -235,41 +235,6 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
return -EINVAL;
libcfs_debug_mark_buffer(data->ioc_inlbuf1);
return 0;
-#if LWT_SUPPORT
- case IOC_LIBCFS_LWT_CONTROL:
- err = lwt_control ((data->ioc_flags & 1) != 0,
- (data->ioc_flags & 2) != 0);
- break;
-
- case IOC_LIBCFS_LWT_SNAPSHOT: {
- cfs_cycles_t now;
- int ncpu;
- int total_size;
-
- err = lwt_snapshot (&now, &ncpu, &total_size,
- data->ioc_pbuf1, data->ioc_plen1);
- data->ioc_u64[0] = now;
- data->ioc_u32[0] = ncpu;
- data->ioc_u32[1] = total_size;
-
- /* Hedge against broken user/kernel typedefs (e.g. cycles_t) */
- data->ioc_u32[2] = sizeof(lwt_event_t);
- data->ioc_u32[3] = offsetof(lwt_event_t, lwte_where);
-
- if (err == 0 &&
- libcfs_ioctl_popdata(arg, data, sizeof (*data)))
- err = -EFAULT;
- break;
- }
-
- case IOC_LIBCFS_LWT_LOOKUP_STRING:
- err = lwt_lookup_string (&data->ioc_count, data->ioc_pbuf1,
- data->ioc_pbuf2, data->ioc_plen2);
- if (err == 0 &&
- libcfs_ioctl_popdata(arg, data, sizeof (*data)))
- err = -EFAULT;
- break;
-#endif
case IOC_LIBCFS_MEMHOG:
if (pfile->private_data == NULL) {
err = -EINVAL;
@@ -392,17 +357,10 @@ static int init_libcfs_module(void)
if (rc != 0)
goto cleanup_debug;
-#if LWT_SUPPORT
- rc = lwt_init();
- if (rc != 0) {
- CERROR("lwt_init: error %d\n", rc);
- goto cleanup_debug;
- }
-#endif
rc = misc_register(&libcfs_dev);
if (rc) {
CERROR("misc_register: error %d\n", rc);
- goto cleanup_lwt;
+ goto cleanup_cpu;
}
rc = cfs_wi_startup();
@@ -422,7 +380,7 @@ static int init_libcfs_module(void)
rc = cfs_crypto_register();
if (rc) {
- CERROR("cfs_crypto_regster: error %d\n", rc);
+ CERROR("cfs_crypto_register: error %d\n", rc);
goto cleanup_wi;
}
@@ -441,10 +399,8 @@ static int init_libcfs_module(void)
cfs_wi_shutdown();
cleanup_deregister:
misc_deregister(&libcfs_dev);
- cleanup_lwt:
-#if LWT_SUPPORT
- lwt_fini();
-#endif
+cleanup_cpu:
+ cfs_cpu_fini();
cleanup_debug:
libcfs_debug_cleanup();
return rc;
@@ -471,9 +427,6 @@ static void exit_libcfs_module(void)
if (rc)
CERROR("misc_deregister error %d\n", rc);
-#if LWT_SUPPORT
- lwt_fini();
-#endif
cfs_cpu_fini();
if (atomic_read(&libcfs_kmemory) != 0)
diff --git a/drivers/staging/lustre/lustre/libcfs/nidstrings.c b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
index 99c9e9d..732ae55 100644
--- a/drivers/staging/lustre/lustre/libcfs/nidstrings.c
+++ b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
@@ -56,11 +56,11 @@
*/
static char libcfs_nidstrings[LNET_NIDSTR_COUNT][LNET_NIDSTR_SIZE];
-static int libcfs_nidstring_idx = 0;
+static int libcfs_nidstring_idx;
static spinlock_t libcfs_nidstring_lock;
-void libcfs_init_nidstrings (void)
+void libcfs_init_nidstrings(void)
{
spin_lock_init(&libcfs_nidstring_lock);
}
@@ -69,7 +69,7 @@ void libcfs_init_nidstrings (void)
# define NIDSTR_UNLOCK(f) spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
static char *
-libcfs_next_nidstring (void)
+libcfs_next_nidstring(void)
{
char *str;
unsigned long flags;
@@ -326,6 +326,7 @@ libcfs_isknown_lnd(int type)
{
return libcfs_lnd2netstrfns(type) != NULL;
}
+EXPORT_SYMBOL(libcfs_isknown_lnd);
char *
libcfs_lnd2modname(int lnd)
@@ -334,6 +335,7 @@ libcfs_lnd2modname(int lnd)
return (nf == NULL) ? NULL : nf->nf_modname;
}
+EXPORT_SYMBOL(libcfs_lnd2modname);
char *
libcfs_lnd2str(int lnd)
@@ -348,6 +350,7 @@ libcfs_lnd2str(int lnd)
snprintf(str, LNET_NIDSTR_SIZE, "?%u?", lnd);
return str;
}
+EXPORT_SYMBOL(libcfs_lnd2str);
int
libcfs_str2lnd(const char *str)
@@ -359,6 +362,7 @@ libcfs_str2lnd(const char *str)
return -1;
}
+EXPORT_SYMBOL(libcfs_str2lnd);
char *
libcfs_net2str(__u32 net)
@@ -377,6 +381,7 @@ libcfs_net2str(__u32 net)
return str;
}
+EXPORT_SYMBOL(libcfs_net2str);
char *
libcfs_nid2str(lnet_nid_t nid)
@@ -410,6 +415,7 @@ libcfs_nid2str(lnet_nid_t nid)
return str;
}
+EXPORT_SYMBOL(libcfs_nid2str);
static struct netstrfns *
libcfs_str2net_internal(const char *str, __u32 *net)
@@ -458,6 +464,7 @@ libcfs_str2net(const char *str)
return LNET_NIDNET(LNET_NID_ANY);
}
+EXPORT_SYMBOL(libcfs_str2net);
lnet_nid_t
libcfs_str2nid(const char *str)
@@ -475,7 +482,7 @@ libcfs_str2nid(const char *str)
sep = str + strlen(str);
net = LNET_MKNET(SOCKLND, 0);
nf = libcfs_lnd2netstrfns(SOCKLND);
- LASSERT (nf != NULL);
+ LASSERT(nf != NULL);
}
if (!nf->nf_str2addr(str, (int)(sep - str), &addr))
@@ -483,6 +490,7 @@ libcfs_str2nid(const char *str)
return LNET_MKNID(net, addr);
}
+EXPORT_SYMBOL(libcfs_str2nid);
char *
libcfs_id2str(lnet_process_id_t id)
@@ -500,6 +508,7 @@ libcfs_id2str(lnet_process_id_t id)
(id.pid & ~LNET_PID_USERFLAG), libcfs_nid2str(id.nid));
return str;
}
+EXPORT_SYMBOL(libcfs_id2str);
int
libcfs_str2anynid(lnet_nid_t *nidp, const char *str)
@@ -512,6 +521,7 @@ libcfs_str2anynid(lnet_nid_t *nidp, const char *str)
*nidp = libcfs_str2nid(str);
return *nidp != LNET_NID_ANY;
}
+EXPORT_SYMBOL(libcfs_str2anynid);
/**
* Nid range list syntax.
@@ -765,6 +775,7 @@ cfs_free_nidlist(struct list_head *list)
LIBCFS_FREE(nr, sizeof(struct nidrange));
}
}
+EXPORT_SYMBOL(cfs_free_nidlist);
/**
* Parses nid range list.
@@ -803,6 +814,7 @@ cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
}
return 1;
}
+EXPORT_SYMBOL(cfs_parse_nidlist);
/*
* Nf_match_addr method for networks using numeric addresses
@@ -848,18 +860,4 @@ int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist)
}
return 0;
}
-
-
-EXPORT_SYMBOL(libcfs_isknown_lnd);
-EXPORT_SYMBOL(libcfs_lnd2modname);
-EXPORT_SYMBOL(libcfs_lnd2str);
-EXPORT_SYMBOL(libcfs_str2lnd);
-EXPORT_SYMBOL(libcfs_net2str);
-EXPORT_SYMBOL(libcfs_nid2str);
-EXPORT_SYMBOL(libcfs_str2net);
-EXPORT_SYMBOL(libcfs_str2nid);
-EXPORT_SYMBOL(libcfs_id2str);
-EXPORT_SYMBOL(libcfs_str2anynid);
-EXPORT_SYMBOL(cfs_free_nidlist);
-EXPORT_SYMBOL(cfs_parse_nidlist);
EXPORT_SYMBOL(cfs_match_nid);
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
index f71a3cc..54290ce 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -678,6 +678,7 @@ int cfs_tracefile_dump_all_pages(char *filename)
struct file *filp;
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
+ char *buf;
int rc;
DECL_MMSPACE;
@@ -708,8 +709,11 @@ int cfs_tracefile_dump_all_pages(char *filename)
__LASSERT_TAGE_INVARIANT(tage);
- rc = filp_write(filp, page_address(tage->page),
- tage->used, filp_poff(filp));
+ buf = kmap(tage->page);
+ rc = vfs_write(filp, (__force const char __user *)buf,
+ tage->used, &filp->f_pos);
+ kunmap(tage->page);
+
if (rc != (int)tage->used) {
printk(KERN_WARNING "wanted to write %u but wrote "
"%d\n", tage->used, rc);
@@ -721,7 +725,7 @@ int cfs_tracefile_dump_all_pages(char *filename)
cfs_tage_free(tage);
}
MMSPACE_CLOSE;
- rc = filp_fsync(filp);
+ rc = vfs_fsync(filp, 1);
if (rc)
printk(KERN_ERR "sync returns %d\n", rc);
close:
@@ -971,6 +975,7 @@ static int tracefiled(void *arg)
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
struct file *filp;
+ char *buf;
int last_loop = 0;
int rc;
@@ -1020,11 +1025,14 @@ static int tracefiled(void *arg)
if (f_pos >= (off_t)cfs_tracefile_size)
f_pos = 0;
- else if (f_pos > (off_t)filp_size(filp))
- f_pos = filp_size(filp);
+ else if (f_pos > i_size_read(filp->f_dentry->d_inode))
+ f_pos = i_size_read(filp->f_dentry->d_inode);
+
+ buf = kmap(tage->page);
+ rc = vfs_write(filp, (__force const char __user *)buf,
+ tage->used, &f_pos);
+ kunmap(tage->page);
- rc = filp_write(filp, page_address(tage->page),
- tage->used, &f_pos);
if (rc != (int)tage->used) {
printk(KERN_WARNING "wanted to write %u "
"but wrote %d\n", tage->used, rc);
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index f493e07..c76f3cf 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -1,12 +1,13 @@
obj-$(CONFIG_LUSTRE_FS) += lustre.o
obj-$(CONFIG_LUSTRE_LLITE_LLOOP) += llite_lloop.o
lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
- rw.o lproc_llite.o namei.o symlink.o llite_mmap.o \
- xattr.o remote_perm.o llite_rmtacl.o llite_capa.o \
+ rw.o namei.o symlink.o llite_mmap.o \
+ xattr.o xattr_cache.o remote_perm.o llite_rmtacl.o llite_capa.o \
rw26.o super25.o statahead.o \
../lclient/glimpse.o ../lclient/lcommon_cl.o ../lclient/lcommon_misc.o \
vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o
+lustre-$(CONFIG_PROC_FS) += lproc_llite.o
llite_lloop-y := lloop.o
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index e7629be..cbd663e 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -404,7 +404,6 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
struct inode *inode = de->d_inode;
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_client_handle **och_p;
- __u64 *och_usecount;
__u64 ibits;
/*
@@ -418,37 +417,32 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
*/
- if (it->it_flags & FMODE_WRITE) {
+ if (it->it_flags & FMODE_WRITE)
och_p = &lli->lli_mds_write_och;
- och_usecount = &lli->lli_open_fd_write_count;
- } else if (it->it_flags & FMODE_EXEC) {
+ else if (it->it_flags & FMODE_EXEC)
och_p = &lli->lli_mds_exec_och;
- och_usecount = &lli->lli_open_fd_exec_count;
- } else {
+ else
och_p = &lli->lli_mds_read_och;
- och_usecount = &lli->lli_open_fd_read_count;
- }
+
/* Check for the proper lock. */
ibits = MDS_INODELOCK_LOOKUP;
if (!ll_have_md_lock(inode, &ibits, LCK_MINMODE))
goto do_lock;
mutex_lock(&lli->lli_och_mutex);
if (*och_p) { /* Everything is open already, do nothing */
- /*(*och_usecount)++; Do not let them steal our open
- handle from under us */
- SET_BUT_UNUSED(och_usecount);
- /* XXX The code above was my original idea, but in case
- we have the handle, but we cannot use it due to later
- checks (e.g. O_CREAT|O_EXCL flags set), nobody
- would decrement counter increased here. So we just
- hope the lock won't be invalidated in between. But
- if it would be, we'll reopen the open request to
- MDS later during file open path */
+ /* The original idea was to keep them from stealing our
+ * open handle from under us by doing (*och_usecount)++
+ * here. But if we have the handle yet cannot use it due
+ * to later checks (e.g. O_CREAT|O_EXCL flags set),
+ * nobody would decrement the counter increased here. So
+ * we just hope the lock won't be invalidated in between.
+ * But if it is, we'll reopen the open request to the
+ * MDS later during the file open path.
+ */
mutex_unlock(&lli->lli_och_mutex);
return 1;
- } else {
- mutex_unlock(&lli->lli_och_mutex);
}
+ mutex_unlock(&lli->lli_och_mutex);
}
if (it->it_op == IT_GETATTR) {
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 1f07903..22d0acc9 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -1809,8 +1809,28 @@ out_rmdir:
return -EFAULT;
}
- rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize,
- hur, NULL);
+ if (hur->hur_request.hr_action == HUA_RELEASE) {
+ const struct lu_fid *fid;
+ struct inode *f;
+ int i;
+
+ for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
+ fid = &hur->hur_user_item[i].hui_fid;
+ f = search_inode_for_lustre(inode->i_sb, fid);
+ if (IS_ERR(f)) {
+ rc = PTR_ERR(f);
+ break;
+ }
+
+ rc = ll_hsm_release(f);
+ iput(f);
+ if (rc != 0)
+ break;
+ }
+ } else {
+ rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize,
+ hur, NULL);
+ }
OBD_FREE_LARGE(hur, totalsize);
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index fb85a58..c12821a 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -115,7 +115,8 @@ out:
static int ll_close_inode_openhandle(struct obd_export *md_exp,
struct inode *inode,
- struct obd_client_handle *och)
+ struct obd_client_handle *och,
+ const __u64 *data_version)
{
struct obd_export *exp = ll_i2mdexp(inode);
struct md_op_data *op_data;
@@ -139,6 +140,13 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
GOTO(out, rc = -ENOMEM); // XXX We leak openhandle and request here.
ll_prepare_close(inode, op_data, och);
+ if (data_version != NULL) {
+ /* Passing in data_version implies a release. */
+ op_data->op_bias |= MDS_HSM_RELEASE;
+ op_data->op_data_version = *data_version;
+ op_data->op_lease_handle = och->och_lease_handle;
+ op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
+ }
epoch_close = (op_data->op_flags & MF_EPOCH_CLOSE);
rc = md_close(md_exp, op_data, och->och_mod, &req);
if (rc == -EAGAIN) {
@@ -167,14 +175,20 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
spin_unlock(&lli->lli_lock);
}
- ll_finish_md_op_data(op_data);
-
if (rc == 0) {
rc = ll_objects_destroy(req, inode);
if (rc)
CERROR("inode %lu ll_objects destroy: rc = %d\n",
inode->i_ino, rc);
}
+ if (rc == 0 && op_data->op_bias & MDS_HSM_RELEASE) {
+ struct mdt_body *body;
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (!(body->valid & OBD_MD_FLRELEASED))
+ rc = -EBUSY;
+ }
+
+ ll_finish_md_op_data(op_data);
out:
if (exp_connect_som(exp) && !epoch_close &&
@@ -224,7 +238,7 @@ int ll_md_real_close(struct inode *inode, int flags)
if (och) { /* There might be a race and somebody have freed this och
already */
rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- inode, och);
+ inode, och, NULL);
}
return rc;
@@ -241,6 +255,24 @@ int ll_md_close(struct obd_export *md_exp, struct inode *inode,
if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid);
+ if (fd->fd_lease_och != NULL) {
+ bool lease_broken;
+
+ /* Usually the lease is not released when the
+ * application crashes, so we need to release it here. */
+ rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken);
+ CDEBUG(rc ? D_ERROR : D_INODE, "Clean up lease "DFID" %d/%d\n",
+ PFID(&lli->lli_fid), rc, lease_broken);
+
+ fd->fd_lease_och = NULL;
+ }
+
+ if (fd->fd_och != NULL) {
+ rc = ll_close_inode_openhandle(md_exp, inode, fd->fd_och, NULL);
+ fd->fd_och = NULL;
+ GOTO(out, rc);
+ }
+
/* Let's see if we have good enough OPEN lock on the file and if
we can skip talking to MDS */
if (file->f_dentry->d_inode) { /* Can this ever be false? */
@@ -277,6 +309,7 @@ int ll_md_close(struct obd_export *md_exp, struct inode *inode,
file, file->f_dentry, file->f_dentry->d_name.name);
}
+out:
LUSTRE_FPRIVATE(file) = NULL;
ll_file_data_put(fd);
ll_capa_close(inode);
@@ -431,22 +464,18 @@ void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch)
}
}
-static int ll_och_fill(struct obd_export *md_exp, struct ll_inode_info *lli,
- struct lookup_intent *it, struct obd_client_handle *och)
+static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
+ struct obd_client_handle *och)
{
struct ptlrpc_request *req = it->d.lustre.it_data;
struct mdt_body *body;
- LASSERT(och);
-
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL); /* reply already checked out */
-
- memcpy(&och->och_fh, &body->handle, sizeof(body->handle));
+ och->och_fh = body->handle;
+ och->och_fid = body->fid1;
+ och->och_lease_handle.cookie = it->d.lustre.it_lock_handle;
och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
- och->och_fid = lli->lli_fid;
och->och_flags = it->it_flags;
- ll_ioepoch_open(lli, body->ioepoch);
return md_set_open_replay_data(md_exp, och, req);
}
@@ -466,20 +495,17 @@ int ll_local_open(struct file *file, struct lookup_intent *it,
struct mdt_body *body;
int rc;
- rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, lli, it, och);
- if (rc)
+ rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
+ if (rc != 0)
return rc;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if ((it->it_flags & FMODE_WRITE) &&
- (body->valid & OBD_MD_FLSIZE))
- CDEBUG(D_INODE, "Epoch "LPU64" opened on "DFID"\n",
- lli->lli_ioepoch, PFID(&lli->lli_fid));
+ ll_ioepoch_open(lli, body->ioepoch);
}
LUSTRE_FPRIVATE(file) = fd;
ll_readahead_init(inode, &fd->fd_ras);
- fd->fd_omode = it->it_flags;
+ fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
return 0;
}
@@ -681,6 +707,198 @@ out_openerr:
return rc;
}
+static int ll_md_blocking_lease_ast(struct ldlm_lock *lock,
+ struct ldlm_lock_desc *desc, void *data, int flag)
+{
+ int rc;
+ struct lustre_handle lockh;
+
+ switch (flag) {
+ case LDLM_CB_BLOCKING:
+ ldlm_lock2handle(lock, &lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
+ if (rc < 0) {
+ CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
+ return rc;
+ }
+ break;
+ case LDLM_CB_CANCELING:
+ /* do nothing */
+ break;
+ }
+ return 0;
+}
+
+/**
+ * Acquire a lease and open the file.
+ */
+struct obd_client_handle *ll_lease_open(struct inode *inode, struct file *file,
+ fmode_t fmode, __u64 open_flags)
+{
+ struct lookup_intent it = { .it_op = IT_OPEN };
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct md_op_data *op_data;
+ struct ptlrpc_request *req;
+ struct lustre_handle old_handle = { 0 };
+ struct obd_client_handle *och = NULL;
+ int rc;
+ int rc2;
+
+ if (fmode != FMODE_WRITE && fmode != FMODE_READ)
+ return ERR_PTR(-EINVAL);
+
+ if (file != NULL) {
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct obd_client_handle **och_p;
+ __u64 *och_usecount;
+
+ if (!(fmode & file->f_mode) || (file->f_mode & FMODE_EXEC))
+ return ERR_PTR(-EPERM);
+
+ /* Get the openhandle of the file */
+ rc = -EBUSY;
+ mutex_lock(&lli->lli_och_mutex);
+ if (fd->fd_lease_och != NULL) {
+ mutex_unlock(&lli->lli_och_mutex);
+ return ERR_PTR(rc);
+ }
+
+ if (fd->fd_och == NULL) {
+ if (file->f_mode & FMODE_WRITE) {
+ LASSERT(lli->lli_mds_write_och != NULL);
+ och_p = &lli->lli_mds_write_och;
+ och_usecount = &lli->lli_open_fd_write_count;
+ } else {
+ LASSERT(lli->lli_mds_read_och != NULL);
+ och_p = &lli->lli_mds_read_och;
+ och_usecount = &lli->lli_open_fd_read_count;
+ }
+ if (*och_usecount == 1) {
+ fd->fd_och = *och_p;
+ *och_p = NULL;
+ *och_usecount = 0;
+ rc = 0;
+ }
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+ if (rc < 0) /* more than 1 opener */
+ return ERR_PTR(rc);
+
+ LASSERT(fd->fd_och != NULL);
+ old_handle = fd->fd_och->och_fh;
+ }
+
+ OBD_ALLOC_PTR(och);
+ if (och == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ GOTO(out, rc = PTR_ERR(op_data));
+
+ /* To tell the MDT this openhandle is from the same owner */
+ op_data->op_handle = old_handle;
+
+ it.it_flags = fmode | open_flags;
+ it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE;
+ rc = md_intent_lock(sbi->ll_md_exp, op_data, NULL, 0, &it, 0, &req,
+ ll_md_blocking_lease_ast,
+ /* LDLM_FL_NO_LRU: do not put the lease lock into the LRU list, otherwise
+ * it can be cancelled, which may mislead applications into thinking the
+ * lease is broken;
+ * LDLM_FL_EXCL: set this flag so that it won't be matched by a normal
+ * open in ll_md_blocking_ast(). Since ll_md_blocking_lease_ast doesn't
+ * deal with the openhandle, a normal openhandle would otherwise be leaked. */
+ LDLM_FL_NO_LRU | LDLM_FL_EXCL);
+ ll_finish_md_op_data(op_data);
+ if (req != NULL) {
+ ptlrpc_req_finished(req);
+ it_clear_disposition(&it, DISP_ENQ_COMPLETE);
+ }
+ if (rc < 0)
+ GOTO(out_release_it, rc);
+
+ if (it_disposition(&it, DISP_LOOKUP_NEG))
+ GOTO(out_release_it, rc = -ENOENT);
+
+ rc = it_open_error(DISP_OPEN_OPEN, &it);
+ if (rc)
+ GOTO(out_release_it, rc);
+
+ LASSERT(it_disposition(&it, DISP_ENQ_OPEN_REF));
+ ll_och_fill(sbi->ll_md_exp, &it, och);
+
+ if (!it_disposition(&it, DISP_OPEN_LEASE)) /* old server? */
+ GOTO(out_close, rc = -EOPNOTSUPP);
+
+ /* lease already acquired; handle the lease lock */
+ ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
+ if (it.d.lustre.it_lock_mode == 0 ||
+ it.d.lustre.it_lock_bits != MDS_INODELOCK_OPEN) {
+ /* an open lock must be returned for a lease */
+ CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
+ PFID(ll_inode2fid(inode)), it.d.lustre.it_lock_mode,
+ it.d.lustre.it_lock_bits);
+ GOTO(out_close, rc = -EPROTO);
+ }
+
+ ll_intent_release(&it);
+ return och;
+
+out_close:
+ rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
+ if (rc2)
+ CERROR("Close openhandle returned %d\n", rc2);
+
+ /* cancel open lock */
+ if (it.d.lustre.it_lock_mode != 0) {
+ ldlm_lock_decref_and_cancel(&och->och_lease_handle,
+ it.d.lustre.it_lock_mode);
+ it.d.lustre.it_lock_mode = 0;
+ }
+out_release_it:
+ ll_intent_release(&it);
+out:
+ OBD_FREE_PTR(och);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL(ll_lease_open);
+
+/**
+ * Release the lease and close the file.
+ * It also checks whether the lease has been broken.
+ */
+int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
+ bool *lease_broken)
+{
+ struct ldlm_lock *lock;
+ bool cancelled = true;
+ int rc;
+
+ lock = ldlm_handle2lock(&och->och_lease_handle);
+ if (lock != NULL) {
+ lock_res_and_lock(lock);
+ cancelled = ldlm_is_cancel(lock);
+ unlock_res_and_lock(lock);
+ ldlm_lock_put(lock);
+ }
+
+ CDEBUG(D_INODE, "lease for "DFID" broken? %d\n",
+ PFID(&ll_i2info(inode)->lli_fid), cancelled);
+
+ if (!cancelled)
+ ldlm_cli_cancel(&och->och_lease_handle, 0);
+ if (lease_broken != NULL)
+ *lease_broken = cancelled;
+
+ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och,
+ NULL);
+ return rc;
+}
+EXPORT_SYMBOL(ll_lease_close);
+
/* Fills the obdo with the attributes for the lsm */
static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
struct obd_capa *capa, struct obdo *obdo,
@@ -905,7 +1123,7 @@ out:
cl_io_fini(env, io);
/* If any bit been read/written (result != 0), we just return
* short read/write instead of restart io. */
- if (result == 0 && io->ci_need_restart) {
+ if ((result == 0 || result == -ENODATA) && io->ci_need_restart) {
CDEBUG(D_VFSTRACE, "Restart %s on %s from %lld, count:%zd\n",
iot == CIT_READ ? "read" : "write",
file->f_dentry->d_name.name, *ppos, count);
@@ -930,48 +1148,16 @@ out:
return result;
}
-
-/*
- * XXX: exact copy from kernel code (__generic_file_aio_write_nolock)
- */
-static int ll_file_get_iov_count(const struct iovec *iov,
- unsigned long *nr_segs, size_t *count)
-{
- size_t cnt = 0;
- unsigned long seg;
-
- for (seg = 0; seg < *nr_segs; seg++) {
- const struct iovec *iv = &iov[seg];
-
- /*
- * If any segment has a negative length, or the cumulative
- * length ever wraps negative then return -EINVAL.
- */
- cnt += iv->iov_len;
- if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
- return -EINVAL;
- if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
- continue;
- if (seg == 0)
- return -EFAULT;
- *nr_segs = seg;
- cnt -= iv->iov_len; /* This segment is no good */
- break;
- }
- *count = cnt;
- return 0;
-}
-
static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct lu_env *env;
struct vvp_io_args *args;
- size_t count;
+ size_t count = 0;
ssize_t result;
int refcheck;
- result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ result = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (result)
return result;
@@ -1026,11 +1212,11 @@ static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
{
struct lu_env *env;
struct vvp_io_args *args;
- size_t count;
+ size_t count = 0;
ssize_t result;
int refcheck;
- result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ result = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
if (result)
return result;
@@ -1482,12 +1668,11 @@ int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it)
if (!och)
GOTO(out, rc = -ENOMEM);
- ll_och_fill(ll_i2sbi(inode)->ll_md_exp,
- ll_i2info(inode), it, och);
+ ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- inode, och);
- out:
+ inode, och, NULL);
+out:
/* this one is in place of ll_file_open */
if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
ptlrpc_req_finished(it->d.lustre.it_data);
@@ -1692,6 +1877,53 @@ out:
return rc;
}
+/*
+ * Trigger an HSM release request for the provided inode.
+ */
+int ll_hsm_release(struct inode *inode)
+{
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ struct obd_client_handle *och = NULL;
+ __u64 data_version = 0;
+ int rc;
+
+
+ CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(&ll_i2info(inode)->lli_fid));
+
+ och = ll_lease_open(inode, NULL, FMODE_WRITE, MDS_OPEN_RELEASE);
+ if (IS_ERR(och))
+ GOTO(out, rc = PTR_ERR(och));
+
+ /* Grab latest data_version and [am]time values */
+ rc = ll_data_version(inode, &data_version, 1);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ GOTO(out, rc = PTR_ERR(env));
+
+ ll_merge_lvb(env, inode);
+ cl_env_nested_put(&nest, env);
+
+ /* Release the file.
+ * NB: lease lock handle is released in mdc_hsm_release_pack() because
+ * we still need it to pack l_remote_handle to MDT. */
+ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och,
+ &data_version);
+ och = NULL;
+
+
+out:
+ if (och != NULL && !IS_ERR(och)) /* close the file */
+ ll_lease_close(och, inode, NULL);
+
+ return rc;
+}
+
struct ll_swap_stack {
struct iattr ia1, ia2;
__u64 dv1, dv2;
@@ -1853,6 +2085,86 @@ free:
return rc;
}
+static int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
+{
+ struct md_op_data *op_data;
+ int rc;
+
+ /* Non-root users are forbidden to set or clear flags which are
+ * NOT defined in HSM_USER_MASK. */
+ if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
+ !cfs_capable(CFS_CAP_SYS_ADMIN))
+ return -EPERM;
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, hss);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, ll_i2mdexp(inode),
+ sizeof(*op_data), op_data, NULL);
+
+ ll_finish_md_op_data(op_data);
+
+ return rc;
+}
+
+static int ll_hsm_import(struct inode *inode, struct file *file,
+ struct hsm_user_import *hui)
+{
+ struct hsm_state_set *hss = NULL;
+ struct iattr *attr = NULL;
+ int rc;
+
+
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ /* set HSM flags */
+ OBD_ALLOC_PTR(hss);
+ if (hss == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ hss->hss_valid = HSS_SETMASK | HSS_ARCHIVE_ID;
+ hss->hss_archive_id = hui->hui_archive_id;
+ hss->hss_setmask = HS_ARCHIVED | HS_EXISTS | HS_RELEASED;
+ rc = ll_hsm_state_set(inode, hss);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ OBD_ALLOC_PTR(attr);
+ if (attr == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ attr->ia_mode = hui->hui_mode & (S_IRWXU | S_IRWXG | S_IRWXO);
+ attr->ia_mode |= S_IFREG;
+ attr->ia_uid = make_kuid(&init_user_ns, hui->hui_uid);
+ attr->ia_gid = make_kgid(&init_user_ns, hui->hui_gid);
+ attr->ia_size = hui->hui_size;
+ attr->ia_mtime.tv_sec = hui->hui_mtime;
+ attr->ia_mtime.tv_nsec = hui->hui_mtime_ns;
+ attr->ia_atime.tv_sec = hui->hui_atime;
+ attr->ia_atime.tv_nsec = hui->hui_atime_ns;
+
+ attr->ia_valid = ATTR_SIZE | ATTR_MODE | ATTR_FORCE |
+ ATTR_UID | ATTR_GID |
+ ATTR_MTIME | ATTR_MTIME_SET |
+ ATTR_ATIME | ATTR_ATIME_SET;
+
+ rc = ll_setattr_raw(file->f_dentry, attr, true);
+ if (rc == -ENODATA)
+ rc = 0;
+
+out:
+ if (hss != NULL)
+ OBD_FREE_PTR(hss);
+
+ if (attr != NULL)
+ OBD_FREE_PTR(attr);
+
+ return rc;
+}
+
long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file->f_dentry->d_inode;
@@ -2014,37 +2326,19 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return rc;
}
case LL_IOC_HSM_STATE_SET: {
- struct md_op_data *op_data;
struct hsm_state_set *hss;
int rc;
OBD_ALLOC_PTR(hss);
if (hss == NULL)
return -ENOMEM;
+
if (copy_from_user(hss, (char *)arg, sizeof(*hss))) {
OBD_FREE_PTR(hss);
return -EFAULT;
}
- /* Non-root users are forbidden to set or clear flags which are
- * NOT defined in HSM_USER_MASK. */
- if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK)
- && !cfs_capable(CFS_CAP_SYS_ADMIN)) {
- OBD_FREE_PTR(hss);
- return -EPERM;
- }
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, hss);
- if (IS_ERR(op_data)) {
- OBD_FREE_PTR(hss);
- return PTR_ERR(op_data);
- }
-
- rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
- op_data, NULL);
-
- ll_finish_md_op_data(op_data);
+ rc = ll_hsm_state_set(inode, hss);
OBD_FREE_PTR(hss);
return rc;
@@ -2075,6 +2369,107 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
OBD_FREE_PTR(hca);
return rc;
}
+ case LL_IOC_SET_LEASE: {
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_client_handle *och = NULL;
+ bool lease_broken;
+ fmode_t mode = 0;
+
+ switch (arg) {
+ case F_WRLCK:
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EPERM;
+ mode = FMODE_WRITE;
+ break;
+ case F_RDLCK:
+ if (!(file->f_mode & FMODE_READ))
+ return -EPERM;
+ mode = FMODE_READ;
+ break;
+ case F_UNLCK:
+ mutex_lock(&lli->lli_och_mutex);
+ if (fd->fd_lease_och != NULL) {
+ och = fd->fd_lease_och;
+ fd->fd_lease_och = NULL;
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+
+ if (och != NULL) {
+ mode = och->och_flags &
+ (FMODE_READ|FMODE_WRITE);
+ rc = ll_lease_close(och, inode, &lease_broken);
+ if (rc == 0 && lease_broken)
+ mode = 0;
+ } else {
+ rc = -ENOLCK;
+ }
+
+ /* return the type of lease or error */
+ return rc < 0 ? rc : (int)mode;
+ default:
+ return -EINVAL;
+ }
+
+ CDEBUG(D_INODE, "Set lease with mode %d\n", mode);
+
+ /* apply for lease */
+ och = ll_lease_open(inode, file, mode, 0);
+ if (IS_ERR(och))
+ return PTR_ERR(och);
+
+ rc = 0;
+ mutex_lock(&lli->lli_och_mutex);
+ if (fd->fd_lease_och == NULL) {
+ fd->fd_lease_och = och;
+ och = NULL;
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+ if (och != NULL) {
+ /* should be impossible, since only exclusive leases are supported for now */
+ ll_lease_close(och, inode, &lease_broken);
+ rc = -EBUSY;
+ }
+ return rc;
+ }
+ case LL_IOC_GET_LEASE: {
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ldlm_lock *lock = NULL;
+
+ rc = 0;
+ mutex_lock(&lli->lli_och_mutex);
+ if (fd->fd_lease_och != NULL) {
+ struct obd_client_handle *och = fd->fd_lease_och;
+
+ lock = ldlm_handle2lock(&och->och_lease_handle);
+ if (lock != NULL) {
+ lock_res_and_lock(lock);
+ if (!ldlm_is_cancel(lock))
+ rc = och->och_flags &
+ (FMODE_READ | FMODE_WRITE);
+ unlock_res_and_lock(lock);
+ ldlm_lock_put(lock);
+ }
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+ return rc;
+ }
+ case LL_IOC_HSM_IMPORT: {
+ struct hsm_user_import *hui;
+
+ OBD_ALLOC_PTR(hui);
+ if (hui == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(hui, (void *)arg, sizeof(*hui))) {
+ OBD_FREE_PTR(hui);
+ return -EFAULT;
+ }
+
+ rc = ll_hsm_import(inode, file, hui);
+
+ OBD_FREE_PTR(hui);
+ return rc;
+ }
default: {
int err;
@@ -2435,7 +2830,8 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode)
}
ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
- struct lustre_handle *lockh, __u64 flags)
+ struct lustre_handle *lockh, __u64 flags,
+ ldlm_mode_t mode)
{
ldlm_policy_data_t policy = { .l_inodebits = {bits}};
struct lu_fid *fid;
@@ -2445,8 +2841,8 @@ ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
rc = md_lock_match(ll_i2mdexp(inode), LDLM_FL_BLOCK_GRANTED|flags,
- fid, LDLM_IBITS, &policy,
- LCK_CR|LCK_CW|LCK_PR|LCK_PW, lockh);
+ fid, LDLM_IBITS, &policy, mode, lockh);
+
return rc;
}
@@ -2581,7 +2977,15 @@ int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_lvb.lvb_mtime;
LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_lvb.lvb_ctime;
} else {
- rc = ll_glimpse_size(inode);
+ /* In case of restore, the MDT has the right size and has
+ * already sent it back without granting the layout lock;
+ * the inode is up to date, so a glimpse is useless.
+ * Also, to glimpse we need the layout; during a running
+ * restore the MDT holds the layout lock, so the glimpse
+ * would block until the end of the restore (getattr will block).
+ */
+ if (!(ll_i2info(inode)->lli_flags & LLIF_FILE_RESTORING))
+ rc = ll_glimpse_size(inode);
}
return rc;
}
@@ -2628,6 +3032,38 @@ int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
return ll_getattr_it(mnt, de, &it, stat);
}
+int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len)
+{
+ int rc;
+ size_t num_bytes;
+ struct ll_user_fiemap *fiemap;
+ unsigned int extent_count = fieinfo->fi_extents_max;
+
+ num_bytes = sizeof(*fiemap) + (extent_count *
+ sizeof(struct ll_fiemap_extent));
+ OBD_ALLOC_LARGE(fiemap, num_bytes);
+
+ if (fiemap == NULL)
+ return -ENOMEM;
+
+ fiemap->fm_flags = fieinfo->fi_flags;
+ fiemap->fm_extent_count = fieinfo->fi_extents_max;
+ fiemap->fm_start = start;
+ fiemap->fm_length = len;
+ memcpy(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
+ sizeof(struct ll_fiemap_extent));
+
+ rc = ll_do_fiemap(inode, fiemap, num_bytes);
+
+ fieinfo->fi_flags = fiemap->fm_flags;
+ fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
+ memcpy(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
+ fiemap->fm_mapped_extents * sizeof(struct ll_fiemap_extent));
+
+ OBD_FREE_LARGE(fiemap, num_bytes);
+ return rc;
+}
struct posix_acl * ll_get_acl(struct inode *inode, int type)
{
@@ -2676,17 +3112,12 @@ int ll_inode_permission(struct inode *inode, int mask)
return rc;
}
-#define READ_METHOD aio_read
-#define READ_FUNCTION ll_file_aio_read
-#define WRITE_METHOD aio_write
-#define WRITE_FUNCTION ll_file_aio_write
-
/* -o localflock - only provides locally consistent flock locks */
struct file_operations ll_file_operations = {
.read = ll_file_read,
- .READ_METHOD = READ_FUNCTION,
+ .aio_read = ll_file_aio_read,
.write = ll_file_write,
- .WRITE_METHOD = WRITE_FUNCTION,
+ .aio_write = ll_file_aio_write,
.unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
@@ -2699,9 +3130,9 @@ struct file_operations ll_file_operations = {
struct file_operations ll_file_operations_flock = {
.read = ll_file_read,
- .READ_METHOD = READ_FUNCTION,
+ .aio_read = ll_file_aio_read,
.write = ll_file_write,
- .WRITE_METHOD = WRITE_FUNCTION,
+ .aio_write = ll_file_aio_write,
.unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
@@ -2717,9 +3148,9 @@ struct file_operations ll_file_operations_flock = {
/* These are for -o noflock - to return ENOSYS on flock calls */
struct file_operations ll_file_operations_noflock = {
.read = ll_file_read,
- .READ_METHOD = READ_FUNCTION,
+ .aio_read = ll_file_aio_read,
.write = ll_file_write,
- .WRITE_METHOD = WRITE_FUNCTION,
+ .aio_write = ll_file_aio_write,
.unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
@@ -2740,6 +3171,7 @@ struct inode_operations ll_file_inode_operations = {
.getxattr = ll_getxattr,
.listxattr = ll_listxattr,
.removexattr = ll_removexattr,
+ .fiemap = ll_fiemap,
.get_acl = ll_get_acl,
};
@@ -3086,7 +3518,8 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
/* mostly layout lock is caching on the local side, so try to match
* it before grabbing layout lock mutex. */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0);
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
+ LCK_CR | LCK_CW | LCK_PR | LCK_PW);
if (mode != 0) { /* hit cached lock */
rc = ll_layout_lock_set(&lockh, mode, inode, gen, false);
if (rc == 0)
@@ -3101,7 +3534,8 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
again:
/* try again. Maybe somebody else has done this. */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0);
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
+ LCK_CR | LCK_CW | LCK_PR | LCK_PW);
if (mode != 0) { /* hit cached lock */
rc = ll_layout_lock_set(&lockh, mode, inode, gen, true);
if (rc == -EAGAIN)
@@ -3150,3 +3584,30 @@ again:
return rc;
}
+
+/**
+ * This function sends a restore request to the MDT.
+ */
+int ll_layout_restore(struct inode *inode)
+{
+ struct hsm_user_request *hur;
+ int len, rc;
+
+ len = sizeof(struct hsm_user_request) +
+ sizeof(struct hsm_user_item);
+ OBD_ALLOC(hur, len);
+ if (hur == NULL)
+ return -ENOMEM;
+
+ hur->hur_request.hr_action = HUA_RESTORE;
+ hur->hur_request.hr_archive_id = 0;
+ hur->hur_request.hr_flags = 0;
+ memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid,
+ sizeof(hur->hur_user_item[0].hui_fid));
+ hur->hur_user_item[0].hui_extent.length = -1;
+ hur->hur_request.hr_itemcount = 1;
+ rc = obd_iocontrol(LL_IOC_HSM_REQUEST, cl_i2sbi(inode)->ll_md_exp,
+ len, hur, NULL);
+ OBD_FREE(hur, len);
+ return rc;
+}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 47e443d..7ee5c02 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -46,6 +46,8 @@
#include <lclient.h>
#include <lustre_mdc.h>
#include <linux/lustre_intent.h>
+#include <linux/compat.h>
+#include <linux/posix_acl_xattr.h>
#ifndef FMODE_EXEC
#define FMODE_EXEC 0
@@ -124,6 +126,10 @@ enum lli_flags {
LLIF_SRVLOCK = (1 << 5),
/* File data is modified. */
LLIF_DATA_MODIFIED = (1 << 6),
+ /* File is being restored */
+ LLIF_FILE_RESTORING = (1 << 7),
+ /* Xattr cache is attached to the file */
+ LLIF_XATTR_CACHE = (1 << 8),
};
struct ll_inode_info {
@@ -276,8 +282,27 @@ struct ll_inode_info {
struct mutex lli_layout_mutex;
/* valid only inside LAYOUT ibits lock, protected by lli_layout_mutex */
__u32 lli_layout_gen;
+
+ struct rw_semaphore lli_xattrs_list_rwsem;
+ struct mutex lli_xattrs_enq_lock;
+ struct list_head lli_xattrs;/* ll_xattr_entry->xe_list */
};
+int ll_xattr_cache_destroy(struct inode *inode);
+
+int ll_xattr_cache_get(struct inode *inode,
+ const char *name,
+ char *buffer,
+ size_t size,
+ __u64 valid);
+
+int ll_xattr_cache_update(struct inode *inode,
+ const char *name,
+ const char *newval,
+ size_t size,
+ __u64 valid,
+ int flags);
+
/*
* Locking to guarantee consistency of non-atomic updates to long long i_size,
* consistency between file size and KMS.
@@ -399,6 +424,7 @@ enum stats_track_type {
#define LL_SBI_VERBOSE 0x10000 /* verbose mount/umount */
#define LL_SBI_LAYOUT_LOCK 0x20000 /* layout lock support */
#define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */
+#define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */
#define LL_SBI_FLAGS { \
"nolck", \
@@ -406,6 +432,7 @@ enum stats_track_type {
"flock", \
"xattr", \
"acl", \
+ "???", \
"rmt_client", \
"mds_capa", \
"oss_capa", \
@@ -418,7 +445,9 @@ enum stats_track_type {
"agl", \
"verbose", \
"layout", \
- "user_fid2path" }
+ "user_fid2path",\
+ "xattr", \
+}
/* default value for ll_sb_info->contention_time */
#define SBI_DEFAULT_CONTENTION_SECONDS 60
@@ -458,7 +487,8 @@ struct ll_sb_info {
struct lu_fid ll_root_fid; /* root object fid */
int ll_flags;
- int ll_umounting:1;
+ unsigned int ll_umounting:1,
+ ll_xattr_cache_enabled:1;
struct list_head ll_conn_chain; /* per-conn chain of SBs */
struct lustre_client_ocd ll_lco;
@@ -607,10 +637,14 @@ extern struct kmem_cache *ll_file_data_slab;
struct lustre_handle;
struct ll_file_data {
struct ll_readahead_state fd_ras;
- int fd_omode;
struct ccc_grouplock fd_grouplock;
__u64 lfd_pos;
__u32 fd_flags;
+ fmode_t fd_omode;
+ /* openhandle if lease exists for this file.
+ * Borrow lli->lli_och_mutex to protect assignment */
+ struct obd_client_handle *fd_lease_och;
+ struct obd_client_handle *fd_och;
struct file *fd_file;
/* Indicate whether need to report failure when close.
* true: failure is known, not report again.
@@ -643,7 +677,12 @@ static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
#if BITS_PER_LONG == 32
return 1;
#else
- return unlikely(current_is_32bit() || (sbi->ll_flags & LL_SBI_32BIT_API));
+ return unlikely(
+#ifdef CONFIG_COMPAT
+ is_compat_task() ||
+#endif
+ (sbi->ll_flags & LL_SBI_32BIT_API)
+ );
#endif
}
@@ -663,15 +702,22 @@ int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi);
void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count);
void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars);
+void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
+ struct ll_file_data *file, loff_t pos,
+ size_t count, int rw);
#else
static inline int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
struct super_block *sb, char *osc, char *mdc){return 0;}
static inline void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi) {}
-static void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count) {}
-static void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
+static inline
+void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count) {}
+static inline void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
{
memset(lvars, 0, sizeof(*lvars));
}
+static inline void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
+ struct ll_file_data *file, loff_t pos,
+ size_t count, int rw) {}
#endif
@@ -720,7 +766,8 @@ extern int ll_inode_revalidate_it(struct dentry *, struct lookup_intent *,
extern int ll_have_md_lock(struct inode *inode, __u64 *bits,
ldlm_mode_t l_req_mode);
extern ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
- struct lustre_handle *lockh, __u64 flags);
+ struct lustre_handle *lockh, __u64 flags,
+ ldlm_mode_t mode);
int __ll_inode_revalidate_it(struct dentry *, struct lookup_intent *,
__u64 bits);
int ll_revalidate_nd(struct dentry *dentry, unsigned int flags);
@@ -746,9 +793,6 @@ int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
struct md_open_data **mod);
void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
struct lustre_handle *fh);
-extern void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
- struct ll_file_data *file, loff_t pos,
- size_t count, int rw);
int ll_getattr_it(struct vfsmount *mnt, struct dentry *de,
struct lookup_intent *it, struct kstat *stat);
int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
@@ -775,6 +819,12 @@ int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg);
int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);
int ll_fid2path(struct inode *inode, void *arg);
int ll_data_version(struct inode *inode, __u64 *data_version, int extent_lock);
+int ll_hsm_release(struct inode *inode);
+
+struct obd_client_handle *ll_lease_open(struct inode *inode, struct file *file,
+ fmode_t mode, __u64 flags);
+int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
+ bool *lease_broken);
/* llite/dcache.c */
@@ -801,7 +851,7 @@ void ll_kill_super(struct super_block *sb);
struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
struct inode *ll_inode_from_lock(struct ldlm_lock *lock);
void ll_clear_inode(struct inode *inode);
-int ll_setattr_raw(struct dentry *dentry, struct iattr *attr);
+int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import);
int ll_setattr(struct dentry *de, struct iattr *attr);
int ll_statfs(struct dentry *de, struct kstatfs *sfs);
int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
@@ -1578,5 +1628,9 @@ enum {
int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
int ll_layout_refresh(struct inode *inode, __u32 *gen);
+int ll_layout_restore(struct inode *inode);
+
+int ll_xattr_init(void);
+void ll_xattr_fini(void);
#endif /* LLITE_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index fd584ff..6cfdb9e 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -56,6 +56,7 @@
#include "llite_internal.h"
struct kmem_cache *ll_file_data_slab;
+struct proc_dir_entry *proc_lustre_fs_root;
LIST_HEAD(ll_super_blocks);
DEFINE_SPINLOCK(ll_sb_lock);
@@ -209,7 +210,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH|
OBD_CONNECT_EINPROGRESS |
OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
- OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
+ OBD_CONNECT_LAYOUTLOCK |
+ OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE;
if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
data->ocd_connect_flags |= OBD_CONNECT_SOM;
@@ -383,6 +385,17 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
}
+ if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
+ if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
+ LCONSOLE_INFO(
+ "%s: disabling xattr cache due to unknown maximum xattr size.\n",
+ dt);
+ } else {
+ sbi->ll_flags |= LL_SBI_XATTR_CACHE;
+ sbi->ll_xattr_cache_enabled = 1;
+ }
+ }
+
obd = class_name2obd(dt);
if (!obd) {
CERROR("DT %s: not setup or attached\n", dt);
@@ -922,6 +935,9 @@ void ll_lli_init(struct ll_inode_info *lli)
lli->lli_layout_gen = LL_LAYOUT_GEN_NONE;
lli->lli_clob = NULL;
+ init_rwsem(&lli->lli_xattrs_list_rwsem);
+ mutex_init(&lli->lli_xattrs_enq_lock);
+
LASSERT(lli->lli_vfs_inode.i_mode != 0);
if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
mutex_init(&lli->lli_readdir_mutex);
@@ -1194,6 +1210,8 @@ void ll_clear_inode(struct inode *inode)
lli->lli_symlink_name = NULL;
}
+ ll_xattr_cache_destroy(inode);
+
if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
LASSERT(lli->lli_posix_acl == NULL);
if (lli->lli_remote_perms) {
@@ -1346,19 +1364,24 @@ static int ll_setattr_ost(struct inode *inode, struct iattr *attr)
* to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
* I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
* at the same time.
+ *
+ * In the case of an HSM import, we only set attributes on the MDS.
*/
-int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
+int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
{
struct inode *inode = dentry->d_inode;
struct ll_inode_info *lli = ll_i2info(inode);
struct md_op_data *op_data = NULL;
struct md_open_data *mod = NULL;
+ bool file_is_released = false;
int rc = 0, rc1 = 0;
- CDEBUG(D_VFSTRACE, "%s: setattr inode %p/fid:"DFID" from %llu to %llu, "
- "valid %x\n", ll_get_fsname(inode->i_sb, NULL, 0), inode,
+ CDEBUG(D_VFSTRACE,
+ "%s: setattr inode %p/fid:"DFID
+ " from %llu to %llu, valid %x, hsm_import %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), inode,
PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
- attr->ia_valid);
+ attr->ia_valid, hsm_import);
if (attr->ia_valid & ATTR_SIZE) {
/* Check new size against VFS/VM file size limit and rlimit */
@@ -1436,10 +1459,40 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
(attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
op_data->op_flags = MF_EPOCH_OPEN;
+ /* a truncate on a released file must fail with -ENODATA,
+ * so the size must not be set on the MDS for a released file,
+ * but the other attributes must still be set
+ */
+ if (S_ISREG(inode->i_mode)) {
+ struct lov_stripe_md *lsm;
+ __u32 gen;
+
+ ll_layout_refresh(inode, &gen);
+ lsm = ccc_inode_lsm_get(inode);
+ if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
+ file_is_released = true;
+ ccc_inode_lsm_put(inode, lsm);
+ }
+
+ /* if not in HSM import mode, clear the size attr for a released file;
+ * we clear the attribute sent to the MDT in op_data, not the original
+ * received from the caller in attr, which is used later to
+ * decide the return code */
+ if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import)
+ op_data->op_attr.ia_valid &= ~ATTR_SIZE;
+
rc = ll_md_setattr(dentry, op_data, &mod);
if (rc)
GOTO(out, rc);
+ /* the truncate fails (only when not an HSM import); other attributes succeed */
+ if (file_is_released) {
+ if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
+ GOTO(out, rc = -ENODATA);
+ else
+ GOTO(out, rc = 0);
+ }
+
/* RPC to MDT is sent, cancel data modification flag */
if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
spin_lock(&lli->lli_lock);
@@ -1473,7 +1526,7 @@ out:
if (!S_ISDIR(inode->i_mode)) {
up_write(&lli->lli_trunc_sem);
mutex_lock(&inode->i_mutex);
- if (attr->ia_valid & ATTR_SIZE)
+ if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
inode_dio_wait(inode);
}
@@ -1508,7 +1561,7 @@ int ll_setattr(struct dentry *de, struct iattr *attr)
!(attr->ia_valid & ATTR_KILL_SGID))
attr->ia_valid |= ATTR_KILL_SGID;
- return ll_setattr_raw(de, attr);
+ return ll_setattr_raw(de, attr, false);
}
int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
@@ -1721,7 +1774,9 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
* lock on the client and set LLIF_MDS_SIZE_LOCK holding
* it. */
mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
- &lockh, LDLM_FL_CBPENDING);
+ &lockh, LDLM_FL_CBPENDING,
+ LCK_CR | LCK_CW |
+ LCK_PR | LCK_PW);
if (mode) {
if (lli->lli_flags & (LLIF_DONE_WRITING |
LLIF_EPOCH_PENDING |
@@ -1761,6 +1816,11 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
LASSERT(md->oss_capa);
ll_add_capa(inode, md->oss_capa);
}
+
+ if (body->valid & OBD_MD_TSTATE) {
+ if (body->t_state & MS_RESTORE)
+ lli->lli_flags |= LLIF_FILE_RESTORING;
+ }
}
void ll_read_inode2(struct inode *inode, void *opaque)
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index e2421ea..5338e8d 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -856,7 +856,8 @@ static void lloop_exit(void)
module_init(lloop_init);
module_exit(lloop_exit);
-CFS_MODULE_PARM(max_loop, "i", int, 0444, "maximum of lloop_device");
+module_param(max_loop, int, 0444);
+MODULE_PARM_DESC(max_loop, "maximum of lloop_device");
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre virtual block device");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 4bf09c4..a9a104a 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -42,9 +42,6 @@
#include "llite_internal.h"
-struct proc_dir_entry *proc_lustre_fs_root;
-
-#ifdef LPROCFS
/* /proc/lustre/llite mount point registration */
extern struct file_operations vvp_dump_pgcache_file_ops;
struct file_operations ll_rw_extents_stats_fops;
@@ -723,6 +720,41 @@ static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
}
LPROC_SEQ_FOPS_RO(ll_sbi_flags);
+static int ll_xattr_cache_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ int rc;
+
+ rc = seq_printf(m, "%u\n", sbi->ll_xattr_cache_enabled);
+
+ return rc;
+}
+
+static ssize_t ll_xattr_cache_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct super_block *sb = seq->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ int val, rc;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ if (val != 0 && val != 1)
+ return -ERANGE;
+
+ if (val == 1 && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
+ return -ENOTSUPP;
+
+ sbi->ll_xattr_cache_enabled = val;
+
+ return count;
+}
+LPROC_SEQ_FOPS(ll_xattr_cache);
+
static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
{ "uuid", &ll_sb_uuid_fops, 0, 0 },
//{ "mntpt_path", ll_rd_path, 0, 0 },
@@ -751,6 +783,7 @@ static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
{ "lazystatfs", &ll_lazystatfs_fops, 0 },
{ "max_easize", &ll_maxea_size_fops, 0, 0 },
{ "sbi_flags", &ll_sbi_flags_fops, 0, 0 },
+ { "xattr_cache", &ll_xattr_cache_fops, 0, 0 },
{ 0 }
};
@@ -802,6 +835,7 @@ struct llite_file_opcode {
{ LPROC_LL_ALLOC_INODE, LPROCFS_TYPE_REGS, "alloc_inode" },
{ LPROC_LL_SETXATTR, LPROCFS_TYPE_REGS, "setxattr" },
{ LPROC_LL_GETXATTR, LPROCFS_TYPE_REGS, "getxattr" },
+ { LPROC_LL_GETXATTR_HITS, LPROCFS_TYPE_REGS, "getxattr_hits" },
{ LPROC_LL_LISTXATTR, LPROCFS_TYPE_REGS, "listxattr" },
{ LPROC_LL_REMOVEXATTR, LPROCFS_TYPE_REGS, "removexattr" },
{ LPROC_LL_INODE_PERM, LPROCFS_TYPE_REGS, "inode_permission" },
@@ -1367,4 +1401,3 @@ void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
lvars->module_vars = NULL;
lvars->obd_vars = lprocfs_llite_obd_vars;
}
-#endif /* LPROCFS */
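The xattr_cache entry added to lprocfs_llite_obd_vars above is a per-mount switch: the write handler accepts only 0 or 1, and returns -ENOTSUPP when asked to enable the cache on a mount where the server did not negotiate OBD_CONNECT_MAX_EASIZE. A minimal userspace sketch of toggling it follows, assuming a hypothetical procfs path (the real path depends on the mount instance); it is illustrative only and not part of the patch.

/* Illustrative only, not part of the patch. */
#include <fcntl.h>
#include <unistd.h>

static int set_xattr_cache(const char *proc_path, int enable)
{
	/* e.g. "/proc/fs/lustre/llite/<fsname>-<instance>/xattr_cache" (assumed) */
	int fd = open(proc_path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	/* only "0" or "1" are accepted; other values fail with ERANGE */
	n = write(fd, enable ? "1" : "0", 1);
	close(fd);
	return n == 1 ? 0 : -1;
}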
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 90bbdae..fc8d264 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -223,6 +223,10 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
break;
LASSERT(lock->l_flags & LDLM_FL_CANCELING);
+
+ if (bits & MDS_INODELOCK_XATTR)
+ ll_xattr_cache_destroy(inode);
+
/* For OPEN locks we differentiate between lock modes
* LCK_CR, LCK_CW, LCK_PR - bug 22891 */
if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
@@ -233,12 +237,9 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
ll_have_md_lock(inode, &bits, mode);
fid = ll_inode2fid(inode);
- if (lock->l_resource->lr_name.name[0] != fid_seq(fid) ||
- lock->l_resource->lr_name.name[1] != fid_oid(fid) ||
- lock->l_resource->lr_name.name[2] != fid_ver(fid)) {
+ if (!fid_res_name_eq(fid, &lock->l_resource->lr_name))
LDLM_ERROR(lock, "data mismatch with object "
DFID" (%p)", PFID(fid), inode);
- }
if (bits & MDS_INODELOCK_OPEN) {
int flags = 0;
@@ -526,8 +527,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
icbd.icbd_childp = &dentry;
icbd.icbd_parent = parent;
- if (it->it_op & IT_CREAT ||
- (it->it_op & IT_OPEN && it->it_create_mode & O_CREAT))
+ if (it->it_op & IT_CREAT)
opc = LUSTRE_OPC_CREATE;
else
opc = LUSTRE_OPC_ANY;
@@ -626,7 +626,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
return -ENOMEM;
it->it_op = IT_OPEN;
- if (mode) {
+ if (open_flags & O_CREAT) {
it->it_op |= IT_CREAT;
lookup_flags |= LOOKUP_CREATE;
}
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 0beaf4e..e21e1c7 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -187,11 +187,15 @@ static int __init init_lustre_lite(void)
if (rc == 0)
rc = vvp_global_init();
+ if (rc == 0)
+ rc = ll_xattr_init();
+
return rc;
}
static void __exit exit_lustre_lite(void)
{
+ ll_xattr_fini();
vvp_global_fini();
del_timer(&ll_capa_timer);
ll_capa_thread_stop();
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 3ff664c..93cbfbb 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -121,8 +121,38 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- CDEBUG(D_VFSTRACE, "ignore/verify layout %d/%d, layout version %d.\n",
- io->ci_ignore_layout, io->ci_verify_layout, cio->cui_layout_gen);
+ CDEBUG(D_VFSTRACE, DFID
+ " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ io->ci_ignore_layout, io->ci_verify_layout,
+ cio->cui_layout_gen, io->ci_restore_needed);
+
+ if (io->ci_restore_needed == 1) {
+ int rc;
+
+ /* the file was detected as released, so we need to restore it
+ * before finishing the I/O
+ */
+ rc = ll_layout_restore(ccc_object_inode(obj));
+ /* if restore registration failed, there is no restart;
+ * we will return -ENODATA */
+ /* The layout will change after the restore, so we need to
+ * block on the layout lock held by the MDT;
+ * as the MDT will not send the new layout in the lvb (see LU-3124),
+ * we have to fetch it explicitly, and all of this is done
+ * by ll_layout_refresh()
+ */
+ if (rc == 0) {
+ io->ci_restore_needed = 0;
+ io->ci_need_restart = 1;
+ io->ci_verify_layout = 1;
+ } else {
+ io->ci_restore_needed = 1;
+ io->ci_need_restart = 0;
+ io->ci_verify_layout = 0;
+ io->ci_result = rc;
+ }
+ }
if (!io->ci_ignore_layout && io->ci_verify_layout) {
__u32 gen = 0;
@@ -130,9 +160,17 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
/* check layout version */
ll_layout_refresh(ccc_object_inode(obj), &gen);
io->ci_need_restart = cio->cui_layout_gen != gen;
- if (io->ci_need_restart)
- CDEBUG(D_VFSTRACE, "layout changed from %d to %d.\n",
- cio->cui_layout_gen, gen);
+ if (io->ci_need_restart) {
+ CDEBUG(D_VFSTRACE,
+ DFID" layout changed from %d to %d.\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ cio->cui_layout_gen, gen);
+ /* today a successful restore is the only possible
+ * case */
+ /* the restore was done, so clear the restoring state */
+ ll_i2info(ccc_object_inode(obj))->lli_flags &=
+ ~LLIF_FILE_RESTORING;
+ }
}
}
@@ -590,8 +628,11 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
if (vmf->page) {
- LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
- vmf->virtual_address);
+ CDEBUG(D_PAGE,
+ "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
+ vmf->page, vmf->page->mapping, vmf->page->index,
+ (long)vmf->page->flags, page_count(vmf->page),
+ page_private(vmf->page), vmf->virtual_address);
if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
lock_page(vmf->page);
cfio->fault.ft_flags &= VM_FAULT_LOCKED;
@@ -1111,6 +1152,12 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CDEBUG(D_VFSTRACE, DFID
+ " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ io->ci_ignore_layout, io->ci_verify_layout,
+ cio->cui_layout_gen, io->ci_restore_needed);
+
CL_IO_SLICE_CLEAN(cio, cui_cl);
cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
vio->cui_ra_window_set = 0;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index 33173fc..25973de 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -138,7 +138,7 @@ int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
lli->lli_layout_gen,
conf->u.coc_md->lsm->lsm_layout_gen);
- lli->lli_has_smd = true;
+ lli->lli_has_smd = lsm_has_objects(conf->u.coc_md->lsm);
lli->lli_layout_gen = conf->u.coc_md->lsm->lsm_layout_gen;
} else {
CDEBUG(D_VFSTRACE, "layout lock destroyed: %u.\n",
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index bcf86ba..3a7d03c 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -109,12 +109,12 @@ int ll_setxattr_common(struct inode *inode, const char *name,
int flags, __u64 valid)
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req;
+ struct ptlrpc_request *req = NULL;
int xattr_type, rc;
struct obd_capa *oc;
+ struct rmtacl_ctl_entry *rce = NULL;
#ifdef CONFIG_FS_POSIX_ACL
posix_acl_xattr_header *new_value = NULL;
- struct rmtacl_ctl_entry *rce = NULL;
ext_acl_xattr_header *acl = NULL;
#endif
const char *pv = value;
@@ -183,11 +183,17 @@ int ll_setxattr_common(struct inode *inode, const char *name,
valid |= rce_ops2valid(rce->rce_ops);
}
#endif
- oc = ll_mdscapa_get(inode);
- rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
- valid, name, pv, size, 0, flags, ll_i2suppgid(inode),
- &req);
- capa_put(oc);
+ if (sbi->ll_xattr_cache_enabled &&
+ (rce == NULL || rce->rce_ops == RMT_LSETFACL)) {
+ rc = ll_xattr_cache_update(inode, name, pv, size, valid, flags);
+ } else {
+ oc = ll_mdscapa_get(inode);
+ rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
+ valid, name, pv, size, 0, flags,
+ ll_i2suppgid(inode), &req);
+ capa_put(oc);
+ }
+
#ifdef CONFIG_FS_POSIX_ACL
if (new_value != NULL)
lustre_posix_acl_xattr_free(new_value, size);
@@ -352,48 +358,54 @@ int ll_getxattr_common(struct inode *inode, const char *name,
#endif
do_getxattr:
- oc = ll_mdscapa_get(inode);
- rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
- valid | (rce ? rce_ops2valid(rce->rce_ops) : 0),
- name, NULL, 0, size, 0, &req);
- capa_put(oc);
- if (rc) {
- if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
- LCONSOLE_INFO("Disabling user_xattr feature because "
- "it is not supported on the server\n");
- sbi->ll_flags &= ~LL_SBI_USER_XATTR;
- }
- return rc;
- }
+ if (sbi->ll_xattr_cache_enabled && (rce == NULL ||
+ rce->rce_ops == RMT_LGETFACL ||
+ rce->rce_ops == RMT_LSETFACL)) {
+ rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
+ if (rc < 0)
+ GOTO(out_xattr, rc);
+ } else {
+ oc = ll_mdscapa_get(inode);
+ rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
+ valid | (rce ? rce_ops2valid(rce->rce_ops) : 0),
+ name, NULL, 0, size, 0, &req);
+ capa_put(oc);
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body);
+ if (rc < 0)
+ GOTO(out_xattr, rc);
- /* only detect the xattr size */
- if (size == 0)
- GOTO(out, rc = body->eadatasize);
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(body);
- if (size < body->eadatasize) {
- CERROR("server bug: replied size %u > %u\n",
- body->eadatasize, (int)size);
- GOTO(out, rc = -ERANGE);
- }
+ /* only detect the xattr size */
+ if (size == 0)
+ GOTO(out, rc = body->eadatasize);
+
+ if (size < body->eadatasize) {
+ CERROR("server bug: replied size %u > %u\n",
+ body->eadatasize, (int)size);
+ GOTO(out, rc = -ERANGE);
+ }
- if (body->eadatasize == 0)
- GOTO(out, rc = -ENODATA);
+ if (body->eadatasize == 0)
+ GOTO(out, rc = -ENODATA);
- /* do not need swab xattr data */
- xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
- body->eadatasize);
- if (!xdata)
- GOTO(out, rc = -EFAULT);
+ /* do not need swab xattr data */
+ xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
+ body->eadatasize);
+ if (!xdata)
+ GOTO(out, rc = -EFAULT);
+
+ memcpy(buffer, xdata, body->eadatasize);
+ rc = body->eadatasize;
+ }
#ifdef CONFIG_FS_POSIX_ACL
- if (body->eadatasize >= 0 && rce && rce->rce_ops == RMT_LSETFACL) {
+ if (rce && rce->rce_ops == RMT_LSETFACL) {
ext_acl_xattr_header *acl;
- acl = lustre_posix_acl_xattr_2ext((posix_acl_xattr_header *)xdata,
- body->eadatasize);
+ acl = lustre_posix_acl_xattr_2ext(
+ (posix_acl_xattr_header *)buffer, rc);
if (IS_ERR(acl))
GOTO(out, rc = PTR_ERR(acl));
@@ -406,12 +418,12 @@ do_getxattr:
}
#endif
- if (body->eadatasize == 0) {
- rc = -ENODATA;
- } else {
- LASSERT(buffer);
- memcpy(buffer, xdata, body->eadatasize);
- rc = body->eadatasize;
+out_xattr:
+ if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
+ LCONSOLE_INFO(
+ "%s: disabling user_xattr feature because it is not supported on the server: rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), rc);
+ sbi->ll_flags &= ~LL_SBI_USER_XATTR;
}
out:
ptlrpc_req_finished(req);
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
new file mode 100644
index 0000000..3e3be1f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ *
+ * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com>
+ *
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <obd_support.h>
+#include <lustre_lite.h>
+#include <lustre_dlm.h>
+#include <lustre_ver.h>
+#include "llite_internal.h"
+
+/* If we ever have hundreds of extended attributes, we might want to consider
+ * using a hash or a tree structure instead of a list for faster lookups.
+ */
+struct ll_xattr_entry {
+ struct list_head xe_list; /* protected with
+ * lli_xattrs_list_rwsem */
+ char *xe_name; /* xattr name, \0-terminated */
+ char *xe_value; /* xattr value */
+ unsigned xe_namelen; /* strlen(xe_name) + 1 */
+ unsigned xe_vallen; /* xattr value length */
+};
+
+static struct kmem_cache *xattr_kmem;
+static struct lu_kmem_descr xattr_caches[] = {
+ {
+ .ckd_cache = &xattr_kmem,
+ .ckd_name = "xattr_kmem",
+ .ckd_size = sizeof(struct ll_xattr_entry)
+ },
+ {
+ .ckd_cache = NULL
+ }
+};
+
+int ll_xattr_init(void)
+{
+ return lu_kmem_init(xattr_caches);
+}
+
+void ll_xattr_fini(void)
+{
+ lu_kmem_fini(xattr_caches);
+}
+
+/**
+ * Initializes xattr cache for an inode.
+ *
+ * This initializes the xattr list and marks cache presence.
+ */
+static void ll_xattr_cache_init(struct ll_inode_info *lli)
+{
+
+
+ LASSERT(lli != NULL);
+
+ INIT_LIST_HEAD(&lli->lli_xattrs);
+ lli->lli_flags |= LLIF_XATTR_CACHE;
+}
+
+/**
+ * This looks for a specific extended attribute.
+ *
+ * Find the @xattr_name attribute in @cache and return it in @xattr;
+ * if @xattr_name is NULL, return the first cached @xattr.
+ *
+ * \retval 0 success
+ * \retval -ENODATA if not found
+ */
+static int ll_xattr_cache_find(struct list_head *cache,
+ const char *xattr_name,
+ struct ll_xattr_entry **xattr)
+{
+ struct ll_xattr_entry *entry;
+
+
+
+ list_for_each_entry(entry, cache, xe_list) {
+ /* xattr_name == NULL means look for any entry */
+ if (xattr_name == NULL ||
+ strcmp(xattr_name, entry->xe_name) == 0) {
+ *xattr = entry;
+ CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
+ entry->xe_name, entry->xe_vallen,
+ entry->xe_value);
+ return 0;
+ }
+ }
+
+ return -ENODATA;
+}
+
+/**
+ * This adds or updates an xattr.
+ *
+ * Add the @xattr_name attribute with value @xattr_val of length @xattr_val_len;
+ * if the attribute already exists, update its value.
+ *
+ * \retval 0 success
+ * \retval -ENOMEM if no memory could be allocated for the cached attr
+ */
+static int ll_xattr_cache_add(struct list_head *cache,
+ const char *xattr_name,
+ const char *xattr_val,
+ unsigned xattr_val_len)
+{
+ struct ll_xattr_entry *xattr;
+
+
+
+ if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
+ /* Found a cached EA, update it */
+
+ if (xattr_val_len != xattr->xe_vallen) {
+ char *val;
+ OBD_ALLOC(val, xattr_val_len);
+ if (val == NULL) {
+ CDEBUG(D_CACHE,
+ "failed to allocate %u bytes for xattr %s update\n",
+ xattr_val_len, xattr_name);
+ return -ENOMEM;
+ }
+ OBD_FREE(xattr->xe_value, xattr->xe_vallen);
+ xattr->xe_value = val;
+ xattr->xe_vallen = xattr_val_len;
+ }
+ memcpy(xattr->xe_value, xattr_val, xattr_val_len);
+
+ CDEBUG(D_CACHE, "update: [%s]=%.*s\n", xattr_name,
+ xattr_val_len, xattr_val);
+
+ return 0;
+ }
+
+ OBD_SLAB_ALLOC_PTR_GFP(xattr, xattr_kmem, __GFP_IO);
+ if (xattr == NULL) {
+ CDEBUG(D_CACHE, "failed to allocate xattr\n");
+ return -ENOMEM;
+ }
+
+ xattr->xe_namelen = strlen(xattr_name) + 1;
+
+ OBD_ALLOC(xattr->xe_name, xattr->xe_namelen);
+ if (!xattr->xe_name) {
+ CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
+ xattr->xe_namelen);
+ goto err_name;
+ }
+ OBD_ALLOC(xattr->xe_value, xattr_val_len);
+ if (!xattr->xe_value) {
+ CDEBUG(D_CACHE, "failed to alloc xattr value %d\n",
+ xattr_val_len);
+ goto err_value;
+ }
+
+ memcpy(xattr->xe_name, xattr_name, xattr->xe_namelen);
+ memcpy(xattr->xe_value, xattr_val, xattr_val_len);
+ xattr->xe_vallen = xattr_val_len;
+ list_add(&xattr->xe_list, cache);
+
+ CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name,
+ xattr_val_len, xattr_val);
+
+ return 0;
+err_value:
+ OBD_FREE(xattr->xe_name, xattr->xe_namelen);
+err_name:
+ OBD_SLAB_FREE_PTR(xattr, xattr_kmem);
+
+ return -ENOMEM;
+}
+
+/**
+ * This removes an extended attribute from cache.
+ *
+ * Remove @xattr_name attribute from @cache.
+ *
+ * \retval 0 success
+ * \retval -ENODATA if @xattr_name is not cached
+ */
+static int ll_xattr_cache_del(struct list_head *cache,
+ const char *xattr_name)
+{
+ struct ll_xattr_entry *xattr;
+
+
+
+ CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name);
+
+ if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
+ list_del(&xattr->xe_list);
+ OBD_FREE(xattr->xe_name, xattr->xe_namelen);
+ OBD_FREE(xattr->xe_value, xattr->xe_vallen);
+ OBD_SLAB_FREE_PTR(xattr, xattr_kmem);
+
+ return 0;
+ }
+
+ return -ENODATA;
+}
+
+/**
+ * This iterates cached extended attributes.
+ *
+ * Walk over the cached attributes in @cache and
+ * fill in @xld_buffer, or only calculate the buffer
+ * size if @xld_buffer is NULL.
+ *
+ * \retval >= 0 buffer list size
+ * \retval -ERANGE if the list does not fit into the @xld_size buffer
+ */
+static int ll_xattr_cache_list(struct list_head *cache,
+ char *xld_buffer,
+ int xld_size)
+{
+ struct ll_xattr_entry *xattr, *tmp;
+ int xld_tail = 0;
+
+
+
+ list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
+ CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
+ xld_buffer, xld_tail, xattr->xe_name);
+
+ if (xld_buffer) {
+ xld_size -= xattr->xe_namelen;
+ if (xld_size < 0)
+ break;
+ memcpy(&xld_buffer[xld_tail],
+ xattr->xe_name, xattr->xe_namelen);
+ }
+ xld_tail += xattr->xe_namelen;
+ }
+
+ if (xld_size < 0)
+ return -ERANGE;
+
+ return xld_tail;
+}
+
+/**
+ * Check if the xattr cache is initialized (filled).
+ *
+ * \retval 0 @cache is not initialized
+ * \retval 1 @cache is initialized
+ */
+int ll_xattr_cache_valid(struct ll_inode_info *lli)
+{
+ return !!(lli->lli_flags & LLIF_XATTR_CACHE);
+}
+
+/**
+ * This finalizes the xattr cache.
+ *
+ * Free all xattr memory. @lli is the inode info pointer.
+ *
+ * \retval 0 no error occurred
+ */
+static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
+{
+
+
+ if (!ll_xattr_cache_valid(lli))
+ return 0;
+
+ while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
+ ; /* empty loop */
+ lli->lli_flags &= ~LLIF_XATTR_CACHE;
+
+ return 0;
+}
+
+int ll_xattr_cache_destroy(struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ int rc;
+
+
+
+ down_write(&lli->lli_xattrs_list_rwsem);
+ rc = ll_xattr_cache_destroy_locked(lli);
+ up_write(&lli->lli_xattrs_list_rwsem);
+
+ return rc;
+}
+
+/**
+ * Match or enqueue a PR or PW LDLM lock.
+ *
+ * Find or request an LDLM lock with xattr data.
+ * Since LDLM does not provide an API for atomic match-or-enqueue,
+ * the function handles it with a separate enq lock.
+ * If successful, the function exits with the list lock held.
+ *
+ * \retval 0 no error occurred
+ * \retval -ENOMEM not enough memory
+ */
+static int ll_xattr_find_get_lock(struct inode *inode,
+ struct lookup_intent *oit,
+ struct ptlrpc_request **req)
+{
+ ldlm_mode_t mode;
+ struct lustre_handle lockh = { 0 };
+ struct md_op_data *op_data;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS,
+ .ei_mode = it_to_lock_mode(oit),
+ .ei_cb_bl = ll_md_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast };
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct obd_export *exp = sbi->ll_md_exp;
+ int rc;
+
+
+
+ mutex_lock(&lli->lli_xattrs_enq_lock);
+ /* Try matching first. */
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
+ oit->it_op == IT_SETXATTR ? LCK_PW :
+ (LCK_PR | LCK_PW));
+ if (mode != 0) {
+ /* fake oit in mdc_revalidate_lock() manner */
+ oit->d.lustre.it_lock_handle = lockh.cookie;
+ oit->d.lustre.it_lock_mode = mode;
+ goto out;
+ }
+
+ /* Enqueue if the lock isn't cached locally. */
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data)) {
+ mutex_unlock(&lli->lli_xattrs_enq_lock);
+ return PTR_ERR(op_data);
+ }
+
+ op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS |
+ OBD_MD_FLXATTRLOCKED;
+#ifdef CONFIG_FS_POSIX_ACL
+ /* If working with ACLs, we would like to cache local ACLs */
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ op_data->op_valid |= OBD_MD_FLRMTLGETFACL;
+#endif
+
+ rc = md_enqueue(exp, &einfo, oit, op_data, &lockh, NULL, 0, NULL, 0);
+ ll_finish_md_op_data(op_data);
+
+ if (rc < 0) {
+ CDEBUG(D_CACHE,
+ "md_intent_lock failed with %d for fid "DFID"\n",
+ rc, PFID(ll_inode2fid(inode)));
+ mutex_unlock(&lli->lli_xattrs_enq_lock);
+ return rc;
+ }
+
+ *req = (struct ptlrpc_request *)oit->d.lustre.it_data;
+out:
+ down_write(&lli->lli_xattrs_list_rwsem);
+ mutex_unlock(&lli->lli_xattrs_enq_lock);
+
+ return 0;
+}
+
+/**
+ * Refill the xattr cache.
+ *
+ * Fetch and cache the whole set of xattrs for @inode, acquiring
+ * a read or a write xattr lock depending on the operation in @oit.
+ * The intent is dropped on exit unless the operation is setxattr.
+ *
+ * \retval 0 no error occurred
+ * \retval -EPROTO network protocol error
+ * \retval -ENOMEM not enough memory for the cache
+ */
+static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ptlrpc_request *req = NULL;
+ const char *xdata, *xval, *xtail, *xvtail;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct mdt_body *body;
+ __u32 *xsizes;
+ int rc = 0, i;
+
+
+
+ rc = ll_xattr_find_get_lock(inode, oit, &req);
+ if (rc)
+ GOTO(out_no_unlock, rc);
+
+ /* Do we have the data at this point? */
+ if (ll_xattr_cache_valid(lli)) {
+ ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1);
+ GOTO(out_maybe_drop, rc = 0);
+ }
+
+ /* Matched but no cache? Cancelled on error by a parallel refill. */
+ if (unlikely(req == NULL)) {
+ CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
+ GOTO(out_maybe_drop, rc = -EIO);
+ }
+
+ if (oit->d.lustre.it_status < 0) {
+ CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
+ oit->d.lustre.it_status, PFID(ll_inode2fid(inode)));
+ GOTO(out_destroy, rc = oit->d.lustre.it_status);
+ }
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (body == NULL) {
+ CERROR("no MDT BODY in the refill xattr reply\n");
+ GOTO(out_destroy, rc = -EPROTO);
+ }
+ /* do not need swab xattr data */
+ xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
+ body->eadatasize);
+ xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
+ body->aclsize);
+ xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
+ body->max_mdsize * sizeof(__u32));
+ if (xdata == NULL || xval == NULL || xsizes == NULL) {
+ CERROR("wrong setxattr reply\n");
+ GOTO(out_destroy, rc = -EPROTO);
+ }
+
+ xtail = xdata + body->eadatasize;
+ xvtail = xval + body->aclsize;
+
+ CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);
+
+ ll_xattr_cache_init(lli);
+
+ for (i = 0; i < body->max_mdsize; i++) {
+ CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
+ /* Perform consistency checks: attr names and vals in pill */
+ if (memchr(xdata, 0, xtail - xdata) == NULL) {
+ CERROR("xattr protocol violation (names are broken)\n");
+ rc = -EPROTO;
+ } else if (xval + *xsizes > xvtail) {
+ CERROR("xattr protocol violation (vals are broken)\n");
+ rc = -EPROTO;
+ } else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
+ rc = -ENOMEM;
+ } else {
+ rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
+ *xsizes);
+ }
+ if (rc < 0) {
+ ll_xattr_cache_destroy_locked(lli);
+ GOTO(out_destroy, rc);
+ }
+ xdata += strlen(xdata) + 1;
+ xval += *xsizes;
+ xsizes++;
+ }
+
+ if (xdata != xtail || xval != xvtail)
+ CERROR("a hole in xattr data\n");
+
+ ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL);
+
+ GOTO(out_maybe_drop, rc);
+out_maybe_drop:
+ /* drop lock on error or getxattr */
+ if (rc != 0 || oit->it_op != IT_SETXATTR)
+ ll_intent_drop_lock(oit);
+
+ if (rc != 0)
+ up_write(&lli->lli_xattrs_list_rwsem);
+out_no_unlock:
+ ptlrpc_req_finished(req);
+
+ return rc;
+
+out_destroy:
+ up_write(&lli->lli_xattrs_list_rwsem);
+
+ ldlm_lock_decref_and_cancel((struct lustre_handle *)
+ &oit->d.lustre.it_lock_handle,
+ oit->d.lustre.it_lock_mode);
+
+ goto out_no_unlock;
+}
+
+/**
+ * Get an xattr value or list xattrs using the write-through cache.
+ *
+ * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or
+ * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode.
+ * The resulting value/list is stored in @buffer if the former
+ * is not larger than @size.
+ *
+ * \retval 0 no error occurred
+ * \retval -EPROTO network protocol error
+ * \retval -ENOMEM not enough memory for the cache
+ * \retval -ERANGE the buffer is not large enough
+ * \retval -ENODATA no such attr or the list is empty
+ */
+int ll_xattr_cache_get(struct inode *inode,
+ const char *name,
+ char *buffer,
+ size_t size,
+ __u64 valid)
+{
+ struct lookup_intent oit = { .it_op = IT_GETXATTR };
+ struct ll_inode_info *lli = ll_i2info(inode);
+ int rc = 0;
+
+
+
+ LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS));
+
+ down_read(&lli->lli_xattrs_list_rwsem);
+ if (!ll_xattr_cache_valid(lli)) {
+ up_read(&lli->lli_xattrs_list_rwsem);
+ rc = ll_xattr_cache_refill(inode, &oit);
+ if (rc)
+ return rc;
+ downgrade_write(&lli->lli_xattrs_list_rwsem);
+ } else {
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1);
+ }
+
+ if (valid & OBD_MD_FLXATTR) {
+ struct ll_xattr_entry *xattr;
+
+ rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr);
+ if (rc == 0) {
+ rc = xattr->xe_vallen;
+ /* a zero size means the caller only asked for the value size, returned in rc */
+ if (size != 0) {
+ if (size >= xattr->xe_vallen)
+ memcpy(buffer, xattr->xe_value,
+ xattr->xe_vallen);
+ else
+ rc = -ERANGE;
+ }
+ }
+ } else if (valid & OBD_MD_FLXATTRLS) {
+ rc = ll_xattr_cache_list(&lli->lli_xattrs,
+ size ? buffer : NULL, size);
+ }
+
+ GOTO(out, rc);
+out:
+ up_read(&lli->lli_xattrs_list_rwsem);
+
+ return rc;
+}
+
+
+/**
+ * Set/update an xattr value or remove xattr using the write-through cache.
+ *
+ * Set/update the xattr value of @name to @newval (if @valid has
+ * OBD_MD_FLXATTR set), or remove the xattr @name from @inode
+ * (if @valid has OBD_MD_FLXATTRRM set).
+ * @flags is either XATTR_CREATE or XATTR_REPLACE, as defined by setxattr(2).
+ *
+ * \retval 0 no error occurred
+ * \retval -EPROTO network protocol error
+ * \retval -ENOMEM not enough memory for the cache
+ * \retval -ERANGE the buffer is not large enough
+ * \retval -ENODATA no such attr (in the removal case)
+ */
+int ll_xattr_cache_update(struct inode *inode,
+ const char *name,
+ const char *newval,
+ size_t size,
+ __u64 valid,
+ int flags)
+{
+ struct lookup_intent oit = { .it_op = IT_SETXATTR };
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ptlrpc_request *req = NULL;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_capa *oc;
+ int rc;
+
+
+
+ LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRRM));
+
+ rc = ll_xattr_cache_refill(inode, &oit);
+ if (rc)
+ return rc;
+
+ oc = ll_mdscapa_get(inode);
+ rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
+ valid | OBD_MD_FLXATTRLOCKED, name, newval,
+ size, 0, flags, ll_i2suppgid(inode), &req);
+ capa_put(oc);
+
+ if (rc) {
+ ll_intent_drop_lock(&oit);
+ GOTO(out, rc);
+ }
+
+ if (valid & OBD_MD_FLXATTR)
+ rc = ll_xattr_cache_add(&lli->lli_xattrs, name, newval, size);
+ else if (valid & OBD_MD_FLXATTRRM)
+ rc = ll_xattr_cache_del(&lli->lli_xattrs, name);
+
+ ll_intent_drop_lock(&oit);
+ GOTO(out, rc);
+out:
+ up_write(&lli->lli_xattrs_list_rwsem);
+ ptlrpc_req_finished(req);
+
+ return rc;
+}
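To show how the new interfaces are meant to be consumed, here is a minimal, hypothetical in-kernel caller sketch (not part of the patch) that follows the documented ll_xattr_cache_get() contract: probe with a zero size to learn the value length, then fetch into an allocated buffer. The GFP flag and helper name are assumptions for illustration.

/* Illustrative only, not part of the patch. */
#include <linux/slab.h>

static int example_get_cached_xattr(struct inode *inode, const char *name)
{
	char *buf;
	int len, rc;

	/* size == 0: only the required value size is returned */
	len = ll_xattr_cache_get(inode, name, NULL, 0, OBD_MD_FLXATTR);
	if (len < 0)
		return len;	/* e.g. -ENODATA if the attribute is absent */

	buf = kzalloc(len, GFP_NOFS);	/* GFP_NOFS is an assumption here */
	if (buf == NULL)
		return -ENOMEM;

	rc = ll_xattr_cache_get(inode, name, buf, len, OBD_MD_FLXATTR);
	/* rc is the value length on success, -ERANGE if the buffer is short */
	kfree(buf);
	return rc < 0 ? rc : 0;
}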
diff --git a/drivers/staging/lustre/lustre/lmv/Makefile b/drivers/staging/lustre/lustre/lmv/Makefile
index 8cc81ad..9162ef7 100644
--- a/drivers/staging/lustre/lustre/lmv/Makefile
+++ b/drivers/staging/lustre/lustre/lmv/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_LUSTRE_FS) += lmv.o
-lmv-y := lmv_obd.o lmv_intent.o lmv_fld.o lproc_lmv.o
-
+lmv-y := lmv_obd.o lmv_intent.o lmv_fld.o
+lmv-$(CONFIG_PROC_FS) += lproc_lmv.o
ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
index 0b2d38d..fd6b5ec 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
@@ -37,7 +37,6 @@
#define DEBUG_SUBSYSTEM S_LMV
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pagemap.h>
#include <asm/div64.h>
#include <linux/seq_file.h>
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 511b3b4..56dedce 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -37,7 +37,6 @@
#define DEBUG_SUBSYSTEM S_LMV
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pagemap.h>
#include <asm/div64.h>
#include <linux/seq_file.h>
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index c286604..1bddd8f 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -628,7 +628,7 @@ static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
rc = obd_fid_fini(tgt->ltd_exp->exp_obd);
if (rc)
- CERROR("Can't finanize fids factory\n");
+ CERROR("Can't finalize fids factory\n");
CDEBUG(D_INFO, "Disconnected from %s(%s) successfully\n",
tgt->ltd_exp->exp_obd->obd_name,
@@ -712,7 +712,7 @@ repeat_fid2path:
GOTO(out_fid2path, rc);
/* If remote_gf != NULL, it means just building the
- * path on the remote MDT, copy this path segement to gf */
+ * path on the remote MDT, copy this path segment to gf */
if (remote_gf != NULL) {
struct getinfo_fid2path *ori_gf;
char *ptr;
@@ -1212,7 +1212,7 @@ static int lmv_placement_policy(struct obd_device *obd,
/**
* If stripe_offset is provided during setdirstripe
- * (setdirstripe -i xx), xx MDS will be choosen.
+ * (setdirstripe -i xx), xx MDS will be chosen.
*/
if (op_data->op_cli_flags & CLI_SET_MEA) {
struct lmv_user_md *lum;
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
index edb5a3a..b355d01 100644
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -41,10 +41,6 @@
#include <lprocfs_status.h>
#include <obd_class.h>
-#ifndef LPROCFS
-static struct lprocfs_vars lprocfs_module_vars[] = { {0} };
-static struct lprocfs_vars lprocfs_obd_vars[] = { {0} };
-#else
static int lmv_numobd_seq_show(struct seq_file *m, void *v)
{
struct obd_device *dev = (struct obd_device *)m->private;
@@ -226,7 +222,6 @@ struct file_operations lmv_proc_target_fops = {
.release = seq_release,
};
-#endif /* LPROCFS */
void lprocfs_lmv_init_vars(struct lprocfs_static_vars *lvars)
{
lvars->module_vars = lprocfs_lmv_module_vars;
diff --git a/drivers/staging/lustre/lustre/lov/Makefile b/drivers/staging/lustre/lustre/lov/Makefile
index 67eaec2..9a5f26d 100644
--- a/drivers/staging/lustre/lustre/lov/Makefile
+++ b/drivers/staging/lustre/lustre/lov/Makefile
@@ -1,8 +1,9 @@
obj-$(CONFIG_LUSTRE_FS) += lov.o
-lov-y := lov_log.o lov_obd.o lov_pack.o lproc_lov.o lov_offset.o lov_merge.o \
+lov-y := lov_log.o lov_obd.o lov_pack.o lov_offset.o lov_merge.o \
lov_request.o lov_ea.o lov_dev.o lov_object.o lov_page.o \
lov_lock.o lov_io.o lovsub_dev.o lovsub_object.o lovsub_page.o \
lovsub_lock.o lovsub_io.o lov_pool.o
+lov-$(CONFIG_PROC_FS) += lproc_lov.o
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 4276124..3965d5e 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -168,6 +168,22 @@ enum lov_layout_type {
LLT_NR
};
+static inline char *llt2str(enum lov_layout_type llt)
+{
+ switch (llt) {
+ case LLT_EMPTY:
+ return "EMPTY";
+ case LLT_RAID0:
+ return "RAID0";
+ case LLT_RELEASED:
+ return "RELEASED";
+ case LLT_NR:
+ LBUG();
+ }
+ LBUG();
+ return "";
+}
+
/**
* lov-specific file state.
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index 796da89..2b22a03 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -283,8 +283,8 @@ void lsm_free_plain(struct lov_stripe_md *lsm);
int lovea_destroy_object(struct lov_obd *lov, struct lov_stripe_md *lsm,
struct obdo *oa, void *data);
/* lproc_lov.c */
-extern struct file_operations lov_proc_target_fops;
#ifdef LPROCFS
+extern const struct file_operations lov_proc_target_fops;
void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars);
#else
static inline void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars)
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 2792fa5..5a6ab70 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -947,14 +947,23 @@ int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
LASSERTF(0, "invalid type %d\n", io->ci_type);
case CIT_MISC:
case CIT_FSYNC:
- result = +1;
+ result = 1;
break;
case CIT_SETATTR:
+ /* a truncate to 0 is managed by the MDT:
+ * - in open, for open with O_TRUNC
+ * - in setattr, for truncate
+ */
+ /* here the truncate is for a size > 0, so it triggers a restore */
+ if (cl_io_is_trunc(io))
+ io->ci_restore_needed = 1;
+ result = -ENODATA;
+ break;
case CIT_READ:
case CIT_WRITE:
case CIT_FAULT:
- /* TODO: need to restore the file. */
- result = -EBADF;
+ io->ci_restore_needed = 1;
+ result = -ENODATA;
break;
}
if (result == 0) {
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
index 26bc719..ed2726e 100644
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -71,7 +71,7 @@ static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
/*
* FIXME: We tend to use the subio's env & io to call the sublock
* lock operations because osc lock sometimes stores some control
- * variables in thread's IO infomation(Now only lockless information).
+ * variables in thread's IO information(Now only lockless information).
* However, if the lock's host(object) is different from the object
* for current IO, we have no way to get the subenv and subio because
* they are not initialized at all. As a temp fix, in this case,
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index d204fed..9defa55 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -156,7 +156,7 @@ int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
kms = lov_size_to_stripe(lsm, size, stripe);
CDEBUG(D_INODE,
"stripe %d KMS %sing "LPU64"->"LPU64"\n",
- stripe, kms > loi->loi_kms ? "increas":"shrink",
+ stripe, kms > loi->loi_kms ? "increase":"shrink",
loi->loi_kms, kms);
loi_kms_set(loi, loi->loi_lvb.lvb_size = kms);
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 4783450..50a77c5 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -1174,7 +1174,7 @@ static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
struct lov_request_set *lovset = (struct lov_request_set *)data;
int err;
- /* don't do attribute merge if this aysnc op failed */
+ /* don't do attribute merge if this async op failed */
if (rc)
atomic_set(&lovset->set_completes, 0);
err = lov_fini_getattr_set(lovset);
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index cf2fa8a..df8b5b5 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -393,13 +393,13 @@ static int lov_print_empty(const struct lu_env *env, void *cookie,
static int lov_print_raid0(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o)
{
- struct lov_object *lov = lu2lov(o);
- struct lov_layout_raid0 *r0 = lov_r0(lov);
- struct lov_stripe_md *lsm = lov->lo_lsm;
- int i;
+ struct lov_object *lov = lu2lov(o);
+ struct lov_layout_raid0 *r0 = lov_r0(lov);
+ struct lov_stripe_md *lsm = lov->lo_lsm;
+ int i;
- (*p)(env, cookie, "stripes: %d, %svalid, lsm{%p 0x%08X %d %u %u}: \n",
- r0->lo_nr, lov->lo_layout_invalid ? "in" : "", lsm,
+ (*p)(env, cookie, "stripes: %d, %s, lsm{%p 0x%08X %d %u %u}:\n",
+ r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm,
lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
lsm->lsm_stripe_count, lsm->lsm_layout_gen);
for (i = 0; i < r0->lo_nr; ++i) {
@@ -408,8 +408,9 @@ static int lov_print_raid0(const struct lu_env *env, void *cookie,
if (r0->lo_sub[i] != NULL) {
sub = lovsub2lu(r0->lo_sub[i]);
lu_object_print(env, cookie, p, sub);
- } else
+ } else {
(*p)(env, cookie, "sub %d absent\n", i);
+ }
}
return 0;
}
@@ -417,7 +418,14 @@ static int lov_print_raid0(const struct lu_env *env, void *cookie,
static int lov_print_released(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o)
{
- (*p)(env, cookie, "released\n");
+ struct lov_object *lov = lu2lov(o);
+ struct lov_stripe_md *lsm = lov->lo_lsm;
+
+ (*p)(env, cookie,
+ "released: %s, lsm{%p 0x%08X %d %u %u}:\n",
+ lov->lo_layout_invalid ? "invalid" : "valid", lsm,
+ lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
+ lsm->lsm_stripe_count, lsm->lsm_layout_gen);
return 0;
}
@@ -662,6 +670,10 @@ static int lov_layout_change(const struct lu_env *unused,
return PTR_ERR(env);
}
+ CDEBUG(D_INODE, DFID" from %s to %s\n",
+ PFID(lu_object_fid(lov2lu(lov))),
+ llt2str(lov->lo_type), llt2str(llt));
+
old_ops = &lov_dispatch[lov->lo_type];
new_ops = &lov_dispatch[llt];
@@ -750,8 +762,9 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
if (conf->u.coc_md != NULL)
lsm = conf->u.coc_md->lsm;
if ((lsm == NULL && lov->lo_lsm == NULL) ||
- (lsm != NULL && lov->lo_lsm != NULL &&
- lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen)) {
+ ((lsm != NULL && lov->lo_lsm != NULL) &&
+ (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
+ (lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) {
/* same version of layout */
lov->lo_layout_invalid = false;
GOTO(out, result = 0);
@@ -767,6 +780,8 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
out:
lov_conf_unlock(lov);
+ CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
+ PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
return result;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index ec6f6e05..27ed27e 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -105,24 +105,22 @@ void lov_dump_lmm(int level, void *lmm)
{
int magic;
- magic = ((struct lov_mds_md_v1 *)(lmm))->lmm_magic;
+ magic = le32_to_cpu(((struct lov_mds_md *)lmm)->lmm_magic);
switch (magic) {
case LOV_MAGIC_V1:
- return lov_dump_lmm_v1(level, (struct lov_mds_md_v1 *)(lmm));
+ lov_dump_lmm_v1(level, (struct lov_mds_md_v1 *)lmm);
+ break;
case LOV_MAGIC_V3:
- return lov_dump_lmm_v3(level, (struct lov_mds_md_v3 *)(lmm));
+ lov_dump_lmm_v3(level, (struct lov_mds_md_v3 *)lmm);
+ break;
default:
- CERROR("Cannot recognize lmm_magic %x", magic);
+ CDEBUG(level, "unrecognized lmm_magic %x, assuming %x\n",
+ magic, LOV_MAGIC_V1);
+ lov_dump_lmm_common(level, lmm);
+ break;
}
- return;
}
-#define LMM_ASSERT(test) \
-do { \
- if (!(test)) lov_dump_lmm(D_ERROR, lmm); \
- LASSERT(test); /* so we know what assertion failed */ \
-} while (0)
-
/* Pack LOV object metadata for disk storage. It is packed in LE byte
* order and is opaque to the networking layer.
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index a1701df..3bda0c1 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -453,7 +453,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
INIT_HLIST_NODE(&new_pool->pool_hash);
#ifdef LPROCFS
- /* we need this assert seq_file is not implementated for liblustre */
+ /* we need this assert as seq_file is not implemented for liblustre */
/* get ref for /proc file */
lov_pool_getref(new_pool);
new_pool->pool_proc_entry = lprocfs_add_simple(lov->lov_pool_proc_entry,
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index bf324ae..ca81cac 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -835,7 +835,7 @@ int lov_fini_getattr_set(struct lov_request_set *set)
return rc;
}
-/* The callback for osc_getattr_async that finilizes a request info when a
+/* The callback for osc_getattr_async that finalizes a request info when a
* response is received. */
static int cb_getattr_update(void *cookie, int rc)
{
@@ -1017,7 +1017,7 @@ int lov_update_setattr_set(struct lov_request_set *set,
return rc;
}
-/* The callback for osc_setattr_async that finilizes a request info when a
+/* The callback for osc_setattr_async that finalizes a request info when a
* response is received. */
static int cb_setattr_update(void *cookie, int rc)
{
@@ -1140,7 +1140,7 @@ int lov_update_punch_set(struct lov_request_set *set,
return rc;
}
-/* The callback for osc_punch that finilizes a request info when a response
+/* The callback for osc_punch that finalizes a request info when a response
* is received. */
static int cb_update_punch(void *cookie, int rc)
{
@@ -1236,8 +1236,8 @@ int lov_fini_sync_set(struct lov_request_set *set)
return rc;
}
-/* The callback for osc_sync that finilizes a request info when a
- * response is recieved. */
+/* The callback for osc_sync that finalizes a request info when a
+ * response is received. */
static int cb_sync_update(void *cookie, int rc)
{
struct obd_info *oinfo = cookie;
@@ -1407,7 +1407,7 @@ void lov_update_statfs(struct obd_statfs *osfs, struct obd_statfs *lov_sfs,
}
}
-/* The callback for osc_statfs_async that finilizes a request info when a
+/* The callback for osc_statfs_async that finalizes a request info when a
* response is received. */
static int cb_statfs_update(void *cookie, int rc)
{
@@ -1485,7 +1485,7 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
continue;
}
- /* skip targets that have been explicitely disabled by the
+ /* skip targets that have been explicitly disabled by the
* administrator */
if (!lov->lov_tgts[i]->ltd_exp) {
CDEBUG(D_HA, "lov idx %d administratively disabled\n", i);
diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c
index 15744e1..bd7da56 100644
--- a/drivers/staging/lustre/lustre/lov/lproc_lov.c
+++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c
@@ -41,7 +41,6 @@
#include <linux/seq_file.h>
#include "lov_internal.h"
-#ifdef LPROCFS
static int lov_stripesize_seq_show(struct seq_file *m, void *v)
{
struct obd_device *dev = (struct obd_device *)m->private;
@@ -260,29 +259,29 @@ LPROC_SEQ_FOPS_RO_TYPE(lov, kbytesfree);
LPROC_SEQ_FOPS_RO_TYPE(lov, kbytesavail);
struct lprocfs_vars lprocfs_lov_obd_vars[] = {
- { "uuid", &lov_uuid_fops, 0, 0 },
- { "stripesize", &lov_stripesize_fops, 0 },
- { "stripeoffset", &lov_stripeoffset_fops, 0 },
- { "stripecount", &lov_stripecount_fops, 0 },
- { "stripetype", &lov_stripetype_fops, 0 },
- { "numobd", &lov_numobd_fops, 0, 0 },
- { "activeobd", &lov_activeobd_fops, 0, 0 },
- { "filestotal", &lov_filestotal_fops, 0, 0 },
- { "filesfree", &lov_filesfree_fops, 0, 0 },
- /*{ "filegroups", lprocfs_rd_filegroups, 0, 0 },*/
- { "blocksize", &lov_blksize_fops, 0, 0 },
- { "kbytestotal", &lov_kbytestotal_fops, 0, 0 },
- { "kbytesfree", &lov_kbytesfree_fops, 0, 0 },
- { "kbytesavail", &lov_kbytesavail_fops, 0, 0 },
- { "desc_uuid", &lov_desc_uuid_fops, 0, 0 },
- { 0 }
+ { "uuid", &lov_uuid_fops, NULL, 0 },
+ { "stripesize", &lov_stripesize_fops, NULL },
+ { "stripeoffset", &lov_stripeoffset_fops, NULL },
+ { "stripecount", &lov_stripecount_fops, NULL },
+ { "stripetype", &lov_stripetype_fops, NULL },
+ { "numobd", &lov_numobd_fops, NULL, 0 },
+ { "activeobd", &lov_activeobd_fops, NULL, 0 },
+ { "filestotal", &lov_filestotal_fops, NULL, 0 },
+ { "filesfree", &lov_filesfree_fops, NULL, 0 },
+ /*{ "filegroups", lprocfs_rd_filegroups, NULL, 0 },*/
+ { "blocksize", &lov_blksize_fops, NULL, 0 },
+ { "kbytestotal", &lov_kbytestotal_fops, NULL, 0 },
+ { "kbytesfree", &lov_kbytesfree_fops, NULL, 0 },
+ { "kbytesavail", &lov_kbytesavail_fops, NULL, 0 },
+ { "desc_uuid", &lov_desc_uuid_fops, NULL, 0 },
+ { NULL }
};
LPROC_SEQ_FOPS_RO_TYPE(lov, numrefs);
static struct lprocfs_vars lprocfs_lov_module_vars[] = {
- { "num_refs", &lov_numrefs_fops, 0, 0 },
- { 0 }
+ { "num_refs", &lov_numrefs_fops, NULL, 0 },
+ { NULL }
};
void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars)
@@ -291,11 +290,10 @@ void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars)
lvars->obd_vars = lprocfs_lov_obd_vars;
}
-struct file_operations lov_proc_target_fops = {
+const struct file_operations lov_proc_target_fops = {
.owner = THIS_MODULE,
.open = lov_target_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = lprocfs_seq_release,
};
-#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/lvfs/Makefile b/drivers/staging/lustre/lustre/lvfs/Makefile
index f50b1c5..e0367c3 100644
--- a/drivers/staging/lustre/lustre/lvfs/Makefile
+++ b/drivers/staging/lustre/lustre/lvfs/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_LUSTRE_FS) += lvfs.o
-lvfs-y := lvfs_linux.o fsfilt.o lvfs_lib.o
+lvfs-y := lvfs_linux.o fsfilt.o
+lvfs-$(CONFIG_PROC_FS) += lvfs_lib.o
ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c b/drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c
deleted file mode 100644
index ee75994..0000000
--- a/drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c
+++ /dev/null
@@ -1,760 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lvfs/fsfilt_ext3.c
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FILTER
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
-#include <ldiskfs/ldiskfs_config.h>
-#include <ext4/ext4.h>
-#include <ext4/ext4_jbd2.h>
-#include <linux/bitops.h>
-#include <linux/quota.h>
-
-#include <linux/libcfs/libcfs.h>
-#include <lustre_fsfilt.h>
-#include <obd.h>
-#include <linux/lustre_compat25.h>
-#include <linux/lprocfs_status.h>
-
-#include <ext4/ext4_extents.h>
-
-#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
-#define ext3_ext_pblock(ex) ext_pblock((ex))
-#endif
-
-/* for kernels 2.6.18 and later */
-#define FSFILT_SINGLEDATA_TRANS_BLOCKS(sb) EXT3_SINGLEDATA_TRANS_BLOCKS(sb)
-
-#define fsfilt_ext3_ext_insert_extent(handle, inode, path, newext, flag) \
- ext3_ext_insert_extent(handle, inode, path, newext, flag)
-
-#define ext3_mb_discard_inode_preallocations(inode) \
- ext3_discard_preallocations(inode)
-
-#define fsfilt_log_start_commit(journal, tid) jbd2_log_start_commit(journal, tid)
-#define fsfilt_log_wait_commit(journal, tid) jbd2_log_wait_commit(journal, tid)
-
-static struct kmem_cache *fcb_cache;
-
-struct fsfilt_cb_data {
- struct ext4_journal_cb_entry cb_jcb; /* private data - MUST BE FIRST */
- fsfilt_cb_t cb_func; /* MDS/OBD completion function */
- struct obd_device *cb_obd; /* MDS/OBD completion device */
- __u64 cb_last_rcvd; /* MDS/OST last committed operation */
- void *cb_data; /* MDS/OST completion function data */
-};
-
-static char *fsfilt_ext3_get_label(struct super_block *sb)
-{
- return EXT3_SB(sb)->s_es->s_volume_name;
-}
-
-/* kernel has ext4_blocks_for_truncate since linux-3.1.1 */
-# include <ext4/truncate.h>
-
-/*
- * We don't currently need any additional blocks for rmdir and
- * unlink transactions because we are storing the OST oa_id inside
- * the inode (which we will be changing anyways as part of this
- * transaction).
- */
-static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
- int logs)
-{
- /* For updates to the last received file */
- int nblocks = FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb);
- journal_t *journal;
- void *handle;
-
- if (current->journal_info) {
- CDEBUG(D_INODE, "increasing refcount on %p\n",
- current->journal_info);
- goto journal_start;
- }
-
- switch(op) {
- case FSFILT_OP_UNLINK:
- /* delete one file + create/update logs for each stripe */
- nblocks += EXT3_DELETE_TRANS_BLOCKS(inode->i_sb);
- nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
- FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb)) * logs;
- break;
- case FSFILT_OP_CANCEL_UNLINK:
- LASSERT(logs == 1);
-
- /* blocks for log header bitmap update OR
- * blocks for catalog header bitmap update + unlink of logs +
- * blocks for delete the inode (include blocks truncating). */
- nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
- EXT3_DELETE_TRANS_BLOCKS(inode->i_sb) +
- ext4_blocks_for_truncate(inode) + 3;
- break;
- default: CERROR("unknown transaction start op %d\n", op);
- LBUG();
- }
-
- LASSERT(current->journal_info == desc_private);
- journal = EXT3_SB(inode->i_sb)->s_journal;
- if (nblocks > journal->j_max_transaction_buffers) {
- CWARN("too many credits %d for op %ux%u using %d instead\n",
- nblocks, op, logs, journal->j_max_transaction_buffers);
- nblocks = journal->j_max_transaction_buffers;
- }
-
- journal_start:
- LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
- handle = ext3_journal_start(inode, nblocks);
-
- if (!IS_ERR(handle))
- LASSERT(current->journal_info == handle);
- else
- CERROR("error starting handle for op %u (%u credits): rc %ld\n",
- op, nblocks, PTR_ERR(handle));
- return handle;
-}
-
-static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
-{
- int rc;
- handle_t *handle = h;
-
- LASSERT(current->journal_info == handle);
- if (force_sync)
- handle->h_sync = 1; /* recovery likes this */
-
- rc = ext3_journal_stop(handle);
-
- return rc;
-}
-
-#ifndef EXT3_EXTENTS_FL
-#define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */
-#endif
-
-#ifndef EXT_ASSERT
-#define EXT_ASSERT(cond) BUG_ON(!(cond))
-#endif
-
-#define EXT_GENERATION(inode) (EXT4_I(inode)->i_ext_generation)
-#define ext3_ext_base inode
-#define ext3_ext_base2inode(inode) (inode)
-#define EXT_DEPTH(inode) ext_depth(inode)
-#define fsfilt_ext3_ext_walk_space(inode, block, num, cb, cbdata) \
- ext3_ext_walk_space(inode, block, num, cb, cbdata);
-
-struct bpointers {
- unsigned long *blocks;
- unsigned long start;
- int num;
- int init_num;
- int create;
-};
-
-static long ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
- unsigned long block, int *aflags)
-{
- struct ext3_inode_info *ei = EXT3_I(inode);
- unsigned long bg_start;
- unsigned long colour;
- int depth;
-
- if (path) {
- struct ext3_extent *ex;
- depth = path->p_depth;
-
- /* try to predict block placement */
- if ((ex = path[depth].p_ext))
- return ext4_ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));
-
- /* it looks index is empty
- * try to find starting from index itself */
- if (path[depth].p_bh)
- return path[depth].p_bh->b_blocknr;
- }
-
- /* OK. use inode's group */
- bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
- le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
- colour = (current->pid % 16) *
- (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
- return bg_start + colour + block;
-}
-
-#define ll_unmap_underlying_metadata(sb, blocknr) \
- unmap_underlying_metadata((sb)->s_bdev, blocknr)
-
-#ifndef EXT3_MB_HINT_GROUP_ALLOC
-static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
- struct ext3_ext_path *path, unsigned long block,
- unsigned long *count, int *err)
-{
- unsigned long pblock, goal;
- int aflags = 0;
- struct inode *inode = ext3_ext_base2inode(base);
-
- goal = ext3_ext_find_goal(inode, path, block, &aflags);
- aflags |= 2; /* block have been already reserved */
- pblock = ext3_mb_new_blocks(handle, inode, goal, count, aflags, err);
- return pblock;
-
-}
-#else
-static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
- struct ext3_ext_path *path, unsigned long block,
- unsigned long *count, int *err)
-{
- struct inode *inode = ext3_ext_base2inode(base);
- struct ext3_allocation_request ar;
- unsigned long pblock;
- int aflags;
-
- /* find neighbour allocated blocks */
- ar.lleft = block;
- *err = ext3_ext_search_left(base, path, &ar.lleft, &ar.pleft);
- if (*err)
- return 0;
- ar.lright = block;
- *err = ext3_ext_search_right(base, path, &ar.lright, &ar.pright);
- if (*err)
- return 0;
-
- /* allocate new block */
- ar.goal = ext3_ext_find_goal(inode, path, block, &aflags);
- ar.inode = inode;
- ar.logical = block;
- ar.len = *count;
- ar.flags = EXT3_MB_HINT_DATA;
- pblock = ext3_mb_new_blocks(handle, &ar, err);
- *count = ar.len;
- return pblock;
-}
-#endif
-
-static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
- struct ext3_ext_path *path,
- struct ext3_ext_cache *cex,
-#ifdef HAVE_EXT_PREPARE_CB_EXTENT
- struct ext3_extent *ex,
-#endif
- void *cbdata)
-{
- struct bpointers *bp = cbdata;
- struct inode *inode = ext3_ext_base2inode(base);
- struct ext3_extent nex;
- unsigned long pblock;
- unsigned long tgen;
- int err, i;
- unsigned long count;
- handle_t *handle;
-
-#ifdef EXT3_EXT_CACHE_EXTENT
- if (cex->ec_type == EXT3_EXT_CACHE_EXTENT)
-#else
- if ((cex->ec_len != 0) && (cex->ec_start != 0))
-#endif
- {
- err = EXT_CONTINUE;
- goto map;
- }
-
- if (bp->create == 0) {
- i = 0;
- if (cex->ec_block < bp->start)
- i = bp->start - cex->ec_block;
- if (i >= cex->ec_len)
- CERROR("nothing to do?! i = %d, e_num = %u\n",
- i, cex->ec_len);
- for (; i < cex->ec_len && bp->num; i++) {
- *(bp->blocks) = 0;
- bp->blocks++;
- bp->num--;
- bp->start++;
- }
-
- return EXT_CONTINUE;
- }
-
- tgen = EXT_GENERATION(base);
- count = ext3_ext_calc_credits_for_insert(base, path);
-
- handle = ext3_journal_start(inode, count+EXT3_ALLOC_NEEDED+1);
- if (IS_ERR(handle)) {
- return PTR_ERR(handle);
- }
-
- if (tgen != EXT_GENERATION(base)) {
- /* the tree has changed. so path can be invalid at moment */
- ext3_journal_stop(handle);
- return EXT_REPEAT;
- }
-
- /* In 2.6.32 kernel, ext4_ext_walk_space()'s callback func is not
- * protected by i_data_sem as whole. so we patch it to store
- * generation to path and now verify the tree hasn't changed */
- down_write((&EXT4_I(inode)->i_data_sem));
-
- /* validate extent, make sure the extent tree does not changed */
- if (EXT_GENERATION(base) != path[0].p_generation) {
- /* cex is invalid, try again */
- up_write(&EXT4_I(inode)->i_data_sem);
- ext3_journal_stop(handle);
- return EXT_REPEAT;
- }
-
- count = cex->ec_len;
- pblock = new_blocks(handle, base, path, cex->ec_block, &count, &err);
- if (!pblock)
- goto out;
- EXT_ASSERT(count <= cex->ec_len);
-
- /* insert new extent */
- nex.ee_block = cpu_to_le32(cex->ec_block);
- ext3_ext_store_pblock(&nex, pblock);
- nex.ee_len = cpu_to_le16(count);
- err = fsfilt_ext3_ext_insert_extent(handle, base, path, &nex, 0);
- if (err) {
- /* free data blocks we just allocated */
- /* not a good idea to call discard here directly,
- * but otherwise we'd need to call it every free() */
-#ifdef EXT3_MB_HINT_GROUP_ALLOC
- ext3_mb_discard_inode_preallocations(inode);
-#endif
-#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
- ext3_free_blocks(handle, inode, NULL, ext4_ext_pblock(&nex),
- cpu_to_le16(nex.ee_len), 0);
-#else
- ext3_free_blocks(handle, inode, ext4_ext_pblock(&nex),
- cpu_to_le16(nex.ee_len), 0);
-#endif
- goto out;
- }
-
- /*
- * Putting len of the actual extent we just inserted,
- * we are asking ext3_ext_walk_space() to continue
- * scaning after that block
- */
- cex->ec_len = le16_to_cpu(nex.ee_len);
- cex->ec_start = ext4_ext_pblock(&nex);
- BUG_ON(le16_to_cpu(nex.ee_len) == 0);
- BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);
-
-out:
- up_write((&EXT4_I(inode)->i_data_sem));
- ext3_journal_stop(handle);
-map:
- if (err >= 0) {
- /* map blocks */
- if (bp->num == 0) {
- CERROR("hmm. why do we find this extent?\n");
- CERROR("initial space: %lu:%u\n",
- bp->start, bp->init_num);
-#ifdef EXT3_EXT_CACHE_EXTENT
- CERROR("current extent: %u/%u/%llu %d\n",
- cex->ec_block, cex->ec_len,
- (unsigned long long)cex->ec_start,
- cex->ec_type);
-#else
- CERROR("current extent: %u/%u/%llu\n",
- cex->ec_block, cex->ec_len,
- (unsigned long long)cex->ec_start);
-#endif
- }
- i = 0;
- if (cex->ec_block < bp->start)
- i = bp->start - cex->ec_block;
- if (i >= cex->ec_len)
- CERROR("nothing to do?! i = %d, e_num = %u\n",
- i, cex->ec_len);
- for (; i < cex->ec_len && bp->num; i++) {
- *(bp->blocks) = cex->ec_start + i;
-#ifdef EXT3_EXT_CACHE_EXTENT
- if (cex->ec_type != EXT3_EXT_CACHE_EXTENT)
-#else
- if ((cex->ec_len == 0) || (cex->ec_start == 0))
-#endif
- {
- /* unmap any possible underlying metadata from
- * the block device mapping. bug 6998. */
- ll_unmap_underlying_metadata(inode->i_sb,
- *(bp->blocks));
- }
- bp->blocks++;
- bp->num--;
- bp->start++;
- }
- }
- return err;
-}
-
-int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
- unsigned long num, unsigned long *blocks,
- int create)
-{
- struct ext3_ext_base *base = inode;
- struct bpointers bp;
- int err;
-
- CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
- block, block + num - 1, (unsigned) inode->i_ino);
-
- bp.blocks = blocks;
- bp.start = block;
- bp.init_num = bp.num = num;
- bp.create = create;
-
- err = fsfilt_ext3_ext_walk_space(base, block, num,
- ext3_ext_new_extent_cb, &bp);
- ext3_ext_invalidate_cache(base);
-
- return err;
-}
-
-int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
- int pages, unsigned long *blocks,
- int create)
-{
- int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
- int rc = 0, i = 0;
- struct page *fp = NULL;
- int clen = 0;
-
- CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
- inode->i_ino, pages, (*page)->index);
-
- /* pages are sorted already. so, we just have to find
- * contig. space and process them properly */
- while (i < pages) {
- if (fp == NULL) {
- /* start new extent */
- fp = *page++;
- clen = 1;
- i++;
- continue;
- } else if (fp->index + clen == (*page)->index) {
- /* continue the extent */
- page++;
- clen++;
- i++;
- continue;
- }
-
- /* process found extent */
- rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
- clen * blocks_per_page, blocks,
- create);
- if (rc)
- GOTO(cleanup, rc);
-
- /* look for next extent */
- fp = NULL;
- blocks += blocks_per_page * clen;
- }
-
- if (fp)
- rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
- clen * blocks_per_page, blocks,
- create);
-cleanup:
- return rc;
-}
-
-int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
- int pages, unsigned long *blocks,
- int create)
-{
- int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
- unsigned long *b;
- int rc = 0, i;
-
- for (i = 0, b = blocks; i < pages; i++, page++) {
- rc = ext3_map_inode_page(inode, *page, b, create);
- if (rc) {
- CERROR("ino %lu, blk %lu create %d: rc %d\n",
- inode->i_ino, *b, create, rc);
- break;
- }
-
- b += blocks_per_page;
- }
- return rc;
-}
-
-int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
- int pages, unsigned long *blocks,
- int create, struct mutex *optional_mutex)
-{
- int rc;
-
- if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
- rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
- blocks, create);
- return rc;
- }
- if (optional_mutex != NULL)
- mutex_lock(optional_mutex);
- rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks, create);
- if (optional_mutex != NULL)
- mutex_unlock(optional_mutex);
-
- return rc;
-}
-
-int fsfilt_ext3_read(struct inode *inode, void *buf, int size, loff_t *offs)
-{
- unsigned long block;
- struct buffer_head *bh;
- int err, blocksize, csize, boffs, osize = size;
-
- /* prevent reading after eof */
- spin_lock(&inode->i_lock);
- if (i_size_read(inode) < *offs + size) {
- size = i_size_read(inode) - *offs;
- spin_unlock(&inode->i_lock);
- if (size < 0) {
- CDEBUG(D_EXT2, "size %llu is too short for read @%llu\n",
- i_size_read(inode), *offs);
- return -EBADR;
- } else if (size == 0) {
- return 0;
- }
- } else {
- spin_unlock(&inode->i_lock);
- }
-
- blocksize = 1 << inode->i_blkbits;
-
- while (size > 0) {
- block = *offs >> inode->i_blkbits;
- boffs = *offs & (blocksize - 1);
- csize = min(blocksize - boffs, size);
- bh = ext3_bread(NULL, inode, block, 0, &err);
- if (!bh) {
- CERROR("can't read block: %d\n", err);
- return err;
- }
-
- memcpy(buf, bh->b_data + boffs, csize);
- brelse(bh);
-
- *offs += csize;
- buf += csize;
- size -= csize;
- }
- return osize;
-}
-EXPORT_SYMBOL(fsfilt_ext3_read);
-
-static int fsfilt_ext3_read_record(struct file * file, void *buf,
- int size, loff_t *offs)
-{
- int rc;
- rc = fsfilt_ext3_read(file->f_dentry->d_inode, buf, size, offs);
- if (rc > 0)
- rc = 0;
- return rc;
-}
-
-int fsfilt_ext3_write_handle(struct inode *inode, void *buf, int bufsize,
- loff_t *offs, handle_t *handle)
-{
- struct buffer_head *bh = NULL;
- loff_t old_size = i_size_read(inode), offset = *offs;
- loff_t new_size = i_size_read(inode);
- unsigned long block;
- int err = 0, blocksize = 1 << inode->i_blkbits, size, boffs;
-
- while (bufsize > 0) {
- if (bh != NULL)
- brelse(bh);
-
- block = offset >> inode->i_blkbits;
- boffs = offset & (blocksize - 1);
- size = min(blocksize - boffs, bufsize);
- bh = ext3_bread(handle, inode, block, 1, &err);
- if (!bh) {
- CERROR("can't read/create block: %d\n", err);
- break;
- }
-
- err = ext3_journal_get_write_access(handle, bh);
- if (err) {
- CERROR("journal_get_write_access() returned error %d\n",
- err);
- break;
- }
- LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
- memcpy(bh->b_data + boffs, buf, size);
- err = ext3_journal_dirty_metadata(handle, bh);
- if (err) {
- CERROR("journal_dirty_metadata() returned error %d\n",
- err);
- break;
- }
- if (offset + size > new_size)
- new_size = offset + size;
- offset += size;
- bufsize -= size;
- buf += size;
- }
- if (bh)
- brelse(bh);
-
- /* correct in-core and on-disk sizes */
- if (new_size > i_size_read(inode)) {
- spin_lock(&inode->i_lock);
- if (new_size > i_size_read(inode))
- i_size_write(inode, new_size);
- if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
- EXT3_I(inode)->i_disksize = i_size_read(inode);
- if (i_size_read(inode) > old_size) {
- spin_unlock(&inode->i_lock);
- mark_inode_dirty(inode);
- } else {
- spin_unlock(&inode->i_lock);
- }
- }
-
- if (err == 0)
- *offs = offset;
- return err;
-}
-EXPORT_SYMBOL(fsfilt_ext3_write_handle);
-
-static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
- loff_t *offs, int force_sync)
-{
- struct inode *inode = file->f_dentry->d_inode;
- handle_t *handle;
- int err, block_count = 0, blocksize;
-
- /* Determine how many transaction credits are needed */
- blocksize = 1 << inode->i_blkbits;
- block_count = (*offs & (blocksize - 1)) + bufsize;
- block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
-
- handle = ext3_journal_start(inode,
- block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2);
- if (IS_ERR(handle)) {
- CERROR("can't start transaction for %d blocks (%d bytes)\n",
- block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2,
- bufsize);
- return PTR_ERR(handle);
- }
-
- err = fsfilt_ext3_write_handle(inode, buf, bufsize, offs, handle);
-
- if (!err && force_sync)
- handle->h_sync = 1; /* recovery likes this */
-
- ext3_journal_stop(handle);
-
- return err;
-}
-
-static int fsfilt_ext3_setup(struct super_block *sb)
-{
- if (!EXT3_HAS_COMPAT_FEATURE(sb,
- EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
- CERROR("ext3 mounted without journal\n");
- return -EINVAL;
- }
-
-#ifdef S_PDIROPS
- CWARN("Enabling PDIROPS\n");
- set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
- sb->s_flags |= S_PDIROPS;
-#endif
- if (!EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
- CWARN("filesystem doesn't have dir_index feature enabled\n");
- return 0;
-}
-static struct fsfilt_operations fsfilt_ext3_ops = {
- .fs_type = "ext3",
- .fs_owner = THIS_MODULE,
- .fs_getlabel = fsfilt_ext3_get_label,
- .fs_start = fsfilt_ext3_start,
- .fs_commit = fsfilt_ext3_commit,
- .fs_map_inode_pages = fsfilt_ext3_map_inode_pages,
- .fs_write_record = fsfilt_ext3_write_record,
- .fs_read_record = fsfilt_ext3_read_record,
- .fs_setup = fsfilt_ext3_setup,
-};
-
-static int __init fsfilt_ext3_init(void)
-{
- int rc;
-
- fcb_cache = kmem_cache_create("fsfilt_ext3_fcb",
- sizeof(struct fsfilt_cb_data), 0, 0);
- if (!fcb_cache) {
- CERROR("error allocating fsfilt journal callback cache\n");
- GOTO(out, rc = -ENOMEM);
- }
-
- rc = fsfilt_register_ops(&fsfilt_ext3_ops);
-
- if (rc) {
- int err = kmem_cache_destroy(fcb_cache);
- LASSERTF(err == 0, "error destroying new cache: rc %d\n", err);
- }
-out:
- return rc;
-}
-
-static void __exit fsfilt_ext3_exit(void)
-{
- int rc;
-
- fsfilt_unregister_ops(&fsfilt_ext3_ops);
- rc = kmem_cache_destroy(fcb_cache);
- LASSERTF(rc == 0, "couldn't destroy fcb_cache slab\n");
-}
-
-module_init(fsfilt_ext3_init);
-module_exit(fsfilt_ext3_exit);
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c b/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c
index b21e40c..7e47fc4 100644
--- a/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c
+++ b/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c
@@ -43,7 +43,6 @@
#include <lustre_lib.h>
#include <lprocfs_status.h>
-#ifdef LPROCFS
void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount)
{
struct lprocfs_counter *percpu_cntr;
@@ -169,4 +168,3 @@ int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
return rc;
}
EXPORT_SYMBOL(lprocfs_stats_alloc_one);
-#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
index 09474e7..428ffd8 100644
--- a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
+++ b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
@@ -47,7 +47,6 @@
#include <linux/quotaops.h>
#include <linux/libcfs/libcfs.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/lustre_compat25.h>
#include <lvfs.h>
diff --git a/drivers/staging/lustre/lustre/mdc/Makefile b/drivers/staging/lustre/lustre/mdc/Makefile
index 93bae24..4c0bed14 100644
--- a/drivers/staging/lustre/lustre/mdc/Makefile
+++ b/drivers/staging/lustre/lustre/mdc/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_LUSTRE_FS) += mdc.o
-mdc-y := mdc_request.o mdc_reint.o lproc_mdc.o mdc_lib.o mdc_locks.o
+mdc-y := mdc_request.o mdc_reint.o mdc_lib.o mdc_locks.o
+mdc-$(CONFIG_PROC_FS) += lproc_mdc.o
ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index e0b8f18..2663480 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -39,8 +39,6 @@
#include <obd_class.h>
#include <lprocfs_status.h>
-#ifdef LPROCFS
-
static int mdc_max_rpcs_in_flight_seq_show(struct seq_file *m, void *v)
{
struct obd_device *dev = m->private;
@@ -214,4 +212,3 @@ void lprocfs_mdc_init_vars(struct lprocfs_static_vars *lvars)
lvars->module_vars = lprocfs_mdc_module_vars;
lvars->obd_vars = lprocfs_mdc_obd_vars;
}
-#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index 2aeff0e..5069829 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -69,9 +69,10 @@ void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
const void *data, int datalen, __u32 mode, __u32 uid,
__u32 gid, cfs_cap_t capability, __u64 rdev);
void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- __u32 mode, __u64 rdev, __u32 flags, const void *data,
+ __u32 mode, __u64 rdev, __u64 flags, const void *data,
int datalen);
void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
+void mdc_getxattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
const char *old, int oldlen, const char *new, int newlen);
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index b2de478..91f6876 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -174,12 +174,13 @@ void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
}
}
-static __u64 mds_pack_open_flags(__u32 flags, __u32 mode)
+static __u64 mds_pack_open_flags(__u64 flags, __u32 mode)
{
__u64 cr_flags = (flags & (FMODE_READ | FMODE_WRITE |
MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS |
MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK |
- MDS_OPEN_BY_FID));
+ MDS_OPEN_BY_FID | MDS_OPEN_LEASE |
+ MDS_OPEN_RELEASE));
if (flags & O_CREAT)
cr_flags |= MDS_OPEN_CREAT;
if (flags & O_EXCL)
@@ -207,7 +208,7 @@ static __u64 mds_pack_open_flags(__u32 flags, __u32 mode)
/* packing of MDS records */
void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- __u32 mode, __u64 rdev, __u32 flags, const void *lmm,
+ __u32 mode, __u64 rdev, __u64 flags, const void *lmm,
int lmmlen)
{
struct mdt_rec_create *rec;
@@ -234,6 +235,7 @@ void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
rec->cr_suppgid2 = op_data->op_suppgids[1];
rec->cr_bias = op_data->op_bias;
rec->cr_umask = current_umask();
+ rec->cr_old_handle = op_data->op_handle;
mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
/* the next buffer is child capa, which is used for replay,
@@ -489,6 +491,28 @@ void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
}
}
+static void mdc_hsm_release_pack(struct ptlrpc_request *req,
+ struct md_op_data *op_data)
+{
+ if (op_data->op_bias & MDS_HSM_RELEASE) {
+ struct close_data *data;
+ struct ldlm_lock *lock;
+
+ data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
+ LASSERT(data != NULL);
+
+ lock = ldlm_handle2lock(&op_data->op_lease_handle);
+ if (lock != NULL) {
+ data->cd_handle = lock->l_remote_handle;
+ ldlm_lock_put(lock);
+ }
+ ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
+
+ data->cd_data_version = op_data->op_data_version;
+ data->cd_fid = op_data->op_fid2;
+ }
+}
+
void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
{
struct mdt_ioepoch *epoch;
@@ -500,6 +524,7 @@ void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
mdc_setattr_pack_rec(rec, op_data);
mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
mdc_ioepoch_pack(epoch, op_data);
+ mdc_hsm_release_pack(req, op_data);
}
static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
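Note on the prototype changes above: the open-flags argument is widened from __u32 to __u64 so that flag bits beyond the low 32 (the patch adds MDS_OPEN_LEASE and MDS_OPEN_RELEASE to the packed mask) are not silently dropped on the way into mds_pack_open_flags(). The stand-alone sketch below is not Lustre code; HYPOTHETICAL_LEASE_FLAG is an invented bit used only to show the truncation a 32-bit parameter would cause.

/* Illustrative only: why a 32-bit flags parameter loses high flag bits. */
#include <stdint.h>
#include <stdio.h>

#define HYPOTHETICAL_LEASE_FLAG (1ULL << 38)	/* assumed high-bit open flag */

static uint64_t pack_narrow(uint32_t flags) { return flags; }	/* old-style __u32 argument */
static uint64_t pack_wide(uint64_t flags)   { return flags; }	/* new-style __u64 argument */

int main(void)
{
	uint64_t flags = HYPOTHETICAL_LEASE_FLAG;

	/* The narrow prototype truncates the high bit to 0 on the call. */
	printf("narrow: %#llx\n", (unsigned long long)pack_narrow(flags));
	/* The widened prototype preserves it. */
	printf("wide:   %#llx\n", (unsigned long long)pack_wide(flags));
	return 0;
}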
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index fb5a995..8aa7c80 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -39,7 +39,6 @@
# include <linux/module.h>
# include <linux/pagemap.h>
# include <linux/miscdevice.h>
-# include <linux/init.h>
#include <lustre_acl.h>
#include <obd_class.h>
@@ -75,6 +74,12 @@ EXPORT_SYMBOL(it_clear_disposition);
int it_open_error(int phase, struct lookup_intent *it)
{
+ if (it_disposition(it, DISP_OPEN_LEASE)) {
+ if (phase >= DISP_OPEN_LEASE)
+ return it->d.lustre.it_status;
+ else
+ return 0;
+ }
if (it_disposition(it, DISP_OPEN_OPEN)) {
if (phase >= DISP_OPEN_OPEN)
return it->d.lustre.it_status;
@@ -281,14 +286,21 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
/* XXX: openlock is not cancelled for cross-refs. */
/* If inode is known, cancel conflicting OPEN locks. */
if (fid_is_sane(&op_data->op_fid2)) {
- if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
- mode = LCK_CW;
+ if (it->it_flags & MDS_OPEN_LEASE) { /* try to get lease */
+ if (it->it_flags & FMODE_WRITE)
+ mode = LCK_EX;
+ else
+ mode = LCK_PR;
+ } else {
+ if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
+ mode = LCK_CW;
#ifdef FMODE_EXEC
- else if (it->it_flags & FMODE_EXEC)
- mode = LCK_PR;
+ else if (it->it_flags & FMODE_EXEC)
+ mode = LCK_PR;
#endif
- else
- mode = LCK_CR;
+ else
+ mode = LCK_CR;
+ }
count = mdc_resource_get_unused(exp, &op_data->op_fid2,
&cancels, mode,
MDS_INODELOCK_OPEN);
@@ -347,6 +359,62 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
return req;
}
+static struct ptlrpc_request *
+mdc_intent_getxattr_pack(struct obd_export *exp,
+ struct lookup_intent *it,
+ struct md_op_data *op_data)
+{
+ struct ptlrpc_request *req;
+ struct ldlm_intent *lit;
+ int rc, count = 0, maxdata;
+ LIST_HEAD(cancels);
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_LDLM_INTENT_GETXATTR);
+ if (req == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+
+ if (it->it_op == IT_SETXATTR)
+ /* If we want to upgrade to LCK_PW, let's cancel LCK_PR
+ * locks now. This avoids unnecessary ASTs. */
+ count = mdc_resource_get_unused(exp, &op_data->op_fid1,
+ &cancels, LCK_PW,
+ MDS_INODELOCK_XATTR);
+
+ rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return ERR_PTR(rc);
+ }
+
+ /* pack the intent */
+ lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
+ lit->opc = IT_GETXATTR;
+
+ maxdata = class_exp2cliimp(exp)->imp_connect_data.ocd_max_easize;
+
+ /* pack the intended request */
+ mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
+ op_data->op_valid, maxdata, -1, 0);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
+ RCL_SERVER, maxdata);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_EAVALS,
+ RCL_SERVER, maxdata);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_EAVALS_LENS,
+ RCL_SERVER, maxdata);
+
+ ptlrpc_request_set_replen(req);
+
+ return req;
+}
+
static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
struct lookup_intent *it,
struct md_op_data *op_data)
@@ -722,6 +790,8 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
{ .l_inodebits = { MDS_INODELOCK_UPDATE } };
static const ldlm_policy_data_t layout_policy =
{ .l_inodebits = { MDS_INODELOCK_LAYOUT } };
+ static const ldlm_policy_data_t getxattr_policy = {
+ .l_inodebits = { MDS_INODELOCK_XATTR } };
ldlm_policy_data_t const *policy = &lookup_policy;
int generation, resends = 0;
struct ldlm_reply *lockrep;
@@ -738,6 +808,8 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
policy = &update_policy;
else if (it->it_op & IT_LAYOUT)
policy = &layout_policy;
+ else if (it->it_op & (IT_GETXATTR | IT_SETXATTR))
+ policy = &getxattr_policy;
}
LASSERT(reqp == NULL);
@@ -768,9 +840,10 @@ resend:
} else if (it->it_op & IT_LAYOUT) {
if (!imp_connect_lvb_type(class_exp2cliimp(exp)))
return -EOPNOTSUPP;
-
req = mdc_intent_layout_pack(exp, it, op_data);
lvb_type = LVB_T_LAYOUT;
+ } else if (it->it_op & (IT_GETXATTR | IT_SETXATTR)) {
+ req = mdc_intent_getxattr_pack(exp, it, op_data);
} else {
LBUG();
return -EINVAL;
@@ -958,13 +1031,8 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
LASSERTF(fid_res_name_eq(&mdt_body->fid1,
&lock->l_resource->lr_name),
- "Lock res_id: %lu/%lu/%lu, fid: %lu/%lu/%lu.\n",
- (unsigned long)lock->l_resource->lr_name.name[0],
- (unsigned long)lock->l_resource->lr_name.name[1],
- (unsigned long)lock->l_resource->lr_name.name[2],
- (unsigned long)fid_seq(&mdt_body->fid1),
- (unsigned long)fid_oid(&mdt_body->fid1),
- (unsigned long)fid_ver(&mdt_body->fid1));
+ "Lock res_id: "DLDLMRES", fid: "DFID"\n",
+ PLDLMRES(lock->l_resource), PFID(&mdt_body->fid1));
LDLM_LOCK_PUT(lock);
memcpy(&old_lock, lockh, sizeof(*lockh));
@@ -1065,10 +1133,10 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
LASSERT(it);
CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID
- ", intent: %s flags %#o\n", op_data->op_namelen,
- op_data->op_name, PFID(&op_data->op_fid2),
- PFID(&op_data->op_fid1), ldlm_it2str(it->it_op),
- it->it_flags);
+ ", intent: %s flags %#Lo\n", op_data->op_namelen,
+ op_data->op_name, PFID(&op_data->op_fid2),
+ PFID(&op_data->op_fid1), ldlm_it2str(it->it_op),
+ it->it_flags);
lockh.cookie = 0;
if (fid_is_sane(&op_data->op_fid2) &&
@@ -1194,9 +1262,10 @@ int mdc_intent_getattr_async(struct obd_export *exp,
int rc = 0;
__u64 flags = LDLM_FL_HAS_INTENT;
- CDEBUG(D_DLMTRACE,"name: %.*s in inode "DFID", intent: %s flags %#o\n",
- op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
- ldlm_it2str(it->it_op), it->it_flags);
+ CDEBUG(D_DLMTRACE,
+ "name: %.*s in inode "DFID", intent: %s flags %#Lo\n",
+ op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
+ ldlm_it2str(it->it_op), it->it_flags);
fid_build_reg_res_name(&op_data->op_fid1, &res_id);
req = mdc_intent_getattr_pack(exp, it, op_data);
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index ed3a7a0..d1ad91c3 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -800,10 +800,27 @@ int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
{
struct obd_device *obd = class_exp2obd(exp);
struct ptlrpc_request *req;
- int rc;
+ struct req_format *req_fmt;
+ int rc;
+ int saved_rc = 0;
+
+ req_fmt = &RQF_MDS_CLOSE;
+ if (op_data->op_bias & MDS_HSM_RELEASE) {
+ req_fmt = &RQF_MDS_RELEASE_CLOSE;
+
+ /* allocate a FID for volatile file */
+ rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
+ if (rc < 0) {
+ CERROR("%s: "DFID" failed to allocate FID: %d\n",
+ obd->obd_name, PFID(&op_data->op_fid1), rc);
+ /* save the errcode and proceed to close */
+ saved_rc = rc;
+ }
+ }
*request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_CLOSE);
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt);
if (req == NULL)
return -ENOMEM;
@@ -893,7 +910,7 @@ int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
}
*request = req;
mdc_close_handle_reply(req, op_data, rc);
- return rc;
+ return rc < 0 ? rc : saved_rc;
}
int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
@@ -1743,6 +1760,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
GOTO(out, rc);
case LL_IOC_HSM_STATE_SET:
rc = mdc_ioc_hsm_state_set(exp, karg);
+ GOTO(out, rc);
case LL_IOC_HSM_ACTION:
rc = mdc_ioc_hsm_current_action(exp, karg);
GOTO(out, rc);
@@ -1814,8 +1832,8 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
struct obd_quotactl *oqctl;
OBD_ALLOC_PTR(oqctl);
- if (!oqctl)
- return -ENOMEM;
+ if (oqctl == NULL)
+ GOTO(out, rc = -ENOMEM);
QCTL_COPY(oqctl, qctl);
rc = obd_quotactl(exp, oqctl);
@@ -1824,23 +1842,21 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
qctl->qc_valid = QC_MDTIDX;
qctl->obd_uuid = obd->u.cli.cl_target_uuid;
}
+
OBD_FREE_PTR(oqctl);
- break;
+ GOTO(out, rc);
}
- case LL_IOC_GET_CONNECT_FLAGS: {
- if (copy_to_user(uarg,
- exp_connect_flags_ptr(exp),
- sizeof(__u64)))
+ case LL_IOC_GET_CONNECT_FLAGS:
+ if (copy_to_user(uarg, exp_connect_flags_ptr(exp),
+ sizeof(*exp_connect_flags_ptr(exp))))
GOTO(out, rc = -EFAULT);
- else
- GOTO(out, rc = 0);
- }
- case LL_IOC_LOV_SWAP_LAYOUTS: {
+
+ GOTO(out, rc = 0);
+ case LL_IOC_LOV_SWAP_LAYOUTS:
rc = mdc_ioc_swap_layouts(exp, karg);
- break;
- }
+ GOTO(out, rc);
default:
- CERROR("mdc_ioctl(): unrecognised ioctl %#x\n", cmd);
+ CERROR("unrecognised ioctl: cmd = %#x\n", cmd);
GOTO(out, rc = -ENOTTY);
}
out:
@@ -1920,10 +1936,8 @@ static void lustre_swab_hal(struct hsm_action_list *h)
__swab32s(&h->hal_archive_id);
__swab64s(&h->hal_flags);
hai = hai_zero(h);
- for (i = 0; i < h->hal_count; i++) {
+ for (i = 0; i < h->hal_count; i++, hai = hai_next(hai))
lustre_swab_hai(hai);
- hai = hai_next(hai);
- }
}
static void lustre_swab_kuch(struct kuc_hdr *l)
@@ -2062,15 +2076,6 @@ int mdc_set_info_async(const struct lu_env *env,
sptlrpc_import_flush_my_ctx(imp);
return 0;
}
- if (KEY_IS(KEY_MDS_CONN)) {
- /* mds-mds import */
- spin_lock(&imp->imp_lock);
- imp->imp_server_timeout = 1;
- spin_unlock(&imp->imp_lock);
- imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
- CDEBUG(D_OTHER, "%s: timeout / 2\n", exp->exp_obd->obd_name);
- return 0;
- }
if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
keylen, key, vallen, val, set);
@@ -2580,27 +2585,6 @@ static int mdc_renew_capa(struct obd_export *exp, struct obd_capa *oc,
return 0;
}
-static int mdc_connect(const struct lu_env *env,
- struct obd_export **exp,
- struct obd_device *obd, struct obd_uuid *cluuid,
- struct obd_connect_data *data,
- void *localdata)
-{
- struct obd_import *imp = obd->u.cli.cl_import;
-
- /* mds-mds import features */
- if (data && (data->ocd_connect_flags & OBD_CONNECT_MDS_MDS)) {
- spin_lock(&imp->imp_lock);
- imp->imp_server_timeout = 1;
- spin_unlock(&imp->imp_lock);
- imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
- CDEBUG(D_OTHER, "%s: Set 'mds' portal and timeout\n",
- obd->obd_name);
- }
-
- return client_connect_import(env, exp, obd, cluuid, data, NULL);
-}
-
struct obd_ops mdc_obd_ops = {
.o_owner = THIS_MODULE,
.o_setup = mdc_setup,
@@ -2608,7 +2592,7 @@ struct obd_ops mdc_obd_ops = {
.o_cleanup = mdc_cleanup,
.o_add_conn = client_import_add_conn,
.o_del_conn = client_import_del_conn,
- .o_connect = mdc_connect,
+ .o_connect = client_connect_import,
.o_disconnect = client_disconnect_export,
.o_iocontrol = mdc_iocontrol,
.o_set_info_async = mdc_set_info_async,
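One hunk above adds a missing GOTO(out, rc) after the LL_IOC_HSM_STATE_SET case in mdc_iocontrol(), which previously fell through into LL_IOC_HSM_ACTION and let the second handler overwrite rc. The following is a generic, self-contained illustration of that class of bug (plain C, invented handler names, with break standing in for the GOTO):

#include <stdio.h>

static int handle_state_set(void) { printf("  state_set handler\n"); return 0; }
static int handle_action(void)    { printf("  action handler\n");    return -1; }

/* Without a break (the equivalent of the missing GOTO), case 1 also runs
 * the case 2 handler and its return value clobbers rc. */
static int dispatch_buggy(int cmd)
{
	int rc = 0;

	switch (cmd) {
	case 1:
		rc = handle_state_set();
		/* missing break: falls through into case 2 */
	case 2:
		rc = handle_action();
		break;
	}
	return rc;
}

static int dispatch_fixed(int cmd)
{
	int rc = 0;

	switch (cmd) {
	case 1:
		rc = handle_state_set();
		break;		/* the patch adds the equivalent GOTO(out, rc) here */
	case 2:
		rc = handle_action();
		break;
	}
	return rc;
}

int main(void)
{
	printf("buggy: rc = %d\n", dispatch_buggy(1));
	printf("fixed: rc = %d\n", dispatch_fixed(1));
	return 0;
}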
diff --git a/drivers/staging/lustre/lustre/mgc/Makefile b/drivers/staging/lustre/lustre/mgc/Makefile
index 2672463..2f5ee64 100644
--- a/drivers/staging/lustre/lustre/mgc/Makefile
+++ b/drivers/staging/lustre/lustre/mgc/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_LUSTRE_FS) += mgc.o
-mgc-y := mgc_request.o lproc_mgc.o
+mgc-y := mgc_request.o
+mgc-$(CONFIG_PROC_FS) += lproc_mgc.o
ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/mgc/libmgc.c b/drivers/staging/lustre/lustre/mgc/libmgc.c
index 7b4947c..9b40c57 100644
--- a/drivers/staging/lustre/lustre/mgc/libmgc.c
+++ b/drivers/staging/lustre/lustre/mgc/libmgc.c
@@ -99,11 +99,8 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
static int mgc_cleanup(struct obd_device *obd)
{
- struct client_obd *cli = &obd->u.cli;
int rc;
- LASSERT(cli->cl_mgc_vfsmnt == NULL);
-
ptlrpcd_decref();
rc = client_obd_cleanup(obd);
diff --git a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
index ebecec2..1506af1 100644
--- a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
+++ b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
@@ -40,8 +40,6 @@
#include <lprocfs_status.h>
#include "mgc_internal.h"
-#ifdef LPROCFS
-
LPROC_SEQ_FOPS_RO_TYPE(mgc, uuid);
LPROC_SEQ_FOPS_RO_TYPE(mgc, connect_flags);
LPROC_SEQ_FOPS_RO_TYPE(mgc, server_uuid);
@@ -80,4 +78,3 @@ void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars)
lvars->module_vars = lprocfs_mgc_module_vars;
lvars->obd_vars = lprocfs_mgc_obd_vars;
}
-#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_internal.h b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
index dbd6982..73b4548 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_internal.h
+++ b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
@@ -48,7 +48,7 @@
void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars);
int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data);
#else
-static void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars)
+static inline void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars)
{
memset(lvars, 0, sizeof(*lvars));
}
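The mgc_internal.h hunk turns the non-LPROCFS stub into "static inline". A brief sketch of why (header and identifiers below are invented, not from the patch): a plain "static" function defined in a header gives every including .c file its own out-of-line copy, and any file that never calls it gets a "defined but not used" warning; "static inline" avoids both.

/* example_internal.h -- illustrative only */
#ifndef EXAMPLE_INTERNAL_H
#define EXAMPLE_INTERNAL_H

struct example_vars { int a; };

/* Preferred stub form for a header: no per-file copy is emitted and no
 * unused-function warning appears in translation units that never call it. */
static inline void example_init_vars(struct example_vars *vars)
{
	vars->a = 0;
}

#endif /* EXAMPLE_INTERNAL_H */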
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 12a9ede..3bdbb94 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -41,17 +41,14 @@
#define DEBUG_SUBSYSTEM S_MGC
#define D_MGC D_CONFIG /*|D_WARNING*/
-# include <linux/module.h>
-# include <linux/pagemap.h>
-# include <linux/miscdevice.h>
-# include <linux/init.h>
-
+#include <linux/module.h>
#include <obd_class.h>
#include <lustre_dlm.h>
#include <lprocfs_status.h>
#include <lustre_log.h>
-#include <lustre_fsfilt.h>
#include <lustre_disk.h>
+#include <dt_object.h>
+
#include "mgc_internal.h"
static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id,
@@ -73,7 +70,7 @@ static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id,
memset(res_id, 0, sizeof(*res_id));
res_id->name[0] = cpu_to_le64(resname);
/* XXX: unfortunately, sptlprc and config llog share one lock */
- switch(type) {
+ switch (type) {
case CONFIG_T_CONFIG:
case CONFIG_T_SPTLRPC:
resname = 0;
@@ -400,6 +397,7 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
return rc;
}
+#ifdef LPROCFS
int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
{
struct obd_device *obd = data;
@@ -423,6 +421,7 @@ int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
return 0;
}
+#endif
/* reenqueue any lost locks */
#define RQ_RUNNING 0x1
@@ -578,97 +577,175 @@ static void mgc_requeue_add(struct config_llog_data *cld)
}
/********************** class fns **********************/
+static int mgc_local_llog_init(const struct lu_env *env,
+ struct obd_device *obd,
+ struct obd_device *disk)
+{
+ struct llog_ctxt *ctxt;
+ int rc;
+
+ rc = llog_setup(env, obd, &obd->obd_olg, LLOG_CONFIG_ORIG_CTXT, disk,
+ &llog_osd_ops);
+ if (rc)
+ return rc;
+
+ ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
+ LASSERT(ctxt);
+ ctxt->loc_dir = obd->u.cli.cl_mgc_configs_dir;
+ llog_ctxt_put(ctxt);
-static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb,
- struct vfsmount *mnt)
+ return 0;
+}
+
+static int mgc_local_llog_fini(const struct lu_env *env,
+ struct obd_device *obd)
{
- struct lvfs_run_ctxt saved;
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct client_obd *cli = &obd->u.cli;
- struct dentry *dentry;
- char *label;
- int err = 0;
+ struct llog_ctxt *ctxt;
+
+ ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
+ llog_cleanup(env, ctxt);
+
+ return 0;
+}
+
+static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb)
+{
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct client_obd *cli = &obd->u.cli;
+ struct lu_fid rfid, fid;
+ struct dt_object *root, *dto;
+ struct lu_env *env;
+ int rc = 0;
LASSERT(lsi);
- LASSERT(lsi->lsi_srv_mnt == mnt);
+ LASSERT(lsi->lsi_dt_dev);
+
+ OBD_ALLOC_PTR(env);
+ if (env == NULL)
+ return -ENOMEM;
/* The mgc fs exclusion sem. Only one fs can be setup at a time. */
down(&cli->cl_mgc_sem);
cfs_cleanup_group_info();
- obd->obd_fsops = fsfilt_get_ops(lsi->lsi_fstype);
- if (IS_ERR(obd->obd_fsops)) {
- up(&cli->cl_mgc_sem);
- CERROR("%s: No fstype %s: rc = %ld\n", lsi->lsi_fstype,
- obd->obd_name, PTR_ERR(obd->obd_fsops));
- return PTR_ERR(obd->obd_fsops);
- }
+ /* Setup the configs dir */
+ rc = lu_env_init(env, LCT_MG_THREAD);
+ if (rc)
+ GOTO(out_err, rc);
- cli->cl_mgc_vfsmnt = mnt;
- err = fsfilt_setup(obd, mnt->mnt_sb);
- if (err)
- GOTO(err_ops, err);
-
- OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
- obd->obd_lvfs_ctxt.pwdmnt = mnt;
- obd->obd_lvfs_ctxt.pwd = mnt->mnt_root;
- obd->obd_lvfs_ctxt.fs = get_ds();
-
- push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- dentry = ll_lookup_one_len(MOUNT_CONFIGS_DIR, cfs_fs_pwd(current->fs),
- strlen(MOUNT_CONFIGS_DIR));
- pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- if (IS_ERR(dentry)) {
- err = PTR_ERR(dentry);
- CERROR("cannot lookup %s directory: rc = %d\n",
- MOUNT_CONFIGS_DIR, err);
- GOTO(err_ops, err);
- }
- cli->cl_mgc_configs_dir = dentry;
+ fid.f_seq = FID_SEQ_LOCAL_NAME;
+ fid.f_oid = 1;
+ fid.f_ver = 0;
+ rc = local_oid_storage_init(env, lsi->lsi_dt_dev, &fid,
+ &cli->cl_mgc_los);
+ if (rc)
+ GOTO(out_env, rc);
+
+ rc = dt_root_get(env, lsi->lsi_dt_dev, &rfid);
+ if (rc)
+ GOTO(out_env, rc);
+
+ root = dt_locate_at(env, lsi->lsi_dt_dev, &rfid,
+ &cli->cl_mgc_los->los_dev->dd_lu_dev);
+ if (unlikely(IS_ERR(root)))
+ GOTO(out_los, rc = PTR_ERR(root));
+
+ dto = local_file_find_or_create(env, cli->cl_mgc_los, root,
+ MOUNT_CONFIGS_DIR,
+ S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO);
+ lu_object_put_nocache(env, &root->do_lu);
+ if (IS_ERR(dto))
+ GOTO(out_los, rc = PTR_ERR(dto));
+
+ cli->cl_mgc_configs_dir = dto;
+
+ LASSERT(lsi->lsi_osd_exp->exp_obd->obd_lvfs_ctxt.dt);
+ rc = mgc_local_llog_init(env, obd, lsi->lsi_osd_exp->exp_obd);
+ if (rc)
+ GOTO(out_llog, rc);
/* We take an obd ref to insure that we can't get to mgc_cleanup
- without calling mgc_fs_cleanup first. */
+ * without calling mgc_fs_cleanup first. */
class_incref(obd, "mgc_fs", obd);
- label = fsfilt_get_label(obd, mnt->mnt_sb);
- if (label)
- CDEBUG(D_MGC, "MGC using disk labelled=%s\n", label);
-
/* We keep the cl_mgc_sem until mgc_fs_cleanup */
- return 0;
-
-err_ops:
- fsfilt_put_ops(obd->obd_fsops);
- obd->obd_fsops = NULL;
- cli->cl_mgc_vfsmnt = NULL;
- up(&cli->cl_mgc_sem);
- return err;
+out_llog:
+ if (rc) {
+ lu_object_put(env, &cli->cl_mgc_configs_dir->do_lu);
+ cli->cl_mgc_configs_dir = NULL;
+ }
+out_los:
+ if (rc < 0) {
+ local_oid_storage_fini(env, cli->cl_mgc_los);
+ cli->cl_mgc_los = NULL;
+ up(&cli->cl_mgc_sem);
+ }
+out_env:
+ lu_env_fini(env);
+out_err:
+ OBD_FREE_PTR(env);
+ return rc;
}
static int mgc_fs_cleanup(struct obd_device *obd)
{
- struct client_obd *cli = &obd->u.cli;
- int rc = 0;
+ struct lu_env env;
+ struct client_obd *cli = &obd->u.cli;
+ int rc;
- LASSERT(cli->cl_mgc_vfsmnt != NULL);
+ LASSERT(cli->cl_mgc_los != NULL);
- if (cli->cl_mgc_configs_dir != NULL) {
- struct lvfs_run_ctxt saved;
- push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- l_dput(cli->cl_mgc_configs_dir);
- cli->cl_mgc_configs_dir = NULL;
- pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- class_decref(obd, "mgc_fs", obd);
- }
+ rc = lu_env_init(&env, LCT_MG_THREAD);
+ if (rc)
+ GOTO(unlock, rc);
+
+ mgc_local_llog_fini(&env, obd);
+
+ lu_object_put_nocache(&env, &cli->cl_mgc_configs_dir->do_lu);
+ cli->cl_mgc_configs_dir = NULL;
- cli->cl_mgc_vfsmnt = NULL;
- if (obd->obd_fsops)
- fsfilt_put_ops(obd->obd_fsops);
+ local_oid_storage_fini(&env, cli->cl_mgc_los);
+ cli->cl_mgc_los = NULL;
+ lu_env_fini(&env);
+unlock:
+ class_decref(obd, "mgc_fs", obd);
up(&cli->cl_mgc_sem);
- return rc;
+ return 0;
+}
+
+static int mgc_llog_init(const struct lu_env *env, struct obd_device *obd)
+{
+ struct llog_ctxt *ctxt;
+ int rc;
+
+ /* set up only the remote ctxt; the local disk context is switched
+ * for each filesystem during mgc_fs_setup() */
+ rc = llog_setup(env, obd, &obd->obd_olg, LLOG_CONFIG_REPL_CTXT, obd,
+ &llog_client_ops);
+ if (rc)
+ return rc;
+
+ ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
+ LASSERT(ctxt);
+
+ llog_initiator_connect(ctxt);
+ llog_ctxt_put(ctxt);
+
+ return 0;
+}
+
+static int mgc_llog_fini(const struct lu_env *env, struct obd_device *obd)
+{
+ struct llog_ctxt *ctxt;
+
+ ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
+ if (ctxt)
+ llog_cleanup(env, ctxt);
+
+ return 0;
}
static atomic_t mgc_count = ATOMIC_INIT(0);
@@ -694,7 +771,7 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
}
}
obd_cleanup_client_import(obd);
- rc = obd_llog_finish(obd, 0);
+ rc = mgc_llog_fini(NULL, obd);
if (rc != 0)
CERROR("failed to cleanup llogging subsystems\n");
break;
@@ -704,11 +781,8 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
static int mgc_cleanup(struct obd_device *obd)
{
- struct client_obd *cli = &obd->u.cli;
int rc;
- LASSERT(cli->cl_mgc_vfsmnt == NULL);
-
/* COMPAT_146 - old config logs may have added profiles we don't
know about */
if (obd->obd_type->typ_refcnt <= 1)
@@ -733,7 +807,7 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
if (rc)
GOTO(err_decref, rc);
- rc = obd_llog_init(obd, &obd->obd_olg, obd, NULL);
+ rc = mgc_llog_init(NULL, obd);
if (rc) {
CERROR("failed to setup llogging subsystems\n");
GOTO(err_cleanup, rc);
@@ -788,8 +862,8 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
/* We've given up the lock, prepare ourselves to update. */
LDLM_DEBUG(lock, "MGC cancel CB");
- CDEBUG(D_MGC, "Lock res "LPX64" (%.8s)\n",
- lock->l_resource->lr_name.name[0],
+ CDEBUG(D_MGC, "Lock res "DLDLMRES" (%.8s)\n",
+ PLDLMRES(lock->l_resource),
(char *)&lock->l_resource->lr_name.name[0]);
if (!cld) {
@@ -1011,23 +1085,23 @@ int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
}
if (KEY_IS(KEY_SET_FS)) {
struct super_block *sb = (struct super_block *)val;
- struct lustre_sb_info *lsi;
+
if (vallen != sizeof(struct super_block))
return -EINVAL;
- lsi = s2lsi(sb);
- rc = mgc_fs_setup(exp->exp_obd, sb, lsi->lsi_srv_mnt);
- if (rc) {
+
+ rc = mgc_fs_setup(exp->exp_obd, sb);
+ if (rc)
CERROR("set_fs got %d\n", rc);
- }
+
return rc;
}
if (KEY_IS(KEY_CLEAR_FS)) {
if (vallen != 0)
return -EINVAL;
rc = mgc_fs_cleanup(exp->exp_obd);
- if (rc) {
+ if (rc)
CERROR("clear_fs got %d\n", rc);
- }
+
return rc;
}
if (KEY_IS(KEY_SET_INFO)) {
@@ -1145,49 +1219,6 @@ static int mgc_import_event(struct obd_device *obd,
return rc;
}
-static int mgc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
- struct obd_device *tgt, int *index)
-{
- struct llog_ctxt *ctxt;
- int rc;
-
- LASSERT(olg == &obd->obd_olg);
-
-
- rc = llog_setup(NULL, obd, olg, LLOG_CONFIG_REPL_CTXT, tgt,
- &llog_client_ops);
- if (rc)
- GOTO(out, rc);
-
- ctxt = llog_group_get_ctxt(olg, LLOG_CONFIG_REPL_CTXT);
- if (!ctxt)
- GOTO(out, rc = -ENODEV);
-
- llog_initiator_connect(ctxt);
- llog_ctxt_put(ctxt);
-
- return 0;
-out:
- ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
- if (ctxt)
- llog_cleanup(NULL, ctxt);
- return rc;
-}
-
-static int mgc_llog_finish(struct obd_device *obd, int count)
-{
- struct llog_ctxt *ctxt;
-
- ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
- if (ctxt)
- llog_cleanup(NULL, ctxt);
-
- ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
- if (ctxt)
- llog_cleanup(NULL, ctxt);
- return 0;
-}
-
enum {
CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
CONFIG_READ_NRPAGES = 4
@@ -1540,17 +1571,58 @@ out:
return rc;
}
+static int mgc_llog_local_copy(const struct lu_env *env,
+ struct obd_device *obd,
+ struct llog_ctxt *rctxt,
+ struct llog_ctxt *lctxt, char *logname)
+{
+ char *temp_log;
+ int rc;
+
+ /*
+ * - copy the local llog to a backup using llog_backup()
+ * - copy the remote llog to logname using llog_backup()
+ * - if that fails, restore the backup to logname again
+ */
+
+ OBD_ALLOC(temp_log, strlen(logname) + 1);
+ if (!temp_log)
+ return -ENOMEM;
+ sprintf(temp_log, "%sT", logname);
+
+ /* make a copy of local llog at first */
+ rc = llog_backup(env, obd, lctxt, lctxt, logname, temp_log);
+ if (rc < 0 && rc != -ENOENT)
+ GOTO(out, rc);
+ /* copy remote llog to the local copy */
+ rc = llog_backup(env, obd, rctxt, lctxt, logname, logname);
+ if (rc == -ENOENT) {
+ /* no remote llog, delete local one too */
+ llog_erase(env, lctxt, NULL, logname);
+ } else if (rc < 0) {
+ /* error during backup, get local one back from the copy */
+ llog_backup(env, obd, lctxt, lctxt, temp_log, logname);
+out:
+ CERROR("%s: failed to copy remote log %s: rc = %d\n",
+ obd->obd_name, logname, rc);
+ }
+ llog_erase(env, lctxt, NULL, temp_log);
+ OBD_FREE(temp_log, strlen(logname) + 1);
+ return rc;
+}
/* local_only means it cannot get remote llogs */
static int mgc_process_cfg_log(struct obd_device *mgc,
- struct config_llog_data *cld,
- int local_only)
+ struct config_llog_data *cld, int local_only)
{
- struct llog_ctxt *ctxt, *lctxt = NULL;
- struct lvfs_run_ctxt *saved_ctxt;
- struct lustre_sb_info *lsi = NULL;
- int rc = 0, must_pop = 0;
- bool sptlrpc_started = false;
+ struct llog_ctxt *ctxt, *lctxt = NULL;
+ struct dt_object *cl_mgc_dir = mgc->u.cli.cl_mgc_configs_dir;
+ struct lustre_sb_info *lsi = NULL;
+ int rc = 0;
+ bool sptlrpc_started = false;
+ struct lu_env *env;
LASSERT(cld);
LASSERT(mutex_is_locked(&cld->cld_lock));
@@ -1565,20 +1637,48 @@ static int mgc_process_cfg_log(struct obd_device *mgc,
if (cld->cld_cfg.cfg_sb)
lsi = s2lsi(cld->cld_cfg.cfg_sb);
- ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
- if (!ctxt) {
- CERROR("missing llog context\n");
- return -EINVAL;
- }
-
- OBD_ALLOC_PTR(saved_ctxt);
- if (saved_ctxt == NULL)
+ OBD_ALLOC_PTR(env);
+ if (env == NULL)
return -ENOMEM;
+ rc = lu_env_init(env, LCT_MG_THREAD);
+ if (rc)
+ GOTO(out_free, rc);
+
+ ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
+ LASSERT(ctxt);
+
lctxt = llog_get_context(mgc, LLOG_CONFIG_ORIG_CTXT);
- if (local_only) { /* no local log at client side */
- GOTO(out_pop, rc = -EIO);
+ /* Copy the setup log locally if we can. Don't mess around if we're
+ * running an MGS though (logs are already local). */
+ if (lctxt && lsi && IS_SERVER(lsi) && !IS_MGS(lsi) &&
+ cl_mgc_dir != NULL &&
+ lu2dt_dev(cl_mgc_dir->do_lu.lo_dev) == lsi->lsi_dt_dev) {
+ if (!local_only)
+ /* Only try to copy log if we have the lock. */
+ rc = mgc_llog_local_copy(env, mgc, ctxt, lctxt,
+ cld->cld_logname);
+ if (local_only || rc) {
+ if (llog_is_empty(env, lctxt, cld->cld_logname)) {
+ LCONSOLE_ERROR_MSG(0x13a,
+ "Failed to get MGS log %s and no local copy.\n",
+ cld->cld_logname);
+ GOTO(out_pop, rc = -ENOTCONN);
+ }
+ CDEBUG(D_MGC,
+ "Failed to get MGS log %s, using local copy for now, will try to update later.\n",
+ cld->cld_logname);
+ }
+ /* Now, whether we copied or not, start using the local llog.
+ * If we failed to copy, we'll start using whatever the old
+ * log has. */
+ llog_ctxt_put(ctxt);
+ ctxt = lctxt;
+ lctxt = NULL;
+ } else {
+ if (local_only) /* no local log at client side */
+ GOTO(out_pop, rc = -EIO);
}
if (cld_is_sptlrpc(cld)) {
@@ -1587,19 +1687,16 @@ static int mgc_process_cfg_log(struct obd_device *mgc,
}
/* logname and instance info should be the same, so use our
- copy of the instance for the update. The cfg_last_idx will
- be updated here. */
- rc = class_config_parse_llog(NULL, ctxt, cld->cld_logname,
+ * copy of the instance for the update. The cfg_last_idx will
+ * be updated here. */
+ rc = class_config_parse_llog(env, ctxt, cld->cld_logname,
&cld->cld_cfg);
out_pop:
- llog_ctxt_put(ctxt);
+ __llog_ctxt_put(env, ctxt);
if (lctxt)
- llog_ctxt_put(lctxt);
- if (must_pop)
- pop_ctxt(saved_ctxt, &mgc->obd_lvfs_ctxt, NULL);
+ __llog_ctxt_put(env, lctxt);
- OBD_FREE_PTR(saved_ctxt);
/*
* update settings on existing OBDs. doing it inside
* of llog_process_lock so no device is attaching/detaching
@@ -1614,6 +1711,9 @@ out_pop:
strlen("-sptlrpc"));
}
+ lu_env_fini(env);
+out_free:
+ OBD_FREE_PTR(env);
return rc;
}
@@ -1700,7 +1800,7 @@ static int mgc_process_config(struct obd_device *obd, obd_count len, void *buf)
char *logname;
int rc = 0;
- switch(lcfg->lcfg_command) {
+ switch (lcfg->lcfg_command) {
case LCFG_LOV_ADD_OBD: {
/* Overloading this cfg command: register a new target */
struct mgs_target_info *mti;
@@ -1795,14 +1895,12 @@ struct obd_ops mgc_obd_ops = {
.o_del_conn = client_import_del_conn,
.o_connect = client_connect_import,
.o_disconnect = client_disconnect_export,
- //.o_enqueue = mgc_enqueue,
+ /* .o_enqueue = mgc_enqueue, */
.o_cancel = mgc_cancel,
- //.o_iocontrol = mgc_iocontrol,
+ /* .o_iocontrol = mgc_iocontrol, */
.o_set_info_async = mgc_set_info_async,
.o_get_info = mgc_get_info,
.o_import_event = mgc_import_event,
- .o_llog_init = mgc_llog_init,
- .o_llog_finish = mgc_llog_finish,
.o_process_config = mgc_process_config,
};
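The MGC changes above drop the lvfs_run_ctxt push/pop (and fsfilt ops) in favour of dt/llog calls that take an initialized lu_env. Reduced to a sketch that uses only the calls visible in the patch, the lifecycle looks like the following; the surrounding function and its error handling are invented for illustration, not part of the patch:

/* Illustrative lu_env allocate / init / use / fini / free pattern. */
static int example_with_env(struct obd_device *obd)
{
	struct lu_env *env;
	int rc;

	OBD_ALLOC_PTR(env);
	if (env == NULL)
		return -ENOMEM;

	rc = lu_env_init(env, LCT_MG_THREAD);	/* tag must match the thread context */
	if (rc)
		goto out_free;

	/* ... work that needs an initialized environment (llog, dt_object) ... */

	lu_env_fini(env);
out_free:
	OBD_FREE_PTR(env);
	return rc;
}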
diff --git a/drivers/staging/lustre/lustre/obdclass/capa.c b/drivers/staging/lustre/lustre/obdclass/capa.c
index 68d797b..be1c613 100644
--- a/drivers/staging/lustre/lustre/obdclass/capa.c
+++ b/drivers/staging/lustre/lustre/obdclass/capa.c
@@ -46,7 +46,6 @@
#include <asm/unistd.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/crypto.h>
#include <obd_class.h>
@@ -273,10 +272,10 @@ int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
alg = &capa_hmac_algs[capa_alg(capa)];
tfm = crypto_alloc_hash(alg->ha_name, 0, 0);
- if (!tfm) {
+ if (IS_ERR(tfm)) {
CERROR("crypto_alloc_tfm failed, check whether your kernel"
"has crypto support!\n");
- return -ENOMEM;
+ return PTR_ERR(tfm);
}
keylen = alg->ha_keylen;
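The capa.c hunk fixes the failure check for crypto_alloc_hash(), which reports errors with ERR_PTR(-errno) rather than NULL, so the old "!tfm" test could never fire. The self-contained sketch below shows the convention; ERR_PTR/IS_ERR/PTR_ERR here are simplified stand-ins for the <linux/err.h> helpers and fake_crypto_alloc_hash is invented:

#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fake_crypto_alloc_hash(int fail)
{
	return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)0x1000;
}

int main(void)
{
	void *tfm = fake_crypto_alloc_hash(1);

	if (IS_ERR(tfm))		/* correct check, as in the patch */
		printf("alloc failed: %ld\n", PTR_ERR(tfm));
	if (!tfm)			/* the old, wrong check */
		printf("never reached for ERR_PTR values\n");
	return 0;
}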
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 4afd962..c93131e 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -54,7 +54,13 @@ struct list_head obd_types;
DEFINE_RWLOCK(obd_dev_lock);
__u64 obd_max_pages = 0;
+EXPORT_SYMBOL(obd_max_pages);
__u64 obd_max_alloc = 0;
+EXPORT_SYMBOL(obd_max_alloc);
+__u64 obd_alloc;
+EXPORT_SYMBOL(obd_alloc);
+__u64 obd_pages;
+EXPORT_SYMBOL(obd_pages);
DEFINE_SPINLOCK(obd_updatemax_lock);
/* The following are visible and mutable through /proc/sys/lustre/. */
@@ -501,8 +507,15 @@ int obd_init_checks(void)
}
extern spinlock_t obd_types_lock;
+#ifdef LPROCFS
extern int class_procfs_init(void);
extern int class_procfs_clean(void);
+#else
+static inline int class_procfs_init(void)
+{ return 0; }
+static inline int class_procfs_clean(void)
+{ return 0; }
+#endif
static int __init init_obdclass(void)
{
@@ -516,7 +529,7 @@ static int __init init_obdclass(void)
spin_lock_init(&obd_types_lock);
obd_zombie_impexp_init();
-#ifdef LPROCFS
+
obd_memory = lprocfs_alloc_stats(OBD_STATS_NUM,
LPROCFS_STATS_FLAG_NONE |
LPROCFS_STATS_FLAG_IRQ_SAFE);
@@ -531,7 +544,7 @@ static int __init init_obdclass(void)
lprocfs_counter_init(obd_memory, OBD_MEMORY_PAGES_STAT,
LPROCFS_CNTR_AVGMINMAX,
"pagesused", "pages");
-#endif
+
err = obd_init_checks();
if (err == -EOVERFLOW)
return err;
@@ -564,6 +577,9 @@ static int __init init_obdclass(void)
err = obd_init_caches();
if (err)
return err;
+
+ obd_sysctl_init();
+
err = class_procfs_init();
if (err)
return err;
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index f6fae16..d9f750d 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -193,7 +193,6 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
strcpy(type->typ_name, name);
spin_lock_init(&type->obd_type_lock);
-#ifdef LPROCFS
type->typ_procroot = lprocfs_register(type->typ_name, proc_lustre_root,
vars, type);
if (IS_ERR(type->typ_procroot)) {
@@ -201,7 +200,7 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
type->typ_procroot = NULL;
GOTO (failed, rc);
}
-#endif
+
if (ldt != NULL) {
type->typ_lu = ldt;
rc = lu_device_type_init(ldt);
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index d1a57eb..121a856 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -56,7 +56,6 @@
#include <linux/proc_fs.h>
#include <linux/fs.h>
#include <linux/poll.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <asm/io.h>
@@ -295,9 +294,6 @@ struct lprocfs_vars lprocfs_base[] = {
{ "jobid_var", &obd_proc_jobid_var_fops },
{ 0 }
};
-#else
-#define lprocfs_base NULL
-#endif /* LPROCFS */
static void *obd_device_list_seq_start(struct seq_file *p, loff_t *pos)
{
@@ -380,7 +376,6 @@ int class_procfs_init(void)
{
int rc = 0;
- obd_sysctl_init();
proc_lustre_root = lprocfs_register("fs/lustre", NULL,
lprocfs_base, NULL);
if (IS_ERR(proc_lustre_root)) {
@@ -404,3 +399,4 @@ int class_procfs_clean(void)
}
return 0;
}
+#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index acd2619..c1ef0c9 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -282,7 +282,6 @@ int LL_PROC_PROTO(proc_at_history)
#ifdef CONFIG_SYSCTL
static ctl_table_t obd_table[] = {
{
- INIT_CTL_NAME(OBD_TIMEOUT)
.procname = "timeout",
.data = &obd_timeout,
.maxlen = sizeof(int),
@@ -290,7 +289,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_set_timeout
},
{
- INIT_CTL_NAME(OBD_DEBUG_PEER_ON_TIMEOUT)
.procname = "debug_peer_on_timeout",
.data = &obd_debug_peer_on_timeout,
.maxlen = sizeof(int),
@@ -298,7 +296,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_dointvec
},
{
- INIT_CTL_NAME(OBD_DUMP_ON_TIMEOUT)
.procname = "dump_on_timeout",
.data = &obd_dump_on_timeout,
.maxlen = sizeof(int),
@@ -306,7 +303,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_dointvec
},
{
- INIT_CTL_NAME(OBD_DUMP_ON_EVICTION)
.procname = "dump_on_eviction",
.data = &obd_dump_on_eviction,
.maxlen = sizeof(int),
@@ -314,7 +310,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_dointvec
},
{
- INIT_CTL_NAME(OBD_MEMUSED)
.procname = "memused",
.data = NULL,
.maxlen = 0,
@@ -322,7 +317,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_memory_alloc
},
{
- INIT_CTL_NAME(OBD_PAGESUSED)
.procname = "pagesused",
.data = NULL,
.maxlen = 0,
@@ -330,7 +324,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_pages_alloc
},
{
- INIT_CTL_NAME(OBD_MAXMEMUSED)
.procname = "memused_max",
.data = NULL,
.maxlen = 0,
@@ -338,7 +331,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_mem_max
},
{
- INIT_CTL_NAME(OBD_MAXPAGESUSED)
.procname = "pagesused_max",
.data = NULL,
.maxlen = 0,
@@ -346,7 +338,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_pages_max
},
{
- INIT_CTL_NAME(OBD_LDLM_TIMEOUT)
.procname = "ldlm_timeout",
.data = &ldlm_timeout,
.maxlen = sizeof(int),
@@ -354,7 +345,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_set_timeout
},
{
- INIT_CTL_NAME(OBD_ALLOC_FAIL_RATE)
.procname = "alloc_fail_rate",
.data = &obd_alloc_fail_rate,
.maxlen = sizeof(int),
@@ -362,7 +352,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_alloc_fail_rate
},
{
- INIT_CTL_NAME(OBD_MAX_DIRTY_PAGES)
.procname = "max_dirty_mb",
.data = &obd_max_dirty_pages,
.maxlen = sizeof(int),
@@ -370,7 +359,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_max_dirty_pages_in_mb
},
{
- INIT_CTL_NAME(OBD_AT_MIN)
.procname = "at_min",
.data = &at_min,
.maxlen = sizeof(int),
@@ -378,7 +366,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_at_min
},
{
- INIT_CTL_NAME(OBD_AT_MAX)
.procname = "at_max",
.data = &at_max,
.maxlen = sizeof(int),
@@ -386,7 +373,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_at_max
},
{
- INIT_CTL_NAME(OBD_AT_EXTRA)
.procname = "at_extra",
.data = &at_extra,
.maxlen = sizeof(int),
@@ -394,7 +380,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_at_extra
},
{
- INIT_CTL_NAME(OBD_AT_EARLY_MARGIN)
.procname = "at_early_margin",
.data = &at_early_margin,
.maxlen = sizeof(int),
@@ -402,26 +387,24 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_at_early_margin
},
{
- INIT_CTL_NAME(OBD_AT_HISTORY)
.procname = "at_history",
.data = &at_history,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_at_history
},
- { INIT_CTL_NAME(0) }
+ {}
};
static ctl_table_t parent_table[] = {
{
- INIT_CTL_NAME(OBD_SYSCTL)
.procname = "lustre",
.data = NULL,
.maxlen = 0,
.mode = 0555,
.child = obd_table
},
- { INIT_CTL_NAME(0) }
+ {}
};
#endif
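
Dropping the INIT_CTL_NAME() entries relies on the modern sysctl convention: table entries are matched by .procname alone and the array is terminated by an empty brace initializer. A small sketch of that shape with hypothetical "demo" names, as it might be registered with register_sysctl_table() at module init:

#include <linux/sysctl.h>

static int demo_timeout = 30;

/* Entries carry no binary ctl_name any more; the empty initializer at
 * the end is what marks the end of the array. */
static struct ctl_table demo_table[] = {
	{
		.procname	= "timeout",
		.data		= &demo_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{}
};

static struct ctl_table demo_root[] = {
	{
		.procname	= "demo",
		.mode		= 0555,
		.child		= demo_table,
	},
	{}
};

/* typically registered once at init: register_sysctl_table(demo_root) */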
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 0cb4428..e0dfb08 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -62,7 +62,7 @@ struct llog_handle *llog_alloc_handle(void)
OBD_ALLOC_PTR(loghandle);
if (loghandle == NULL)
- return ERR_PTR(-ENOMEM);
+ return NULL;
init_rwsem(&loghandle->lgh_lock);
spin_lock_init(&loghandle->lgh_hdr_lock);
@@ -265,31 +265,6 @@ out:
}
EXPORT_SYMBOL(llog_init_handle);
-int llog_copy_handler(const struct lu_env *env,
- struct llog_handle *llh,
- struct llog_rec_hdr *rec,
- void *data)
-{
- struct llog_rec_hdr local_rec = *rec;
- struct llog_handle *local_llh = (struct llog_handle *)data;
- char *cfg_buf = (char*) (rec + 1);
- struct lustre_cfg *lcfg;
- int rc = 0;
-
- /* Append all records */
- local_rec.lrh_len -= sizeof(*rec) + sizeof(struct llog_rec_tail);
- rc = llog_write(env, local_llh, &local_rec, NULL, 0,
- (void *)cfg_buf, -1);
-
- lcfg = (struct lustre_cfg *)cfg_buf;
- CDEBUG(D_INFO, "idx=%d, rc=%d, len=%d, cmd %x %s %s\n",
- rec->lrh_index, rc, rec->lrh_len, lcfg->lcfg_command,
- lustre_cfg_string(lcfg, 0), lustre_cfg_string(lcfg, 1));
-
- return rc;
-}
-EXPORT_SYMBOL(llog_copy_handler);
-
static int llog_process_thread(void *arg)
{
struct llog_process_info *lpi = arg;
@@ -493,14 +468,6 @@ int llog_process(const struct lu_env *env, struct llog_handle *loghandle,
}
EXPORT_SYMBOL(llog_process);
-inline int llog_get_size(struct llog_handle *loghandle)
-{
- if (loghandle && loghandle->lgh_hdr)
- return loghandle->lgh_hdr->llh_count;
- return 0;
-}
-EXPORT_SYMBOL(llog_get_size);
-
int llog_reverse_process(const struct lu_env *env,
struct llog_handle *loghandle, llog_cb_t cb,
void *data, void *catdata)
@@ -767,8 +734,9 @@ int llog_open_create(const struct lu_env *env, struct llog_ctxt *ctxt,
struct llog_handle **res, struct llog_logid *logid,
char *name)
{
- struct thandle *th;
- int rc;
+ struct dt_device *d;
+ struct thandle *th;
+ int rc;
rc = llog_open(env, ctxt, res, logid, name, LLOG_OPEN_NEW);
if (rc)
@@ -777,27 +745,21 @@ int llog_open_create(const struct lu_env *env, struct llog_ctxt *ctxt,
if (llog_exist(*res))
return 0;
- if ((*res)->lgh_obj != NULL) {
- struct dt_device *d;
+ LASSERT((*res)->lgh_obj != NULL);
- d = lu2dt_dev((*res)->lgh_obj->do_lu.lo_dev);
+ d = lu2dt_dev((*res)->lgh_obj->do_lu.lo_dev);
- th = dt_trans_create(env, d);
- if (IS_ERR(th))
- GOTO(out, rc = PTR_ERR(th));
+ th = dt_trans_create(env, d);
+ if (IS_ERR(th))
+ GOTO(out, rc = PTR_ERR(th));
- rc = llog_declare_create(env, *res, th);
- if (rc == 0) {
- rc = dt_trans_start_local(env, d, th);
- if (rc == 0)
- rc = llog_create(env, *res, th);
- }
- dt_trans_stop(env, d, th);
- } else {
- /* lvfs compat code */
- LASSERT((*res)->lgh_file == NULL);
- rc = llog_create(env, *res, NULL);
+ rc = llog_declare_create(env, *res, th);
+ if (rc == 0) {
+ rc = dt_trans_start_local(env, d, th);
+ if (rc == 0)
+ rc = llog_create(env, *res, th);
}
+ dt_trans_stop(env, d, th);
out:
if (rc)
llog_close(env, *res);
@@ -842,41 +804,34 @@ int llog_write(const struct lu_env *env, struct llog_handle *loghandle,
struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
int cookiecount, void *buf, int idx)
{
- int rc;
+ struct dt_device *dt;
+ struct thandle *th;
+ int rc;
LASSERT(loghandle);
LASSERT(loghandle->lgh_ctxt);
+ LASSERT(loghandle->lgh_obj != NULL);
- if (loghandle->lgh_obj != NULL) {
- struct dt_device *dt;
- struct thandle *th;
-
- dt = lu2dt_dev(loghandle->lgh_obj->do_lu.lo_dev);
+ dt = lu2dt_dev(loghandle->lgh_obj->do_lu.lo_dev);
- th = dt_trans_create(env, dt);
- if (IS_ERR(th))
- return PTR_ERR(th);
+ th = dt_trans_create(env, dt);
+ if (IS_ERR(th))
+ return PTR_ERR(th);
- rc = llog_declare_write_rec(env, loghandle, rec, idx, th);
- if (rc)
- GOTO(out_trans, rc);
+ rc = llog_declare_write_rec(env, loghandle, rec, idx, th);
+ if (rc)
+ GOTO(out_trans, rc);
- rc = dt_trans_start_local(env, dt, th);
- if (rc)
- GOTO(out_trans, rc);
+ rc = dt_trans_start_local(env, dt, th);
+ if (rc)
+ GOTO(out_trans, rc);
- down_write(&loghandle->lgh_lock);
- rc = llog_write_rec(env, loghandle, rec, reccookie,
- cookiecount, buf, idx, th);
- up_write(&loghandle->lgh_lock);
+ down_write(&loghandle->lgh_lock);
+ rc = llog_write_rec(env, loghandle, rec, reccookie,
+ cookiecount, buf, idx, th);
+ up_write(&loghandle->lgh_lock);
out_trans:
- dt_trans_stop(env, dt, th);
- } else { /* lvfs compatibility */
- down_write(&loghandle->lgh_lock);
- rc = llog_write_rec(env, loghandle, rec, reccookie,
- cookiecount, buf, idx, NULL);
- up_write(&loghandle->lgh_lock);
- }
+ dt_trans_stop(env, dt, th);
return rc;
}
EXPORT_SYMBOL(llog_write);
@@ -932,3 +887,104 @@ out:
return rc;
}
EXPORT_SYMBOL(llog_close);
+
+int llog_is_empty(const struct lu_env *env, struct llog_ctxt *ctxt,
+ char *name)
+{
+ struct llog_handle *llh;
+ int rc = 0;
+
+ rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
+ if (rc < 0) {
+ if (likely(rc == -ENOENT))
+ rc = 0;
+ GOTO(out, rc);
+ }
+
+ rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL);
+ if (rc)
+ GOTO(out_close, rc);
+ rc = llog_get_size(llh);
+
+out_close:
+ llog_close(env, llh);
+out:
+ /* header is record 1 */
+ return rc <= 1;
+}
+EXPORT_SYMBOL(llog_is_empty);
+
+int llog_copy_handler(const struct lu_env *env, struct llog_handle *llh,
+ struct llog_rec_hdr *rec, void *data)
+{
+ struct llog_handle *copy_llh = data;
+
+ /* Append all records */
+ return llog_write(env, copy_llh, rec, NULL, 0, NULL, -1);
+}
+EXPORT_SYMBOL(llog_copy_handler);
+
+/* backup plain llog */
+int llog_backup(const struct lu_env *env, struct obd_device *obd,
+ struct llog_ctxt *ctxt, struct llog_ctxt *bctxt,
+ char *name, char *backup)
+{
+ struct llog_handle *llh, *bllh;
+ int rc;
+
+ /* open original log */
+ rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
+ if (rc < 0) {
+ /* The -ENOENT case is also reported to the caller, but
+ * silently, so the caller should handle it if needed.
+ */
+ if (rc != -ENOENT)
+ CERROR("%s: failed to open log %s: rc = %d\n",
+ obd->obd_name, name, rc);
+ return rc;
+ }
+
+ rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL);
+ if (rc)
+ GOTO(out_close, rc);
+
+ /* Make sure there's no old backup log */
+ rc = llog_erase(env, bctxt, NULL, backup);
+ if (rc < 0 && rc != -ENOENT)
+ GOTO(out_close, rc);
+
+ /* open backup log */
+ rc = llog_open_create(env, bctxt, &bllh, NULL, backup);
+ if (rc) {
+ CERROR("%s: failed to open backup logfile %s: rc = %d\n",
+ obd->obd_name, backup, rc);
+ GOTO(out_close, rc);
+ }
+
+ /* check that backup llog is not the same object as original one */
+ if (llh->lgh_obj == bllh->lgh_obj) {
+ CERROR("%s: backup llog %s to itself (%s), objects %p/%p\n",
+ obd->obd_name, name, backup, llh->lgh_obj,
+ bllh->lgh_obj);
+ GOTO(out_backup, rc = -EEXIST);
+ }
+
+ rc = llog_init_handle(env, bllh, LLOG_F_IS_PLAIN, NULL);
+ if (rc)
+ GOTO(out_backup, rc);
+
+ /* Copy log record by record */
+ rc = llog_process_or_fork(env, llh, llog_copy_handler, (void *)bllh,
+ NULL, false);
+ if (rc)
+ CERROR("%s: failed to backup log %s: rc = %d\n",
+ obd->obd_name, name, rc);
+out_backup:
+ llog_close(env, bllh);
+out_close:
+ llog_close(env, llh);
+ return rc;
+}
+EXPORT_SYMBOL(llog_backup);
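
llog_backup() leans entirely on the record iterator: llog_process_or_fork() walks every record of the source log and hands it to llog_copy_handler(), which appends it to the destination handle passed through the opaque data pointer. A self-contained plain-C sketch of that callback shape (hypothetical demo types, not the Lustre API):

struct rec { int len; char buf[32]; };

struct log {
	struct rec recs[16];
	int count;
};

typedef int (*rec_cb_t)(const struct rec *rec, void *data);

static int log_append(struct log *lg, const struct rec *rec)
{
	if (lg->count >= 16)
		return -1;		/* destination full */
	lg->recs[lg->count++] = *rec;
	return 0;
}

/* analogous to llog_process_or_fork(): call cb for every record */
static int log_process(const struct log *lg, rec_cb_t cb, void *data)
{
	int i, rc;

	for (i = 0; i < lg->count; i++) {
		rc = cb(&lg->recs[i], data);
		if (rc)
			return rc;	/* stop on the first error */
	}
	return 0;
}

/* analogous to llog_copy_handler(): the destination arrives as 'data' */
static int copy_cb(const struct rec *rec, void *data)
{
	return log_append(data, rec);
}

static int backup(const struct log *src, struct log *dst)
{
	return log_process(src, copy_cb, dst);
}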
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_test.c b/drivers/staging/lustre/lustre/obdclass/llog_test.c
index 178f89e..764068f 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_test.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_test.c
@@ -947,6 +947,10 @@ static void lprocfs_llog_test_init_vars(struct lprocfs_static_vars *lvars)
lvars->module_vars = lprocfs_llog_test_module_vars;
lvars->obd_vars = lprocfs_llog_test_obd_vars;
}
+#else
+static void lprocfs_llog_test_init_vars(struct lprocfs_static_vars *lvars)
+{
+}
#endif
static int llog_test_cleanup(struct obd_device *obd)
@@ -1048,7 +1052,7 @@ static struct obd_ops llog_obd_ops = {
static int __init llog_test_init(void)
{
- struct lprocfs_static_vars lvars;
+ struct lprocfs_static_vars uninitialized_var(lvars);
lprocfs_llog_test_init_vars(&lvars);
return class_register_type(&llog_obd_ops, NULL,
diff --git a/drivers/staging/lustre/lustre/obdclass/local_storage.c b/drivers/staging/lustre/lustre/obdclass/local_storage.c
index cc19fba..e79e4be 100644
--- a/drivers/staging/lustre/lustre/obdclass/local_storage.c
+++ b/drivers/staging/lustre/lustre/obdclass/local_storage.c
@@ -246,7 +246,7 @@ int local_object_create(const struct lu_env *env,
struct dt_object_format *dof, struct thandle *th)
{
struct dt_thread_info *dti = dt_info(env);
- obd_id lastid;
+ __le64 lastid;
int rc;
rc = dt_create(env, o, attr, NULL, dof, th);
@@ -855,9 +855,12 @@ out_los:
(*los)->los_seq = fid_seq(first_fid);
(*los)->los_last_oid = le64_to_cpu(lastid);
(*los)->los_obj = o;
- /* read value should not be less than initial one */
- LASSERTF((*los)->los_last_oid >= first_oid, "%u < %u\n",
- (*los)->los_last_oid, first_oid);
+ /* The value read should not be less than the initial one,
+ * but this can happen after an upgrade from an older fs.
+ * In that case just switch to first_oid in memory; it will be
+ * updated on disk when the first object is generated. */
+ if ((*los)->los_last_oid < first_oid)
+ (*los)->los_last_oid = first_oid;
}
out:
mutex_unlock(&ls->ls_los_mutex);
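
Replacing the LASSERTF with a clamp makes the on-disk counter self-healing: a value that is too small after an upgrade is simply raised to the expected floor in memory and rewritten with the next object. A one-line sketch of the idea with a hypothetical helper:

/* Hypothetical helper: tolerate a stale, too-small on-disk value by
 * clamping it to the expected floor instead of asserting. */
static unsigned long long sanitize_last_oid(unsigned long long on_disk,
					    unsigned long long floor)
{
	return on_disk < floor ? floor : on_disk;
}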
diff --git a/drivers/staging/lustre/lustre/obdclass/local_storage.h b/drivers/staging/lustre/lustre/obdclass/local_storage.h
index d553c37..0f63b8c 100644
--- a/drivers/staging/lustre/lustre/obdclass/local_storage.h
+++ b/drivers/staging/lustre/lustre/obdclass/local_storage.h
@@ -29,6 +29,8 @@
*
* Author: Mikhail Pershin <mike.pershin@intel.com>
*/
+#ifndef __LOCAL_STORAGE_H
+#define __LOCAL_STORAGE_H
#include <dt_object.h>
#include <obd.h>
@@ -86,3 +88,4 @@ struct los_ondisk {
};
#define LOS_MAGIC 0xdecafbee
+#endif
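
The new guard is the standard include-guard idiom, so the header can be pulled in more than once without redefinition errors. A minimal sketch with a hypothetical header name:

#ifndef __DEMO_STORAGE_H
#define __DEMO_STORAGE_H

/* declarations live here; a second #include sees __DEMO_STORAGE_H
 * already defined and skips the body entirely */
struct demo_storage_ops;

#endif /* __DEMO_STORAGE_H */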
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 02d76f8..ec3b605 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -46,11 +46,183 @@
#include <lustre/lustre_idl.h>
#include <linux/seq_file.h>
-#if defined(LPROCFS)
+static const char * const obd_connect_names[] = {
+ "read_only",
+ "lov_index",
+ "unused",
+ "write_grant",
+ "server_lock",
+ "version",
+ "request_portal",
+ "acl",
+ "xattr",
+ "create_on_write",
+ "truncate_lock",
+ "initial_transno",
+ "inode_bit_locks",
+ "join_file(obsolete)",
+ "getattr_by_fid",
+ "no_oh_for_devices",
+ "remote_client",
+ "remote_client_by_force",
+ "max_byte_per_rpc",
+ "64bit_qdata",
+ "mds_capability",
+ "oss_capability",
+ "early_lock_cancel",
+ "som",
+ "adaptive_timeouts",
+ "lru_resize",
+ "mds_mds_connection",
+ "real_conn",
+ "change_qunit_size",
+ "alt_checksum_algorithm",
+ "fid_is_enabled",
+ "version_recovery",
+ "pools",
+ "grant_shrink",
+ "skip_orphan",
+ "large_ea",
+ "full20",
+ "layout_lock",
+ "64bithash",
+ "object_max_bytes",
+ "imp_recov",
+ "jobstats",
+ "umask",
+ "einprogress",
+ "grant_param",
+ "flock_owner",
+ "lvb_type",
+ "nanoseconds_times",
+ "lightweight_conn",
+ "short_io",
+ "pingless",
+ "unknown",
+ NULL
+};
+
+int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep)
+{
+ __u64 mask = 1;
+ int i, ret = 0;
+
+ for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) {
+ if (flags & mask)
+ ret += snprintf(page + ret, count - ret, "%s%s",
+ ret ? sep : "", obd_connect_names[i]);
+ }
+ if (flags & ~(mask - 1))
+ ret += snprintf(page + ret, count - ret,
+ "%sunknown flags "LPX64,
+ ret ? sep : "", flags & ~(mask - 1));
+ return ret;
+}
+EXPORT_SYMBOL(obd_connect_flags2str);
+
+int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
+ int mult)
+{
+ long decimal_val, frac_val;
+ int prtn;
+
+ if (count < 10)
+ return -EINVAL;
+
+ decimal_val = val / mult;
+ prtn = snprintf(buffer, count, "%ld", decimal_val);
+ frac_val = val % mult;
+
+ if (prtn < (count - 4) && frac_val > 0) {
+ long temp_frac;
+ int i, temp_mult = 1, frac_bits = 0;
+
+ temp_frac = frac_val * 10;
+ buffer[prtn++] = '.';
+ while (frac_bits < 2 && (temp_frac / mult) < 1) {
+ /* only two fractional digits are kept */
+ buffer[prtn++] = '0';
+ temp_frac *= 10;
+ frac_bits++;
+ }
+ /*
+ * Cases to consider:
+ * 1. #echo x.00 > /proc/xxx output result : x
+ * 2. #echo x.0x > /proc/xxx output result : x.0x
+ * 3. #echo x.x0 > /proc/xxx output result : x.x
+ * 4. #echo x.xx > /proc/xxx output result : x.xx
+ * Only two fractional digits are kept.
+ */
+ for (i = 0; i < (5 - prtn); i++)
+ temp_mult *= 10;
+
+ frac_bits = min((int)count - prtn, 3 - frac_bits);
+ prtn += snprintf(buffer + prtn, frac_bits, "%ld",
+ frac_val * temp_mult / mult);
+
+ prtn--;
+ while (buffer[prtn] < '1' || buffer[prtn] > '9') {
+ prtn--;
+ if (buffer[prtn] == '.') {
+ prtn--;
+ break;
+ }
+ }
+ prtn++;
+ }
+ buffer[prtn++] = '\n';
+ return prtn;
+}
+EXPORT_SYMBOL(lprocfs_read_frac_helper);
+
+int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
+ int *val, int mult)
+{
+ char kernbuf[20], *end, *pbuf;
+
+ if (count > (sizeof(kernbuf) - 1))
+ return -EINVAL;
+
+ if (copy_from_user(kernbuf, buffer, count))
+ return -EFAULT;
+
+ kernbuf[count] = '\0';
+ pbuf = kernbuf;
+ if (*pbuf == '-') {
+ mult = -mult;
+ pbuf++;
+ }
+
+ *val = (int)simple_strtoul(pbuf, &end, 10) * mult;
+ if (pbuf == end)
+ return -EINVAL;
+
+ if (end != NULL && *end == '.') {
+ int temp_val, pow = 1;
+ int i;
+
+ pbuf = end + 1;
+ if (strlen(pbuf) > 5)
+ pbuf[5] = '\0'; /*only allow 5bits fractional*/
+
+ temp_val = (int)simple_strtoul(pbuf, &end, 10) * mult;
+
+ if (pbuf < end) {
+ for (i = 0; i < (end - pbuf); i++)
+ pow *= 10;
+
+ *val += temp_val / pow;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_write_frac_helper);
+
+#ifdef LPROCFS
static int lprocfs_no_percpu_stats = 0;
-CFS_MODULE_PARM(lprocfs_no_percpu_stats, "i", int, 0644,
- "Do not alloc percpu data for lprocfs stats");
+module_param(lprocfs_no_percpu_stats, int, 0644);
+MODULE_PARM_DESC(lprocfs_no_percpu_stats, "Do not alloc percpu data for lprocfs stats");
#define MAX_STRING_SIZE 128
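
module_param()/MODULE_PARM_DESC() are the stock replacements for the CFS_MODULE_PARM wrapper: the variable becomes settable on the module command line and, with mode 0644, writable through /sys/module/<name>/parameters/. A minimal hypothetical module showing the same usage:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical parameter mirroring lprocfs_no_percpu_stats above:
 * visible as /sys/module/demo/parameters/no_percpu_stats */
static int no_percpu_stats;
module_param(no_percpu_stats, int, 0644);
MODULE_PARM_DESC(no_percpu_stats, "Do not alloc percpu data for stats");

static int __init demo_init(void)
{
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");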
@@ -420,7 +592,6 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
{
unsigned int num_entry;
struct lprocfs_counter *percpu_cntr;
- struct lprocfs_counter_header *cntr_header;
int i;
unsigned long flags = 0;
@@ -439,7 +610,6 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
for (i = 0; i < num_entry; i++) {
if (stats->ls_percpu[i] == NULL)
continue;
- cntr_header = &stats->ls_cnt_header[idx];
percpu_cntr = lprocfs_stats_counter_get(stats, i, idx);
cnt->lc_count += percpu_cntr->lc_count;
@@ -481,62 +651,6 @@ static int obd_import_flags2str(struct obd_import *imp, struct seq_file *m)
}
#undef flags2str
-static const char *obd_connect_names[] = {
- "read_only",
- "lov_index",
- "unused",
- "write_grant",
- "server_lock",
- "version",
- "request_portal",
- "acl",
- "xattr",
- "create_on_write",
- "truncate_lock",
- "initial_transno",
- "inode_bit_locks",
- "join_file(obsolete)",
- "getattr_by_fid",
- "no_oh_for_devices",
- "remote_client",
- "remote_client_by_force",
- "max_byte_per_rpc",
- "64bit_qdata",
- "mds_capability",
- "oss_capability",
- "early_lock_cancel",
- "som",
- "adaptive_timeouts",
- "lru_resize",
- "mds_mds_connection",
- "real_conn",
- "change_qunit_size",
- "alt_checksum_algorithm",
- "fid_is_enabled",
- "version_recovery",
- "pools",
- "grant_shrink",
- "skip_orphan",
- "large_ea",
- "full20",
- "layout_lock",
- "64bithash",
- "object_max_bytes",
- "imp_recov",
- "jobstats",
- "umask",
- "einprogress",
- "grant_param",
- "flock_owner",
- "lvb_type",
- "nanoseconds_times",
- "lightweight_conn",
- "short_io",
- "pingless",
- "unknown",
- NULL
-};
-
static void obd_connect_seq_flags2str(struct seq_file *m, __u64 flags, char *sep)
{
__u64 mask = 1;
@@ -555,24 +669,6 @@ static void obd_connect_seq_flags2str(struct seq_file *m, __u64 flags, char *sep
first ? sep : "", flags & ~(mask - 1));
}
-int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep)
-{
- __u64 mask = 1;
- int i, ret = 0;
-
- for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) {
- if (flags & mask)
- ret += snprintf(page + ret, count - ret, "%s%s",
- ret ? sep : "", obd_connect_names[i]);
- }
- if (flags & ~(mask - 1))
- ret += snprintf(page + ret, count - ret,
- "%sunknown flags "LPX64,
- ret ? sep : "", flags & ~(mask - 1));
- return ret;
-}
-EXPORT_SYMBOL(obd_connect_flags2str);
-
int lprocfs_rd_import(struct seq_file *m, void *data)
{
struct lprocfs_counter ret;
@@ -999,7 +1095,6 @@ EXPORT_SYMBOL(lprocfs_free_stats);
void lprocfs_clear_stats(struct lprocfs_stats *stats)
{
struct lprocfs_counter *percpu_cntr;
- struct lprocfs_counter_header *header;
int i;
int j;
unsigned int num_entry;
@@ -1011,7 +1106,6 @@ void lprocfs_clear_stats(struct lprocfs_stats *stats)
if (stats->ls_percpu[i] == NULL)
continue;
for (j = 0; j < stats->ls_num; j++) {
- header = &stats->ls_cnt_header[j];
percpu_cntr = lprocfs_stats_counter_get(stats, i, j);
percpu_cntr->lc_count = 0;
percpu_cntr->lc_min = LC_MIN_INIT;
@@ -1662,104 +1756,6 @@ int lprocfs_write_helper(const char *buffer, unsigned long count,
}
EXPORT_SYMBOL(lprocfs_write_helper);
-int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
- int *val, int mult)
-{
- char kernbuf[20], *end, *pbuf;
-
- if (count > (sizeof(kernbuf) - 1))
- return -EINVAL;
-
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
-
- kernbuf[count] = '\0';
- pbuf = kernbuf;
- if (*pbuf == '-') {
- mult = -mult;
- pbuf++;
- }
-
- *val = (int)simple_strtoul(pbuf, &end, 10) * mult;
- if (pbuf == end)
- return -EINVAL;
-
- if (end != NULL && *end == '.') {
- int temp_val, pow = 1;
- int i;
-
- pbuf = end + 1;
- if (strlen(pbuf) > 5)
- pbuf[5] = '\0'; /*only allow 5bits fractional*/
-
- temp_val = (int)simple_strtoul(pbuf, &end, 10) * mult;
-
- if (pbuf < end) {
- for (i = 0; i < (end - pbuf); i++)
- pow *= 10;
-
- *val += temp_val / pow;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_write_frac_helper);
-
-int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
- int mult)
-{
- long decimal_val, frac_val;
- int prtn;
-
- if (count < 10)
- return -EINVAL;
-
- decimal_val = val / mult;
- prtn = snprintf(buffer, count, "%ld", decimal_val);
- frac_val = val % mult;
-
- if (prtn < (count - 4) && frac_val > 0) {
- long temp_frac;
- int i, temp_mult = 1, frac_bits = 0;
-
- temp_frac = frac_val * 10;
- buffer[prtn++] = '.';
- while (frac_bits < 2 && (temp_frac / mult) < 1 ) {
- /* only reserved 2 bits fraction */
- buffer[prtn++] ='0';
- temp_frac *= 10;
- frac_bits++;
- }
- /*
- * Need to think these cases :
- * 1. #echo x.00 > /proc/xxx output result : x
- * 2. #echo x.0x > /proc/xxx output result : x.0x
- * 3. #echo x.x0 > /proc/xxx output result : x.x
- * 4. #echo x.xx > /proc/xxx output result : x.xx
- * Only reserved 2 bits fraction.
- */
- for (i = 0; i < (5 - prtn); i++)
- temp_mult *= 10;
-
- frac_bits = min((int)count - prtn, 3 - frac_bits);
- prtn += snprintf(buffer + prtn, frac_bits, "%ld",
- frac_val * temp_mult / mult);
-
- prtn--;
- while(buffer[prtn] < '1' || buffer[prtn] > '9') {
- prtn--;
- if (buffer[prtn] == '.') {
- prtn--;
- break;
- }
- }
- prtn++;
- }
- buffer[prtn++] ='\n';
- return prtn;
-}
-EXPORT_SYMBOL(lprocfs_read_frac_helper);
-
int lprocfs_seq_read_frac_helper(struct seq_file *m, long val, int mult)
{
long decimal_val, frac_val;
@@ -1983,4 +1979,4 @@ int lprocfs_obd_rd_max_pages_per_rpc(struct seq_file *m, void *data)
}
EXPORT_SYMBOL(lprocfs_obd_rd_max_pages_per_rpc);
-#endif /* LPROCFS*/
+#endif
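
Moving obd_connect_flags2str() and the fraction helpers above the LPROCFS guard keeps them available in builds without procfs support. The flag decoder itself is just a walk of one name per bit, with any leftover bits dumped as a hex "unknown flags" value; a standalone userspace sketch of the same loop (demo names, not the Lustre table):

#include <stdio.h>

static const char * const names[] = { "read_only", "lov_index", "unused", NULL };

static int flags2str(char *buf, int count, unsigned long long flags,
		     const char *sep)
{
	unsigned long long mask = 1;
	int i, ret = 0;

	/* one name per bit, separated by 'sep' after the first hit */
	for (i = 0; names[i] != NULL; i++, mask <<= 1) {
		if (flags & mask)
			ret += snprintf(buf + ret, count - ret, "%s%s",
					ret ? sep : "", names[i]);
	}
	/* anything beyond the named bits is reported in hex */
	if (flags & ~(mask - 1))
		ret += snprintf(buf + ret, count - ret,
				"%sunknown flags %#llx",
				ret ? sep : "", flags & ~(mask - 1));
	return ret;
}

int main(void)
{
	char buf[128];

	flags2str(buf, sizeof(buf), 0x9, ",");	/* bit 0 named, bit 3 unknown */
	printf("%s\n", buf);			/* "read_only,unknown flags 0x8" */
	return 0;
}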
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 212823a..9887d8f 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -200,6 +200,8 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
struct lu_object *scan;
struct lu_object *top;
struct list_head *layers;
+ unsigned int init_mask = 0;
+ unsigned int init_flag;
int clean;
int result;
@@ -218,15 +220,17 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
*/
top->lo_header->loh_fid = *f;
layers = &top->lo_header->loh_layers;
+
do {
/*
* Call ->loo_object_init() repeatedly, until no more new
* object slices are created.
*/
clean = 1;
+ init_flag = 1;
list_for_each_entry(scan, layers, lo_linkage) {
- if (scan->lo_flags & LU_OBJECT_ALLOCATED)
- continue;
+ if (init_mask & init_flag)
+ goto next;
clean = 0;
scan->lo_header = top->lo_header;
result = scan->lo_ops->loo_object_init(env, scan, conf);
@@ -234,7 +238,9 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
lu_object_free(env, top);
return ERR_PTR(result);
}
- scan->lo_flags |= LU_OBJECT_ALLOCATED;
+ init_mask |= init_flag;
+next:
+ init_flag <<= 1;
}
} while (!clean);
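
Tracking progress in a local init_mask instead of a per-slice LU_OBJECT_ALLOCATED flag keeps the repeated passes cheap: each bit remembers one already-initialized layer, and the outer loop re-runs until a pass finds nothing left to do (since object init may grow the layer list). A standalone sketch of that bookkeeping:

#define NLAYERS 4

static int init_layer(int i)
{
	(void)i;		/* a real init may add more layers */
	return 0;
}

static int alloc_layers(void)
{
	unsigned int init_mask = 0, init_flag;
	int i, clean, rc;

	do {
		clean = 1;
		init_flag = 1;
		for (i = 0; i < NLAYERS; i++, init_flag <<= 1) {
			if (init_mask & init_flag)
				continue;	/* already initialized */
			clean = 0;
			rc = init_layer(i);
			if (rc)
				return rc;
			init_mask |= init_flag;	/* remember this layer */
		}
	} while (!clean);	/* stop once a full pass did no work */

	return 0;
}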
@@ -423,7 +429,7 @@ LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
*/
struct lu_context_key lu_global_key = {
.lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
- LCT_MG_THREAD | LCT_CL_THREAD,
+ LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL,
.lct_init = lu_global_key_init,
.lct_fini = lu_global_key_fini
};
@@ -487,23 +493,25 @@ void lu_object_print(const struct lu_env *env, void *cookie,
{
static const char ruler[] = "........................................";
struct lu_object_header *top;
- int depth;
+ int depth = 4;
top = o->lo_header;
lu_object_header_print(env, cookie, printer, top);
- (*printer)(env, cookie, "{ \n");
- list_for_each_entry(o, &top->loh_layers, lo_linkage) {
- depth = o->lo_depth + 4;
+ (*printer)(env, cookie, "{\n");
+ list_for_each_entry(o, &top->loh_layers, lo_linkage) {
/*
* print `.' \a depth times followed by type name and address
*/
(*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
o->lo_dev->ld_type->ldt_name, o);
+
if (o->lo_ops->loo_object_print != NULL)
- o->lo_ops->loo_object_print(env, cookie, printer, o);
+ (*o->lo_ops->loo_object_print)(env, cookie, printer, o);
+
(*printer)(env, cookie, "\n");
}
+
(*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);
@@ -830,8 +838,8 @@ enum {
};
static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
-CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
- "Percentage of memory to be used as lu_object cache");
+module_param(lu_cache_percent, int, 0644);
+MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache");
/**
* Return desired hash table order.
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index 68a4d6a..a69a630 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -631,6 +631,9 @@ int lustre_put_lsi(struct super_block *sb)
CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts));
if (atomic_dec_and_test(&lsi->lsi_mounts)) {
if (IS_SERVER(lsi) && lsi->lsi_osd_exp) {
+ lu_device_put(&lsi->lsi_dt_dev->dd_lu_dev);
+ lsi->lsi_osd_exp->exp_obd->obd_lvfs_ctxt.dt = NULL;
+ lsi->lsi_dt_dev = NULL;
obd_disconnect(lsi->lsi_osd_exp);
/* wait till OSD is gone */
obd_zombie_barrier();
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 1fb0ac4..9b2dea2 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1106,7 +1106,7 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d,
/* coverity[overrun-buffer-val] */
obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl);
if (IS_ERR(obj))
- GOTO(out, eco = (void*)obj);
+ GOTO(out, eco = (void *)obj);
eco = cl2echo_obj(obj);
if (eco->eo_deleted) {
diff --git a/drivers/staging/lustre/lustre/osc/Makefile b/drivers/staging/lustre/lustre/osc/Makefile
index bbd2f77..4488162 100644
--- a/drivers/staging/lustre/lustre/osc/Makefile
+++ b/drivers/staging/lustre/lustre/osc/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_LUSTRE_FS) += osc.o
-osc-y := osc_request.o lproc_osc.o osc_dev.o osc_object.o \
+osc-y := osc_request.o osc_dev.o osc_object.o \
osc_page.o osc_lock.o osc_io.o osc_quota.o osc_cache.o
+osc-$(CONFIG_PROC_FS) += lproc_osc.o
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index ef10e2a..0b59fc1 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -42,7 +42,6 @@
#include <linux/seq_file.h>
#include "osc_internal.h"
-#ifdef LPROCFS
static int osc_active_seq_show(struct seq_file *m, void *v)
{
struct obd_device *dev = m->private;
@@ -724,4 +723,3 @@ void lprocfs_osc_init_vars(struct lprocfs_static_vars *lvars)
lvars->module_vars = lprocfs_osc_module_vars;
lvars->obd_vars = lprocfs_osc_obd_vars;
}
-#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 00295da..be4511e 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1703,7 +1703,7 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
return is_ready;
}
-/* this is trying to propogate async writeback errors back up to the
+/* this is trying to propagate async writeback errors back up to the
* application. As an async write fails we record the error code for later if
* the app does an fsync. As long as errors persist we force future rpcs to be
* sync so that the app can get a sync error and break the cycle of queueing
@@ -2006,7 +2006,7 @@ static struct osc_object *osc_next_obj(struct client_obd *cli)
/* then if we have cache waiters, return all objects with queued
* writes. This is especially important when many small files
* have filled up the cache and not been fired into rpcs because
- * they don't pass the nr_pending/object threshhold */
+ * they don't pass the nr_pending/object threshold */
if (!list_empty(&cli->cl_cache_waiters) &&
!list_empty(&cli->cl_loi_write_list))
return list_to_obj(&cli->cl_loi_write_list, write_item);
@@ -2226,7 +2226,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
/* Add this page into extent by the following steps:
* 1. if there exists an active extent for this IO, mostly this page
* can be added to the active extent and sometimes we need to
- * expand extent to accomodate this page;
+ * expand extent to accommodate this page;
* 2. otherwise, a new extent will be allocated. */
ext = oio->oi_active;
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index a3aa9b6..9e7899f 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -299,7 +299,7 @@ struct osc_lock {
ols_flush:1,
/**
* if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
- * the EVAVAIL error as torerable, this will make upper logic happy
+ * the EVAVAIL error as tolerable, this will make upper logic happy
* to wait all glimpse locks to each OSTs to be completed.
* Glimpse lock converts to normal lock if the server lock is
* granted.
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index c90abfb..ef7b9c2 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -929,7 +929,7 @@ static void osc_lock_build_einfo(const struct lu_env *env,
* Determine if the lock should be converted into a lockless lock.
*
* Steps to check:
- * - if the lock has an explicite requirment for a non-lockless lock;
+ * - if the lock has an explicit requirement for a non-lockless lock;
* - if the io lock request type ci_lockreq;
* - send the enqueue rpc to ost to make the further decision;
* - special treat to truncate lockless lock
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 6c20b8e..4909e486 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -587,7 +587,7 @@ static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
* number of pages to avoid running out of LRU budget, and.. */
static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
-/* free this number at most otherwise it will take too long time to finsih. */
+/* free this number at most otherwise it will take too long time to finish. */
static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
@@ -606,7 +606,7 @@ static int osc_cache_too_much(struct client_obd *cli)
return min(pages, lru_shrink_max);
/* if it's going to run out LRU slots, we should free some, but not
- * too much to maintain faireness among OSCs. */
+ * too much to maintain fairness among OSCs. */
if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
unsigned long tmp;
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index cb19778..ee6953a 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -46,10 +46,6 @@
#include <obd_ost.h>
#include <obd_lov.h>
-#ifdef __CYGWIN__
-# include <ctype.h>
-#endif
-
#include <lustre_ha.h>
#include <lprocfs_status.h>
#include <lustre_log.h>
@@ -777,7 +773,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
osc_pack_capa(req, body, (struct obd_capa *)capa);
ptlrpc_request_set_replen(req);
- /* If osc_destory is for destroying the unlink orphan,
+ /* If osc_destroy is for destroying the unlink orphan,
* sent from MDT to OST, which should not be blocked here,
* because the process might be triggered by ptlrpcd, and
* it is not good to block ptlrpcd thread (b=16006)*/
@@ -1197,8 +1193,12 @@ static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
pga[i]->off & ~CFS_PAGE_MASK,
count);
- LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
- (int)(pga[i]->off & ~CFS_PAGE_MASK));
+ CDEBUG(D_PAGE,
+ "page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n",
+ pga[i]->pg, pga[i]->pg->mapping, pga[i]->pg->index,
+ (long)pga[i]->pg->flags, page_count(pga[i]->pg),
+ page_private(pga[i]->pg),
+ (int)(pga[i]->off & ~CFS_PAGE_MASK));
nob -= pga[i]->count;
pg_count--;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/Makefile b/drivers/staging/lustre/lustre/ptlrpc/Makefile
index 6d78b80..1c338aa 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/Makefile
+++ b/drivers/staging/lustre/lustre/ptlrpc/Makefile
@@ -10,12 +10,13 @@ ldlm_objs += $(LDLM)ldlm_pool.o
ldlm_objs += $(LDLM)interval_tree.o
ptlrpc_objs := client.o recover.o connection.o niobuf.o pack_generic.o
ptlrpc_objs += events.o ptlrpc_module.o service.o pinger.o
-ptlrpc_objs += llog_net.o llog_client.o llog_server.o import.o ptlrpcd.o
+ptlrpc_objs += llog_net.o llog_client.o import.o ptlrpcd.o
ptlrpc_objs += pers.o lproc_ptlrpc.o wiretest.o layout.o
-ptlrpc_objs += sec.o sec_bulk.o sec_gc.o sec_config.o sec_lproc.o
+ptlrpc_objs += sec.o sec_bulk.o sec_gc.o sec_config.o
ptlrpc_objs += sec_null.o sec_plain.o nrs.o nrs_fifo.o
ptlrpc-y := $(ldlm_objs) $(ptlrpc_objs)
+ptlrpc-$(CONFIG_PROC_FS) += sec_lproc.o
ptlrpc-$(CONFIG_LUSTRE_TRANSLATE_ERRNOS) += errno.o
obj-$(CONFIG_PTLRPC_GSS) += gss/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index c2ab0c8..d90efe4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -200,7 +200,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
class_import_put(desc->bd_import);
if (unpin) {
- for (i = 0; i < desc->bd_iov_count ; i++)
+ for (i = 0; i < desc->bd_iov_count; i++)
page_cache_release(desc->bd_iov[i].kiov_page);
}
@@ -459,7 +459,7 @@ ptlrpc_init_rq_pool(int num_rq, int msgsize,
{
struct ptlrpc_request_pool *pool;
- OBD_ALLOC(pool, sizeof (struct ptlrpc_request_pool));
+ OBD_ALLOC(pool, sizeof(struct ptlrpc_request_pool));
if (!pool)
return NULL;
@@ -475,7 +475,7 @@ ptlrpc_init_rq_pool(int num_rq, int msgsize,
if (list_empty(&pool->prp_req_list)) {
/* have not allocated a single request for the pool */
- OBD_FREE(pool, sizeof (struct ptlrpc_request_pool));
+ OBD_FREE(pool, sizeof(struct ptlrpc_request_pool));
pool = NULL;
}
return pool;
@@ -881,7 +881,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
/* Requests on the set should either all be completed, or all be new */
expected_phase = (atomic_read(&set->set_remaining) == 0) ?
RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
- list_for_each (tmp, &set->set_requests) {
+ list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
list_entry(tmp, struct ptlrpc_request,
rq_set_chain);
@@ -912,7 +912,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
req->rq_invalid_rqset = 0;
spin_unlock(&req->rq_lock);
- ptlrpc_req_finished (req);
+ ptlrpc_req_finished(req);
}
LASSERT(atomic_read(&set->set_remaining) == 0);
@@ -1020,7 +1020,7 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
{
int delay = 0;
- LASSERT (status != NULL);
+ LASSERT(status != NULL);
*status = 0;
if (req->rq_ctx_init || req->rq_ctx_fini) {
@@ -1039,7 +1039,7 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
*status = -EIO;
} else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
imp->imp_state == LUSTRE_IMP_CONNECTING) {
- /* allow CONNECT even if import is invalid */ ;
+ /* allow CONNECT even if import is invalid */
if (atomic_read(&imp->imp_inval_count) != 0) {
DEBUG_REQ(D_ERROR, req, "invalidate in flight");
*status = -EIO;
@@ -1596,7 +1596,8 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
continue;
spin_lock(&imp->imp_lock);
- if (ptlrpc_import_delay_req(imp, req, &status)){
+ if (ptlrpc_import_delay_req(imp, req,
+ &status)) {
/* put on delay list - only if we wait
* recovery finished - before send */
list_del_init(&req->rq_list);
@@ -1752,7 +1753,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- interpret:
+interpret:
LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
/* This moves to "unregistering" phase we need to wait for
@@ -1907,7 +1908,7 @@ int ptlrpc_expired_set(void *data)
/*
* A timeout expired. See which reqs it applies to...
*/
- list_for_each (tmp, &set->set_requests) {
+ list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
list_entry(tmp, struct ptlrpc_request,
rq_set_chain);
@@ -2688,7 +2689,7 @@ int ptlrpc_replay_req(struct ptlrpc_request *req)
LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
- LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
+ LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
memset(aa, 0, sizeof(*aa));
@@ -2962,7 +2963,7 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
init_waitqueue_head(&req->rq_set_waitq);
atomic_set(&req->rq_refcount, 1);
- CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
+ CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
args = ptlrpc_req_async_args(req);
args->magic = PTLRPC_WORK_MAGIC;
args->cb = cb;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 58d089c..f66cfea 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -56,9 +56,9 @@ void request_out_callback(lnet_event_t *ev)
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_request *req = cbid->cbid_arg;
- LASSERT (ev->type == LNET_EVENT_SEND ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT (ev->unlinked);
+ LASSERT(ev->type == LNET_EVENT_SEND ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->unlinked);
DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
@@ -90,9 +90,9 @@ void reply_in_callback(lnet_event_t *ev)
DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
- LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
- LASSERT (ev->md.start == req->rq_repbuf);
- LASSERT (ev->offset + ev->mlength <= req->rq_repbuf_len);
+ LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->md.start == req->rq_repbuf);
+ LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
/* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
for adaptive timeouts' early reply. */
LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
@@ -113,7 +113,7 @@ void reply_in_callback(lnet_event_t *ev)
goto out_wake;
}
- if (ev->mlength < ev->rlength ) {
+ if (ev->mlength < ev->rlength) {
CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
req->rq_replen, ev->rlength, ev->offset);
req->rq_reply_truncate = 1;
@@ -167,18 +167,18 @@ out_wake:
/*
* Client's bulk has been written/read
*/
-void client_bulk_callback (lnet_event_t *ev)
+void client_bulk_callback(lnet_event_t *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
struct ptlrpc_request *req;
- LASSERT ((desc->bd_type == BULK_PUT_SINK &&
- ev->type == LNET_EVENT_PUT) ||
- (desc->bd_type == BULK_GET_SOURCE &&
- ev->type == LNET_EVENT_GET) ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT (ev->unlinked);
+ LASSERT((desc->bd_type == BULK_PUT_SINK &&
+ ev->type == LNET_EVENT_PUT) ||
+ (desc->bd_type == BULK_GET_SOURCE &&
+ ev->type == LNET_EVENT_GET) ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->unlinked);
if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
ev->status = -EIO;
@@ -283,11 +283,11 @@ void request_in_callback(lnet_event_t *ev)
struct ptlrpc_service *service = svcpt->scp_service;
struct ptlrpc_request *req;
- LASSERT (ev->type == LNET_EVENT_PUT ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer);
- LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <=
- rqbd->rqbd_buffer + service->srv_buf_size);
+ LASSERT(ev->type == LNET_EVENT_PUT ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer);
+ LASSERT((char *)ev->md.start + ev->offset + ev->mlength <=
+ rqbd->rqbd_buffer + service->srv_buf_size);
CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
"event type %d, status %d, service %s\n",
@@ -300,9 +300,9 @@ void request_in_callback(lnet_event_t *ev)
* we'd have to re-post the rqbd, which we can't do in this
* context. */
req = &rqbd->rqbd_req;
- memset(req, 0, sizeof (*req));
+ memset(req, 0, sizeof(*req));
} else {
- LASSERT (ev->type == LNET_EVENT_PUT);
+ LASSERT(ev->type == LNET_EVENT_PUT);
if (ev->status != 0) {
/* We moaned above already... */
return;
@@ -381,19 +381,19 @@ void reply_out_callback(lnet_event_t *ev)
struct ptlrpc_reply_state *rs = cbid->cbid_arg;
struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
- LASSERT (ev->type == LNET_EVENT_SEND ||
- ev->type == LNET_EVENT_ACK ||
- ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->type == LNET_EVENT_SEND ||
+ ev->type == LNET_EVENT_ACK ||
+ ev->type == LNET_EVENT_UNLINK);
if (!rs->rs_difficult) {
/* 'Easy' replies have no further processing so I drop the
* net's ref on 'rs' */
- LASSERT (ev->unlinked);
+ LASSERT(ev->unlinked);
ptlrpc_rs_decref(rs);
return;
}
- LASSERT (rs->rs_on_net);
+ LASSERT(rs->rs_on_net);
if (ev->unlinked) {
/* Last network callback. The net's ref on 'rs' stays put
@@ -419,18 +419,17 @@ static void ptlrpc_master_callback(lnet_event_t *ev)
void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;
/* Honestly, it's best to find out early. */
- LASSERT (cbid->cbid_arg != LP_POISON);
- LASSERT (callback == request_out_callback ||
- callback == reply_in_callback ||
- callback == client_bulk_callback ||
- callback == request_in_callback ||
- callback == reply_out_callback
- );
-
- callback (ev);
+ LASSERT(cbid->cbid_arg != LP_POISON);
+ LASSERT(callback == request_out_callback ||
+ callback == reply_in_callback ||
+ callback == client_bulk_callback ||
+ callback == request_in_callback ||
+ callback == reply_out_callback);
+
+ callback(ev);
}
-int ptlrpc_uuid_to_peer (struct obd_uuid *uuid,
+int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
lnet_process_id_t *peer, lnet_nid_t *self)
{
int best_dist = 0;
@@ -538,7 +537,7 @@ int ptlrpc_ni_init(void)
/* We're not passing any limits yet... */
rc = LNetNIInit(pid);
if (rc < 0) {
- CDEBUG (D_NET, "Can't init network interface: %d\n", rc);
+ CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
return (-ENOENT);
}
@@ -552,7 +551,7 @@ int ptlrpc_ni_init(void)
if (rc == 0)
return 0;
- CERROR ("Failed to allocate event queue: %d\n", rc);
+ CERROR("Failed to allocate event queue: %d\n", rc);
LNetNIFini();
return (-ENOMEM);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_asn1.h b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_asn1.h
index c70eb00..bdfd838 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_asn1.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_asn1.h
@@ -64,9 +64,9 @@
#define G_REFLECT (-2045022961L)
#define G_WRONG_TOKID (-2045022960L)
-#define g_OID_equal(o1,o2) \
- (((o1)->len == (o2)->len) && \
- (memcmp((o1)->data,(o2)->data,(int) (o1)->len) == 0))
+#define g_OID_equal(o1, o2) \
+ (((o1)->len == (o2)->len) && \
+ (memcmp((o1)->data, (o2)->data, (int) (o1)->len) == 0))
__u32 g_verify_token_header(rawobj_t *mech,
int *body_size,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c
index b518d8a..7852bf3 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c
@@ -37,7 +37,6 @@
*/
#define DEBUG_SUBSYSTEM S_SEC
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_err.h b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_err.h
index 1342579..37ec101 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_err.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_err.h
@@ -106,14 +106,14 @@ typedef unsigned int OM_uint32;
* evaluates its argument only once.
*/
#define GSS_CALLING_ERROR(x) \
- ((x) & (GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET))
+ ((x) & (GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET))
#define GSS_ROUTINE_ERROR(x) \
- ((x) & (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET))
+ ((x) & (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET))
#define GSS_SUPPLEMENTARY_INFO(x) \
- ((x) & (GSS_C_SUPPLEMENTARY_MASK << GSS_C_SUPPLEMENTARY_OFFSET))
+ ((x) & (GSS_C_SUPPLEMENTARY_MASK << GSS_C_SUPPLEMENTARY_OFFSET))
#define GSS_ERROR(x) \
- ((x) & ((GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET) | \
- (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET)))
+ ((x) & ((GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET) | \
+ (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET)))
/*
* Now the actual status code definitions
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c
index 20b1638..56c2828 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c
@@ -42,7 +42,6 @@
*/
#define DEBUG_SUBSYSTEM S_SEC
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
index 188dbbf..d43a13c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
@@ -165,7 +165,7 @@ void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
init_timer(timer);
timer->expires = timeout;
- timer->data = (unsigned long ) ctx;
+ timer->data = (unsigned long) ctx;
timer->function = ctx_upcall_timeout_kr;
add_timer(timer);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
index c106a9e..b9fa3b4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -1276,7 +1276,7 @@ arc4_out_tfm:
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
- do {} while(0); /* just to avoid compile warning */
+ do {} while (0); /* just to avoid compile warning */
} else {
rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
3, data_desc, &cipher, 1);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_mech_switch.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_mech_switch.c
index 8cdad80..99462e0 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_mech_switch.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_mech_switch.c
@@ -44,7 +44,6 @@
*/
#define DEBUG_SUBSYSTEM S_SEC
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c b/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c
index de100a1..a0a74e5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c
@@ -35,7 +35,6 @@
*/
#define DEBUG_SUBSYSTEM S_SEC
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c b/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
index b42ddda..8ce6271 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
@@ -483,7 +483,7 @@ int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
memset(window, 0, win_size / 8);
*max_seq = seq_num;
} else {
- while(*max_seq < seq_num) {
+ while (*max_seq < seq_num) {
(*max_seq)++;
__clear_bit((*max_seq) % win_size, window);
}
@@ -804,7 +804,8 @@ int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
case PTLRPC_GSS_PROC_DATA:
pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
- if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
+ if (!req->rq_early &&
+ !equi(req->rq_pack_bulk == 1, pack_bulk)) {
CERROR("%s bulk flag in reply\n",
req->rq_pack_bulk ? "missing" : "unexpected");
return -EPROTO;
@@ -1009,7 +1010,8 @@ int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
case PTLRPC_GSS_PROC_DATA:
pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
- if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
+ if (!req->rq_early &&
+ !equi(req->rq_pack_bulk == 1, pack_bulk)) {
CERROR("%s bulk flag in reply\n",
req->rq_pack_bulk ? "missing" : "unexpected");
return -EPROTO;
@@ -1979,7 +1981,7 @@ int gss_svc_handle_init(struct ptlrpc_request *req,
return SECSVC_DROP;
}
- if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4){
+ if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4) {
CERROR("Invalid bufcount %d\n", reqbuf->lm_bufcount);
return SECSVC_DROP;
}
@@ -2369,7 +2371,7 @@ int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
if (swabbed)
gss_header_swabber(ghdr);
- switch(ghdr->gh_proc) {
+ switch (ghdr->gh_proc) {
case PTLRPC_GSS_PROC_INIT:
case PTLRPC_GSS_PROC_CONTINUE_INIT:
rc = gss_svc_handle_init(req, gw);
@@ -2388,7 +2390,7 @@ int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
switch (rc) {
case SECSVC_OK:
- LASSERT (grctx->src_ctx);
+ LASSERT(grctx->src_ctx);
req->rq_auth_gss = 1;
req->rq_auth_remote = grctx->src_ctx->gsc_remote;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index 7b96a0e..f465547 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -72,23 +72,23 @@ static void __import_set_state(struct obd_import *imp,
}
/* A CLOSED import should remain so. */
-#define IMPORT_SET_STATE_NOLOCK(imp, state) \
-do { \
- if (imp->imp_state != LUSTRE_IMP_CLOSED) { \
- CDEBUG(D_HA, "%p %s: changing import state from %s to %s\n", \
- imp, obd2cli_tgt(imp->imp_obd), \
- ptlrpc_import_state_name(imp->imp_state), \
- ptlrpc_import_state_name(state)); \
- __import_set_state(imp, state); \
- } \
-} while(0)
+#define IMPORT_SET_STATE_NOLOCK(imp, state) \
+do { \
+ if (imp->imp_state != LUSTRE_IMP_CLOSED) { \
+ CDEBUG(D_HA, "%p %s: changing import state from %s to %s\n", \
+ imp, obd2cli_tgt(imp->imp_obd), \
+ ptlrpc_import_state_name(imp->imp_state), \
+ ptlrpc_import_state_name(state)); \
+ __import_set_state(imp, state); \
+ } \
+} while (0)
#define IMPORT_SET_STATE(imp, state) \
do { \
spin_lock(&imp->imp_lock); \
IMPORT_SET_STATE_NOLOCK(imp, state); \
spin_unlock(&imp->imp_lock); \
-} while(0)
+} while (0)
static int ptlrpc_connect_interpret(const struct lu_env *env,
@@ -170,7 +170,6 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
target_len, target_start,
libcfs_nid2str(imp->imp_connection->c_peer.nid));
}
- ptlrpc_deactivate_timeouts(imp);
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
spin_unlock(&imp->imp_lock);
@@ -383,7 +382,6 @@ void ptlrpc_activate_import(struct obd_import *imp)
spin_lock(&imp->imp_lock);
imp->imp_invalid = 0;
- ptlrpc_activate_timeouts(imp);
spin_unlock(&imp->imp_lock);
obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
}
@@ -680,7 +678,7 @@ int ptlrpc_connect_import(struct obd_import *imp)
ptlrpc_request_set_replen(request);
request->rq_interpret_reply = ptlrpc_connect_interpret;
- CLASSERT(sizeof (*aa) <= sizeof (request->rq_async_args));
+ CLASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
aa = ptlrpc_req_async_args(request);
memset(aa, 0, sizeof(*aa));
@@ -859,7 +857,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
if (MSG_CONNECT_RECONNECT & msg_flags) {
memset(&old_hdl, 0, sizeof(old_hdl));
if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg),
- sizeof (old_hdl))) {
+ sizeof(old_hdl))) {
LCONSOLE_WARN("Reconnect to %s (at @%s) failed due "
"bad handle "LPX64"\n",
obd2cli_tgt(imp->imp_obd),
@@ -1135,9 +1133,11 @@ out:
if (ocd &&
(ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
(ocd->ocd_version != LUSTRE_VERSION_CODE)) {
- /* Actually servers are only supposed to refuse
- connection from liblustre clients, so we should
- never see this from VFS context */
+ /*
+ * Actually servers are only supposed to refuse
+ * connection from liblustre clients, so we
+ * should never see this from VFS context
+ */
LCONSOLE_ERROR_MSG(0x16a, "Server %s version "
"(%d.%d.%d.%d)"
" refused connection from this client "
@@ -1507,7 +1507,7 @@ int at_measured(struct adaptive_timeout *at, unsigned int val)
at->at_worst_time = now;
at->at_hist[0] = val;
at->at_binstart = now;
- } else if (now - at->at_binstart < binlimit ) {
+ } else if (now - at->at_binstart < binlimit) {
/* in bin 0 */
at->at_hist[0] = max(val, at->at_hist[0]);
at->at_current = max(val, at->at_current);
@@ -1517,7 +1517,7 @@ int at_measured(struct adaptive_timeout *at, unsigned int val)
/* move bins over */
shift = (now - at->at_binstart) / binlimit;
LASSERT(shift > 0);
- for(i = AT_BINS - 1; i >= 0; i--) {
+ for (i = AT_BINS - 1; i >= 0; i--) {
if (i >= shift) {
at->at_hist[i] = at->at_hist[i - shift];
maxv = max(maxv, at->at_hist[i]);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index d0a6e56..dfcb410 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -145,6 +145,14 @@ static const struct req_msg_field *mdt_close_client[] = {
&RMF_CAPA1
};
+static const struct req_msg_field *mdt_release_close_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_EPOCH,
+ &RMF_REC_REINT,
+ &RMF_CAPA1,
+ &RMF_CLOSE_DATA
+};
+
static const struct req_msg_field *obd_statfs_server[] = {
&RMF_PTLRPC_BODY,
&RMF_OBD_STATFS
@@ -454,6 +462,25 @@ static const struct req_msg_field *ldlm_intent_unlink_client[] = {
&RMF_NAME
};
+static const struct req_msg_field *ldlm_intent_getxattr_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REQ,
+ &RMF_LDLM_INTENT,
+ &RMF_MDT_BODY,
+ &RMF_CAPA1,
+};
+
+static const struct req_msg_field *ldlm_intent_getxattr_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REP,
+ &RMF_MDT_BODY,
+ &RMF_MDT_MD,
+ &RMF_ACL, /* for req_capsule_extend/mdt_intent_policy */
+ &RMF_EADATA,
+ &RMF_EAVALS,
+ &RMF_EAVALS_LENS
+};
+
static const struct req_msg_field *mds_getxattr_client[] = {
&RMF_PTLRPC_BODY,
&RMF_MDT_BODY,
@@ -666,6 +693,7 @@ static struct req_format *req_formats[] = {
&RQF_MDS_GETXATTR,
&RQF_MDS_SYNC,
&RQF_MDS_CLOSE,
+ &RQF_MDS_RELEASE_CLOSE,
&RQF_MDS_PIN,
&RQF_MDS_UNPIN,
&RQF_MDS_READPAGE,
@@ -730,6 +758,7 @@ static struct req_format *req_formats[] = {
&RQF_LDLM_INTENT_OPEN,
&RQF_LDLM_INTENT_CREATE,
&RQF_LDLM_INTENT_UNLINK,
+ &RQF_LDLM_INTENT_GETXATTR,
&RQF_LDLM_INTENT_QUOTA,
&RQF_QUOTA_DQACQ,
&RQF_LOG_CANCEL,
@@ -738,7 +767,8 @@ static struct req_format *req_formats[] = {
&RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK,
&RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK,
&RQF_LLOG_ORIGIN_HANDLE_READ_HEADER,
- &RQF_LLOG_ORIGIN_CONNECT
+ &RQF_LLOG_ORIGIN_CONNECT,
+ &RQF_CONNECT,
};
struct req_msg_field {
@@ -884,6 +914,11 @@ struct req_msg_field RMF_PTLRPC_BODY =
sizeof(struct ptlrpc_body), lustre_swab_ptlrpc_body, NULL);
EXPORT_SYMBOL(RMF_PTLRPC_BODY);
+struct req_msg_field RMF_CLOSE_DATA =
+ DEFINE_MSGF("data_version", 0,
+ sizeof(struct close_data), lustre_swab_close_data, NULL);
+EXPORT_SYMBOL(RMF_CLOSE_DATA);
+
struct req_msg_field RMF_OBD_STATFS =
DEFINE_MSGF("obd_statfs", 0,
sizeof(struct obd_statfs), lustre_swab_obd_statfs, NULL);
@@ -998,6 +1033,9 @@ struct req_msg_field RMF_EADATA = DEFINE_MSGF("eadata", 0, -1,
NULL, NULL);
EXPORT_SYMBOL(RMF_EADATA);
+struct req_msg_field RMF_EAVALS = DEFINE_MSGF("eavals", 0, -1, NULL, NULL);
+EXPORT_SYMBOL(RMF_EAVALS);
+
struct req_msg_field RMF_ACL =
DEFINE_MSGF("acl", RMF_F_NO_SIZE_CHECK,
LUSTRE_POSIX_ACL_MAX_SIZE, NULL, NULL);
@@ -1049,6 +1087,11 @@ struct req_msg_field RMF_RCS =
lustre_swab_generic_32s, dump_rcs);
EXPORT_SYMBOL(RMF_RCS);
+struct req_msg_field RMF_EAVALS_LENS =
+ DEFINE_MSGF("eavals_lens", RMF_F_STRUCT_ARRAY, sizeof(__u32),
+ lustre_swab_generic_32s, NULL);
+EXPORT_SYMBOL(RMF_EAVALS_LENS);
+
struct req_msg_field RMF_OBD_ID =
DEFINE_MSGF("obd_id", 0,
sizeof(obd_id), lustre_swab_ost_last_id, NULL);
@@ -1406,11 +1449,22 @@ struct req_format RQF_LDLM_INTENT_UNLINK =
ldlm_intent_unlink_client, ldlm_intent_server);
EXPORT_SYMBOL(RQF_LDLM_INTENT_UNLINK);
+struct req_format RQF_LDLM_INTENT_GETXATTR =
+ DEFINE_REQ_FMT0("LDLM_INTENT_GETXATTR",
+ ldlm_intent_getxattr_client,
+ ldlm_intent_getxattr_server);
+EXPORT_SYMBOL(RQF_LDLM_INTENT_GETXATTR);
+
struct req_format RQF_MDS_CLOSE =
DEFINE_REQ_FMT0("MDS_CLOSE",
mdt_close_client, mds_last_unlink_server);
EXPORT_SYMBOL(RQF_MDS_CLOSE);
+struct req_format RQF_MDS_RELEASE_CLOSE =
+ DEFINE_REQ_FMT0("MDS_CLOSE",
+ mdt_release_close_client, mds_last_unlink_server);
+EXPORT_SYMBOL(RQF_MDS_RELEASE_CLOSE);
+
struct req_format RQF_MDS_PIN =
DEFINE_REQ_FMT0("MDS_PIN",
mdt_body_capa, mdt_body_only);
@@ -1504,6 +1558,10 @@ struct req_format RQF_LLOG_ORIGIN_CONNECT =
DEFINE_REQ_FMT0("LLOG_ORIGIN_CONNECT", llogd_conn_body_only, empty);
EXPORT_SYMBOL(RQF_LLOG_ORIGIN_CONNECT);
+struct req_format RQF_CONNECT =
+ DEFINE_REQ_FMT0("CONNECT", obd_connect_client, obd_connect_server);
+EXPORT_SYMBOL(RQF_CONNECT);
+
struct req_format RQF_OST_CONNECT =
DEFINE_REQ_FMT0("OST_CONNECT",
obd_connect_client, obd_connect_server);
@@ -1808,7 +1866,7 @@ swabber_dumper_helper(struct req_capsule *pill,
const struct req_msg_field *field,
enum req_location loc,
int offset,
- void *value, int len, int dump, void (*swabber)( void *))
+ void *value, int len, int dump, void (*swabber)(void *))
{
void *p;
int i;
@@ -1824,8 +1882,11 @@ swabber_dumper_helper(struct req_capsule *pill,
else
do_swab = 0;
+ if (!field->rmf_dumper)
+ dump = 0;
+
if (!(field->rmf_flags & RMF_F_STRUCT_ARRAY)) {
- if (dump && field->rmf_dumper) {
+ if (dump) {
CDEBUG(D_RPCTRACE, "Dump of %sfield %s follows\n",
do_swab ? "unswabbed " : "", field->rmf_name);
field->rmf_dumper(value);
@@ -1851,7 +1912,7 @@ swabber_dumper_helper(struct req_capsule *pill,
for (p = value, i = 0, n = len / field->rmf_size;
i < n;
i++, p += field->rmf_size) {
- if (dump && field->rmf_dumper) {
+ if (dump) {
CDEBUG(D_RPCTRACE, "Dump of %sarray field %s, "
"element %d follows\n",
do_swab ? "unswabbed " : "", field->rmf_name, i);
@@ -1860,7 +1921,7 @@ swabber_dumper_helper(struct req_capsule *pill,
if (!do_swab)
continue;
swabber(p);
- if (dump && field->rmf_dumper) {
+ if (dump) {
CDEBUG(D_RPCTRACE, "Dump of swabbed array field %s, "
"element %d follows\n", field->rmf_name, i);
field->rmf_dumper(value);
@@ -1883,7 +1944,7 @@ swabber_dumper_helper(struct req_capsule *pill,
static void *__req_capsule_get(struct req_capsule *pill,
const struct req_msg_field *field,
enum req_location loc,
- void (*swabber)( void *),
+ void (*swabber)(void *),
int dump)
{
const struct req_format *fmt;
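
Editorial aside, not part of the patch: the swabber_dumper_helper() hunks above replace the repeated "dump && field->rmf_dumper" test with a single up-front "dump = 0" when no dumper is registered. A minimal userspace sketch of that hoisting pattern, using hypothetical names:

#include <stdio.h>
#include <stddef.h>

struct demo_field {
	const char *name;
	void (*dumper)(void *item);	/* may be NULL, like rmf_dumper */
};

static void print_item(void *item)
{
	printf("item=%d\n", *(int *)item);
}

static void demo_dump_array(const struct demo_field *field,
			    int *items, size_t count, int dump)
{
	size_t i;

	/* decide once instead of re-testing field->dumper per element */
	if (!field->dumper)
		dump = 0;

	for (i = 0; i < count; i++)
		if (dump)
			field->dumper(&items[i]);
}

int main(void)
{
	struct demo_field f = { "demo", print_item };
	int vals[] = { 1, 2, 3 };

	demo_dump_array(&f, vals, 3, 1);
	return 0;
}
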
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index 379e594..ab08454 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -63,7 +63,7 @@
return (-EINVAL); \
} \
mutex_unlock(&ctxt->loc_mutex); \
-} while(0)
+} while (0)
#define LLOG_CLIENT_EXIT(ctxt, imp) do { \
mutex_lock(&ctxt->loc_mutex); \
@@ -72,7 +72,7 @@
ctxt->loc_imp, imp); \
class_import_put(imp); \
mutex_unlock(&ctxt->loc_mutex); \
-} while(0)
+} while (0)
/* This is a callback from the llog_* functions.
* Assumes caller has already pushed us into the kernel context. */
@@ -302,7 +302,7 @@ static int llog_client_read_header(const struct lu_env *env,
if (hdr == NULL)
GOTO(out, rc =-EFAULT);
- memcpy(handle->lgh_hdr, hdr, sizeof (*hdr));
+ memcpy(handle->lgh_hdr, hdr, sizeof(*hdr));
handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;
/* sanity checks */
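
Editorial aside, not part of the patch: the LLOG_CLIENT_ENTRY/EXIT hunks above only fix "while(0)" spacing, but the wrapper itself is the usual multi-statement macro idiom. A self-contained sketch with a hypothetical macro name:

#include <stdio.h>

/*
 * do { ... } while (0) makes the multi-statement macro expand as a
 * single statement, so it stays well-formed after an if/else.
 */
#define DEMO_LOG_AND_COUNT(counter)			\
do {							\
	printf("count is %d\n", (counter));		\
	(counter)++;					\
} while (0)

int main(void)
{
	int c = 0;

	if (c == 0)
		DEMO_LOG_AND_COUNT(c);	/* expands as one statement */
	else
		printf("not reached\n");

	return 0;
}
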
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_server.c b/drivers/staging/lustre/lustre/ptlrpc/llog_server.c
deleted file mode 100644
index af9d2ac..0000000
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_server.c
+++ /dev/null
@@ -1,450 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/llog_server.c
- *
- * remote api for llog - server side
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOG
-
-
-#include <obd_class.h>
-#include <lustre_log.h>
-#include <lustre_net.h>
-#include <lustre_fsfilt.h>
-
-#if defined(LUSTRE_LOG_SERVER)
-static int llog_origin_close(const struct lu_env *env, struct llog_handle *lgh)
-{
- if (lgh->lgh_hdr != NULL && lgh->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
- return llog_cat_close(env, lgh);
- else
- return llog_close(env, lgh);
-}
-
-/* Only open is supported, no new llog can be created remotely */
-int llog_origin_handle_open(struct ptlrpc_request *req)
-{
- struct obd_export *exp = req->rq_export;
- struct obd_device *obd = exp->exp_obd;
- struct obd_device *disk_obd;
- struct lvfs_run_ctxt saved;
- struct llog_handle *loghandle;
- struct llogd_body *body;
- struct llog_logid *logid = NULL;
- struct llog_ctxt *ctxt;
- char *name = NULL;
- int rc;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL)
- return -EFAULT;
-
- if (ostid_id(&body->lgd_logid.lgl_oi) > 0)
- logid = &body->lgd_logid;
-
- if (req_capsule_field_present(&req->rq_pill, &RMF_NAME, RCL_CLIENT)) {
- name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- if (name == NULL)
- return -EFAULT;
- CDEBUG(D_INFO, "%s: opening log %s\n", obd->obd_name, name);
- }
-
- ctxt = llog_get_context(obd, body->lgd_ctxt_idx);
- if (ctxt == NULL) {
- CDEBUG(D_WARNING, "%s: no ctxt. group=%p idx=%d name=%s\n",
- obd->obd_name, &obd->obd_olg, body->lgd_ctxt_idx, name);
- return -ENODEV;
- }
- disk_obd = ctxt->loc_exp->exp_obd;
- push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
-
- rc = llog_open(req->rq_svc_thread->t_env, ctxt, &loghandle, logid,
- name, LLOG_OPEN_EXISTS);
- if (rc)
- GOTO(out_pop, rc);
-
- rc = req_capsule_server_pack(&req->rq_pill);
- if (rc)
- GOTO(out_close, rc = -ENOMEM);
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- body->lgd_logid = loghandle->lgh_id;
-
-out_close:
- llog_origin_close(req->rq_svc_thread->t_env, loghandle);
-out_pop:
- pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
- llog_ctxt_put(ctxt);
- return rc;
-}
-EXPORT_SYMBOL(llog_origin_handle_open);
-
-int llog_origin_handle_destroy(struct ptlrpc_request *req)
-{
- struct obd_device *disk_obd;
- struct lvfs_run_ctxt saved;
- struct llogd_body *body;
- struct llog_logid *logid = NULL;
- struct llog_ctxt *ctxt;
- int rc;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL)
- return -EFAULT;
-
- if (ostid_id(&body->lgd_logid.lgl_oi) > 0)
- logid = &body->lgd_logid;
-
- if (!(body->lgd_llh_flags & LLOG_F_IS_PLAIN))
- CERROR("%s: wrong llog flags %x\n",
- req->rq_export->exp_obd->obd_name, body->lgd_llh_flags);
-
- ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
- if (ctxt == NULL)
- return -ENODEV;
-
- disk_obd = ctxt->loc_exp->exp_obd;
- push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
-
- rc = req_capsule_server_pack(&req->rq_pill);
- /* erase only if no error and logid is valid */
- if (rc == 0)
- rc = llog_erase(req->rq_svc_thread->t_env, ctxt, logid, NULL);
- pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
- llog_ctxt_put(ctxt);
- return rc;
-}
-EXPORT_SYMBOL(llog_origin_handle_destroy);
-
-int llog_origin_handle_next_block(struct ptlrpc_request *req)
-{
- struct obd_device *disk_obd;
- struct llog_handle *loghandle;
- struct llogd_body *body;
- struct llogd_body *repbody;
- struct lvfs_run_ctxt saved;
- struct llog_ctxt *ctxt;
- __u32 flags;
- void *ptr;
- int rc;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL)
- return -EFAULT;
-
- ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
- if (ctxt == NULL)
- return -ENODEV;
-
- disk_obd = ctxt->loc_exp->exp_obd;
- push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
-
- rc = llog_open(req->rq_svc_thread->t_env, ctxt, &loghandle,
- &body->lgd_logid, NULL, LLOG_OPEN_EXISTS);
- if (rc)
- GOTO(out_pop, rc);
-
- flags = body->lgd_llh_flags;
- rc = llog_init_handle(req->rq_svc_thread->t_env, loghandle, flags,
- NULL);
- if (rc)
- GOTO(out_close, rc);
-
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER,
- LLOG_CHUNK_SIZE);
- rc = req_capsule_server_pack(&req->rq_pill);
- if (rc)
- GOTO(out_close, rc = -ENOMEM);
-
- repbody = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- *repbody = *body;
-
- ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
- rc = llog_next_block(req->rq_svc_thread->t_env, loghandle,
- &repbody->lgd_saved_index, repbody->lgd_index,
- &repbody->lgd_cur_offset, ptr, LLOG_CHUNK_SIZE);
- if (rc)
- GOTO(out_close, rc);
-out_close:
- llog_origin_close(req->rq_svc_thread->t_env, loghandle);
-out_pop:
- pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
- llog_ctxt_put(ctxt);
- return rc;
-}
-EXPORT_SYMBOL(llog_origin_handle_next_block);
-
-int llog_origin_handle_prev_block(struct ptlrpc_request *req)
-{
- struct llog_handle *loghandle;
- struct llogd_body *body;
- struct llogd_body *repbody;
- struct obd_device *disk_obd;
- struct lvfs_run_ctxt saved;
- struct llog_ctxt *ctxt;
- __u32 flags;
- void *ptr;
- int rc;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL)
- return -EFAULT;
-
- ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
- if (ctxt == NULL)
- return -ENODEV;
-
- disk_obd = ctxt->loc_exp->exp_obd;
- push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
-
- rc = llog_open(req->rq_svc_thread->t_env, ctxt, &loghandle,
- &body->lgd_logid, NULL, LLOG_OPEN_EXISTS);
- if (rc)
- GOTO(out_pop, rc);
-
- flags = body->lgd_llh_flags;
- rc = llog_init_handle(req->rq_svc_thread->t_env, loghandle, flags,
- NULL);
- if (rc)
- GOTO(out_close, rc);
-
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER,
- LLOG_CHUNK_SIZE);
- rc = req_capsule_server_pack(&req->rq_pill);
- if (rc)
- GOTO(out_close, rc = -ENOMEM);
-
- repbody = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- *repbody = *body;
-
- ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
- rc = llog_prev_block(req->rq_svc_thread->t_env, loghandle,
- body->lgd_index, ptr, LLOG_CHUNK_SIZE);
- if (rc)
- GOTO(out_close, rc);
-
-out_close:
- llog_origin_close(req->rq_svc_thread->t_env, loghandle);
-out_pop:
- pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
- llog_ctxt_put(ctxt);
- return rc;
-}
-EXPORT_SYMBOL(llog_origin_handle_prev_block);
-
-int llog_origin_handle_read_header(struct ptlrpc_request *req)
-{
- struct obd_device *disk_obd;
- struct llog_handle *loghandle;
- struct llogd_body *body;
- struct llog_log_hdr *hdr;
- struct lvfs_run_ctxt saved;
- struct llog_ctxt *ctxt;
- __u32 flags;
- int rc;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL)
- return -EFAULT;
-
- ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
- if (ctxt == NULL)
- return -ENODEV;
-
- disk_obd = ctxt->loc_exp->exp_obd;
- push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
-
- rc = llog_open(req->rq_svc_thread->t_env, ctxt, &loghandle,
- &body->lgd_logid, NULL, LLOG_OPEN_EXISTS);
- if (rc)
- GOTO(out_pop, rc);
-
- /*
- * llog_init_handle() reads the llog header
- */
- flags = body->lgd_llh_flags;
- rc = llog_init_handle(req->rq_svc_thread->t_env, loghandle, flags,
- NULL);
- if (rc)
- GOTO(out_close, rc);
- flags = loghandle->lgh_hdr->llh_flags;
-
- rc = req_capsule_server_pack(&req->rq_pill);
- if (rc)
- GOTO(out_close, rc = -ENOMEM);
-
- hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR);
- *hdr = *loghandle->lgh_hdr;
-out_close:
- llog_origin_close(req->rq_svc_thread->t_env, loghandle);
-out_pop:
- pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
- llog_ctxt_put(ctxt);
- return rc;
-}
-EXPORT_SYMBOL(llog_origin_handle_read_header);
-
-int llog_origin_handle_close(struct ptlrpc_request *req)
-{
- /* Nothing to do */
- return 0;
-}
-EXPORT_SYMBOL(llog_origin_handle_close);
-
-int llog_origin_handle_cancel(struct ptlrpc_request *req)
-{
- int num_cookies, rc = 0, err, i, failed = 0;
- struct obd_device *disk_obd;
- struct llog_cookie *logcookies;
- struct llog_ctxt *ctxt = NULL;
- struct lvfs_run_ctxt saved;
- struct llog_handle *cathandle;
- struct inode *inode;
- void *handle;
-
- logcookies = req_capsule_client_get(&req->rq_pill, &RMF_LOGCOOKIES);
- num_cookies = req_capsule_get_size(&req->rq_pill, &RMF_LOGCOOKIES,
- RCL_CLIENT) / sizeof(*logcookies);
- if (logcookies == NULL || num_cookies == 0) {
- DEBUG_REQ(D_HA, req, "No llog cookies sent");
- return -EFAULT;
- }
-
- ctxt = llog_get_context(req->rq_export->exp_obd,
- logcookies->lgc_subsys);
- if (ctxt == NULL)
- return -ENODEV;
-
- disk_obd = ctxt->loc_exp->exp_obd;
- push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
- for (i = 0; i < num_cookies; i++, logcookies++) {
- cathandle = ctxt->loc_handle;
- LASSERT(cathandle != NULL);
- inode = cathandle->lgh_file->f_dentry->d_inode;
-
- handle = fsfilt_start_log(disk_obd, inode,
- FSFILT_OP_CANCEL_UNLINK, NULL, 1);
- if (IS_ERR(handle)) {
- CERROR("fsfilt_start_log() failed: %ld\n",
- PTR_ERR(handle));
- GOTO(pop_ctxt, rc = PTR_ERR(handle));
- }
-
- rc = llog_cat_cancel_records(req->rq_svc_thread->t_env,
- cathandle, 1, logcookies);
-
- /*
- * Do not raise -ENOENT errors for resent rpcs. This rec already
- * might be killed.
- */
- if (rc == -ENOENT &&
- (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT)) {
- /*
- * Do not change this message, reply-single.sh test_59b
- * expects to find this in log.
- */
- CDEBUG(D_RPCTRACE, "RESENT cancel req %p - ignored\n",
- req);
- rc = 0;
- } else if (rc == 0) {
- CDEBUG(D_RPCTRACE, "Canceled %d llog-records\n",
- num_cookies);
- }
-
- err = fsfilt_commit(disk_obd, inode, handle, 0);
- if (err) {
- CERROR("Error committing transaction: %d\n", err);
- if (!rc)
- rc = err;
- failed++;
- GOTO(pop_ctxt, rc);
- } else if (rc)
- failed++;
- }
- GOTO(pop_ctxt, rc);
-pop_ctxt:
- pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
- if (rc)
- CERROR("Cancel %d of %d llog-records failed: %d\n",
- failed, num_cookies, rc);
-
- llog_ctxt_put(ctxt);
- return rc;
-}
-EXPORT_SYMBOL(llog_origin_handle_cancel);
-
-#else /* !__KERNEL__ */
-int llog_origin_handle_open(struct ptlrpc_request *req)
-{
- LBUG();
- return 0;
-}
-
-int llog_origin_handle_destroy(struct ptlrpc_request *req)
-{
- LBUG();
- return 0;
-}
-
-int llog_origin_handle_next_block(struct ptlrpc_request *req)
-{
- LBUG();
- return 0;
-}
-int llog_origin_handle_prev_block(struct ptlrpc_request *req)
-{
- LBUG();
- return 0;
-}
-int llog_origin_handle_read_header(struct ptlrpc_request *req)
-{
- LBUG();
- return 0;
-}
-int llog_origin_handle_close(struct ptlrpc_request *req)
-{
- LBUG();
- return 0;
-}
-int llog_origin_handle_cancel(struct ptlrpc_request *req)
-{
- LBUG();
- return 0;
-}
-#endif
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index bea44a3..1be9786 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -46,8 +46,8 @@
struct ll_rpc_opcode {
- __u32 opcode;
- const char *opname;
+ __u32 opcode;
+ const char *opname;
} ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = {
{ OST_REPLY, "ost_reply" },
{ OST_GETATTR, "ost_getattr" },
@@ -114,10 +114,10 @@ struct ll_rpc_opcode {
{ MGS_SET_INFO, "mgs_set_info" },
{ MGS_CONFIG_READ, "mgs_config_read" },
{ OBD_PING, "obd_ping" },
- { OBD_LOG_CANCEL, "llog_origin_handle_cancel" },
+ { OBD_LOG_CANCEL, "llog_cancel" },
{ OBD_QC_CALLBACK, "obd_quota_callback" },
{ OBD_IDX_READ, "dt_index_read" },
- { LLOG_ORIGIN_HANDLE_CREATE, "llog_origin_handle_create" },
+ { LLOG_ORIGIN_HANDLE_CREATE, "llog_origin_handle_open" },
{ LLOG_ORIGIN_HANDLE_NEXT_BLOCK, "llog_origin_handle_next_block" },
{ LLOG_ORIGIN_HANDLE_READ_HEADER,"llog_origin_handle_read_header" },
{ LLOG_ORIGIN_HANDLE_WRITE_REC, "llog_origin_handle_write_rec" },
@@ -137,8 +137,8 @@ struct ll_rpc_opcode {
};
struct ll_eopcode {
- __u32 opcode;
- const char *opname;
+ __u32 opcode;
+ const char *opname;
} ll_eopcode_table[EXTRA_LAST_OPC] = {
{ LDLM_GLIMPSE_ENQUEUE, "ldlm_glimpse_enqueue" },
{ LDLM_PLAIN_ENQUEUE, "ldlm_plain_enqueue" },
@@ -221,7 +221,7 @@ void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,
for (i = 0; i < EXTRA_LAST_OPC; i++) {
char *units;
- switch(i) {
+ switch (i) {
case BRW_WRITE_BYTES:
case BRW_READ_BYTES:
units = "bytes";
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index a0e0097..3c6bf23 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -47,17 +47,17 @@
* over \a conn connection to portal \a portal.
* Returns 0 on success or error code.
*/
-static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
- lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
- struct ptlrpc_connection *conn, int portal, __u64 xid,
- unsigned int offset)
+static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len,
+ lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
+ struct ptlrpc_connection *conn, int portal, __u64 xid,
+ unsigned int offset)
{
int rc;
lnet_md_t md;
- LASSERT (portal != 0);
- LASSERT (conn != NULL);
- CDEBUG (D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
+ LASSERT(portal != 0);
+ LASSERT(conn != NULL);
+ CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
md.start = base;
md.length = len;
md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
@@ -66,23 +66,24 @@ static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
md.eq_handle = ptlrpc_eq_h;
if (unlikely(ack == LNET_ACK_REQ &&
- OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, OBD_FAIL_ONCE))){
+ OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK,
+ OBD_FAIL_ONCE))) {
/* don't ask for the ack to simulate failing client */
ack = LNET_NOACK_REQ;
}
- rc = LNetMDBind (md, LNET_UNLINK, mdh);
+ rc = LNetMDBind(md, LNET_UNLINK, mdh);
if (unlikely(rc != 0)) {
- CERROR ("LNetMDBind failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
+ CERROR("LNetMDBind failed: %d\n", rc);
+ LASSERT(rc == -ENOMEM);
return -ENOMEM;
}
CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64", offset %u\n",
len, portal, xid, offset);
- rc = LNetPut (conn->c_self, *mdh, ack,
- conn->c_peer, portal, xid, offset, 0);
+ rc = LNetPut(conn->c_self, *mdh, ack,
+ conn->c_peer, portal, xid, offset, 0);
if (unlikely(rc != 0)) {
int rc2;
/* We're going to get an UNLINK event when I unlink below,
@@ -179,7 +180,7 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
LNET_UNLINK, LNET_INS_AFTER, &me_h);
if (rc != 0) {
CERROR("%s: LNetMEAttach failed x"LPU64"/%d: rc = %d\n",
- desc->bd_export->exp_obd->obd_name, xid,
+ desc->bd_import->imp_obd->obd_name, xid,
posted_md, rc);
break;
}
@@ -189,7 +190,7 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
&desc->bd_mds[posted_md]);
if (rc != 0) {
CERROR("%s: LNetMDAttach failed x"LPU64"/%d: rc = %d\n",
- desc->bd_export->exp_obd->obd_name, xid,
+ desc->bd_import->imp_obd->obd_name, xid,
posted_md, rc);
rc2 = LNetMEUnlink(me_h);
LASSERT(rc2 == 0);
@@ -219,7 +220,7 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
/* Holler if peer manages to touch buffers before he knows the xid */
if (desc->bd_md_count != total_md)
CWARN("%s: Peer %s touched %d buffers while I registered\n",
- desc->bd_export->exp_obd->obd_name, libcfs_id2str(peer),
+ desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
total_md - desc->bd_md_count);
spin_unlock(&desc->bd_lock);
@@ -363,14 +364,14 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
* request, or a saved copy if this is a req saved in
* target_queue_final_reply().
*/
- LASSERT (req->rq_no_reply == 0);
- LASSERT (req->rq_reqbuf != NULL);
- LASSERT (rs != NULL);
- LASSERT ((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
- LASSERT (req->rq_repmsg != NULL);
- LASSERT (req->rq_repmsg == rs->rs_msg);
- LASSERT (rs->rs_cb_id.cbid_fn == reply_out_callback);
- LASSERT (rs->rs_cb_id.cbid_arg == rs);
+ LASSERT(req->rq_no_reply == 0);
+ LASSERT(req->rq_reqbuf != NULL);
+ LASSERT(rs != NULL);
+ LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
+ LASSERT(req->rq_repmsg != NULL);
+ LASSERT(req->rq_repmsg == rs->rs_msg);
+ LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
+ LASSERT(rs->rs_cb_id.cbid_arg == rs);
/* There may be no rq_export during failover */
@@ -423,12 +424,12 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
req->rq_sent = cfs_time_current_sec();
- rc = ptl_send_buf (&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
- (rs->rs_difficult && !rs->rs_no_ack) ?
- LNET_ACK_REQ : LNET_NOACK_REQ,
- &rs->rs_cb_id, conn,
- ptlrpc_req2svc(req)->srv_rep_portal,
- req->rq_xid, req->rq_reply_off);
+ rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
+ (rs->rs_difficult && !rs->rs_no_ack) ?
+ LNET_ACK_REQ : LNET_NOACK_REQ,
+ &rs->rs_cb_id, conn,
+ ptlrpc_req2svc(req)->srv_rep_portal,
+ req->rq_xid, req->rq_reply_off);
out:
if (unlikely(rc != 0))
ptlrpc_req_drop_rs(req);
@@ -437,7 +438,7 @@ out:
}
EXPORT_SYMBOL(ptlrpc_send_reply);
-int ptlrpc_reply (struct ptlrpc_request *req)
+int ptlrpc_reply(struct ptlrpc_request *req)
{
if (req->rq_no_reply)
return 0;
@@ -537,13 +538,13 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
/* bulk register should be done after wrap_request() */
if (request->rq_bulk != NULL) {
- rc = ptlrpc_register_bulk (request);
+ rc = ptlrpc_register_bulk(request);
if (rc != 0)
GOTO(out, rc);
}
if (!noreply) {
- LASSERT (request->rq_replen != 0);
+ LASSERT(request->rq_replen != 0);
if (request->rq_repbuf == NULL) {
LASSERT(request->rq_repdata == NULL);
LASSERT(request->rq_repmsg == NULL);
@@ -566,7 +567,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
if (rc != 0) {
CERROR("LNetMEAttach failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
+ LASSERT(rc == -ENOMEM);
GOTO(cleanup_bulk, rc = -ENOMEM);
}
}
@@ -604,7 +605,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
&request->rq_reply_md_h);
if (rc != 0) {
CERROR("LNetMDAttach failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
+ LASSERT(rc == -ENOMEM);
spin_lock(&request->rq_lock);
/* ...but the MD attach didn't succeed... */
request->rq_receiving_reply = 0;
@@ -655,7 +656,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
* nobody apart from the PUT's target has the right nid+XID to
* access the reply buffer. */
rc2 = LNetMEUnlink(reply_me_h);
- LASSERT (rc2 == 0);
+ LASSERT(rc2 == 0);
/* UNLINKED callback called synchronously */
LASSERT(!request->rq_receiving_reply);
@@ -714,10 +715,10 @@ int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
if (rc == 0)
return (0);
- CERROR("LNetMDAttach failed: %d; \n", rc);
- LASSERT (rc == -ENOMEM);
- rc = LNetMEUnlink (me_h);
- LASSERT (rc == 0);
+ CERROR("LNetMDAttach failed: %d;\n", rc);
+ LASSERT(rc == -ENOMEM);
+ rc = LNetMEUnlink(me_h);
+ LASSERT(rc == 0);
rqbd->rqbd_refcount = 0;
return (-ENOMEM);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index cd2611a3..464479c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -274,8 +274,8 @@ do { \
spin_unlock(&ptlrpc_rs_debug_lock); \
} while (0)
#else
-# define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while(0)
-# define PTLRPC_RS_DEBUG_LRU_DEL(rs) do {} while(0)
+# define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while (0)
+# define PTLRPC_RS_DEBUG_LRU_DEL(rs) do {} while (0)
#endif
struct ptlrpc_reply_state *
@@ -507,14 +507,14 @@ void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
{
PTLRPC_RS_DEBUG_LRU_DEL(rs);
- LASSERT (atomic_read(&rs->rs_refcount) == 0);
- LASSERT (!rs->rs_difficult || rs->rs_handled);
- LASSERT (!rs->rs_on_net);
- LASSERT (!rs->rs_scheduled);
- LASSERT (rs->rs_export == NULL);
- LASSERT (rs->rs_nlocks == 0);
- LASSERT (list_empty(&rs->rs_exp_list));
- LASSERT (list_empty(&rs->rs_obd_list));
+ LASSERT(atomic_read(&rs->rs_refcount) == 0);
+ LASSERT(!rs->rs_difficult || rs->rs_handled);
+ LASSERT(!rs->rs_on_net);
+ LASSERT(!rs->rs_scheduled);
+ LASSERT(rs->rs_export == NULL);
+ LASSERT(rs->rs_nlocks == 0);
+ LASSERT(list_empty(&rs->rs_exp_list));
+ LASSERT(list_empty(&rs->rs_obd_list));
sptlrpc_svc_free_rs(rs);
}
@@ -548,8 +548,8 @@ static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len)
required_len = lustre_msg_hdr_size_v2(m->lm_bufcount);
if (len < required_len) {
/* didn't receive all the buffer lengths */
- CERROR ("message length %d too small for %d buflens\n",
- len, m->lm_bufcount);
+ CERROR("message length %d too small for %d buflens\n",
+ len, m->lm_bufcount);
return -EINVAL;
}
@@ -636,8 +636,8 @@ static inline int lustre_unpack_ptlrpc_body_v2(struct ptlrpc_request *req,
}
if ((pb->pb_version & ~LUSTRE_VERSION_MASK) != PTLRPC_MSG_VERSION) {
- CERROR("wrong lustre_msg version %08x\n", pb->pb_version);
- return -EINVAL;
+ CERROR("wrong lustre_msg version %08x\n", pb->pb_version);
+ return -EINVAL;
}
if (!inout)
@@ -749,7 +749,7 @@ char *lustre_msg_string(struct lustre_msg *m, int index, int max_len)
}
if (str == NULL) {
- CERROR ("can't unpack string in msg %p buffer[%d]\n", m, index);
+ CERROR("can't unpack string in msg %p buffer[%d]\n", m, index);
return NULL;
}
@@ -1653,25 +1653,25 @@ EXPORT_SYMBOL(do_set_info_async);
*/
void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
{
- __swab32s (&b->pb_type);
- __swab32s (&b->pb_version);
- __swab32s (&b->pb_opc);
- __swab32s (&b->pb_status);
- __swab64s (&b->pb_last_xid);
- __swab64s (&b->pb_last_seen);
- __swab64s (&b->pb_last_committed);
- __swab64s (&b->pb_transno);
- __swab32s (&b->pb_flags);
- __swab32s (&b->pb_op_flags);
- __swab32s (&b->pb_conn_cnt);
- __swab32s (&b->pb_timeout);
- __swab32s (&b->pb_service_time);
- __swab32s (&b->pb_limit);
- __swab64s (&b->pb_slv);
- __swab64s (&b->pb_pre_versions[0]);
- __swab64s (&b->pb_pre_versions[1]);
- __swab64s (&b->pb_pre_versions[2]);
- __swab64s (&b->pb_pre_versions[3]);
+ __swab32s(&b->pb_type);
+ __swab32s(&b->pb_version);
+ __swab32s(&b->pb_opc);
+ __swab32s(&b->pb_status);
+ __swab64s(&b->pb_last_xid);
+ __swab64s(&b->pb_last_seen);
+ __swab64s(&b->pb_last_committed);
+ __swab64s(&b->pb_transno);
+ __swab32s(&b->pb_flags);
+ __swab32s(&b->pb_op_flags);
+ __swab32s(&b->pb_conn_cnt);
+ __swab32s(&b->pb_timeout);
+ __swab32s(&b->pb_service_time);
+ __swab32s(&b->pb_limit);
+ __swab64s(&b->pb_slv);
+ __swab64s(&b->pb_pre_versions[0]);
+ __swab64s(&b->pb_pre_versions[1]);
+ __swab64s(&b->pb_pre_versions[2]);
+ __swab64s(&b->pb_pre_versions[3]);
CLASSERT(offsetof(typeof(*b), pb_padding) != 0);
/* While we need to maintain compatibility between
* clients and servers without ptlrpc_body_v2 (< 2.3)
@@ -1723,33 +1723,33 @@ void lustre_swab_connect(struct obd_connect_data *ocd)
CLASSERT(offsetof(typeof(*ocd), paddingF) != 0);
}
-void lustre_swab_obdo (struct obdo *o)
+void lustre_swab_obdo(struct obdo *o)
{
- __swab64s (&o->o_valid);
+ __swab64s(&o->o_valid);
lustre_swab_ost_id(&o->o_oi);
- __swab64s (&o->o_parent_seq);
- __swab64s (&o->o_size);
- __swab64s (&o->o_mtime);
- __swab64s (&o->o_atime);
- __swab64s (&o->o_ctime);
- __swab64s (&o->o_blocks);
- __swab64s (&o->o_grant);
- __swab32s (&o->o_blksize);
- __swab32s (&o->o_mode);
- __swab32s (&o->o_uid);
- __swab32s (&o->o_gid);
- __swab32s (&o->o_flags);
- __swab32s (&o->o_nlink);
- __swab32s (&o->o_parent_oid);
- __swab32s (&o->o_misc);
- __swab64s (&o->o_ioepoch);
- __swab32s (&o->o_stripe_idx);
- __swab32s (&o->o_parent_ver);
+ __swab64s(&o->o_parent_seq);
+ __swab64s(&o->o_size);
+ __swab64s(&o->o_mtime);
+ __swab64s(&o->o_atime);
+ __swab64s(&o->o_ctime);
+ __swab64s(&o->o_blocks);
+ __swab64s(&o->o_grant);
+ __swab32s(&o->o_blksize);
+ __swab32s(&o->o_mode);
+ __swab32s(&o->o_uid);
+ __swab32s(&o->o_gid);
+ __swab32s(&o->o_flags);
+ __swab32s(&o->o_nlink);
+ __swab32s(&o->o_parent_oid);
+ __swab32s(&o->o_misc);
+ __swab64s(&o->o_ioepoch);
+ __swab32s(&o->o_stripe_idx);
+ __swab32s(&o->o_parent_ver);
/* o_handle is opaque */
/* o_lcookie is swabbed elsewhere */
- __swab32s (&o->o_uid_h);
- __swab32s (&o->o_gid_h);
- __swab64s (&o->o_data_version);
+ __swab32s(&o->o_uid_h);
+ __swab32s(&o->o_gid_h);
+ __swab64s(&o->o_data_version);
CLASSERT(offsetof(typeof(*o), o_padding_4) != 0);
CLASSERT(offsetof(typeof(*o), o_padding_5) != 0);
CLASSERT(offsetof(typeof(*o), o_padding_6) != 0);
@@ -1757,19 +1757,19 @@ void lustre_swab_obdo (struct obdo *o)
}
EXPORT_SYMBOL(lustre_swab_obdo);
-void lustre_swab_obd_statfs (struct obd_statfs *os)
+void lustre_swab_obd_statfs(struct obd_statfs *os)
{
- __swab64s (&os->os_type);
- __swab64s (&os->os_blocks);
- __swab64s (&os->os_bfree);
- __swab64s (&os->os_bavail);
- __swab64s (&os->os_files);
- __swab64s (&os->os_ffree);
+ __swab64s(&os->os_type);
+ __swab64s(&os->os_blocks);
+ __swab64s(&os->os_bfree);
+ __swab64s(&os->os_bavail);
+ __swab64s(&os->os_files);
+ __swab64s(&os->os_ffree);
/* no need to swab os_fsid */
- __swab32s (&os->os_bsize);
- __swab32s (&os->os_namelen);
- __swab64s (&os->os_maxbytes);
- __swab32s (&os->os_state);
+ __swab32s(&os->os_bsize);
+ __swab32s(&os->os_namelen);
+ __swab64s(&os->os_maxbytes);
+ __swab32s(&os->os_state);
CLASSERT(offsetof(typeof(*os), os_fprecreated) != 0);
CLASSERT(offsetof(typeof(*os), os_spare2) != 0);
CLASSERT(offsetof(typeof(*os), os_spare3) != 0);
@@ -1790,17 +1790,17 @@ void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
}
EXPORT_SYMBOL(lustre_swab_obd_ioobj);
-void lustre_swab_niobuf_remote (struct niobuf_remote *nbr)
+void lustre_swab_niobuf_remote(struct niobuf_remote *nbr)
{
- __swab64s (&nbr->offset);
- __swab32s (&nbr->len);
- __swab32s (&nbr->flags);
+ __swab64s(&nbr->offset);
+ __swab32s(&nbr->len);
+ __swab32s(&nbr->flags);
}
EXPORT_SYMBOL(lustre_swab_niobuf_remote);
-void lustre_swab_ost_body (struct ost_body *b)
+void lustre_swab_ost_body(struct ost_body *b)
{
- lustre_swab_obdo (&b->oa);
+ lustre_swab_obdo(&b->oa);
}
EXPORT_SYMBOL(lustre_swab_ost_body);
@@ -1861,45 +1861,45 @@ void lustre_swab_lquota_lvb(struct lquota_lvb *lvb)
}
EXPORT_SYMBOL(lustre_swab_lquota_lvb);
-void lustre_swab_mdt_body (struct mdt_body *b)
+void lustre_swab_mdt_body(struct mdt_body *b)
{
- lustre_swab_lu_fid (&b->fid1);
- lustre_swab_lu_fid (&b->fid2);
+ lustre_swab_lu_fid(&b->fid1);
+ lustre_swab_lu_fid(&b->fid2);
/* handle is opaque */
- __swab64s (&b->valid);
- __swab64s (&b->size);
- __swab64s (&b->mtime);
- __swab64s (&b->atime);
- __swab64s (&b->ctime);
- __swab64s (&b->blocks);
- __swab64s (&b->ioepoch);
- CLASSERT(offsetof(typeof(*b), unused1) != 0);
- __swab32s (&b->fsuid);
- __swab32s (&b->fsgid);
- __swab32s (&b->capability);
- __swab32s (&b->mode);
- __swab32s (&b->uid);
- __swab32s (&b->gid);
- __swab32s (&b->flags);
- __swab32s (&b->rdev);
- __swab32s (&b->nlink);
+ __swab64s(&b->valid);
+ __swab64s(&b->size);
+ __swab64s(&b->mtime);
+ __swab64s(&b->atime);
+ __swab64s(&b->ctime);
+ __swab64s(&b->blocks);
+ __swab64s(&b->ioepoch);
+ __swab64s(&b->t_state);
+ __swab32s(&b->fsuid);
+ __swab32s(&b->fsgid);
+ __swab32s(&b->capability);
+ __swab32s(&b->mode);
+ __swab32s(&b->uid);
+ __swab32s(&b->gid);
+ __swab32s(&b->flags);
+ __swab32s(&b->rdev);
+ __swab32s(&b->nlink);
CLASSERT(offsetof(typeof(*b), unused2) != 0);
- __swab32s (&b->suppgid);
- __swab32s (&b->eadatasize);
- __swab32s (&b->aclsize);
- __swab32s (&b->max_mdsize);
- __swab32s (&b->max_cookiesize);
- __swab32s (&b->uid_h);
- __swab32s (&b->gid_h);
+ __swab32s(&b->suppgid);
+ __swab32s(&b->eadatasize);
+ __swab32s(&b->aclsize);
+ __swab32s(&b->max_mdsize);
+ __swab32s(&b->max_cookiesize);
+ __swab32s(&b->uid_h);
+ __swab32s(&b->gid_h);
CLASSERT(offsetof(typeof(*b), padding_5) != 0);
}
EXPORT_SYMBOL(lustre_swab_mdt_body);
-void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b)
+void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
{
/* handle is opaque */
- __swab64s (&b->ioepoch);
- __swab32s (&b->flags);
+ __swab64s(&b->ioepoch);
+ __swab32s(&b->flags);
CLASSERT(offsetof(typeof(*b), padding) != 0);
}
EXPORT_SYMBOL(lustre_swab_mdt_ioepoch);
@@ -1957,49 +1957,49 @@ void lustre_swab_mgs_config_res(struct mgs_config_res *body)
}
EXPORT_SYMBOL(lustre_swab_mgs_config_res);
-static void lustre_swab_obd_dqinfo (struct obd_dqinfo *i)
+static void lustre_swab_obd_dqinfo(struct obd_dqinfo *i)
{
- __swab64s (&i->dqi_bgrace);
- __swab64s (&i->dqi_igrace);
- __swab32s (&i->dqi_flags);
- __swab32s (&i->dqi_valid);
+ __swab64s(&i->dqi_bgrace);
+ __swab64s(&i->dqi_igrace);
+ __swab32s(&i->dqi_flags);
+ __swab32s(&i->dqi_valid);
}
-static void lustre_swab_obd_dqblk (struct obd_dqblk *b)
+static void lustre_swab_obd_dqblk(struct obd_dqblk *b)
{
- __swab64s (&b->dqb_ihardlimit);
- __swab64s (&b->dqb_isoftlimit);
- __swab64s (&b->dqb_curinodes);
- __swab64s (&b->dqb_bhardlimit);
- __swab64s (&b->dqb_bsoftlimit);
- __swab64s (&b->dqb_curspace);
- __swab64s (&b->dqb_btime);
- __swab64s (&b->dqb_itime);
- __swab32s (&b->dqb_valid);
+ __swab64s(&b->dqb_ihardlimit);
+ __swab64s(&b->dqb_isoftlimit);
+ __swab64s(&b->dqb_curinodes);
+ __swab64s(&b->dqb_bhardlimit);
+ __swab64s(&b->dqb_bsoftlimit);
+ __swab64s(&b->dqb_curspace);
+ __swab64s(&b->dqb_btime);
+ __swab64s(&b->dqb_itime);
+ __swab32s(&b->dqb_valid);
CLASSERT(offsetof(typeof(*b), dqb_padding) != 0);
}
-void lustre_swab_obd_quotactl (struct obd_quotactl *q)
+void lustre_swab_obd_quotactl(struct obd_quotactl *q)
{
- __swab32s (&q->qc_cmd);
- __swab32s (&q->qc_type);
- __swab32s (&q->qc_id);
- __swab32s (&q->qc_stat);
- lustre_swab_obd_dqinfo (&q->qc_dqinfo);
- lustre_swab_obd_dqblk (&q->qc_dqblk);
+ __swab32s(&q->qc_cmd);
+ __swab32s(&q->qc_type);
+ __swab32s(&q->qc_id);
+ __swab32s(&q->qc_stat);
+ lustre_swab_obd_dqinfo(&q->qc_dqinfo);
+ lustre_swab_obd_dqblk(&q->qc_dqblk);
}
EXPORT_SYMBOL(lustre_swab_obd_quotactl);
-void lustre_swab_mdt_remote_perm (struct mdt_remote_perm *p)
+void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p)
{
- __swab32s (&p->rp_uid);
- __swab32s (&p->rp_gid);
- __swab32s (&p->rp_fsuid);
- __swab32s (&p->rp_fsuid_h);
- __swab32s (&p->rp_fsgid);
- __swab32s (&p->rp_fsgid_h);
- __swab32s (&p->rp_access_perm);
- __swab32s (&p->rp_padding);
+ __swab32s(&p->rp_uid);
+ __swab32s(&p->rp_gid);
+ __swab32s(&p->rp_fsuid);
+ __swab32s(&p->rp_fsuid_h);
+ __swab32s(&p->rp_fsgid);
+ __swab32s(&p->rp_fsgid_h);
+ __swab32s(&p->rp_access_perm);
+ __swab32s(&p->rp_padding);
};
EXPORT_SYMBOL(lustre_swab_mdt_remote_perm);
@@ -2089,31 +2089,31 @@ void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
};
EXPORT_SYMBOL(lustre_swab_mdt_rec_reint);
-void lustre_swab_lov_desc (struct lov_desc *ld)
+void lustre_swab_lov_desc(struct lov_desc *ld)
{
- __swab32s (&ld->ld_tgt_count);
- __swab32s (&ld->ld_active_tgt_count);
- __swab32s (&ld->ld_default_stripe_count);
- __swab32s (&ld->ld_pattern);
- __swab64s (&ld->ld_default_stripe_size);
- __swab64s (&ld->ld_default_stripe_offset);
- __swab32s (&ld->ld_qos_maxage);
+ __swab32s(&ld->ld_tgt_count);
+ __swab32s(&ld->ld_active_tgt_count);
+ __swab32s(&ld->ld_default_stripe_count);
+ __swab32s(&ld->ld_pattern);
+ __swab64s(&ld->ld_default_stripe_size);
+ __swab64s(&ld->ld_default_stripe_offset);
+ __swab32s(&ld->ld_qos_maxage);
/* uuid endian insensitive */
}
EXPORT_SYMBOL(lustre_swab_lov_desc);
-void lustre_swab_lmv_desc (struct lmv_desc *ld)
+void lustre_swab_lmv_desc(struct lmv_desc *ld)
{
- __swab32s (&ld->ld_tgt_count);
- __swab32s (&ld->ld_active_tgt_count);
- __swab32s (&ld->ld_default_stripe_count);
- __swab32s (&ld->ld_pattern);
- __swab64s (&ld->ld_default_hash_size);
- __swab32s (&ld->ld_qos_maxage);
+ __swab32s(&ld->ld_tgt_count);
+ __swab32s(&ld->ld_active_tgt_count);
+ __swab32s(&ld->ld_default_stripe_count);
+ __swab32s(&ld->ld_pattern);
+ __swab64s(&ld->ld_default_hash_size);
+ __swab32s(&ld->ld_qos_maxage);
/* uuid endian insensitive */
}
-void lustre_swab_lmv_stripe_md (struct lmv_stripe_md *mea)
+void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea)
{
__swab32s(&mea->mea_magic);
__swab32s(&mea->mea_count);
@@ -2142,7 +2142,7 @@ void lustre_swab_lmv_user_md(struct lmv_user_md *lum)
}
EXPORT_SYMBOL(lustre_swab_lmv_user_md);
-static void print_lum (struct lov_user_md *lum)
+static void print_lum(struct lov_user_md *lum)
{
CDEBUG(D_OTHER, "lov_user_md %p:\n", lum);
CDEBUG(D_OTHER, "\tlmm_magic: %#x\n", lum->lmm_magic);
@@ -2212,16 +2212,16 @@ void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
}
EXPORT_SYMBOL(lustre_swab_lov_user_md_objects);
-void lustre_swab_ldlm_res_id (struct ldlm_res_id *id)
+void lustre_swab_ldlm_res_id(struct ldlm_res_id *id)
{
int i;
for (i = 0; i < RES_NAME_SIZE; i++)
- __swab64s (&id->name[i]);
+ __swab64s(&id->name[i]);
}
EXPORT_SYMBOL(lustre_swab_ldlm_res_id);
-void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d)
+void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d)
{
/* the lock data is a union and the first two fields are always an
* extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
@@ -2234,46 +2234,46 @@ void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d)
}
EXPORT_SYMBOL(lustre_swab_ldlm_policy_data);
-void lustre_swab_ldlm_intent (struct ldlm_intent *i)
+void lustre_swab_ldlm_intent(struct ldlm_intent *i)
{
- __swab64s (&i->opc);
+ __swab64s(&i->opc);
}
EXPORT_SYMBOL(lustre_swab_ldlm_intent);
-void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r)
+void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r)
{
- __swab32s (&r->lr_type);
+ __swab32s(&r->lr_type);
CLASSERT(offsetof(typeof(*r), lr_padding) != 0);
- lustre_swab_ldlm_res_id (&r->lr_name);
+ lustre_swab_ldlm_res_id(&r->lr_name);
}
EXPORT_SYMBOL(lustre_swab_ldlm_resource_desc);
-void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l)
+void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l)
{
- lustre_swab_ldlm_resource_desc (&l->l_resource);
- __swab32s (&l->l_req_mode);
- __swab32s (&l->l_granted_mode);
- lustre_swab_ldlm_policy_data (&l->l_policy_data);
+ lustre_swab_ldlm_resource_desc(&l->l_resource);
+ __swab32s(&l->l_req_mode);
+ __swab32s(&l->l_granted_mode);
+ lustre_swab_ldlm_policy_data(&l->l_policy_data);
}
EXPORT_SYMBOL(lustre_swab_ldlm_lock_desc);
-void lustre_swab_ldlm_request (struct ldlm_request *rq)
+void lustre_swab_ldlm_request(struct ldlm_request *rq)
{
- __swab32s (&rq->lock_flags);
- lustre_swab_ldlm_lock_desc (&rq->lock_desc);
- __swab32s (&rq->lock_count);
+ __swab32s(&rq->lock_flags);
+ lustre_swab_ldlm_lock_desc(&rq->lock_desc);
+ __swab32s(&rq->lock_count);
/* lock_handle[] opaque */
}
EXPORT_SYMBOL(lustre_swab_ldlm_request);
-void lustre_swab_ldlm_reply (struct ldlm_reply *r)
+void lustre_swab_ldlm_reply(struct ldlm_reply *r)
{
- __swab32s (&r->lock_flags);
+ __swab32s(&r->lock_flags);
CLASSERT(offsetof(typeof(*r), lock_padding) != 0);
- lustre_swab_ldlm_lock_desc (&r->lock_desc);
+ lustre_swab_ldlm_lock_desc(&r->lock_desc);
/* lock_handle opaque */
- __swab64s (&r->lock_policy_res1);
- __swab64s (&r->lock_policy_res2);
+ __swab64s(&r->lock_policy_res1);
+ __swab64s(&r->lock_policy_res2);
}
EXPORT_SYMBOL(lustre_swab_ldlm_reply);
@@ -2409,7 +2409,7 @@ static inline int rep_ptlrpc_body_swabbed(struct ptlrpc_request *req)
void _debug_req(struct ptlrpc_request *req,
struct libcfs_debug_msg_data *msgdata,
- const char *fmt, ... )
+ const char *fmt, ...)
{
int req_ok = req->rq_reqmsg != NULL;
int rep_ok = req->rq_repmsg != NULL;
@@ -2457,20 +2457,20 @@ EXPORT_SYMBOL(_debug_req);
void lustre_swab_lustre_capa(struct lustre_capa *c)
{
lustre_swab_lu_fid(&c->lc_fid);
- __swab64s (&c->lc_opc);
- __swab64s (&c->lc_uid);
- __swab64s (&c->lc_gid);
- __swab32s (&c->lc_flags);
- __swab32s (&c->lc_keyid);
- __swab32s (&c->lc_timeout);
- __swab32s (&c->lc_expiry);
+ __swab64s(&c->lc_opc);
+ __swab64s(&c->lc_uid);
+ __swab64s(&c->lc_gid);
+ __swab32s(&c->lc_flags);
+ __swab32s(&c->lc_keyid);
+ __swab32s(&c->lc_timeout);
+ __swab32s(&c->lc_expiry);
}
EXPORT_SYMBOL(lustre_swab_lustre_capa);
void lustre_swab_lustre_capa_key(struct lustre_capa_key *k)
{
- __swab64s (&k->lk_seq);
- __swab32s (&k->lk_keyid);
+ __swab64s(&k->lk_seq);
+ __swab32s(&k->lk_keyid);
CLASSERT(offsetof(typeof(*k), lk_padding) != 0);
}
EXPORT_SYMBOL(lustre_swab_lustre_capa_key);
@@ -2565,3 +2565,10 @@ void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
__swab64s(&msl->msl_flags);
}
EXPORT_SYMBOL(lustre_swab_swap_layouts);
+
+void lustre_swab_close_data(struct close_data *cd)
+{
+ lustre_swab_lu_fid(&cd->cd_fid);
+ __swab64s(&cd->cd_data_version);
+}
+EXPORT_SYMBOL(lustre_swab_close_data);
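
Editorial aside, not part of the patch: the new lustre_swab_close_data() above follows the usual pairing of a fixed-layout wire structure with a swab helper that byte-swaps each field and leaves padding alone. A sketch with a hypothetical structure, using the kernel's __swab32s()/__swab64s() helpers:

#include <linux/types.h>
#include <linux/swab.h>

/* hypothetical wire record, only to mirror the shape of close_data */
struct demo_wire_rec {
	__u64	dw_id;
	__u32	dw_flags;
	__u32	dw_padding;
};

static void demo_swab_wire_rec(struct demo_wire_rec *r)
{
	__swab64s(&r->dw_id);		/* 64-bit fields */
	__swab32s(&r->dw_flags);	/* 32-bit fields */
	/* dw_padding intentionally not swabbed */
}

A helper like this would then be wired into a req_msg_field definition (as RMF_CLOSE_DATA is above) so the request capsule code can swab the buffer when the peer has the opposite endianness.
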
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index 4d340f4..6dff502 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -45,7 +45,8 @@
#include "ptlrpc_internal.h"
static int suppress_pings;
-CFS_MODULE_PARM(suppress_pings, "i", int, 0644, "Suppress pings");
+module_param(suppress_pings, int, 0644);
+MODULE_PARM_DESC(suppress_pings, "Suppress pings");
struct mutex pinger_mutex;
static LIST_HEAD(pinger_imports);
@@ -140,9 +141,6 @@ static inline int ptlrpc_next_reconnect(struct obd_import *imp)
return cfs_time_shift(obd_timeout);
}
-static atomic_t suspend_timeouts = ATOMIC_INIT(0);
-static cfs_time_t suspend_wakeup_time = 0;
-
cfs_duration_t pinger_check_timeout(cfs_time_t time)
{
struct timeout_item *item;
@@ -162,67 +160,6 @@ cfs_duration_t pinger_check_timeout(cfs_time_t time)
cfs_time_current());
}
-static wait_queue_head_t suspend_timeouts_waitq;
-
-cfs_time_t ptlrpc_suspend_wakeup_time(void)
-{
- return suspend_wakeup_time;
-}
-
-void ptlrpc_deactivate_timeouts(struct obd_import *imp)
-{
- /*XXX: disabled for now, will be replaced by adaptive timeouts */
-#if 0
- if (imp->imp_no_timeout)
- return;
- imp->imp_no_timeout = 1;
- atomic_inc(&suspend_timeouts);
- CDEBUG(D_HA|D_WARNING, "deactivate timeouts %u\n",
- atomic_read(&suspend_timeouts));
-#endif
-}
-
-void ptlrpc_activate_timeouts(struct obd_import *imp)
-{
- /*XXX: disabled for now, will be replaced by adaptive timeouts */
-#if 0
- if (!imp->imp_no_timeout)
- return;
- imp->imp_no_timeout = 0;
- LASSERT(atomic_read(&suspend_timeouts) > 0);
- if (atomic_dec_and_test(&suspend_timeouts)) {
- suspend_wakeup_time = cfs_time_current();
- wake_up(&suspend_timeouts_waitq);
- }
- CDEBUG(D_HA|D_WARNING, "activate timeouts %u\n",
- atomic_read(&suspend_timeouts));
-#endif
-}
-
-int ptlrpc_check_suspend(void)
-{
- if (atomic_read(&suspend_timeouts))
- return 1;
- return 0;
-}
-
-int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req)
-{
- struct l_wait_info lwi;
-
- if (atomic_read(&suspend_timeouts)) {
- DEBUG_REQ(D_NET, req, "-- suspend %d regular timeout",
- atomic_read(&suspend_timeouts));
- lwi = LWI_INTR(NULL, NULL);
- l_wait_event(suspend_timeouts_waitq,
- atomic_read(&suspend_timeouts) == 0, &lwi);
- DEBUG_REQ(D_NET, req, "-- recharge regular timeout");
- return 1;
- }
- return 0;
-}
-
-
static bool ir_up;
void ptlrpc_pinger_ir_up(void)
@@ -377,7 +314,6 @@ int ptlrpc_start_pinger(void)
return -EALREADY;
init_waitqueue_head(&pinger_thread.t_ctl_waitq);
- init_waitqueue_head(&suspend_timeouts_waitq);
strcpy(pinger_thread.t_name, "ll_ping");
@@ -576,7 +512,7 @@ int ptlrpc_del_timeout_client(struct list_head *obd_list,
break;
}
}
- LASSERTF(ti != NULL, "ti is NULL ! \n");
+ LASSERTF(ti != NULL, "ti is NULL !\n");
if (list_empty(&ti->ti_obd_list)) {
list_del(&ti->ti_chain);
OBD_FREE_PTR(ti);
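
Editorial aside, not part of the patch: the suppress_pings hunk above converts the Lustre-private CFS_MODULE_PARM() wrapper to the standard kernel interface, which splits declaration and description. A sketch of the same pattern with a hypothetical parameter:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* hypothetical tunable, shown only to illustrate the conversion */
static int demo_ping_interval = 25;
module_param(demo_ping_interval, int, 0644);
MODULE_PARM_DESC(demo_ping_interval, "Demo ping interval in seconds");

The permission bits (0644 here) control the sysfs entry under /sys/module/<name>/parameters/, which is what the "visible and mutable through /sys/module/ptlrpc" comment in service.c refers to.
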
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index ab36347..e3b5a92 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -77,13 +77,13 @@ void ptlrpc_lprocfs_register_service(struct proc_dir_entry *proc_entry,
struct ptlrpc_service *svc);
void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc);
void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount);
-void ptlrpc_lprocfs_do_request_stat (struct ptlrpc_request *req,
+void ptlrpc_lprocfs_do_request_stat(struct ptlrpc_request *req,
long q_usec, long work_usec);
#else
-#define ptlrpc_lprocfs_register_service(params...) do{}while(0)
-#define ptlrpc_lprocfs_unregister_service(params...) do{}while(0)
-#define ptlrpc_lprocfs_rpc_sent(params...) do{}while(0)
-#define ptlrpc_lprocfs_do_request_stat(params...) do{}while(0)
+#define ptlrpc_lprocfs_register_service(params...) do {} while (0)
+#define ptlrpc_lprocfs_unregister_service(params...) do {} while (0)
+#define ptlrpc_lprocfs_rpc_sent(params...) do {} while (0)
+#define ptlrpc_lprocfs_do_request_stat(params...) do {} while (0)
#endif /* LPROCFS */
/* NRS */
@@ -259,8 +259,14 @@ void sptlrpc_enc_pool_fini(void);
int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v);
/* sec_lproc.c */
+#ifdef LPROCFS
int sptlrpc_lproc_init(void);
void sptlrpc_lproc_fini(void);
+#else
+static inline int sptlrpc_lproc_init(void)
+{ return 0; }
+static inline void sptlrpc_lproc_fini(void) {}
+#endif
/* sec_gc.c */
int sptlrpc_gc_init(void);
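
Editorial aside, not part of the patch: the sec_lproc.c hunk above adds the common header pattern of real prototypes when a facility is configured and no-op static inline stubs otherwise, so callers need no #ifdef of their own. A sketch with a hypothetical config symbol:

#ifdef CONFIG_DEMO_PROC
int demo_proc_init(void);
void demo_proc_fini(void);
#else
static inline int demo_proc_init(void)
{ return 0; }
static inline void demo_proc_fini(void) {}
#endif
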
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
index 419e634..0efd358 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
@@ -112,7 +112,7 @@ __init int ptlrpc_init(void)
return 0;
cleanup:
- switch(cleanup_phase) {
+ switch (cleanup_phase) {
case 8:
ptlrpc_nrs_fini();
case 7:
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index 89c9be9..2d26fd5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -77,12 +77,12 @@ struct ptlrpcd {
};
static int max_ptlrpcds;
-CFS_MODULE_PARM(max_ptlrpcds, "i", int, 0644,
- "Max ptlrpcd thread count to be started.");
+module_param(max_ptlrpcds, int, 0644);
+MODULE_PARM_DESC(max_ptlrpcds, "Max ptlrpcd thread count to be started.");
static int ptlrpcd_bind_policy = PDB_POLICY_PAIR;
-CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
- "Ptlrpcd threads binding mode.");
+module_param(ptlrpcd_bind_policy, int, 0644);
+MODULE_PARM_DESC(ptlrpcd_bind_policy, "Ptlrpcd threads binding mode.");
static struct ptlrpcd *ptlrpcds;
struct mutex ptlrpcd_mutex;
@@ -600,7 +600,6 @@ static int ptlrpcd_bind(int index, int max)
int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
{
int rc;
- int env = 0;
/*
* Do not allow start second thread for one pc.
@@ -619,6 +618,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
pc->pc_set = ptlrpc_prep_set();
if (pc->pc_set == NULL)
GOTO(out, rc = -ENOMEM);
+
/*
* So far only "client" ptlrpcd uses an environment. In the future,
* ptlrpcd thread (or a thread-set) has to be given an argument,
@@ -626,40 +626,40 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
*/
rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
if (rc != 0)
- GOTO(out, rc);
+ GOTO(out_set, rc);
- env = 1;
{
struct task_struct *task;
-
if (index >= 0) {
rc = ptlrpcd_bind(index, max);
if (rc < 0)
- GOTO(out, rc);
+ GOTO(out_env, rc);
}
task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
if (IS_ERR(task))
- GOTO(out, rc = PTR_ERR(task));
+ GOTO(out_env, rc = PTR_ERR(task));
- rc = 0;
wait_for_completion(&pc->pc_starting);
}
-out:
- if (rc) {
- if (pc->pc_set != NULL) {
- struct ptlrpc_request_set *set = pc->pc_set;
-
- spin_lock(&pc->pc_lock);
- pc->pc_set = NULL;
- spin_unlock(&pc->pc_lock);
- ptlrpc_set_destroy(set);
- }
- if (env != 0)
- lu_context_fini(&pc->pc_env.le_ctx);
- clear_bit(LIOD_BIND, &pc->pc_flags);
- clear_bit(LIOD_START, &pc->pc_flags);
+ return 0;
+
+out_env:
+ lu_context_fini(&pc->pc_env.le_ctx);
+
+out_set:
+ if (pc->pc_set != NULL) {
+ struct ptlrpc_request_set *set = pc->pc_set;
+
+ spin_lock(&pc->pc_lock);
+ pc->pc_set = NULL;
+ spin_unlock(&pc->pc_lock);
+ ptlrpc_set_destroy(set);
}
+ clear_bit(LIOD_BIND, &pc->pc_flags);
+
+out:
+ clear_bit(LIOD_START, &pc->pc_flags);
return rc;
}
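
Editorial aside, not part of the patch: the ptlrpcd_start() rework above replaces the single "out:" cleanup that inspected flags (the old "env" variable) with unwind labels entered in reverse order of setup. A self-contained userspace sketch of that error-path shape, with hypothetical helpers:

#include <stdio.h>

/* hypothetical setup steps; each returns 0 on success */
static int demo_alloc_set(void)		{ return 0; }
static int demo_init_context(void)	{ return 0; }
static int demo_start_thread(void)	{ return -1; /* simulate failure */ }
static void demo_fini_context(void)	{ printf("undo context\n"); }
static void demo_free_set(void)		{ printf("undo set\n"); }

static int demo_start(void)
{
	int rc;

	rc = demo_alloc_set();
	if (rc)
		goto out;

	rc = demo_init_context();
	if (rc)
		goto out_set;

	rc = demo_start_thread();
	if (rc)
		goto out_ctx;

	return 0;

out_ctx:
	demo_fini_context();	/* undo only what was set up */
out_set:
	demo_free_set();
out:
	return rc;
}

int main(void)
{
	return demo_start() ? 1 : 0;
}
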
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 21de868..590fa8d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -44,19 +44,19 @@
/* The following are visible and mutable through /sys/module/ptlrpc */
int test_req_buffer_pressure = 0;
-CFS_MODULE_PARM(test_req_buffer_pressure, "i", int, 0444,
- "set non-zero to put pressure on request buffer pools");
-CFS_MODULE_PARM(at_min, "i", int, 0644,
- "Adaptive timeout minimum (sec)");
-CFS_MODULE_PARM(at_max, "i", int, 0644,
- "Adaptive timeout maximum (sec)");
-CFS_MODULE_PARM(at_history, "i", int, 0644,
- "Adaptive timeouts remember the slowest event that took place "
- "within this period (sec)");
-CFS_MODULE_PARM(at_early_margin, "i", int, 0644,
- "How soon before an RPC deadline to send an early reply");
-CFS_MODULE_PARM(at_extra, "i", int, 0644,
- "How much extra time to give with each early reply");
+module_param(test_req_buffer_pressure, int, 0444);
+MODULE_PARM_DESC(test_req_buffer_pressure, "set non-zero to put pressure on request buffer pools");
+module_param(at_min, int, 0644);
+MODULE_PARM_DESC(at_min, "Adaptive timeout minimum (sec)");
+module_param(at_max, int, 0644);
+MODULE_PARM_DESC(at_max, "Adaptive timeout maximum (sec)");
+module_param(at_history, int, 0644);
+MODULE_PARM_DESC(at_history,
+ "Adaptive timeouts remember the slowest event that took place within this period (sec)");
+module_param(at_early_margin, int, 0644);
+MODULE_PARM_DESC(at_early_margin, "How soon before an RPC deadline to send an early reply");
+module_param(at_extra, int, 0644);
+MODULE_PARM_DESC(at_extra, "How much extra time to give with each early reply");
/* forward ref */
@@ -386,7 +386,7 @@ ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
{
LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
LASSERT(spin_is_locked(&rs->rs_lock));
- LASSERT (rs->rs_difficult);
+ LASSERT(rs->rs_difficult);
rs->rs_scheduled_ever = 1; /* flag any notification attempt */
if (rs->rs_scheduled) { /* being set up or already notified */
@@ -412,7 +412,7 @@ void ptlrpc_commit_replies(struct obd_export *exp)
spin_lock(&exp->exp_uncommitted_replies_lock);
list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
rs_obd_list) {
- LASSERT (rs->rs_difficult);
+ LASSERT(rs->rs_difficult);
/* VBR: per-export last_committed */
LASSERT(rs->rs_export);
if (rs->rs_transno <= exp->exp_last_committed) {
@@ -796,7 +796,7 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
LASSERT(rc == 0);
mutex_lock(&ptlrpc_all_services_mutex);
- list_add (&service->srv_list, &ptlrpc_all_services);
+ list_add(&service->srv_list, &ptlrpc_all_services);
mutex_unlock(&ptlrpc_all_services_mutex);
if (proc_entry != NULL)
@@ -1115,8 +1115,10 @@ static int ptlrpc_check_req(struct ptlrpc_request *req)
}
if (unlikely(req->rq_export->exp_obd &&
req->rq_export->exp_obd->obd_fail)) {
- /* Failing over, don't handle any more reqs, send
- error response instead. */
+ /*
+ * Failing over, don't handle any more reqs, send
+ * error response instead.
+ */
CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
req, req->rq_export->exp_obd->obd_name);
rc = -ENODEV;
@@ -1268,7 +1270,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
return -ETIMEDOUT;
}
- if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0){
+ if (!(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, "
"but no AT support");
return -ENOSYS;
@@ -1777,9 +1779,9 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
if (rc) {
- CERROR ("error unpacking ptlrpc body: ptl %d from %s x"
- LPU64"\n", svc->srv_req_portal,
- libcfs_id2str(req->rq_peer), req->rq_xid);
+ CERROR("error unpacking ptlrpc body: ptl %d from %s x"
+ LPU64"\n", svc->srv_req_portal,
+ libcfs_id2str(req->rq_peer), req->rq_xid);
goto err_req;
}
@@ -1798,7 +1800,7 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
goto err_req;
}
- switch(lustre_msg_get_opc(req->rq_reqmsg)) {
+ switch (lustre_msg_get_opc(req->rq_reqmsg)) {
case MDS_WRITEPAGE:
case OST_WRITE:
req->rq_bulk_write = 1;
@@ -1895,7 +1897,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
- if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
libcfs_debug_dumplog();
do_gettimeofday(&work_start);
@@ -1967,13 +1969,14 @@ put_conn:
lu_context_fini(&request->rq_session);
if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
- DEBUG_REQ(D_WARNING, request, "Request took longer "
- "than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);"
- " client may timeout.",
- cfs_time_sub(request->rq_deadline,
- request->rq_arrival_time.tv_sec),
- cfs_time_sub(cfs_time_current_sec(),
- request->rq_deadline));
+ DEBUG_REQ(D_WARNING, request,
+ "Request took longer than estimated ("
+ CFS_DURATION_T":"CFS_DURATION_T
+ "s); client may timeout.",
+ cfs_time_sub(request->rq_deadline,
+ request->rq_arrival_time.tv_sec),
+ cfs_time_sub(cfs_time_current_sec(),
+ request->rq_deadline));
}
do_gettimeofday(&work_end);
@@ -2037,13 +2040,13 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
exp = rs->rs_export;
- LASSERT (rs->rs_difficult);
- LASSERT (rs->rs_scheduled);
- LASSERT (list_empty(&rs->rs_list));
+ LASSERT(rs->rs_difficult);
+ LASSERT(rs->rs_scheduled);
+ LASSERT(list_empty(&rs->rs_list));
spin_lock(&exp->exp_lock);
/* Noop if removed already */
- list_del_init (&rs->rs_exp_list);
+ list_del_init(&rs->rs_exp_list);
spin_unlock(&exp->exp_lock);
/* The disk commit callback holds exp_uncommitted_replies_lock while it
@@ -2113,9 +2116,9 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
/* Off the net */
spin_unlock(&rs->rs_lock);
- class_export_put (exp);
+ class_export_put(exp);
rs->rs_export = NULL;
- ptlrpc_rs_decref (rs);
+ ptlrpc_rs_decref(rs);
if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
svc->srv_is_stopping)
wake_up_all(&svcpt->scp_waitq);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wirehdr.c b/drivers/staging/lustre/lustre/ptlrpc/wirehdr.c
deleted file mode 100644
index 93bc40b..0000000
--- a/drivers/staging/lustre/lustre/ptlrpc/wirehdr.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-# ifdef CONFIG_FS_POSIX_ACL
-# include <linux/fs.h>
-# include <linux/posix_acl_xattr.h>
-# endif
-
-#include <obd_support.h>
-#include <obd_class.h>
-#include <lustre_net.h>
-#include <lustre_disk.h>
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index 9890bd9..3aa4459 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -36,10 +36,8 @@
#define DEBUG_SUBSYSTEM S_RPC
-# ifdef CONFIG_FS_POSIX_ACL
-# include <linux/fs.h>
-# include <linux/posix_acl_xattr.h>
-# endif
+#include <linux/fs.h>
+#include <linux/posix_acl_xattr.h>
#include <obd_support.h>
#include <obd_class.h>
@@ -49,9 +47,10 @@ void lustre_assert_wire_constants(void)
{
/* Wire protocol assertions generated by 'wirecheck'
* (make -C lustre/utils newwiretest)
- * running on Linux deva 2.6.32.279.lustre #5 SMP Tue Apr 9 22:52:17 CST 2013 x86_64 x86_64 x
- * with gcc version 4.4.4 20100726 (Red Hat 4.4.4-13) (GCC) */
-
+ * running on Linux centos6-bis 2.6.32-358.0.1.el6-head
+ * #3 SMP Wed Apr 17 17:37:43 CEST 2013
+ * with gcc version 4.4.6 20110731 (Red Hat 4.4.6-3) (GCC)
+ */
/* Constants... */
LASSERTF(PTL_RPC_MSG_REQUEST == 4711, "found %lld\n",
@@ -432,6 +431,10 @@ void lustre_assert_wire_constants(void)
(unsigned)LMAC_HSM);
LASSERTF(LMAC_SOM == 0x00000002UL, "found 0x%.8xUL\n",
(unsigned)LMAC_SOM);
+ LASSERTF(LMAC_NOT_IN_OI == 0x00000004UL, "found 0x%.8xUL\n",
+ (unsigned)LMAC_NOT_IN_OI);
+ LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n",
+ (unsigned)LMAC_FID_ON_OST);
LASSERTF(OBJ_CREATE == 1, "found %lld\n",
(long long)OBJ_CREATE);
LASSERTF(OBJ_DESTROY == 2, "found %lld\n",
@@ -1335,6 +1338,8 @@ void lustre_assert_wire_constants(void)
OBD_MD_REINT);
LASSERTF(OBD_MD_MEA == (0x0000000400000000ULL), "found 0x%.16llxULL\n",
OBD_MD_MEA);
+ LASSERTF(OBD_MD_TSTATE == (0x0000000800000000ULL),
+ "found 0x%.16llxULL\n", OBD_MD_TSTATE);
LASSERTF(OBD_MD_FLXATTR == (0x0000001000000000ULL), "found 0x%.16llxULL\n",
OBD_MD_FLXATTR);
LASSERTF(OBD_MD_FLXATTRLS == (0x0000002000000000ULL), "found 0x%.16llxULL\n",
@@ -1918,10 +1923,11 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct mdt_body, blocks));
LASSERTF((int)sizeof(((struct mdt_body *)0)->blocks) == 8, "found %lld\n",
(long long)(int)sizeof(((struct mdt_body *)0)->blocks));
- LASSERTF((int)offsetof(struct mdt_body, unused1) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, unused1));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->unused1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->unused1));
+ LASSERTF((int)offsetof(struct mdt_body, t_state) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, t_state));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->t_state) == 8,
+ "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->t_state));
LASSERTF((int)offsetof(struct mdt_body, fsuid) == 104, "found %lld\n",
(long long)(int)offsetof(struct mdt_body, fsuid));
LASSERTF((int)sizeof(((struct mdt_body *)0)->fsuid) == 4, "found %lld\n",
@@ -4416,6 +4422,64 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct hsm_user_request *)0)->hur_user_item) == 0, "found %lld\n",
(long long)(int)sizeof(((struct hsm_user_request *)0)->hur_user_item));
+ /* Checks for struct hsm_user_import */
+ LASSERTF(sizeof(struct hsm_user_import) == 48, "found %lld\n",
+ (long long)sizeof(struct hsm_user_import));
+ LASSERTF(offsetof(struct hsm_user_import, hui_size) == 0,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_size));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_size) == 8,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_size));
+ LASSERTF(offsetof(struct hsm_user_import, hui_uid) == 32,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_uid));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_uid) == 4,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_uid));
+ LASSERTF(offsetof(struct hsm_user_import, hui_gid) == 36,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_gid));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_gid) == 4,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_gid));
+ LASSERTF(offsetof(struct hsm_user_import, hui_mode) == 40,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_mode));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mode) == 4,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_mode));
+ LASSERTF(offsetof(struct hsm_user_import, hui_atime) == 8,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_atime));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_atime) == 8,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_atime));
+ LASSERTF(offsetof(struct hsm_user_import, hui_atime_ns) == 24,
+ "found %lld\n",
+ (long long)(int)offsetof(struct hsm_user_import, hui_atime_ns));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_atime_ns) == 4,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_atime_ns));
+ LASSERTF(offsetof(struct hsm_user_import, hui_mtime) == 16,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_mtime));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mtime) == 8,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_mtime));
+ LASSERTF(offsetof(struct hsm_user_import, hui_mtime_ns) == 28,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_mtime_ns));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mtime_ns) == 4,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_mtime_ns));
+ LASSERTF(offsetof(struct hsm_user_import, hui_archive_id) == 44,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_archive_id));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_archive_id) == 4,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_archive_id));
+
/* Checks for struct update_buf */
LASSERTF((int)sizeof(struct update_buf) == 8, "found %lld\n",
(long long)(int)sizeof(struct update_buf));
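The new LASSERTF() lines above pin the offset and size of every hsm_user_import field so the on-wire layout cannot drift silently. The same idea can be expressed with standard C11 compile-time checks; a minimal sketch with a hypothetical structure (not the Lustre macro):

#include <stddef.h>

struct wire_example {                   /* hypothetical on-wire record */
        unsigned long long we_size;     /* bytes 0..7  */
        unsigned int we_uid;            /* bytes 8..11 */
        unsigned int we_gid;            /* bytes 12..15 */
};

/* Fail the build if the layout ever changes. */
_Static_assert(offsetof(struct wire_example, we_uid) == 8, "we_uid moved");
_Static_assert(sizeof(struct wire_example) == 16, "struct size changed");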
diff --git a/drivers/staging/media/as102/as102_drv.c b/drivers/staging/media/as102/as102_drv.c
index ac92eaf..8b7bb95 100644
--- a/drivers/staging/media/as102/as102_drv.c
+++ b/drivers/staging/media/as102/as102_drv.c
@@ -19,7 +19,6 @@
*/
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mm.h>
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index 822c487..6cb74da 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/wait.h>
#include <linux/delay.h>
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
index 766a071..b7044a3 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -1009,7 +1009,7 @@ static int ipipe_validate_yee_params(struct vpfe_ipipe_yee *yee)
yee->es_ofst_grad > YEE_THR_MASK)
return -EINVAL;
- for (i = 0; i < VPFE_IPIPE_MAX_SIZE_YEE_LUT ; i++)
+ for (i = 0; i < VPFE_IPIPE_MAX_SIZE_YEE_LUT; i++)
if (yee->table[i] > YEE_ENTRY_MASK)
return -EINVAL;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
index e027b92..2d36b60 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
@@ -791,7 +791,7 @@ ipipe_set_3d_lut_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
/* valied table */
tbl = lut_3d->table;
- for (i = 0 ; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++) {
+ for (i = 0; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++) {
/* Each entry has 0-9 (B), 10-19 (G) and
20-29 R values */
val = tbl[i].b & D3_LUT_ENTRY_MASK;
@@ -899,7 +899,7 @@ ipipe_set_gbce_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
if (!gbce->table)
return;
- for (count = 0; count < VPFE_IPIPE_MAX_SIZE_GBCE_LUT ; count += 2)
+ for (count = 0; count < VPFE_IPIPE_MAX_SIZE_GBCE_LUT; count += 2)
w_ip_table(isp5_base_addr, ((gbce->table[count + 1] & mask) <<
GBCE_ENTRY_SHIFT) | (gbce->table[count] & mask),
((count/2) << 2) + GBCE_TB_START_ADDR);
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 081407b..e729e52 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -974,7 +974,7 @@ dt3155_remove(struct pci_dev *pdev)
kfree(pd);
}
-static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+static const struct pci_device_id pci_ids[] = {
{ PCI_DEVICE(DT3155_VENDOR_ID, DT3155_DEVICE_ID) },
{ 0, /* zero marks the end */ },
};
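DEFINE_PCI_DEVICE_TABLE() was on its way out of the tree at this point; spelling the table as a plain const struct pci_device_id array is the preferred, equivalent form. A hedged sketch with made-up IDs (this driver's real IDs are in the hunk above):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pci_ids[] = {
        { PCI_DEVICE(0x1234, 0x5678) }, /* hypothetical vendor/device */
        { 0, }                          /* terminator */
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);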
diff --git a/drivers/staging/media/go7007/go7007-driver.c b/drivers/staging/media/go7007/go7007-driver.c
index 3640df0..6f1beca 100644
--- a/drivers/staging/media/go7007/go7007-driver.c
+++ b/drivers/staging/media/go7007/go7007-driver.c
@@ -16,7 +16,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
diff --git a/drivers/staging/media/go7007/go7007-fw.c b/drivers/staging/media/go7007/go7007-fw.c
index c2d0e58af..814ce08 100644
--- a/drivers/staging/media/go7007/go7007-fw.c
+++ b/drivers/staging/media/go7007/go7007-fw.c
@@ -25,7 +25,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/device.h>
@@ -722,7 +721,8 @@ static int vti_bitlen(struct go7007 *go)
{
unsigned int i, max_time_incr = go->sensor_framerate / go->fps_scale;
- for (i = 31; (max_time_incr & ((1 << i) - 1)) == max_time_incr; --i);
+ for (i = 31; (max_time_incr & ((1 << i) - 1)) == max_time_incr; --i)
+ ;
return i + 1;
}
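The vti_bitlen() change above is purely stylistic: when a for loop has no body, checkpatch wants the lone semicolon on its own line so the loop cannot be mistaken for controlling the next statement. A small self-contained sketch of the same bit-width computation (hypothetical helper, with a zero guard added):

static unsigned int bit_width(unsigned int val)
{
        unsigned int i;

        if (!val)
                return 1;
        /* Empty body, deliberately placed on its own line. */
        for (i = 31; (val & ((1u << i) - 1)) == val; --i)
                ;
        return i + 1;
}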
diff --git a/drivers/staging/media/go7007/go7007-i2c.c b/drivers/staging/media/go7007/go7007-i2c.c
index 74f25e0..4cf4c0d 100644
--- a/drivers/staging/media/go7007/go7007-i2c.c
+++ b/drivers/staging/media/go7007/go7007-i2c.c
@@ -16,7 +16,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/list.h>
diff --git a/drivers/staging/media/go7007/go7007-loader.c b/drivers/staging/media/go7007/go7007-loader.c
index f846ad5..10bb41c 100644
--- a/drivers/staging/media/go7007/go7007-loader.c
+++ b/drivers/staging/media/go7007/go7007-loader.c
@@ -16,7 +16,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/firmware.h>
diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index b658c23..2f62be9 100644
--- a/drivers/staging/media/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/slab.h>
diff --git a/drivers/staging/media/go7007/go7007-v4l2.c b/drivers/staging/media/go7007/go7007-v4l2.c
index 50eb69a..edc52e2 100644
--- a/drivers/staging/media/go7007/go7007-v4l2.c
+++ b/drivers/staging/media/go7007/go7007-v4l2.c
@@ -16,7 +16,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
diff --git a/drivers/staging/media/go7007/s2250-board.c b/drivers/staging/media/go7007/s2250-board.c
index beaa98b..696a807 100644
--- a/drivers/staging/media/go7007/s2250-board.c
+++ b/drivers/staging/media/go7007/s2250-board.c
@@ -16,7 +16,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
diff --git a/drivers/staging/media/go7007/saa7134-go7007.c b/drivers/staging/media/go7007/saa7134-go7007.c
index d80b235..6e2ca33 100644
--- a/drivers/staging/media/go7007/saa7134-go7007.c
+++ b/drivers/staging/media/go7007/saa7134-go7007.c
@@ -86,7 +86,7 @@ static const struct go7007_board_info board_voyager = {
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.num_inputs = 1,
- .inputs = {
+ .inputs = {
{
.name = "SAA7134",
},
diff --git a/drivers/staging/media/go7007/snd-go7007.c b/drivers/staging/media/go7007/snd-go7007.c
index 4be0fa4..16dd649 100644
--- a/drivers/staging/media/go7007/snd-go7007.c
+++ b/drivers/staging/media/go7007/snd-go7007.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/sched.h>
diff --git a/drivers/staging/media/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
index 28c8b0b..f2dcc4a 100644
--- a/drivers/staging/media/lirc/lirc_igorplugusb.c
+++ b/drivers/staging/media/lirc/lirc_igorplugusb.c
@@ -363,8 +363,8 @@ static int igorplugusb_remote_poll(void *data, struct lirc_buffer *buf)
/*dummy*/ir->buf_in, /*dummy*/ir->len_in,
/*timeout*/HZ * USB_CTRL_GET_TIMEOUT);
if (ret < 0)
- printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: "
- "error %d\n", ir->devnum, ret);
+ printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: error %d\n",
+ ir->devnum, ret);
return 0;
} else if (ret < 0)
printk(DRIVER_NAME "[%d]: GET_INFRACODE: error %d\n",
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index ab2ae11..f2d396c 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -23,7 +23,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -808,7 +807,8 @@ static int imon_probe(struct usb_interface *interface,
/* Input endpoint is mandatory */
if (!ir_ep_found) {
- dev_err(dev, "%s: no valid input (IR) endpoint found.\n", __func__);
+ dev_err(dev, "%s: no valid input (IR) endpoint found.\n",
+ __func__);
retval = -ENODEV;
alloc_status = 2;
goto alloc_status_switch;
@@ -878,8 +878,8 @@ static int imon_probe(struct usb_interface *interface,
alloc_status = 7;
goto unlock;
} else
- dev_info(dev, "Registered iMON driver "
- "(lirc minor: %d)\n", lirc_minor);
+ dev_info(dev, "Registered iMON driver (lirc minor: %d)\n",
+ lirc_minor);
/* Needed while unregistering! */
driver->minor = lirc_minor;
@@ -923,8 +923,8 @@ static int imon_probe(struct usb_interface *interface,
if (usb_register_dev(interface, &imon_class)) {
/* Not a fatal error, so ignore */
- dev_info(dev, "%s: could not get a minor number for "
- "display\n", __func__);
+ dev_info(dev, "%s: could not get a minor number for display\n",
+ __func__);
}
}
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 68acca7..d2445fd 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -37,7 +37,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index 2e3a985..abe0d5c 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -428,8 +428,8 @@ static int init_timing_params(unsigned int new_duty_cycle,
period = 256 * 1000000L / freq;
pulse_width = period * duty_cycle / 100;
space_width = period - pulse_width;
- dprintk("in init_timing_params, freq=%d pulse=%ld, "
- "space=%ld\n", freq, pulse_width, space_width);
+ dprintk("in init_timing_params, freq=%d pulse=%ld, space=%ld\n",
+ freq, pulse_width, space_width);
return 0;
}
#endif /* USE_RDTSC */
@@ -974,7 +974,7 @@ static void set_use_dec(void *data)
spin_unlock_irqrestore(&hardware[type].lock, flags);
}
-static ssize_t lirc_write(struct file *file, const char *buf,
+static ssize_t lirc_write(struct file *file, const char __user *buf,
size_t n, loff_t *ppos)
{
int i, count;
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 0feeaad..e1feb61 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -767,8 +767,8 @@ static int fw_load(struct IR_tx *tx)
/* Request codeset data file */
ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->l.dev);
if (ret != 0) {
- zilog_error("firmware haup-ir-blaster.bin not available "
- "(%d)\n", ret);
+ zilog_error("firmware haup-ir-blaster.bin not available (%d)\n",
+ ret);
ret = ret < 0 ? ret : -EFAULT;
goto out;
}
diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
index 3675020..480b7c4 100644
--- a/drivers/staging/media/solo6x10/solo6x10-core.c
+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
@@ -669,7 +669,7 @@ static void solo_pci_remove(struct pci_dev *pdev)
free_solo_dev(solo_dev);
}
-static DEFINE_PCI_DEVICE_TABLE(solo_id_table) = {
+static const struct pci_device_id solo_id_table[] = {
/* 6010 based cards */
{ PCI_DEVICE(PCI_VENDOR_ID_SOFTLOGIC, PCI_DEVICE_ID_SOLO6010),
.driver_data = SOLO_DEV_6010 },
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 235d2b1..eedffed 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -306,7 +306,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb)
+static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ void *accel_priv)
{
return (u16)smp_processor_id();
}
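The xlr_net change tracks a core networking change in this cycle: the ndo_select_queue callback gained an accel_priv argument, so every driver implementation had to grow the extra parameter even if it ignores it. A hedged sketch of a conforming callback (names and queue policy are illustrative):

#include <linux/netdevice.h>
#include <linux/smp.h>

static u16 example_select_queue(struct net_device *ndev, struct sk_buff *skb,
                                void *accel_priv)
{
        /* accel_priv is unused here; pick a queue by submitting CPU. */
        return (u16)(smp_processor_id() % ndev->real_num_tx_queues);
}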
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 49ea76b..bb15220 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -83,7 +83,7 @@ enum nvec_sleep_subcmds {
static struct nvec_chip *nvec_power_handle;
-static struct mfd_cell nvec_devices[] = {
+static const struct mfd_cell nvec_devices[] = {
{
.name = "nvec-kbd",
.id = 1,
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index a417d4f..eccfcc5 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -27,7 +27,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 0315f60..a0f4868 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -29,7 +29,6 @@
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
-#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 9b4d0b5..47541e1 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -27,7 +27,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/ratelimit.h>
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index bd6ca71..089dc4b 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -26,7 +26,6 @@
**********************************************************************/
#include <linux/platform_device.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 92b0289..26b4ec5 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -255,17 +255,19 @@ static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank)
{
int err;
+ console_lock();
if (!lock_fb_info(dcon->fbinfo)) {
+ console_unlock();
dev_err(&dcon->client->dev, "unable to lock framebuffer\n");
return false;
}
- console_lock();
+
dcon->ignore_fb_events = true;
err = fb_blank(dcon->fbinfo,
blank ? FB_BLANK_POWERDOWN : FB_BLANK_UNBLANK);
dcon->ignore_fb_events = false;
- console_unlock();
unlock_fb_info(dcon->fbinfo);
+ console_unlock();
if (err) {
dev_err(&dcon->client->dev, "couldn't %sblank framebuffer\n",
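The olpc_dcon hunk fixes lock ordering: console_lock must be taken before lock_fb_info() and released after unlock_fb_info(), matching the ordering used by the fb core itself. A minimal sketch of the corrected nesting (assuming the 3.13-era lock_fb_info() that returns 0 on failure):

#include <linux/console.h>
#include <linux/errno.h>
#include <linux/fb.h>

static int blank_fb_locked(struct fb_info *info, int blank)
{
        int err;

        console_lock();
        if (!lock_fb_info(info)) {
                console_unlock();
                return -ENODEV;
        }
        err = fb_blank(info, blank);
        unlock_fb_info(info);
        console_unlock();
        return err;
}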
diff --git a/drivers/staging/ozwpan/ozeltbuf.c b/drivers/staging/ozwpan/ozeltbuf.c
index 9b86486..bd560c6 100644
--- a/drivers/staging/ozwpan/ozeltbuf.c
+++ b/drivers/staging/ozwpan/ozeltbuf.c
@@ -3,7 +3,6 @@
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include "ozdbg.h"
@@ -138,7 +137,7 @@ int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
oz_dbg(ON, "%s: (0x%x)\n", __func__, id);
- st = kzalloc(sizeof(struct oz_elt_stream), GFP_ATOMIC | __GFP_ZERO);
+ st = kzalloc(sizeof(struct oz_elt_stream), GFP_ATOMIC);
if (st == NULL)
return -ENOMEM;
atomic_set(&st->ref_count, 1);
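The ozeltbuf change drops a redundant flag: kzalloc() already zeroes the allocation, so passing __GFP_ZERO on top of it is a no-op. Sketch, with a hypothetical structure standing in for the real one:

#include <linux/slab.h>

struct example_stream {         /* stand-in for the real stream struct */
        int id;
        int max_buf_count;
};

static struct example_stream *example_stream_alloc(void)
{
        /* No __GFP_ZERO needed: kzalloc() implies it. */
        return kzalloc(sizeof(struct example_stream), GFP_ATOMIC);
}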
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
index ab85a72..7436950 100644
--- a/drivers/staging/ozwpan/ozpd.c
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -4,7 +4,6 @@
* -----------------------------------------------------------------------------
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index 88714ec..64d94f7 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -4,7 +4,6 @@
* -----------------------------------------------------------------------------
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
@@ -337,7 +336,7 @@ static void oz_rx_frame(struct sk_buff *skb)
oz_dbg(RX_FRAMES, "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
mac_hdr = skb_mac_header(skb);
- src_addr = &mac_hdr[ETH_ALEN] ;
+ src_addr = &mac_hdr[ETH_ALEN];
length = skb->len;
/* Check the version field */
diff --git a/drivers/staging/ozwpan/ozusbsvc.c b/drivers/staging/ozwpan/ozusbsvc.c
index cf26379..edd44c4 100644
--- a/drivers/staging/ozwpan/ozusbsvc.c
+++ b/drivers/staging/ozwpan/ozusbsvc.c
@@ -11,7 +11,6 @@
* -----------------------------------------------------------------------------
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
index 228bffa..617f51c 100644
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -5,7 +5,6 @@
* This file implements the protocol specific parts of the USB service for a PD.
* -----------------------------------------------------------------------------
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index cbc15c1..ec4b1fd 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -1590,8 +1590,8 @@ static ssize_t keypad_read(struct file *file,
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
- interruptible_sleep_on(&keypad_read_wait);
- if (signal_pending(current))
+ if (wait_event_interruptible(keypad_read_wait,
+ keypad_buflen != 0))
return -EINTR;
}
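interruptible_sleep_on() is racy (a wakeup that arrives between testing the condition and going to sleep is lost) and was being removed from the kernel; wait_event_interruptible() re-tests the condition inside the wait machinery. A hedged sketch of the replacement pattern (queue and flag names are illustrative):

#include <linux/errno.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_read_wait);
static int example_buflen;      /* producer sets this, then wakes the queue */

static int example_wait_for_data(void)
{
        /* Returns non-zero if a signal arrived before data did. */
        if (wait_event_interruptible(example_read_wait, example_buflen != 0))
                return -EINTR;
        return 0;
}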
diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
index 919cb95..3826561 100644
--- a/drivers/staging/phison/phison.c
+++ b/drivers/staging/phison/phison.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -69,7 +68,7 @@ static int phison_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
-static DEFINE_PCI_DEVICE_TABLE(phison_pci_tbl) = {
+static const struct pci_device_id phison_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PHISON, PCI_DEVICE_ID_PS5000),
PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
{ 0, },
diff --git a/drivers/staging/rtl8187se/ieee80211/dot11d.c b/drivers/staging/rtl8187se/ieee80211/dot11d.c
index 9d2d5c5..4483c2c 100644
--- a/drivers/staging/rtl8187se/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8187se/ieee80211/dot11d.c
@@ -1,16 +1,6 @@
-//-----------------------------------------------------------------------------
-// File:
-// Dot11d.c
-//
-// Description:
-// Implement 802.11d.
-//
-//-----------------------------------------------------------------------------
-
#include "dot11d.h"
-void
-Dot11d_Init(struct ieee80211_device *ieee)
+void Dot11d_Init(struct ieee80211_device *ieee)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
@@ -22,23 +12,19 @@ Dot11d_Init(struct ieee80211_device *ieee)
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
RESET_CIE_WATCHDOG(ieee);
- printk("Dot11d_Init()\n");
+ netdev_info(ieee->dev, "Dot11d_Init()\n");
}
-//
-// Description:
-// Reset to the state as we are just entering a regulatory domain.
-//
-void
-Dot11d_Reset(struct ieee80211_device *ieee)
+/* Reset to the state as we are just entering a regulatory domain. */
+void Dot11d_Reset(struct ieee80211_device *ieee)
{
u32 i;
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
- // Clear old channel map
+ /* Clear old channel map */
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
- // Set new channel map
+ /* Set new channel map */
for (i = 1; i <= 11; i++)
(pDot11dInfo->channel_map)[i] = 1;
@@ -48,36 +34,30 @@ Dot11d_Reset(struct ieee80211_device *ieee)
pDot11dInfo->State = DOT11D_STATE_NONE;
pDot11dInfo->CountryIeLen = 0;
RESET_CIE_WATCHDOG(ieee);
-
- //printk("Dot11d_Reset()\n");
}
-//
-// Description:
-// Update country IE from Beacon or Probe Response
-// and configure PHY for operation in the regulatory domain.
-//
-// TODO:
-// Configure Tx power.
-//
-// Assumption:
-// 1. IS_DOT11D_ENABLE() is TRUE.
-// 2. Input IE is an valid one.
-//
-void
-Dot11d_UpdateCountryIe(
- struct ieee80211_device *dev,
- u8 *pTaddr,
- u16 CoutryIeLen,
- u8 *pCoutryIe
- )
+/*
+ * Description:
+ * Update country IE from Beacon or Probe Response and configure PHY for
+ * operation in the regulatory domain.
+ *
+ * TODO:
+ * Configure Tx power.
+ *
+ * Assumption:
+ * 1. IS_DOT11D_ENABLE() is TRUE.
+ * 2. Input IE is a valid one.
+ */
+void Dot11d_UpdateCountryIe(struct ieee80211_device *dev, u8 *pTaddr,
+ u16 CoutryIeLen, u8 *pCoutryIe)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 i, j, NumTriples, MaxChnlNum;
+ u8 index, MaxTxPowerInDbm;
PCHNL_TXPOWER_TRIPLE pTriple;
if ((CoutryIeLen - 3)%3 != 0) {
- printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
+ netdev_info(dev->dev, "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
Dot11d_Reset(dev);
return;
}
@@ -85,37 +65,47 @@ Dot11d_UpdateCountryIe(
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
MaxChnlNum = 0;
- NumTriples = (CoutryIeLen - 3) / 3; // skip 3-byte country string.
+ NumTriples = (CoutryIeLen - 3) / 3; /* skip 3-byte country string. */
pTriple = (PCHNL_TXPOWER_TRIPLE)(pCoutryIe + 3);
for (i = 0; i < NumTriples; i++) {
if (MaxChnlNum >= pTriple->FirstChnl) {
- // It is not in a monotonically increasing order, so stop processing.
- printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
+ /*
+ * It is not in a monotonically increasing order,
+ * so stop processing.
+ */
+ netdev_info(dev->dev,
+ "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
Dot11d_Reset(dev);
return;
}
- if (MAX_CHANNEL_NUMBER < (pTriple->FirstChnl + pTriple->NumChnls)) {
- // It is not a valid set of channel id, so stop processing.
- printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
+ if (MAX_CHANNEL_NUMBER <
+ (pTriple->FirstChnl + pTriple->NumChnls)) {
+ /*
+ * It is not a valid set of channel id,
+ * so stop processing
+ */
+ netdev_info(dev->dev,
+ "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
Dot11d_Reset(dev);
return;
}
- for (j = 0 ; j < pTriple->NumChnls; j++) {
- pDot11dInfo->channel_map[pTriple->FirstChnl + j] = 1;
- pDot11dInfo->MaxTxPwrDbmList[pTriple->FirstChnl + j] = pTriple->MaxTxPowerInDbm;
+ for (j = 0; j < pTriple->NumChnls; j++) {
+ index = pTriple->FirstChnl + j;
+ pDot11dInfo->channel_map[index] = 1;
+ MaxTxPowerInDbm = pTriple->MaxTxPowerInDbm;
+ pDot11dInfo->MaxTxPwrDbmList[index] = MaxTxPowerInDbm;
MaxChnlNum = pTriple->FirstChnl + j;
}
pTriple = (PCHNL_TXPOWER_TRIPLE)((u8 *)pTriple + 3);
}
#if 1
- //printk("Dot11d_UpdateCountryIe(): Channel List:\n");
- printk("Channel List:");
+ netdev_info(dev->dev, "Channel List:");
for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
if (pDot11dInfo->channel_map[i] > 0)
- printk(" %d", i);
- printk("\n");
+ netdev_info(dev->dev, " %d", i);
+ netdev_info(dev->dev, "\n");
#endif
UPDATE_CIE_SRC(dev, pTaddr);
@@ -125,31 +115,23 @@ Dot11d_UpdateCountryIe(
pDot11dInfo->State = DOT11D_STATE_LEARNED;
}
-u8
-DOT11D_GetMaxTxPwrInDbm(
- struct ieee80211_device *dev,
- u8 Channel
- )
+u8 DOT11D_GetMaxTxPwrInDbm(struct ieee80211_device *dev, u8 Channel)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 MaxTxPwrInDbm = 255;
if (MAX_CHANNEL_NUMBER < Channel) {
- printk("DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
+ netdev_info(dev->dev, "DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
return MaxTxPwrInDbm;
}
- if (pDot11dInfo->channel_map[Channel]) {
+ if (pDot11dInfo->channel_map[Channel])
MaxTxPwrInDbm = pDot11dInfo->MaxTxPwrDbmList[Channel];
- }
return MaxTxPwrInDbm;
}
-void
-DOT11D_ScanComplete(
- struct ieee80211_device *dev
- )
+void DOT11D_ScanComplete(struct ieee80211_device *dev)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
@@ -160,7 +142,7 @@ DOT11D_ScanComplete(
case DOT11D_STATE_DONE:
if (GET_CIE_WATCHDOG(dev) == 0) {
- // Reset country IE if previous one is gone.
+ /* Reset country IE if previous one is gone. */
Dot11d_Reset(dev);
}
break;
@@ -169,15 +151,12 @@ DOT11D_ScanComplete(
}
}
-int IsLegalChannel(
- struct ieee80211_device *dev,
- u8 channel
-)
+int IsLegalChannel(struct ieee80211_device *dev, u8 channel)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
if (MAX_CHANNEL_NUMBER < channel) {
- printk("IsLegalChannel(): Invalid Channel\n");
+ netdev_info(dev->dev, "IsLegalChannel(): Invalid Channel\n");
return 0;
}
if (pDot11dInfo->channel_map[channel] > 0)
@@ -185,10 +164,7 @@ int IsLegalChannel(
return 0;
}
-int ToLegalChannel(
- struct ieee80211_device *dev,
- u8 channel
-)
+int ToLegalChannel(struct ieee80211_device *dev, u8 channel)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 default_chn = 0;
@@ -202,7 +178,7 @@ int ToLegalChannel(
}
if (MAX_CHANNEL_NUMBER < channel) {
- printk("IsLegalChannel(): Invalid Channel\n");
+ netdev_info(dev->dev, "IsLegalChannel(): Invalid Channel\n");
return default_chn;
}
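The dot11d.c conversion replaces bare printk() calls with netdev_info(), which tags each message with the driver and interface name and gives it a proper log level. Sketch of the pattern (hypothetical helper):

#include <linux/netdevice.h>

static void example_report_channel(struct net_device *dev, int channel)
{
        /* Output is prefixed with the interface name, e.g. "wlan0: ...". */
        netdev_info(dev, "switching to channel %d\n", channel);
}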
diff --git a/drivers/staging/rtl8187se/ieee80211/dot11d.h b/drivers/staging/rtl8187se/ieee80211/dot11d.h
index 029c2ca..63f4f3c 100644
--- a/drivers/staging/rtl8187se/ieee80211/dot11d.h
+++ b/drivers/staging/rtl8187se/ieee80211/dot11d.h
@@ -3,9 +3,9 @@
#include "ieee80211.h"
-//#define ENABLE_DOT11D
+/* #define ENABLE_DOT11D */
-//#define DOT11D_MAX_CHNL_NUM 83
+/* #define DOT11D_MAX_CHNL_NUM 83 */
typedef struct _CHNL_TXPOWER_TRIPLE {
u8 FirstChnl;
@@ -20,18 +20,18 @@ typedef enum _DOT11D_STATE {
}DOT11D_STATE;
typedef struct _RT_DOT11D_INFO {
- //DECLARE_RT_OBJECT(RT_DOT11D_INFO);
+ /* DECLARE_RT_OBJECT(RT_DOT11D_INFO); */
- bool bEnabled; // dot11MultiDomainCapabilityEnabled
+ bool bEnabled; /* dot11MultiDomainCapabilityEnabled */
- u16 CountryIeLen; // > 0 if CountryIeBuf[] contains valid country information element.
+ u16 CountryIeLen; /* > 0 if CountryIeBuf[] contains valid country information element. */
u8 CountryIeBuf[MAX_IE_LEN];
- u8 CountryIeSrcAddr[6]; // Source AP of the country IE.
+ u8 CountryIeSrcAddr[6]; /* Source AP of the country IE. */
u8 CountryIeWatchdog;
- u8 channel_map[MAX_CHANNEL_NUMBER+1]; //!!!Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan)
- //u8 ChnlListLen; // #Bytes valid in ChnlList[].
- //u8 ChnlList[DOT11D_MAX_CHNL_NUM];
+ u8 channel_map[MAX_CHANNEL_NUMBER+1]; /* !!!Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan) */
+ /* u8 ChnlListLen; // #Bytes valid in ChnlList[]. */
+ /* u8 ChnlList[DOT11D_MAX_CHNL_NUM]; */
u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
DOT11D_STATE State;
@@ -58,43 +58,13 @@ typedef struct _RT_DOT11D_INFO {
#define IS_DOT11D_STATE_DONE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
+void Dot11d_Init(struct ieee80211_device *dev);
+void Dot11d_Reset(struct ieee80211_device *dev);
+void Dot11d_UpdateCountryIe(struct ieee80211_device *dev, u8 *pTaddr,
+ u16 CoutryIeLen, u8 *pCoutryIe);
+u8 DOT11D_GetMaxTxPwrInDbm(struct ieee80211_device *dev, u8 Channel);
+void DOT11D_ScanComplete(struct ieee80211_device *dev);
+int IsLegalChannel(struct ieee80211_device *dev, u8 channel);
+int ToLegalChannel(struct ieee80211_device *dev, u8 channel);
-void
-Dot11d_Init(
- struct ieee80211_device *dev
- );
-
-void
-Dot11d_Reset(
- struct ieee80211_device *dev
- );
-
-void
-Dot11d_UpdateCountryIe(
- struct ieee80211_device *dev,
- u8 * pTaddr,
- u16 CoutryIeLen,
- u8 * pCoutryIe
- );
-
-u8
-DOT11D_GetMaxTxPwrInDbm(
- struct ieee80211_device *dev,
- u8 Channel
- );
-
-void
-DOT11D_ScanComplete(
- struct ieee80211_device * dev
- );
-
-int IsLegalChannel(
- struct ieee80211_device * dev,
- u8 channel
-);
-
-int ToLegalChannel(
- struct ieee80211_device * dev,
- u8 channel
-);
-#endif // #ifndef __INC_DOT11D_H
+#endif /* #ifndef __INC_DOT11D_H */
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index 7f01549..09ffd9b 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -90,6 +90,9 @@
#define IEEE_CRYPT_ALG_NAME_LEN 16
+extern int ieee80211_crypto_tkip_init(void);
+extern void ieee80211_crypto_tkip_exit(void);
+
//by amy for ps
typedef struct ieee_param {
u32 cmd;
@@ -1237,7 +1240,8 @@ static inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
return 1;
}
-static inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mode)
+static inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee,
+ int mode)
{
/*
* It is possible for both access points and our device to support
@@ -1300,19 +1304,16 @@ extern int ieee80211_set_encryption(struct ieee80211_device *ieee);
/* ieee80211_tx.c */
-extern int ieee80211_encrypt_fragment(
- struct ieee80211_device *ieee,
- struct sk_buff *frag,
- int hdr_len);
+extern int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
+ struct sk_buff *frag, int hdr_len);
-extern int ieee80211_rtl_xmit(struct sk_buff *skb,
- struct net_device *dev);
+extern int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev);
extern void ieee80211_txb_free(struct ieee80211_txb *);
/* ieee80211_rx.c */
extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats);
+ struct ieee80211_rx_stats *rx_stats);
extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
struct ieee80211_hdr_4addr *header,
struct ieee80211_rx_stats *stats);
@@ -1328,25 +1329,28 @@ extern int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *key);
extern int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data* wrqu, char *extra);
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- struct iw_param *data, char *extra);
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra);
int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len);
/* ieee80211_softmac.c */
extern short ieee80211_is_54g(const struct ieee80211_network *net);
extern short ieee80211_is_shortslot(const struct ieee80211_network *net);
-extern int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats, u16 type,
- u16 stype);
-extern void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net);
-
-extern void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee);
+extern int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
+ struct sk_buff *skb,
+ struct ieee80211_rx_stats *rx_stats,
+ u16 type, u16 stype);
+extern void ieee80211_softmac_new_net(struct ieee80211_device *ieee,
+ struct ieee80211_network *net);
+
+extern void ieee80211_softmac_xmit(struct ieee80211_txb *txb,
+ struct ieee80211_device *ieee);
extern void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee);
extern void ieee80211_start_bss(struct ieee80211_device *ieee);
extern void ieee80211_start_master_bss(struct ieee80211_device *ieee);
@@ -1368,16 +1372,17 @@ extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee);
extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
-extern int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_point *p);
+extern int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee,
+ struct iw_point *p);
extern void notify_wx_assoc_event(struct ieee80211_device *ieee);
extern void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
-extern void SendDisassociation(struct ieee80211_device *ieee,u8* asSta,u8 asRsn);
+extern void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta,
+ u8 asRsn);
extern void ieee80211_rtl_start_scan(struct ieee80211_device *ieee);
//Add for RF power on power off by lizhaoming 080512
-extern void SendDisassociation(struct ieee80211_device *ieee,
- u8* asSta,
- u8 asRsn);
+extern void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta,
+ u8 asRsn);
/* ieee80211_crypt_ccmp&tkip&wep.c */
extern void ieee80211_tkip_null(void);
@@ -1386,64 +1391,72 @@ extern void ieee80211_ccmp_null(void);
/* ieee80211_softmac_wx.c */
extern int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *ext);
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *ext);
extern int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra);
+ struct iw_request_info *info,
+ union iwreq_data *awrq,
+ char *extra);
-extern int ieee80211_wx_get_essid(struct ieee80211_device *ieee, struct iw_request_info *a,union iwreq_data *wrqu,char *b);
+extern int ieee80211_wx_get_essid(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
extern int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
extern int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+extern int ieee80211_wx_set_mode(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+extern int ieee80211_wx_set_scan(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
extern int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra);
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+extern int ieee80211_wx_get_mode(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+extern int ieee80211_wx_set_freq(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int ieee80211_wx_get_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+extern int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
extern void ieee80211_wx_sync_scan_wq(struct work_struct *work);
extern int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
extern int ieee80211_wx_get_name(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-extern int ieee80211_wx_set_power(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra);
+extern int ieee80211_wx_set_power(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+
extern int ieee80211_wx_get_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
extern void ieee80211_softmac_ips_scan_syncro(struct ieee80211_device *ieee);
-extern void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr);
+extern void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee,
+ short pwr);
extern const long ieee80211_wlan_frequencies[];
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
index 694eae3..101f0c0 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
@@ -15,7 +15,6 @@
//#include <linux/config.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -39,8 +38,7 @@ struct ieee80211_crypto {
static struct ieee80211_crypto *hcrypt;
-void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee,
- int force)
+void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force)
{
struct list_head *ptr, *n;
struct ieee80211_crypt_data *entry;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
index f5949e8..c8013d3 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
@@ -11,9 +11,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-//#include <linux/config.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
@@ -61,7 +59,7 @@ struct ieee80211_ccmp_data {
};
void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
- const u8 pt[16], u8 ct[16])
+ const u8 pt[16], u8 ct[16])
{
crypto_cipher_encrypt_one((void *)tfm, ct, pt);
}
@@ -130,7 +128,6 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
(WLAN_FC_GET_STYPE(fc) & 0x08));
*/
- // fixed by David :2006.9.6
qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
(WLAN_FC_GET_STYPE(fc) & 0x80));
aad_len = 22;
@@ -212,7 +209,6 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos = skb_push(skb, CCMP_HDR_LEN);
memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
pos += hdr_len;
-// mic = skb_put(skb, CCMP_MIC_LEN);
i = CCMP_PN_LEN - 1;
while (i >= 0) {
@@ -232,7 +228,6 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = key->tx_pn[0];
hdr = (struct ieee80211_hdr_4addr *)skb->data;
- //mic is moved to here by john
mic = skb_put(skb, CCMP_MIC_LEN);
ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
@@ -416,9 +411,8 @@ static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
static char *ieee80211_ccmp_print_stats(char *p, void *priv)
{
struct ieee80211_ccmp_data *ccmp = priv;
- p += sprintf(p, "key[%d] alg=CCMP key_set=%d "
- "tx_pn=%pm rx_pn=%pm "
- "format_errors=%d replays=%d decrypt_errors=%d\n",
+ p += sprintf(p,
+ "key[%d] alg=CCMP key_set=%d tx_pn=%pm rx_pn=%pm format_errors=%d replays=%d decrypt_errors=%d\n",
ccmp->key_idx, ccmp->key_set,
ccmp->tx_pn, ccmp->rx_pn,
ccmp->dot11RSNAStatsCCMPFormatErrors,
@@ -430,7 +424,6 @@ static char *ieee80211_ccmp_print_stats(char *p, void *priv)
void ieee80211_ccmp_null(void)
{
-// printk("============>%s()\n", __func__);
return;
}
static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = {
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
index da24e43..c590796 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
@@ -9,9 +9,7 @@
* more details.
*/
-//#include <linux/config.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
@@ -65,7 +63,7 @@ struct ieee80211_tkip_data {
u8 rx_hdr[16], tx_hdr[16];
};
-static void * ieee80211_tkip_init(int key_idx)
+static void *ieee80211_tkip_init(int key_idx)
{
struct ieee80211_tkip_data *priv;
@@ -304,8 +302,8 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
- struct ieee80211_tkip_data *tkey = priv;
- struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
+ struct ieee80211_tkip_data *tkey = priv;
+ struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
int len;
u8 *pos;
struct ieee80211_hdr_4addr *hdr;
@@ -467,27 +465,27 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return keyidx;
}
-static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
- u8 * data, size_t data_len, u8 * mic)
+static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
+ u8 *data, size_t data_len, u8 *mic)
{
- struct hash_desc desc;
- struct scatterlist sg[2];
+ struct hash_desc desc;
+ struct scatterlist sg[2];
- if (tfm_michael == NULL) {
- printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
- return -1;
- }
+ if (tfm_michael == NULL) {
+ printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
+ return -1;
+ }
sg_init_table(sg, 2);
sg_set_buf(&sg[0], hdr, 16);
sg_set_buf(&sg[1], data, data_len);
- if (crypto_hash_setkey(tfm_michael, key, 8))
- return -1;
+ if (crypto_hash_setkey(tfm_michael, key, 8))
+ return -1;
- desc.tfm = tfm_michael;
- desc.flags = 0;
- return crypto_hash_digest(&desc, sg, data_len + 16, mic);
+ desc.tfm = tfm_michael;
+ desc.flags = 0;
+ return crypto_hash_digest(&desc, sg, data_len + 16, mic);
}
static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
@@ -521,7 +519,8 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
}
-static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
+static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len,
+ void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
u8 *pos;
@@ -538,12 +537,9 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *pri
michael_mic_hdr(skb, tkey->tx_hdr);
- // { david, 2006.9.1
- // fix the wpa process with wmm enabled.
if(IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
}
- // }
pos = skb_put(skb, 8);
if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
@@ -554,8 +550,8 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *pri
}
static void ieee80211_michael_mic_failure(struct net_device *dev,
- struct ieee80211_hdr_4addr *hdr,
- int keyidx)
+ struct ieee80211_hdr_4addr *hdr,
+ int keyidx)
{
union iwreq_data wrqu;
struct iw_michaelmicfailure ev;
@@ -575,7 +571,7 @@ static void ieee80211_michael_mic_failure(struct net_device *dev,
}
static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
- int hdr_len, void *priv)
+ int hdr_len, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
u8 mic[8];
@@ -587,12 +583,9 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
return -1;
michael_mic_hdr(skb, tkey->rx_hdr);
- // { david, 2006.9.1
- // fix the wpa process with wmm enabled.
if(IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
}
- // }
if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
@@ -688,7 +681,7 @@ static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
}
-static char * ieee80211_tkip_print_stats(char *p, void *priv)
+static char *ieee80211_tkip_print_stats(char *p, void *priv)
{
struct ieee80211_tkip_data *tkip = priv;
p += sprintf(p, "key[%d] alg=TKIP key_set=%d "
@@ -746,6 +739,4 @@ void ieee80211_crypto_tkip_exit(void)
void ieee80211_tkip_null(void)
{
-// printk("============>%s()\n", __func__);
- return;
}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
index bba7714..f114f9a 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
@@ -13,7 +13,6 @@
//#include <linux/config.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
index 3045790..b522b57 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
@@ -337,8 +337,9 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
/* Called only as a tasklet (software IRQ), by ieee80211_rx */
static inline int
-ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *skb,
- int keyidx, struct ieee80211_crypt_data *crypt)
+ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee,
+ struct sk_buff *skb, int keyidx,
+ struct ieee80211_crypt_data *crypt)
{
struct ieee80211_hdr_4addr *hdr;
int res, hdrlen;
@@ -366,7 +367,7 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *s
/* this function is stolen from ipw2200 driver*/
#define IEEE_PACKET_RETRY_TIME (5*HZ)
static int is_duplicate_packet(struct ieee80211_device *ieee,
- struct ieee80211_hdr_4addr *header)
+ struct ieee80211_hdr_4addr *header)
{
u16 fc = le16_to_cpu(header->frame_ctl);
u16 sc = le16_to_cpu(header->seq_ctl);
@@ -467,7 +468,7 @@ drop:
* IEEE 802.11 format, i.e., in the format it was sent over air.
* This function is called only as a tasklet (software IRQ). */
int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats)
+ struct ieee80211_rx_stats *rx_stats)
{
struct net_device *dev = ieee->dev;
//struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
@@ -794,9 +795,7 @@ static inline int ieee80211_is_ofdm_rate(u8 rate)
return 0;
}
-static inline int ieee80211_SignalStrengthTranslate(
- int CurrSS
- )
+static inline int ieee80211_SignalStrengthTranslate(int CurrSS)
{
int RetSS;
@@ -831,12 +830,10 @@ static inline int ieee80211_SignalStrengthTranslate(
return RetSS;
}
-static inline void ieee80211_extract_country_ie(
- struct ieee80211_device *ieee,
- struct ieee80211_info_element *info_element,
- struct ieee80211_network *network,
- u8 *addr2
-)
+static inline void
+ieee80211_extract_country_ie(struct ieee80211_device *ieee,
+ struct ieee80211_info_element *info_element,
+ struct ieee80211_network *network, u8 *addr2)
{
if (IS_DOT11D_ENABLE(ieee)) {
if (info_element->len != 0) {
@@ -858,10 +855,8 @@ static inline void ieee80211_extract_country_ie(
}
-static int
-ieee80211_TranslateToDbm(
- unsigned char SignalStrengthIndex // 0-100 index.
- )
+/* SignalStrengthIndex is 0-100 */
+static int ieee80211_TranslateToDbm(unsigned char SignalStrengthIndex)
{
unsigned char SignalPower; // in dBm.
@@ -1197,7 +1192,7 @@ static inline int is_same_network(struct ieee80211_network *src,
}
inline void update_network(struct ieee80211_network *dst,
- struct ieee80211_network *src)
+ struct ieee80211_network *src)
{
unsigned char quality = src->stats.signalstrength;
unsigned char signal = 0;
@@ -1281,10 +1276,10 @@ inline void update_network(struct ieee80211_network *dst,
}
-inline void ieee80211_process_probe_response(
- struct ieee80211_device *ieee,
- struct ieee80211_probe_response *beacon,
- struct ieee80211_rx_stats *stats)
+inline void
+ieee80211_process_probe_response(struct ieee80211_device *ieee,
+ struct ieee80211_probe_response *beacon,
+ struct ieee80211_rx_stats *stats)
{
struct ieee80211_network network;
struct ieee80211_network *target;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index 0290706..c27392d 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -20,17 +20,17 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/etherdevice.h>
#include "dot11d.h"
u8 rsn_authen_cipher_suite[16][4] = {
- {0x00,0x0F,0xAC,0x00}, //Use group key, //Reserved
- {0x00,0x0F,0xAC,0x01}, //WEP-40 //RSNA default
- {0x00,0x0F,0xAC,0x02}, //TKIP //NONE //{used just as default}
- {0x00,0x0F,0xAC,0x03}, //WRAP-historical
- {0x00,0x0F,0xAC,0x04}, //CCMP
- {0x00,0x0F,0xAC,0x05}, //WEP-104
+ {0x00, 0x0F, 0xAC, 0x00}, //Use group key, //Reserved
+ {0x00, 0x0F, 0xAC, 0x01}, //WEP-40 //RSNA default
+ {0x00, 0x0F, 0xAC, 0x02}, //TKIP //NONE //{used just as default}
+ {0x00, 0x0F, 0xAC, 0x03}, //WRAP-historical
+ {0x00, 0x0F, 0xAC, 0x04}, //CCMP
+ {0x00, 0x0F, 0xAC, 0x05}, //WEP-104
};
short ieee80211_is_54g(const struct ieee80211_network *net)
@@ -47,7 +47,7 @@ short ieee80211_is_shortslot(const struct ieee80211_network *net)
* tag and the EXTENDED RATE MFIE tag if needed.
* It encludes two bytes per tag for the tag itself and its len
*/
-unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
+static unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
{
unsigned int rate_len = 0;
@@ -65,7 +65,7 @@ unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
* Then it updates the pointer so that
* it points after the new MFIE tag added.
*/
-void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
+static void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
@@ -82,7 +82,7 @@ void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
*tag_p = tag;
}
-void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
+static void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
@@ -106,7 +106,8 @@ void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
}
-void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p) {
+static void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p)
+{
u8 *tag = *tag_p;
*tag++ = MFIE_TYPE_GENERIC; //0
@@ -118,35 +119,33 @@ void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p) {
*tag++ = 0x00;
*tag++ = 0x01;
#ifdef SUPPORT_USPD
- if(ieee->current_network.wmm_info & 0x80) {
+ if (ieee->current_network.wmm_info & 0x80)
*tag++ = 0x0f|MAX_SP_Len;
- } else {
+ else
*tag++ = MAX_SP_Len;
- }
#else
*tag++ = MAX_SP_Len;
#endif
*tag_p = tag;
}
-void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p) {
+static void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p)
+{
u8 *tag = *tag_p;
-
- *tag++ = MFIE_TYPE_GENERIC; //0
- *tag++ = 7;
- *tag++ = 0x00;
- *tag++ = 0xe0;
- *tag++ = 0x4c;
- *tag++ = 0x01;//5
- *tag++ = 0x02;
- *tag++ = 0x11;
+ *tag++ = MFIE_TYPE_GENERIC; /* 0 */
+ *tag++ = 7;
+ *tag++ = 0x00;
+ *tag++ = 0xe0;
+ *tag++ = 0x4c;
+ *tag++ = 0x01; /* 5 */
+ *tag++ = 0x02;
+ *tag++ = 0x11;
*tag++ = 0x00;
-
*tag_p = tag;
printk(KERN_ALERT "This is enable turbo mode IE process\n");
}
-void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
+static void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
{
int nh;
nh = (ieee->mgmt_queue_head +1) % MGMT_QUEUE_NUM;
@@ -164,7 +163,7 @@ void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
//return 0;
}
-struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
+static struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
{
struct sk_buff *ret;
@@ -179,7 +178,7 @@ struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
return ret;
}
-void init_mgmt_queue(struct ieee80211_device *ieee)
+static void init_mgmt_queue(struct ieee80211_device *ieee)
{
ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0;
}
@@ -187,7 +186,8 @@ void init_mgmt_queue(struct ieee80211_device *ieee)
void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl);
-inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
+inline void softmac_mgmt_xmit(struct sk_buff *skb,
+ struct ieee80211_device *ieee)
{
unsigned long flags;
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
@@ -238,7 +238,8 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
}
-inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
+inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
+ struct ieee80211_device *ieee)
{
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
@@ -276,10 +277,9 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
// dev_kfree_skb_any(skb);//edit by thomas
}
//by amy for power save
-inline struct sk_buff *ieee80211_disassociate_skb(
- struct ieee80211_network *beacon,
- struct ieee80211_device *ieee,
- u8 asRsn)
+inline struct sk_buff *
+ieee80211_disassociate_skb(struct ieee80211_network *beacon,
+ struct ieee80211_device *ieee, u8 asRsn)
{
struct sk_buff *skb;
struct ieee80211_disassoc_frame *disass;
@@ -299,12 +299,7 @@ inline struct sk_buff *ieee80211_disassociate_skb(
disass->reasoncode = asRsn;
return skb;
}
-void
-SendDisassociation(
- struct ieee80211_device *ieee,
- u8* asSta,
- u8 asRsn
-)
+void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta, u8 asRsn)
{
struct ieee80211_network *beacon = &ieee->current_network;
struct sk_buff *skb;
@@ -379,7 +374,7 @@ void ext_ieee80211_send_beacon_wq(struct ieee80211_device *ieee)
//spin_unlock_irqrestore(&ieee->beacon_lock,flags);
}
-void ieee80211_send_beacon(struct ieee80211_device *ieee)
+static void ieee80211_send_beacon(struct ieee80211_device *ieee)
{
struct sk_buff *skb;
@@ -404,7 +399,7 @@ void ieee80211_send_beacon(struct ieee80211_device *ieee)
}
-void ieee80211_send_beacon_cb(unsigned long _ieee)
+static void ieee80211_send_beacon_cb(unsigned long _ieee)
{
struct ieee80211_device *ieee =
(struct ieee80211_device *) _ieee;
@@ -415,7 +410,7 @@ void ieee80211_send_beacon_cb(unsigned long _ieee)
spin_unlock_irqrestore(&ieee->beacon_lock, flags);
}
-void ieee80211_send_probe(struct ieee80211_device *ieee)
+static void ieee80211_send_probe(struct ieee80211_device *ieee)
{
struct sk_buff *skb;
@@ -427,7 +422,7 @@ void ieee80211_send_probe(struct ieee80211_device *ieee)
}
}
-void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
+static void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
{
if (ieee->active_scan && (ieee->softmac_features & IEEE_SOFTMAC_PROBERQ)){
ieee80211_send_probe(ieee);
@@ -438,7 +433,7 @@ void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
/* this performs syncro scan blocking the caller until all channels
* in the allowed channel map has been checked.
*/
-void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
+static void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
{
short ch = 0;
u8 channel_map[MAX_CHANNEL_NUMBER+1];
@@ -576,7 +571,7 @@ out:
DOT11D_ScanComplete(ieee);
}
-void ieee80211_softmac_scan_wq(struct work_struct *work)
+static void ieee80211_softmac_scan_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
@@ -619,7 +614,7 @@ out:
return;
}
-void ieee80211_beacons_start(struct ieee80211_device *ieee)
+static void ieee80211_beacons_start(struct ieee80211_device *ieee)
{
unsigned long flags;
@@ -631,7 +626,7 @@ void ieee80211_beacons_start(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->beacon_lock,flags);
}
-void ieee80211_beacons_stop(struct ieee80211_device *ieee)
+static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
{
unsigned long flags;
@@ -663,7 +658,7 @@ void ieee80211_start_send_beacons(struct ieee80211_device *ieee)
}
-void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
+static void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
{
// unsigned long flags;
@@ -735,8 +730,9 @@ void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
}
-inline struct sk_buff *ieee80211_authentication_req(struct ieee80211_network *beacon,
- struct ieee80211_device *ieee, int challengelen)
+inline struct sk_buff *
+ieee80211_authentication_req(struct ieee80211_network *beacon,
+ struct ieee80211_device *ieee, int challengelen)
{
struct sk_buff *skb;
struct ieee80211_authentication *auth;
@@ -768,7 +764,8 @@ inline struct sk_buff *ieee80211_authentication_req(struct ieee80211_network *be
}
-static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *dest)
+static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee,
+ u8 *dest)
{
u8 *tag;
int beacon_size;
@@ -969,7 +966,7 @@ static struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,
}
-struct sk_buff* ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
+static struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee, short pwr)
{
struct sk_buff *skb;
struct ieee80211_hdr_3addr* hdr;
@@ -995,7 +992,7 @@ struct sk_buff* ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
}
-void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8* dest)
+static void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
{
struct sk_buff *buf = ieee80211_assoc_resp(ieee, dest);
@@ -1006,7 +1003,7 @@ void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8* dest)
}
-void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8* dest)
+static void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8 *dest)
{
struct sk_buff *buf = ieee80211_auth_resp(ieee, s, dest);
@@ -1017,7 +1014,7 @@ void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8* dest)
}
-void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
+static void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
{
struct sk_buff *buf = ieee80211_probe_resp(ieee, dest);
@@ -1029,7 +1026,9 @@ void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
}
-inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beacon,struct ieee80211_device *ieee)
+inline struct sk_buff *
+ieee80211_association_req(struct ieee80211_network *beacon,
+ struct ieee80211_device *ieee)
{
struct sk_buff *skb;
//unsigned long flags;
@@ -1164,13 +1163,13 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-void ieee80211_associate_abort_cb(unsigned long dev)
+static void ieee80211_associate_abort_cb(unsigned long dev)
{
ieee80211_associate_abort((struct ieee80211_device *) dev);
}
-void ieee80211_associate_step1(struct ieee80211_device *ieee)
+static void ieee80211_associate_step1(struct ieee80211_device *ieee)
{
struct ieee80211_network *beacon = &ieee->current_network;
struct sk_buff *skb;
@@ -1200,7 +1199,8 @@ void ieee80211_associate_step1(struct ieee80211_device *ieee)
}
}
-void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
+static void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge,
+ int chlen)
{
u8 *c;
struct sk_buff *skb;
@@ -1234,7 +1234,7 @@ void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge,
kfree(challenge);
}
-void ieee80211_associate_step2(struct ieee80211_device *ieee)
+static void ieee80211_associate_step2(struct ieee80211_device *ieee)
{
struct sk_buff* skb;
struct ieee80211_network *beacon = &ieee->current_network;
@@ -1256,7 +1256,7 @@ void ieee80211_associate_step2(struct ieee80211_device *ieee)
}
}
-void ieee80211_associate_complete_wq(struct work_struct *work)
+static void ieee80211_associate_complete_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
@@ -1277,7 +1277,7 @@ void ieee80211_associate_complete_wq(struct work_struct *work)
netif_carrier_on(ieee->dev);
}
-void ieee80211_associate_complete(struct ieee80211_device *ieee)
+static void ieee80211_associate_complete(struct ieee80211_device *ieee)
{
int i;
del_timer_sync(&ieee->associate_timer);
@@ -1291,7 +1291,7 @@ void ieee80211_associate_complete(struct ieee80211_device *ieee)
queue_work(ieee->wq, &ieee->associate_complete_wq);
}
-void ieee80211_associate_procedure_wq(struct work_struct *work)
+static void ieee80211_associate_procedure_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_procedure_wq);
@@ -1310,7 +1310,8 @@ void ieee80211_associate_procedure_wq(struct work_struct *work)
up(&ieee->wx_sem);
}
-inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net)
+inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee,
+ struct ieee80211_network *net)
{
u8 tmp_ssid[IW_ESSID_MAX_SIZE+1];
int tmp_ssid_len = 0;
@@ -1423,7 +1424,7 @@ void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
}
-static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
+static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
{
struct ieee80211_authentication *a;
u8 *t;
@@ -1449,7 +1450,7 @@ static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
}
-int auth_rq_parse(struct sk_buff *skb,u8* dest)
+static int auth_rq_parse(struct sk_buff *skb, u8 *dest)
{
struct ieee80211_authentication *a;
@@ -1467,7 +1468,8 @@ int auth_rq_parse(struct sk_buff *skb,u8* dest)
return WLAN_STATUS_SUCCESS;
}
-static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb, u8 *src)
+static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
+ u8 *src)
{
u8 *tag;
u8 *skbend;
@@ -1505,7 +1507,7 @@ static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
}
-int assoc_rq_parse(struct sk_buff *skb,u8* dest)
+static int assoc_rq_parse(struct sk_buff *skb, u8 *dest)
{
struct ieee80211_assoc_request_frame *a;
@@ -1536,8 +1538,8 @@ static inline u16 assoc_parse(struct sk_buff *skb, int *aid)
return le16_to_cpu(a->status);
}
-static inline void
-ieee80211_rx_probe_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
+static inline void ieee80211_rx_probe_rq(struct ieee80211_device *ieee,
+ struct sk_buff *skb)
{
u8 dest[ETH_ALEN];
@@ -1551,8 +1553,8 @@ ieee80211_rx_probe_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
}
}
-inline void
-ieee80211_rx_auth_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
+inline void ieee80211_rx_auth_rq(struct ieee80211_device *ieee,
+ struct sk_buff *skb)
{
u8 dest[ETH_ALEN];
int status;
@@ -1595,7 +1597,8 @@ void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr)
}
-short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, u32 *time_l)
+static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
+ u32 *time_l)
{
int timeout = 0;
@@ -1648,7 +1651,7 @@ short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, u32 *ti
}
-inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
+static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
{
u32 th,tl;
@@ -1770,10 +1773,10 @@ void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-inline int
-ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats, u16 type,
- u16 stype)
+inline int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
+ struct sk_buff *skb,
+ struct ieee80211_rx_stats *rx_stats,
+ u16 type, u16 stype)
{
struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *) skb->data;
u16 errcode;
@@ -1976,7 +1979,8 @@ associate_complete:
* to the driver later, when it wakes the queue.
*/
-void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee)
+void ieee80211_softmac_xmit(struct ieee80211_txb *txb,
+ struct ieee80211_device *ieee)
{
@@ -2013,7 +2017,7 @@ void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *
}
/* called with ieee->lock acquired */
-void ieee80211_resume_tx(struct ieee80211_device *ieee)
+static void ieee80211_resume_tx(struct ieee80211_device *ieee)
{
int i;
for(i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) {
@@ -2143,7 +2147,7 @@ void ieee80211_start_master_bss(struct ieee80211_device *ieee)
netif_carrier_on(ieee->dev);
}
-void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
+static void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
{
if(ieee->raw_tx){
@@ -2154,7 +2158,7 @@ void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
}
}
-void ieee80211_start_ibss_wq(struct work_struct *work)
+static void ieee80211_start_ibss_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
@@ -2327,7 +2331,7 @@ void ieee80211_disassociate(struct ieee80211_device *ieee)
ieee->state = IEEE80211_NOLINK;
}
-void ieee80211_associate_retry_wq(struct work_struct *work)
+static void ieee80211_associate_retry_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
@@ -2619,7 +2623,8 @@ static int ieee80211_wpa_enable(struct ieee80211_device *ieee, int value)
}
-void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_ie, int wpa_ie_len)
+static void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_ie,
+ int wpa_ie_len)
{
/* make sure WPA is enabled */
ieee80211_wpa_enable(ieee, 1);
@@ -2628,7 +2633,8 @@ void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_ie, int
}
-static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command, int reason)
+static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command,
+ int reason)
{
int ret = 0;
@@ -2652,7 +2658,7 @@ static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command, int re
static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
- struct ieee_param *param, int plen)
+ struct ieee_param *param, int plen)
{
u8 *buf;
@@ -2706,7 +2712,8 @@ static int ieee80211_wpa_set_auth_algs(struct ieee80211_device *ieee, int value)
return ret;
}
-static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name, u32 value)
+static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name,
+ u32 value)
{
int ret=0;
unsigned long flags;
@@ -2784,7 +2791,7 @@ static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name, u32 v
/* implementation borrowed from hostap driver */
static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
- struct ieee_param *param, int param_len)
+ struct ieee_param *param, int param_len)
{
int ret = 0;
@@ -2931,7 +2938,8 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
return ret;
}
-int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_point *p)
+int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee,
+ struct iw_point *p)
{
struct ieee_param *param;
int ret=0;
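Several of the helpers made static above (ieee80211_MFIE_Brate, ieee80211_MFIE_Grate, ieee80211_WMM_Info, ieee80211_TURBO_Info) build a management frame by appending one information element at a time through a moving tag pointer and handing the advanced pointer back via *tag_p. A compact sketch of that pattern; put_ie() and build_example() are illustrative names, and only the 00:0F:AC OUI comes from the suite table above:

#include <stdint.h>
#include <string.h>

/* Append one IE (id, length, payload) at *tag_p and advance the pointer,
 * which is what the hand-rolled *tag++ sequences in the driver do. */
static void put_ie(uint8_t **tag_p, uint8_t id, const uint8_t *data, uint8_t len)
{
	uint8_t *tag = *tag_p;

	*tag++ = id;            /* element ID */
	*tag++ = len;           /* element length */
	memcpy(tag, data, len); /* element payload */
	*tag_p = tag + len;     /* caller keeps writing after this IE */
}

static void build_example(uint8_t *buf)
{
	uint8_t *tag = buf;
	const uint8_t oui[3] = { 0x00, 0x0F, 0xAC };

	put_ie(&tag, 0xDD, oui, sizeof(oui)); /* 0xDD: vendor-specific element */
}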
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
index e528206..46f3564 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
@@ -28,8 +28,9 @@ const long ieee80211_wlan_frequencies[] = {
};
-int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
+int ieee80211_wx_set_freq(struct ieee80211_device *ieee,
+ struct iw_request_info *a, union iwreq_data *wrqu,
+ char *b)
{
int ret;
struct iw_freq *fwrq = &wrqu->freq;
@@ -82,8 +83,8 @@ out:
int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
+ struct iw_request_info *a, union iwreq_data *wrqu,
+ char *b)
{
struct iw_freq *fwrq = &wrqu->freq;
@@ -97,8 +98,8 @@ int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
}
int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *extra)
{
unsigned long flags;
@@ -126,8 +127,7 @@ int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *awrq,
+ struct iw_request_info *info, union iwreq_data *awrq,
char *extra)
{
@@ -174,8 +174,9 @@ out:
return ret;
}
-int ieee80211_wx_get_essid(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
+int ieee80211_wx_get_essid(struct ieee80211_device *ieee,
+ struct iw_request_info *a, union iwreq_data *wrqu,
+ char *b)
{
int len, ret = 0;
unsigned long flags;
@@ -211,8 +212,8 @@ out:
}
int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *extra)
{
u32 target_rate = wrqu->bitrate.value;
@@ -230,8 +231,8 @@ int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *extra)
{
wrqu->bitrate.value = ieee->rate * 100000;
@@ -239,8 +240,9 @@ int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
return 0;
}
-int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
+int ieee80211_wx_set_mode(struct ieee80211_device *ieee,
+ struct iw_request_info *a, union iwreq_data *wrqu,
+ char *b)
{
ieee->sync_scan_hurryup = 1;
@@ -305,8 +307,9 @@ void ieee80211_wx_sync_scan_wq(struct work_struct *work)
}
-int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
+int ieee80211_wx_set_scan(struct ieee80211_device *ieee,
+ struct iw_request_info *a, union iwreq_data *wrqu,
+ char *b)
{
int ret = 0;
@@ -333,8 +336,8 @@ out:
}
int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *a, union iwreq_data *wrqu,
+ char *extra)
{
int ret = 0, len;
@@ -395,8 +398,9 @@ out:
return ret;
}
-int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
+int ieee80211_wx_get_mode(struct ieee80211_device *ieee,
+ struct iw_request_info *a, union iwreq_data *wrqu,
+ char *b)
{
wrqu->mode = ieee->iw_mode;
@@ -404,8 +408,8 @@ int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info
}
int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *extra)
{
int *parms = (int *)extra;
@@ -440,8 +444,8 @@ int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
}
int ieee80211_wx_get_name(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *extra)
{
strlcpy(wrqu->name, "802.11", IFNAMSIZ);
if (ieee->modulation & IEEE80211_CCK_MODULATION) {
@@ -464,8 +468,8 @@ int ieee80211_wx_get_name(struct ieee80211_device *ieee,
/* this is mostly stolen from hostap */
int ieee80211_wx_set_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *extra)
{
int ret = 0;
@@ -525,8 +529,8 @@ exit:
/* this is stolen from hostap */
int ieee80211_wx_get_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *extra)
{
int ret = 0;
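The ieee80211_wx_* hunks above are re-indentation only, but they all share the wireless-extensions handler shape used throughout this driver: the device-private ieee80211_device, the request info, and a union iwreq_data that the handler fills in or reads. A hedged sketch of a trivial "get" handler in that shape; the function name is invented, and the * 100000 scaling simply restates the ieee80211_wx_get_rate body visible above (ieee->rate is kept in 100 kbit/s units, wrqu->bitrate.value is in bit/s):

#include <net/iw_handler.h>
#include "ieee80211.h"	/* assumed: the driver header providing struct ieee80211_device */

static int example_wx_get_rate(struct ieee80211_device *ieee,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	wrqu->bitrate.value = ieee->rate * 100000;	/* 100 kbit/s -> bit/s */
	return 0;
}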
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
index f5a5219..0dc5ae4 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
@@ -177,10 +177,8 @@ static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
return SNAP_SIZE + sizeof(u16);
}
-int ieee80211_encrypt_fragment(
- struct ieee80211_device *ieee,
- struct sk_buff *frag,
- int hdr_len)
+int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
+ struct sk_buff *frag, int hdr_len)
{
struct ieee80211_crypt_data* crypt = ieee->crypt[ieee->tx_keyidx];
int res;
@@ -279,8 +277,8 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
* Classify the to-be send data packet
* Need to acquire the sent queue index.
*/
-static int
-ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
+static int ieee80211_classify(struct sk_buff *skb,
+ struct ieee80211_network *network)
{
struct ether_header *eh = (struct ether_header *)skb->data;
unsigned int wme_UP = 0;
@@ -310,8 +308,7 @@ ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
}
/* SKBs are added to the ieee->tx_queue. */
-int ieee80211_rtl_xmit(struct sk_buff *skb,
- struct net_device *dev)
+int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ieee80211_device *ieee = netdev_priv(dev);
struct ieee80211_txb *txb = NULL;
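ieee80211_classify() above reads the Ethernet header of an outgoing frame to pick a WMM user priority ("wme_UP") and thus a transmit queue. Its exact rules are outside this hunk; the sketch below shows the generic 802.1d user-priority to access-category mapping such a classifier typically ends up with (the table is the standard WMM mapping, not copied from this file, and the enum is local to the example):

#include <stdint.h>

enum wme_ac { WME_AC_BE, WME_AC_BK, WME_AC_VI, WME_AC_VO };

/* Standard 802.1d user priority (0..7) to WMM access category. */
static enum wme_ac up_to_ac(uint8_t up)
{
	static const enum wme_ac map[8] = {
		WME_AC_BE,	/* 0: best effort */
		WME_AC_BK,	/* 1: background */
		WME_AC_BK,	/* 2: spare */
		WME_AC_BE,	/* 3: excellent effort */
		WME_AC_VI,	/* 4: controlled load */
		WME_AC_VI,	/* 5: video */
		WME_AC_VO,	/* 6: voice */
		WME_AC_VO,	/* 7: network control */
	};

	return map[up & 7];
}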
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
index 24d39cc..3b7955f 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
@@ -633,8 +633,8 @@ done:
return ret;
}
int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct iw_mlme *mlme = (struct iw_mlme *) extra;
// printk("\ndkgadfslkdjgalskdf===============>%s(), cmd:%x\n", __func__, mlme->cmd);
@@ -653,8 +653,8 @@ int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
}
int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- struct iw_param *data, char *extra)
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra)
{
/*
struct ieee80211_security sec = {
diff --git a/drivers/staging/rtl8187se/r8180.h b/drivers/staging/rtl8187se/r8180.h
index d052f4a..8999ec6 100644
--- a/drivers/staging/rtl8187se/r8180.h
+++ b/drivers/staging/rtl8187se/r8180.h
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
//#include <linux/config.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/types.h>
@@ -639,20 +638,20 @@ typedef struct r8180_priv
((_ac) == WME_AC_BK) ? BK_PRIORITY : \
BE_PRIORITY)
-short rtl8180_tx(struct net_device *dev,u8* skbuf, int len,int priority,
- short morefrag,short fragdesc,int rate);
+short rtl8180_tx(struct net_device *dev, u8 *skbuf, int len, int priority,
+ short morefrag, short fragdesc, int rate);
u8 read_nic_byte(struct net_device *dev, int x);
u32 read_nic_dword(struct net_device *dev, int x);
-u16 read_nic_word(struct net_device *dev, int x) ;
-void write_nic_byte(struct net_device *dev, int x,u8 y);
-void write_nic_word(struct net_device *dev, int x,u16 y);
-void write_nic_dword(struct net_device *dev, int x,u32 y);
+u16 read_nic_word(struct net_device *dev, int x);
+void write_nic_byte(struct net_device *dev, int x, u8 y);
+void write_nic_word(struct net_device *dev, int x, u16 y);
+void write_nic_dword(struct net_device *dev, int x, u32 y);
void force_pci_posting(struct net_device *dev);
void rtl8180_rtx_disable(struct net_device *);
-void rtl8180_set_anaparam(struct net_device *dev,u32 a);
-void rtl8185_set_anaparam2(struct net_device *dev,u32 a);
+void rtl8180_set_anaparam(struct net_device *dev, u32 a);
+void rtl8185_set_anaparam2(struct net_device *dev, u32 a);
void rtl8180_set_hw_wep(struct net_device *dev);
void rtl8180_no_hw_wep(struct net_device *dev);
void rtl8180_update_msr(struct net_device *dev);
@@ -661,7 +660,7 @@ void rtl8180_beacon_rx_disable(struct net_device *dev);
int rtl8180_down(struct net_device *dev);
int rtl8180_up(struct net_device *dev);
void rtl8180_commit(struct net_device *dev);
-void rtl8180_set_chan(struct net_device *dev,short ch);
+void rtl8180_set_chan(struct net_device *dev, short ch);
void write_phy(struct net_device *dev, u8 adr, u8 data);
void write_phy_cck(struct net_device *dev, u8 adr, u32 data);
void write_phy_ofdm(struct net_device *dev, u8 adr, u32 data);
@@ -671,7 +670,8 @@ void IPSEnter(struct net_device *dev);
void IPSLeave(struct net_device *dev);
int get_curr_tx_free_desc(struct net_device *dev, int priority);
void UpdateInitialGain(struct net_device *dev);
-bool SetAntennaConfig87SE(struct net_device *dev, u8 DefaultAnt, bool bAntDiversity);
+bool SetAntennaConfig87SE(struct net_device *dev, u8 DefaultAnt,
+ bool bAntDiversity);
//#ifdef CONFIG_RTL8185B
void rtl8185b_adapter_start(struct net_device *dev);
@@ -684,6 +684,17 @@ void fix_tx_fifo(struct net_device *dev);
void rtl8225z2_SetTXPowerLevel(struct net_device *dev, short ch);
void rtl8180_rate_adapter(struct work_struct * work);
//#endif
-bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet, u32 ChangeSource);
+bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet,
+ u32 ChangeSource);
#endif
+
+/* fun with the built-in ieee80211 stack... */
+extern int ieee80211_crypto_init(void);
+extern void ieee80211_crypto_deinit(void);
+extern int ieee80211_crypto_tkip_init(void);
+extern void ieee80211_crypto_tkip_exit(void);
+extern int ieee80211_crypto_ccmp_init(void);
+extern void ieee80211_crypto_ccmp_exit(void);
+extern int ieee80211_crypto_wep_init(void);
+extern void ieee80211_crypto_wep_exit(void);
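The hunk above moves the ieee80211_crypto_*_init/exit prototypes into r8180.h; the matching block of ad-hoc externs is deleted from r8180_core.c further down. Declaring a function once, in a header that both the defining file and its callers include, lets the compiler verify that every use agrees with the definition. A minimal sketch of the idiom, with invented names:

/* foo.h -- the single shared prototype */
#ifndef FOO_H
#define FOO_H
int foo_init(void);
void foo_exit(void);
#endif

/* foo.c -- the definition sees the prototype it must match */
#include "foo.h"
int foo_init(void)  { return 0; }
void foo_exit(void) { }

/* user.c -- callers include the same header, no local 'extern' copies */
#include "foo.h"
int start_foo(void) { return foo_init(); }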
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 76a6738..6cafee2 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -79,7 +79,7 @@ module_param(hwwep, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hwwep, " Try to use hardware WEP support. Still broken and not available on all cards");
static int rtl8180_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id);
+ const struct pci_device_id *id);
static void rtl8180_pci_remove(struct pci_dev *pdev);
@@ -387,7 +387,8 @@ static short buffer_add(struct buffer **buffer, u32 *buf, dma_addr_t dma,
return 0;
}
-void buffer_free(struct net_device *dev, struct buffer **buffer, int len, short consistent)
+static void buffer_free(struct net_device *dev, struct buffer **buffer, int len,
+ short consistent)
{
struct buffer *tmp, *next;
@@ -1027,7 +1028,7 @@ inline u8 rtl8180_IsWirelessBMode(u16 rate)
u16 N_DBPSOfRate(u16 DataRate);
-u16 ComputeTxTime(u16 FrameLength, u16 DataRate, u8 bManagementFrame,
+static u16 ComputeTxTime(u16 FrameLength, u16 DataRate, u8 bManagementFrame,
u8 bShortPreamble)
{
u16 FrameTime;
@@ -1855,7 +1856,7 @@ short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
if (remain == len && !descfrag) {
ownbit_flag = false;
- *tail = *tail | (1<<29) ; /* fist segment of the packet */
+ *tail = *tail | (1<<29); /* first segment of the packet */
*tail = *tail | (len);
} else {
ownbit_flag = true;
@@ -2238,7 +2239,8 @@ static CHANNEL_LIST ChannelPlan[] = {
{{1,2,3,4,5,6,7,8,9,10,11,12,13},13} /* world wide 13: ch1~ch11 active scan, ch12~13 passive //lzm add 080826 */
};
-static void rtl8180_set_channel_map(u8 channel_plan, struct ieee80211_device *ieee)
+static void rtl8180_set_channel_map(u8 channel_plan,
+ struct ieee80211_device *ieee)
{
int i;
@@ -2340,7 +2342,7 @@ static void rtl8187se_eeprom_register_write(struct eeprom_93cx6 *eeprom)
udelay(10);
}
-short rtl8180_init(struct net_device *dev)
+static short rtl8180_init(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u16 word;
@@ -2830,11 +2832,8 @@ static struct net_device_stats *rtl8180_stats(struct net_device *dev)
/*
* Change current and default preamble mode.
*/
-bool
-MgntActSet_802_11_PowerSaveMode(
- struct r8180_priv *priv,
- RT_PS_MODE rtPsMode
-)
+static bool MgntActSet_802_11_PowerSaveMode(struct r8180_priv *priv,
+ RT_PS_MODE rtPsMode)
{
/* Currently, we do not change power save mode on IBSS mode. */
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
@@ -2845,7 +2844,7 @@ MgntActSet_802_11_PowerSaveMode(
return true;
}
-void LeisurePSEnter(struct r8180_priv *priv)
+static void LeisurePSEnter(struct r8180_priv *priv)
{
if (priv->bLeisurePs) {
if (priv->ieee80211->ps == IEEE80211_PS_DISABLED)
@@ -2854,7 +2853,7 @@ void LeisurePSEnter(struct r8180_priv *priv)
}
}
-void LeisurePSLeave(struct r8180_priv *priv)
+static void LeisurePSLeave(struct r8180_priv *priv)
{
if (priv->bLeisurePs) {
if (priv->ieee80211->ps != IEEE80211_PS_DISABLED)
@@ -3078,7 +3077,7 @@ void rtl8180_commit(struct net_device *dev)
struct r8180_priv *priv = ieee80211_priv(dev);
if (priv->up == 0)
- return ;
+ return;
del_timer_sync(&priv->watch_dog_timer);
del_timer_sync(&priv->rateadapter_timer);
@@ -3161,7 +3160,7 @@ static const struct net_device_ops rtl8180_netdev_ops = {
};
static int rtl8180_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+ const struct pci_device_id *id)
{
unsigned long ioaddr = 0;
struct net_device *dev = NULL;
@@ -3310,16 +3309,6 @@ static void rtl8180_pci_remove(struct pci_dev *pdev)
DMESG("wlan driver removed\n");
}
-/* fun with the built-in ieee80211 stack... */
-extern int ieee80211_crypto_init(void);
-extern void ieee80211_crypto_deinit(void);
-extern int ieee80211_crypto_tkip_init(void);
-extern void ieee80211_crypto_tkip_exit(void);
-extern int ieee80211_crypto_ccmp_init(void);
-extern void ieee80211_crypto_ccmp_exit(void);
-extern int ieee80211_crypto_wep_init(void);
-extern void ieee80211_crypto_wep_exit(void);
-
static int __init rtl8180_pci_module_init(void)
{
int ret;
@@ -3446,7 +3435,7 @@ static void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
default:
spin_unlock_irqrestore(&priv->tx_lock, flag);
- return ;
+ return;
}
nicv = (u32 *)((nic - nicbegin) + (u8 *)begin);
@@ -3537,7 +3526,7 @@ static void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
spin_unlock_irqrestore(&priv->tx_lock, flag);
}
-irqreturn_t rtl8180_interrupt(int irq, void *netdev)
+static irqreturn_t rtl8180_interrupt(int irq, void *netdev)
{
struct net_device *dev = (struct net_device *) netdev;
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
diff --git a/drivers/staging/rtl8187se/r8180_dm.h b/drivers/staging/rtl8187se/r8180_dm.h
index 732c06a..cb4046f 100644
--- a/drivers/staging/rtl8187se/r8180_dm.h
+++ b/drivers/staging/rtl8187se/r8180_dm.h
@@ -5,7 +5,7 @@
/* #include "r8180_hw.h" */
/* #include "r8180_93cx6.h" */
void SwAntennaDiversityRxOk8185(struct net_device *dev, u8 SignalStrength);
-bool SetAntenna8185(struct net_device *dev, u8 u1bAntennaIndex);
+bool SetAntenna8185(struct net_device *dev, u8 u1bAntennaIndex);
bool SwitchAntenna(struct net_device *dev);
void SwAntennaDiversity(struct net_device *dev);
void SwAntennaDiversityTimerCallback(struct net_device *dev);
diff --git a/drivers/staging/rtl8187se/r8180_hw.h b/drivers/staging/rtl8187se/r8180_hw.h
index 92c05af..e59d74f 100644
--- a/drivers/staging/rtl8187se/r8180_hw.h
+++ b/drivers/staging/rtl8187se/r8180_hw.h
@@ -555,14 +555,14 @@
/* by amy for antenna */
#define EEPROM_SW_REVD_OFFSET 0x3f
-/* BIT[8-9] is for SW Antenna Diversity.
+/* BIT[8-9] is for SW Antenna Diversity.
* Only the value EEPROM_SW_AD_ENABLE means enable, other values are disable.
*/
#define EEPROM_SW_AD_MASK 0x0300
#define EEPROM_SW_AD_ENABLE 0x0100
/* BIT[10-11] determine if Antenna 1 is the Default Antenna.
- * Only the value EEPROM_DEF_ANT_1 means TRUE, other values are FALSE.
+ * Only the value EEPROM_DEF_ANT_1 means TRUE, other values are FALSE.
*/
#define EEPROM_DEF_ANT_MASK 0x0C00
#define EEPROM_DEF_ANT_1 0x0400
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225.h b/drivers/staging/rtl8187se/r8180_rtl8225.h
index c94ca07..de084f0 100644
--- a/drivers/staging/rtl8187se/r8180_rtl8225.h
+++ b/drivers/staging/rtl8187se/r8180_rtl8225.h
@@ -28,7 +28,8 @@ u16 RF_ReadReg(struct net_device *dev, u8 offset);
void rtl8180_set_mode(struct net_device *dev, int mode);
void rtl8180_set_mode(struct net_device *dev, int mode);
-bool SetZebraRFPowerState8185(struct net_device *dev, RT_RF_POWER_STATE eRFPowerState);
+bool SetZebraRFPowerState8185(struct net_device *dev,
+ RT_RF_POWER_STATE eRFPowerState);
void rtl8225z4_rf_sleep(struct net_device *dev);
void rtl8225z4_rf_wakeup(struct net_device *dev);
diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
index 4e01653..9b676e0 100644
--- a/drivers/staging/rtl8187se/r8180_wx.c
+++ b/drivers/staging/rtl8187se/r8180_wx.c
@@ -21,9 +21,10 @@
#include "r8180.h"
#include "r8180_hw.h"
+#include <net/iw_handler.h>
#include "ieee80211/dot11d.h"
-u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
+static u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};
#define RATE_COUNT ARRAY_SIZE(rtl8180_rates)
@@ -61,7 +62,7 @@ static int r8180_wx_set_key(struct net_device *dev,
return 0;
if (erq->length > 0) {
- u32* tkey = (u32*) key;
+ u32 *tkey = (u32 *) key;
priv->key0[0] = tkey[0];
priv->key0[1] = tkey[1];
priv->key0[2] = tkey[2];
@@ -74,8 +75,9 @@ static int r8180_wx_set_key(struct net_device *dev,
}
-static int r8180_wx_set_beaconinterval(struct net_device *dev, struct iw_request_info *aa,
- union iwreq_data *wrqu, char *b)
+static int r8180_wx_set_beaconinterval(struct net_device *dev,
+ struct iw_request_info *aa,
+ union iwreq_data *wrqu, char *b)
{
int *parms = (int *)b;
int bi = parms[0];
@@ -295,7 +297,7 @@ static int rtl8180_wx_get_range(struct net_device *dev,
}
if (val == IW_MAX_FREQUENCIES)
- break;
+ break;
}
range->num_frequency = val;
@@ -311,14 +313,14 @@ static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
{
struct r8180_priv *priv = ieee80211_priv(dev);
int ret;
- struct ieee80211_device* ieee = priv->ieee80211;
+ struct ieee80211_device *ieee = priv->ieee80211;
if (priv->ieee80211->bHwRadioOff)
return 0;
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
- struct iw_scan_req* req = (struct iw_scan_req*)b;
+ struct iw_scan_req *req = (struct iw_scan_req *)b;
if (req->essid_len) {
ieee->current_network.ssid_len = req->essid_len;
memcpy(ieee->current_network.ssid, req->essid, req->essid_len);
@@ -473,9 +475,8 @@ static int r8180_wx_get_frag(struct net_device *dev,
static int r8180_wx_set_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *awrq, char *extra)
{
int ret;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -516,7 +517,8 @@ static int r8180_wx_set_enc(struct net_device *dev,
down(&priv->wx_sem);
- if (priv->hw_wep) ret = r8180_wx_set_key(dev, info, wrqu, key);
+ if (priv->hw_wep)
+ ret = r8180_wx_set_key(dev, info, wrqu, key);
else {
DMESG("Setting SW wep key");
ret = ieee80211_wx_set_encode(priv->ieee80211, info, wrqu, key);
@@ -537,11 +539,13 @@ static int r8180_wx_get_enc(struct net_device *dev,
}
-static int r8180_wx_set_scan_type(struct net_device *dev, struct iw_request_info *aa, union
- iwreq_data *wrqu, char *p) {
+static int r8180_wx_set_scan_type(struct net_device *dev,
+ struct iw_request_info *aa,
+ union iwreq_data *wrqu, char *p)
+{
struct r8180_priv *priv = ieee80211_priv(dev);
- int *parms = (int*)p;
+ int *parms = (int *)p;
int mode = parms[0];
if (priv->ieee80211->bHwRadioOff)
@@ -553,8 +557,8 @@ static int r8180_wx_set_scan_type(struct net_device *dev, struct iw_request_info
}
static int r8180_wx_set_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
int err = 0;
@@ -601,8 +605,8 @@ exit:
}
static int r8180_wx_get_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -625,8 +629,8 @@ static int r8180_wx_get_retry(struct net_device *dev,
}
static int r8180_wx_get_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
if (priv->rf_set_sens == NULL)
@@ -637,8 +641,8 @@ static int r8180_wx_get_sens(struct net_device *dev,
static int r8180_wx_set_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -666,8 +670,8 @@ exit:
static int r8180_wx_set_rawtx(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
int ret;
@@ -686,8 +690,8 @@ static int r8180_wx_set_rawtx(struct net_device *dev,
}
static int r8180_wx_get_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
int ret;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -702,8 +706,8 @@ static int r8180_wx_get_power(struct net_device *dev,
}
static int r8180_wx_set_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
int ret;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -728,8 +732,8 @@ static int r8180_wx_set_power(struct net_device *dev,
}
static int r8180_wx_set_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -750,8 +754,8 @@ static int r8180_wx_set_rts(struct net_device *dev,
return 0;
}
static int r8180_wx_get_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -841,8 +845,8 @@ static int r8180_wx_set_iwmode(struct net_device *dev,
return ret;
}
static int r8180_wx_get_preamble(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -858,8 +862,8 @@ static int r8180_wx_get_preamble(struct net_device *dev,
return 0;
}
static int r8180_wx_set_preamble(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
int ret = 0;
@@ -872,7 +876,7 @@ static int r8180_wx_set_preamble(struct net_device *dev,
if (*extra < 0 || *extra > 2)
ret = -1;
else
- priv->plcp_preamble_mode = *((short *)extra) ;
+ priv->plcp_preamble_mode = *((short *)extra);
@@ -881,8 +885,8 @@ static int r8180_wx_set_preamble(struct net_device *dev,
return ret;
}
static int r8180_wx_get_siglevel(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
int ret = 0;
@@ -900,8 +904,8 @@ static int r8180_wx_get_siglevel(struct net_device *dev,
return ret;
}
static int r8180_wx_get_sigqual(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
int ret = 0;
@@ -959,8 +963,8 @@ static int r8180_wx_reset_stats(struct net_device *dev,
}
static int r8180_wx_radio_on(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -978,8 +982,8 @@ static int r8180_wx_radio_on(struct net_device *dev,
}
static int r8180_wx_radio_off(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -996,8 +1000,8 @@ static int r8180_wx_radio_off(struct net_device *dev,
}
static int r8180_wx_get_channelplan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -1013,8 +1017,8 @@ static int r8180_wx_get_channelplan(struct net_device *dev,
return 0;
}
static int r8180_wx_set_channelplan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
int *val = (int *)extra;
@@ -1035,7 +1039,7 @@ static int r8180_wx_set_channelplan(struct net_device *dev,
/* Set new channel map */
for (i = 1; i <= DefaultChannelPlan[*val].Len; i++)
GET_DOT11D_INFO(priv->ieee80211)->channel_map[DefaultChannelPlan[*val].Channel[i-1]] = 1;
-
+
}
up(&priv->wx_sem);
@@ -1043,8 +1047,8 @@ static int r8180_wx_set_channelplan(struct net_device *dev,
}
static int r8180_wx_get_version(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
/* struct ieee80211_device *ieee; */
@@ -1059,8 +1063,8 @@ static int r8180_wx_get_version(struct net_device *dev,
/* added by amy 080818 */
/*receive datarate from user typing valid rate is from 2 to 108 (1 - 54M), if input 0, return to normal rate adaptive. */
static int r8180_wx_set_forcerate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u8 forcerate = *extra;
@@ -1070,8 +1074,7 @@ static int r8180_wx_set_forcerate(struct net_device *dev,
printk("==============>%s(): forcerate is %d\n", __func__, forcerate);
if ((forcerate == 2) || (forcerate == 4) || (forcerate == 11) || (forcerate == 22) || (forcerate == 12) ||
(forcerate == 18) || (forcerate == 24) || (forcerate == 36) || (forcerate == 48) || (forcerate == 72) ||
- (forcerate == 96) || (forcerate == 108))
- {
+ (forcerate == 96) || (forcerate == 108)) {
priv->ForcedDataRate = 1;
priv->ieee80211->rate = forcerate * 5;
} else if (forcerate == 0) {
@@ -1084,8 +1087,8 @@ static int r8180_wx_set_forcerate(struct net_device *dev,
}
static int r8180_wx_set_enc_ext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -1118,8 +1121,8 @@ static int r8180_wx_set_auth(struct net_device *dev,
}
static int r8180_wx_set_mlme(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
int ret = 0;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -1156,65 +1159,48 @@ static int r8180_wx_set_gen_ie(struct net_device *dev,
}
-static iw_handler r8180_wx_handlers[] = {
- NULL, /* SIOCSIWCOMMIT */
- r8180_wx_get_name, /* SIOCGIWNAME */
- dummy, /* SIOCSIWNWID */
- dummy, /* SIOCGIWNWID */
- r8180_wx_set_freq, /* SIOCSIWFREQ */
- r8180_wx_get_freq, /* SIOCGIWFREQ */
- r8180_wx_set_mode, /* SIOCSIWMODE */
- r8180_wx_get_mode, /* SIOCGIWMODE */
- r8180_wx_set_sens, /* SIOCSIWSENS */
- r8180_wx_get_sens, /* SIOCGIWSENS */
- NULL, /* SIOCSIWRANGE */
- rtl8180_wx_get_range, /* SIOCGIWRANGE */
- NULL, /* SIOCSIWPRIV */
- NULL, /* SIOCGIWPRIV */
- NULL, /* SIOCSIWSTATS */
- NULL, /* SIOCGIWSTATS */
- dummy, /* SIOCSIWSPY */
- dummy, /* SIOCGIWSPY */
- NULL, /* SIOCGIWTHRSPY */
- NULL, /* SIOCWIWTHRSPY */
- r8180_wx_set_wap, /* SIOCSIWAP */
- r8180_wx_get_wap, /* SIOCGIWAP */
- r8180_wx_set_mlme, /* SIOCSIWMLME*/
- dummy, /* SIOCGIWAPLIST -- deprecated */
- r8180_wx_set_scan, /* SIOCSIWSCAN */
- r8180_wx_get_scan, /* SIOCGIWSCAN */
- r8180_wx_set_essid, /* SIOCSIWESSID */
- r8180_wx_get_essid, /* SIOCGIWESSID */
- dummy, /* SIOCSIWNICKN */
- dummy, /* SIOCGIWNICKN */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- r8180_wx_set_rate, /* SIOCSIWRATE */
- r8180_wx_get_rate, /* SIOCGIWRATE */
- r8180_wx_set_rts, /* SIOCSIWRTS */
- r8180_wx_get_rts, /* SIOCGIWRTS */
- r8180_wx_set_frag, /* SIOCSIWFRAG */
- r8180_wx_get_frag, /* SIOCGIWFRAG */
- dummy, /* SIOCSIWTXPOW */
- dummy, /* SIOCGIWTXPOW */
- r8180_wx_set_retry, /* SIOCSIWRETRY */
- r8180_wx_get_retry, /* SIOCGIWRETRY */
- r8180_wx_set_enc, /* SIOCSIWENCODE */
- r8180_wx_get_enc, /* SIOCGIWENCODE */
- r8180_wx_set_power, /* SIOCSIWPOWER */
- r8180_wx_get_power, /* SIOCGIWPOWER */
- NULL, /*---hole---*/
- NULL, /*---hole---*/
- r8180_wx_set_gen_ie, /* SIOCSIWGENIE */
- NULL, /* SIOCSIWGENIE */
- r8180_wx_set_auth, /* SIOCSIWAUTH */
- NULL, /* SIOCSIWAUTH */
- r8180_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
- NULL, /* SIOCSIWENCODEEXT */
- NULL, /* SIOCSIWPMKSA */
- NULL, /*---hole---*/
-};
+static const iw_handler r8180_wx_handlers[] = {
+ IW_HANDLER(SIOCGIWNAME, r8180_wx_get_name),
+ IW_HANDLER(SIOCSIWNWID, dummy),
+ IW_HANDLER(SIOCGIWNWID, dummy),
+ IW_HANDLER(SIOCSIWFREQ, r8180_wx_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, r8180_wx_get_freq),
+ IW_HANDLER(SIOCSIWMODE, r8180_wx_set_mode),
+ IW_HANDLER(SIOCGIWMODE, r8180_wx_get_mode),
+ IW_HANDLER(SIOCSIWSENS, r8180_wx_set_sens),
+ IW_HANDLER(SIOCGIWSENS, r8180_wx_get_sens),
+ IW_HANDLER(SIOCGIWRANGE, rtl8180_wx_get_range),
+ IW_HANDLER(SIOCSIWSPY, dummy),
+ IW_HANDLER(SIOCGIWSPY, dummy),
+ IW_HANDLER(SIOCSIWAP, r8180_wx_set_wap),
+ IW_HANDLER(SIOCGIWAP, r8180_wx_get_wap),
+ IW_HANDLER(SIOCSIWMLME, r8180_wx_set_mlme),
+ IW_HANDLER(SIOCGIWAPLIST, dummy), /* deprecated */
+ IW_HANDLER(SIOCSIWSCAN, r8180_wx_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, r8180_wx_get_scan),
+ IW_HANDLER(SIOCSIWESSID, r8180_wx_set_essid),
+ IW_HANDLER(SIOCGIWESSID, r8180_wx_get_essid),
+ IW_HANDLER(SIOCSIWNICKN, dummy),
+ IW_HANDLER(SIOCGIWNICKN, dummy),
+ IW_HANDLER(SIOCSIWRATE, r8180_wx_set_rate),
+ IW_HANDLER(SIOCGIWRATE, r8180_wx_get_rate),
+ IW_HANDLER(SIOCSIWRTS, r8180_wx_set_rts),
+ IW_HANDLER(SIOCGIWRTS, r8180_wx_get_rts),
+ IW_HANDLER(SIOCSIWFRAG, r8180_wx_set_frag),
+ IW_HANDLER(SIOCGIWFRAG, r8180_wx_get_frag),
+ IW_HANDLER(SIOCSIWTXPOW, dummy),
+ IW_HANDLER(SIOCGIWTXPOW, dummy),
+ IW_HANDLER(SIOCSIWRETRY, r8180_wx_set_retry),
+ IW_HANDLER(SIOCGIWRETRY, r8180_wx_get_retry),
+ IW_HANDLER(SIOCSIWENCODE, r8180_wx_set_enc),
+ IW_HANDLER(SIOCGIWENCODE, r8180_wx_get_enc),
+ IW_HANDLER(SIOCSIWPOWER, r8180_wx_set_power),
+ IW_HANDLER(SIOCGIWPOWER, r8180_wx_get_power),
+ IW_HANDLER(SIOCSIWGENIE, r8180_wx_set_gen_ie),
+ IW_HANDLER(SIOCSIWAUTH, r8180_wx_set_auth),
+ IW_HANDLER(SIOCSIWENCODEEXT, r8180_wx_set_enc_ext),
+};
static const struct iw_priv_args r8180_private_args[] = {
{
@@ -1350,7 +1336,7 @@ static iw_handler r8180_private_handler[] = {
};
static inline int is_same_network(struct ieee80211_network *src,
- struct ieee80211_network *dst,
+ struct ieee80211_network *dst,
struct ieee80211_device *ieee)
{
/* A network is only a duplicate if the channel, BSSID, ESSID
@@ -1358,22 +1344,35 @@ static inline int is_same_network(struct ieee80211_network *src,
* We treat all <hidden> with the same BSSID and channel
* as one network
*/
- return (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */
- (src->channel == dst->channel) &&
- !memcmp(src->bssid, dst->bssid, ETH_ALEN) &&
- (!memcmp(src->ssid, dst->ssid, src->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */
- ((src->capability & WLAN_CAPABILITY_IBSS) ==
- (dst->capability & WLAN_CAPABILITY_IBSS)) &&
- ((src->capability & WLAN_CAPABILITY_BSS) ==
- (dst->capability & WLAN_CAPABILITY_BSS)));
+ if (src->channel != dst->channel)
+ return 0;
+
+ if (memcmp(src->bssid, dst->bssid, ETH_ALEN) != 0)
+ return 0;
+
+ if (ieee->iw_mode != IW_MODE_INFRA) {
+ if (src->ssid_len != dst->ssid_len)
+ return 0;
+ if (memcmp(src->ssid, dst->ssid, src->ssid_len) != 0)
+ return 0;
+ }
+
+ if ((src->capability & WLAN_CAPABILITY_IBSS) !=
+ (dst->capability & WLAN_CAPABILITY_IBSS))
+ return 0;
+ if ((src->capability & WLAN_CAPABILITY_BSS) !=
+ (dst->capability & WLAN_CAPABILITY_BSS))
+ return 0;
+
+ return 1;
}
/* WB modified to show signal to GUI on 18-01-2008 */
static struct iw_statistics *r8180_get_wireless_stats(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
- struct iw_statistics* wstats = &priv->wstats;
+ struct ieee80211_device *ieee = priv->ieee80211;
+ struct iw_statistics *wstats = &priv->wstats;
int tmp_level = 0;
int tmp_qual = 0;
int tmp_noise = 0;
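The r8180_wx_handlers[] rewrite above replaces a positional table, which had to list every SIOCIW slot in order and pad the gaps with NULL and "hole" entries, with designated initializers through the IW_HANDLER() macro from <net/iw_handler.h>, which the file now includes. Each handler lands at the index derived from its ioctl number, so unimplemented commands are simply omitted. A hedged, stand-alone sketch of the same idiom with an invented handler:

#include <linux/netdevice.h>
#include <linux/string.h>
#include <net/iw_handler.h>	/* iw_handler typedef and IW_HANDLER() */

static int demo_get_name(struct net_device *dev, struct iw_request_info *info,
			 union iwreq_data *wrqu, char *extra)
{
	strlcpy(wrqu->name, "802.11", IFNAMSIZ);
	return 0;
}

/* IW_HANDLER(id, func) expands to [IW_IOCTL_IDX(id)] = func, so the table is
 * sparse: only the ioctls actually implemented need an entry. */
static const iw_handler demo_wx_handlers[] = {
	IW_HANDLER(SIOCGIWNAME, demo_get_name),
};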
diff --git a/drivers/staging/rtl8187se/r8185b_init.c b/drivers/staging/rtl8187se/r8185b_init.c
index dc52a3e..c8b9baf 100644
--- a/drivers/staging/rtl8187se/r8185b_init.c
+++ b/drivers/staging/rtl8187se/r8185b_init.c
@@ -497,7 +497,7 @@ static void ZEBRA_Config_85BASIC_HardCode(struct net_device *dev)
*/
RF_WriteReg(dev, 0x0f, (priv->XtalCal_Xin<<5) |
(priv->XtalCal_Xout<<1) | BIT11 | BIT9); mdelay(1);
- printk("ZEBRA_Config_85BASIC_HardCode(): (%02x)\n",
+ netdev_info(dev, "ZEBRA_Config_85BASIC_HardCode(): (%02x)\n",
(priv->XtalCal_Xin<<5) | (priv->XtalCal_Xout<<1) |
BIT11 | BIT9);
} else {
@@ -870,9 +870,10 @@ static u8 GetSupportedWirelessMode8185(struct net_device *dev)
return WIRELESS_MODE_B | WIRELESS_MODE_G;
}
-static void ActUpdateChannelAccessSetting(struct net_device *dev,
- WIRELESS_MODE WirelessMode,
- PCHANNEL_ACCESS_SETTING ChnlAccessSetting)
+static void
+ActUpdateChannelAccessSetting(struct net_device *dev,
+ WIRELESS_MODE WirelessMode,
+ PCHANNEL_ACCESS_SETTING ChnlAccessSetting)
{
AC_CODING eACI;
@@ -1084,7 +1085,7 @@ static bool MgntDisconnect(struct net_device *dev, u8 asRsn)
* PASSIVE LEVEL.
*/
static bool SetRFPowerState(struct net_device *dev,
- RT_RF_POWER_STATE eRFPowerState)
+ RT_RF_POWER_STATE eRFPowerState)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bResult = false;
@@ -1097,8 +1098,8 @@ static bool SetRFPowerState(struct net_device *dev,
return bResult;
}
-bool MgntActSet_RF_State(struct net_device *dev,
- RT_RF_POWER_STATE StateToSet, u32 ChangeSource)
+bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet,
+ u32 ChangeSource)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bActionAllowed = false;
@@ -1125,7 +1126,7 @@ bool MgntActSet_RF_State(struct net_device *dev,
* to be stuck here.
*/
if (RFWaitCounter > 1000) { /* 1sec */
- printk("MgntActSet_RF_State(): Wait too long to set RF\n");
+ netdev_info(dev, "MgntActSet_RF_State(): Wait too long to set RF\n");
/* TODO: Reset RF state? */
return false;
}
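r8185b_init.c above converts two bare printk() calls to netdev_info(). The netdev_* helpers prefix each message with the driver and interface name, which is what makes the log useful once more than one card is present. A brief sketch of the pattern; the function and message are illustrative:

#include <linux/netdevice.h>

static void report_xtal(struct net_device *dev, u8 val)
{
	/* printk(KERN_INFO ...) would not say which device is talking;
	 * netdev_info() prepends "driver ifname:" automatically. */
	netdev_info(dev, "crystal calibration word: 0x%02x\n", val);
}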
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 2f548eb..8ebe6bc 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -33,7 +33,7 @@ void init_mlme_ap_info(struct adapter *padapter)
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
- _rtw_spinlock_init(&pmlmepriv->bcn_update_lock);
+ spin_lock_init(&pmlmepriv->bcn_update_lock);
/* for ACL */
_rtw_init_queue(&pacl_list->acl_node_q);
@@ -43,7 +43,6 @@ void init_mlme_ap_info(struct adapter *padapter)
void free_mlme_ap_info(struct adapter *padapter)
{
- unsigned long irqL;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -62,11 +61,9 @@ void free_mlme_ap_info(struct adapter *padapter)
/* free bc/mc sta_info */
psta = rtw_get_bcmc_stainfo(padapter);
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(padapter, psta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
-
- _rtw_spinlock_free(&pmlmepriv->bcn_update_lock);
+ spin_unlock_bh(&(pstapriv->sta_hash_lock));
}
static void update_BCNTIM(struct adapter *padapter)
@@ -277,7 +274,6 @@ static u8 chk_sta_is_alive(struct sta_info *psta)
void expire_timeout_chk(struct adapter *padapter)
{
- unsigned long irqL;
struct list_head *phead, *plist;
u8 updated = 0;
struct sta_info *psta = NULL;
@@ -286,7 +282,7 @@ void expire_timeout_chk(struct adapter *padapter)
char chk_alive_list[NUM_STA];
int i;
- _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->auth_list_lock);
phead = &pstapriv->auth_list;
plist = get_next(phead);
@@ -305,22 +301,22 @@ void expire_timeout_chk(struct adapter *padapter)
DBG_88E("auth expire %6ph\n",
psta->hwaddr);
- _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->auth_list_lock);
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(padapter, psta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ spin_unlock_bh(&(pstapriv->sta_hash_lock));
- _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->auth_list_lock);
}
}
}
- _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->auth_list_lock);
psta = NULL;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
@@ -387,7 +383,7 @@ void expire_timeout_chk(struct adapter *padapter)
}
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
if (chk_alive_num) {
u8 backup_oper_channel = 0;
@@ -424,11 +420,11 @@ void expire_timeout_chk(struct adapter *padapter)
psta->keep_alive_trycnt = 0;
DBG_88E("asoc expire %pM, state = 0x%x\n", (psta->hwaddr), psta->state);
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
rtw_list_delete(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
}
if (backup_oper_channel > 0) /* back to the original operation channel */
@@ -535,7 +531,6 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
static void update_bmc_sta(struct adapter *padapter)
{
- unsigned long irqL;
u32 init_rate = 0;
unsigned char network_type, raid;
int i, supportRateNum = 0;
@@ -604,9 +599,9 @@ static void update_bmc_sta(struct adapter *padapter)
rtw_stassoc_hw_rpt(padapter, psta);
- _enter_critical_bh(&psta->lock, &irqL);
+ spin_lock_bh(&psta->lock);
psta->state = _FW_LINKED;
- _exit_critical_bh(&psta->lock, &irqL);
+ spin_unlock_bh(&psta->lock);
} else {
DBG_88E("add_RATid_bmc_sta error!\n");
@@ -622,7 +617,6 @@ static void update_bmc_sta(struct adapter *padapter)
void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
{
- unsigned long irqL;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
@@ -679,9 +673,9 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
_rtw_memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
- _enter_critical_bh(&psta->lock, &irqL);
+ spin_lock_bh(&psta->lock);
psta->state |= _FW_LINKED;
- _exit_critical_bh(&psta->lock, &irqL);
+ spin_unlock_bh(&psta->lock);
}
static void update_hw_ht_param(struct adapter *padapter)
@@ -1134,7 +1128,6 @@ void rtw_set_macaddr_acl(struct adapter *padapter, int mode)
int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
{
- unsigned long irqL;
struct list_head *plist, *phead;
u8 added = false;
int i, ret = 0;
@@ -1148,7 +1141,7 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
if ((NUM_ACL-1) < pacl_list->num)
return -1;
- _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
plist = get_next(phead);
@@ -1166,12 +1159,12 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
}
}
- _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_unlock_bh(&(pacl_node_q->lock));
if (added)
return ret;
- _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_lock_bh(&(pacl_node_q->lock));
for (i = 0; i < NUM_ACL; i++) {
paclnode = &pacl_list->aclnode[i];
@@ -1193,14 +1186,13 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
DBG_88E("%s, acl_num =%d\n", __func__, pacl_list->num);
- _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_unlock_bh(&(pacl_node_q->lock));
return ret;
}
int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
{
- unsigned long irqL;
struct list_head *plist, *phead;
int ret = 0;
struct rtw_wlan_acl_node *paclnode;
@@ -1210,7 +1202,7 @@ int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
DBG_88E("%s(acl_num =%d) =%pM\n", __func__, pacl_list->num, (addr));
- _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
plist = get_next(phead);
@@ -1230,7 +1222,7 @@ int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
}
}
- _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_unlock_bh(&(pacl_node_q->lock));
DBG_88E("%s, acl_num =%d\n", __func__, pacl_list->num);
return ret;
@@ -1373,7 +1365,6 @@ static void update_bcn_vendor_spec_ie(struct adapter *padapter, u8 *oui)
void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
{
- unsigned long irqL;
struct mlme_priv *pmlmepriv;
struct mlme_ext_priv *pmlmeext;
@@ -1386,7 +1377,7 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
if (!pmlmeext->bstart_bss)
return;
- _enter_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+ spin_lock_bh(&pmlmepriv->bcn_update_lock);
switch (ie_id) {
case 0xFF:
@@ -1416,7 +1407,7 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
pmlmepriv->update_bcn = true;
- _exit_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->bcn_update_lock);
if (tx)
set_tx_beacon_cmd(padapter);
@@ -1505,12 +1496,11 @@ void associated_clients_update(struct adapter *padapter, u8 updated)
{
/* update associcated stations cap. */
if (updated) {
- unsigned long irqL;
struct list_head *phead, *plist;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
@@ -1523,7 +1513,7 @@ void associated_clients_update(struct adapter *padapter, u8 updated)
VCS_update(padapter, psta);
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
}
}
@@ -1731,7 +1721,6 @@ u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
bool active, u16 reason)
{
- unsigned long irqL;
u8 beacon_updated = false;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1753,9 +1742,9 @@ u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
rtw_clearstakey_cmd(padapter, (u8 *)psta, (u8)(psta->mac_id + 3), true);
- _enter_critical_bh(&psta->lock, &irqL);
+ spin_lock_bh(&psta->lock);
psta->state &= ~_FW_LINKED;
- _exit_critical_bh(&psta->lock, &irqL);
+ spin_unlock_bh(&psta->lock);
rtw_indicate_sta_disassoc_event(padapter, psta);
@@ -1763,16 +1752,15 @@ u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
beacon_updated = bss_cap_update_on_sta_leave(padapter, psta);
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(padapter, psta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ spin_unlock_bh(&(pstapriv->sta_hash_lock));
return beacon_updated;
}
int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
{
- unsigned long irqL;
struct list_head *phead, *plist;
int ret = 0;
struct sta_info *psta = NULL;
@@ -1787,7 +1775,7 @@ int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
DBG_88E(FUNC_NDEV_FMT" with ch:%u, offset:%u\n",
FUNC_NDEV_ARG(padapter->pnetdev), new_ch, ch_offset);
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
@@ -1799,7 +1787,7 @@ int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
issue_action_spct_ch_switch(padapter, psta->hwaddr, new_ch, ch_offset);
psta->expire_to = ((pstapriv->expire_to * 2) > 5) ? 5 : (pstapriv->expire_to * 2);
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
issue_action_spct_ch_switch(padapter, bc_addr, new_ch, ch_offset);
@@ -1808,7 +1796,6 @@ int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
int rtw_sta_flush(struct adapter *padapter)
{
- unsigned long irqL;
struct list_head *phead, *plist;
int ret = 0;
struct sta_info *psta = NULL;
@@ -1822,7 +1809,7 @@ int rtw_sta_flush(struct adapter *padapter)
if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
return ret;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
@@ -1837,7 +1824,7 @@ int rtw_sta_flush(struct adapter *padapter)
ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
issue_deauth(padapter, bc_addr, WLAN_REASON_DEAUTH_LEAVING);
@@ -1935,7 +1922,6 @@ void start_ap_mode(struct adapter *padapter)
void stop_ap_mode(struct adapter *padapter)
{
- unsigned long irqL;
struct list_head *phead, *plist;
struct rtw_wlan_acl_node *paclnode;
struct sta_info *psta = NULL;
@@ -1954,7 +1940,7 @@ void stop_ap_mode(struct adapter *padapter)
padapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
/* for ACL */
- _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
plist = get_next(phead);
while ((rtw_end_of_queue_search(phead, plist)) == false) {
@@ -1969,7 +1955,7 @@ void stop_ap_mode(struct adapter *padapter)
pacl_list->num--;
}
}
- _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_unlock_bh(&(pacl_node_q->lock));
DBG_88E("%s, free acl_node_queue, num =%d\n", __func__, pacl_list->num);
@@ -1979,9 +1965,9 @@ void stop_ap_mode(struct adapter *padapter)
rtw_free_all_stainfo(padapter);
psta = rtw_get_bcmc_stainfo(padapter);
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(padapter, psta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ spin_unlock_bh(&(pstapriv->sta_hash_lock));
rtw_init_bcmc_stainfo(padapter);
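The rtw_ap.c hunks above drop the driver's _enter_critical_bh()/_exit_critical_bh() wrappers, along with their unused irqL flags variables, in favour of the kernel's own spin_lock_bh()/spin_unlock_bh(). A minimal sketch of that locking pattern on an invented station list (the structure and names are illustrative only):

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical list shared between process context and a softirq. */
struct example_sta_priv {
	spinlock_t	 asoc_list_lock;
	struct list_head asoc_list;
};

static void example_init(struct example_sta_priv *sp)
{
	spin_lock_init(&sp->asoc_list_lock);	/* replaces _rtw_spinlock_init() */
	INIT_LIST_HEAD(&sp->asoc_list);
}

static void example_del_entry(struct example_sta_priv *sp, struct list_head *entry)
{
	/*
	 * The _bh variants disable bottom halves; no saved-flags argument
	 * is needed, so the old "unsigned long irqL" local simply goes away.
	 */
	spin_lock_bh(&sp->asoc_list_lock);
	list_del_init(entry);
	spin_unlock_bh(&sp->asoc_list_lock);
}

A kernel spinlock also needs no teardown, which is why the _rtw_spinlock_free() calls are deleted outright rather than converted.
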
diff --git a/drivers/staging/rtl8188eu/core/rtw_br_ext.c b/drivers/staging/rtl8188eu/core/rtw_br_ext.c
index 9f40742..75e38d4 100644
--- a/drivers/staging/rtl8188eu/core/rtw_br_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_br_ext.c
@@ -89,7 +89,7 @@ static inline int __nat25_add_pppoe_tag(struct sk_buff *skb, struct pppoe_tag *t
struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
int data_len;
- data_len = tag->tag_len + TAG_HDR_LEN;
+ data_len = be16_to_cpu(tag->tag_len) + TAG_HDR_LEN;
if (skb_tailroom(skb) < data_len) {
_DEBUG_ERR("skb_tailroom() failed in add SID tag!\n");
return -1;
@@ -155,7 +155,7 @@ static inline void __nat25_generate_ipv4_network_addr(unsigned char *networkAddr
static inline void __nat25_generate_ipx_network_addr_with_node(unsigned char *networkAddr,
- unsigned int *ipxNetAddr, unsigned char *ipxNodeAddr)
+ __be32 *ipxNetAddr, unsigned char *ipxNodeAddr)
{
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
@@ -166,7 +166,7 @@ static inline void __nat25_generate_ipx_network_addr_with_node(unsigned char *ne
static inline void __nat25_generate_ipx_network_addr_with_socket(unsigned char *networkAddr,
- unsigned int *ipxNetAddr, unsigned short *ipxSocketAddr)
+ __be32 *ipxNetAddr, __be16 *ipxSocketAddr)
{
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
@@ -177,7 +177,7 @@ static inline void __nat25_generate_ipx_network_addr_with_socket(unsigned char *
static inline void __nat25_generate_apple_network_addr(unsigned char *networkAddr,
- unsigned short *network, unsigned char *node)
+ __be16 *network, unsigned char *node)
{
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
@@ -187,7 +187,7 @@ static inline void __nat25_generate_apple_network_addr(unsigned char *networkAdd
}
static inline void __nat25_generate_pppoe_network_addr(unsigned char *networkAddr,
- unsigned char *ac_mac, unsigned short *sid)
+ unsigned char *ac_mac, __be16 *sid)
{
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
@@ -197,7 +197,7 @@ static inline void __nat25_generate_pppoe_network_addr(unsigned char *networkAdd
}
static void __nat25_generate_ipv6_network_addr(unsigned char *networkAddr,
- unsigned int *ipAddr)
+ __be32 *ipAddr)
{
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
@@ -331,7 +331,7 @@ static inline int __nat25_network_hash(unsigned char *networkAddr)
static inline void __network_hash_link(struct adapter *priv,
struct nat25_network_db_entry *ent, int hash)
{
- /* Caller must _enter_critical_bh already! */
+ /* Caller must spin_lock_bh already! */
ent->next_hash = priv->nethash[hash];
if (ent->next_hash != NULL)
ent->next_hash->pprev_hash = &ent->next_hash;
@@ -341,7 +341,7 @@ static inline void __network_hash_link(struct adapter *priv,
static inline void __network_hash_unlink(struct nat25_network_db_entry *ent)
{
- /* Caller must _enter_critical_bh already! */
+ /* Caller must spin_lock_bh already! */
*(ent->pprev_hash) = ent->next_hash;
if (ent->next_hash != NULL)
ent->next_hash->pprev_hash = ent->pprev_hash;
@@ -353,8 +353,7 @@ static int __nat25_db_network_lookup_and_replace(struct adapter *priv,
struct sk_buff *skb, unsigned char *networkAddr)
{
struct nat25_network_db_entry *db;
- unsigned long irqL;
- _enter_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_lock_bh(&priv->br_ext_lock);
db = priv->nethash[__nat25_network_hash(networkAddr)];
while (db != NULL) {
@@ -390,12 +389,12 @@ static int __nat25_db_network_lookup_and_replace(struct adapter *priv,
db->networkAddr[15],
db->networkAddr[16]);
}
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
return 1;
}
db = db->next_hash;
}
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
return 0;
}
@@ -404,23 +403,22 @@ static void __nat25_db_network_insert(struct adapter *priv,
{
struct nat25_network_db_entry *db;
int hash;
- unsigned long irqL;
- _enter_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_lock_bh(&priv->br_ext_lock);
hash = __nat25_network_hash(networkAddr);
db = priv->nethash[hash];
while (db != NULL) {
if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
memcpy(db->macAddr, macAddr, ETH_ALEN);
db->ageing_timer = jiffies;
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
return;
}
db = db->next_hash;
}
db = (struct nat25_network_db_entry *) rtw_malloc(sizeof(*db));
if (db == NULL) {
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
return;
}
memcpy(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN);
@@ -430,7 +428,7 @@ static void __nat25_db_network_insert(struct adapter *priv,
__network_hash_link(priv, db, hash);
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
}
static void __nat25_db_print(struct adapter *priv)
@@ -444,8 +442,7 @@ static void __nat25_db_print(struct adapter *priv)
void nat25_db_cleanup(struct adapter *priv)
{
int i;
- unsigned long irqL;
- _enter_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_lock_bh(&priv->br_ext_lock);
for (i = 0; i < NAT25_HASH_SIZE; i++) {
struct nat25_network_db_entry *f;
@@ -464,14 +461,13 @@ void nat25_db_cleanup(struct adapter *priv)
f = g;
}
}
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
}
void nat25_db_expire(struct adapter *priv)
{
int i;
- unsigned long irqL;
- _enter_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_lock_bh(&priv->br_ext_lock);
for (i = 0; i < NAT25_HASH_SIZE; i++) {
struct nat25_network_db_entry *f;
@@ -495,7 +491,7 @@ void nat25_db_expire(struct adapter *priv)
f = g;
}
}
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
}
int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
@@ -811,7 +807,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
/* Handle PPPoE frame */
/*---------------------------------------------------*/
struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
- unsigned short *pMagic;
+ __be16 *pMagic;
switch (method) {
case NAT25_CHECK:
@@ -849,7 +845,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
tag->tag_len = htons(MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN+old_tag_len);
/* insert the magic_code+client mac in relay tag */
- pMagic = (unsigned short *)tag->tag_data;
+ pMagic = (__be16 *)tag->tag_data;
*pMagic = htons(MAGIC_CODE);
memcpy(tag->tag_data+MAGIC_CODE_LEN, skb->data+ETH_ALEN, ETH_ALEN);
@@ -912,7 +908,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
return -1;
}
- pMagic = (unsigned short *)tag->tag_data;
+ pMagic = (__be16 *)tag->tag_data;
if (ntohs(*pMagic) != MAGIC_CODE) {
DEBUG_ERR("Can't find MAGIC_CODE in %s packet!\n",
(ph->code == PADO_CODE ? "PADO" : "PADS"));
@@ -1009,7 +1005,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
iph->daddr.s6_addr16[4], iph->daddr.s6_addr16[5], iph->daddr.s6_addr16[6], iph->daddr.s6_addr16[7]);
if (memcmp(&iph->saddr, "\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0", 16)) {
- __nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->saddr);
+ __nat25_generate_ipv6_network_addr(networkAddr, (__be32 *)&iph->saddr);
__nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
__nat25_db_print(priv);
@@ -1020,9 +1016,10 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
struct icmp6hdr *hdr = (struct icmp6hdr *)(skb->data + ETH_HLEN + sizeof(*iph));
hdr->icmp6_cksum = 0;
hdr->icmp6_cksum = csum_ipv6_magic(&iph->saddr, &iph->daddr,
- iph->payload_len,
+ be16_to_cpu(iph->payload_len),
IPPROTO_ICMPV6,
- csum_partial((__u8 *)hdr, iph->payload_len, 0));
+ csum_partial((__u8 *)hdr,
+ be16_to_cpu(iph->payload_len), 0));
}
}
}
@@ -1033,7 +1030,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
iph->saddr.s6_addr16[4], iph->saddr.s6_addr16[5], iph->saddr.s6_addr16[6], iph->saddr.s6_addr16[7],
iph->daddr.s6_addr16[0], iph->daddr.s6_addr16[1], iph->daddr.s6_addr16[2], iph->daddr.s6_addr16[3],
iph->daddr.s6_addr16[4], iph->daddr.s6_addr16[5], iph->daddr.s6_addr16[6], iph->daddr.s6_addr16[7]);
- __nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->daddr);
+ __nat25_generate_ipv6_network_addr(networkAddr, (__be32 *)&iph->daddr);
__nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
return 0;
default:
@@ -1060,8 +1057,7 @@ int nat25_handle_frame(struct adapter *priv, struct sk_buff *skb)
}
if (!priv->ethBrExtInfo.nat25_disable) {
- unsigned long irqL;
- _enter_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_lock_bh(&priv->br_ext_lock);
/*
* This function look up the destination network address from
* the NAT2.5 database. Return value = -1 means that the
@@ -1072,9 +1068,9 @@ int nat25_handle_frame(struct adapter *priv, struct sk_buff *skb)
!memcmp(priv->scdb_ip, skb->data+ETH_HLEN+16, 4)) {
memcpy(skb->data, priv->scdb_mac, ETH_ALEN);
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
} else {
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
retval = nat25_db_handle(priv, skb, NAT25_LOOKUP);
}
@@ -1115,17 +1111,17 @@ struct dhcpMessage {
u_int8_t htype;
u_int8_t hlen;
u_int8_t hops;
- u_int32_t xid;
- u_int16_t secs;
- u_int16_t flags;
- u_int32_t ciaddr;
- u_int32_t yiaddr;
- u_int32_t siaddr;
- u_int32_t giaddr;
+ __be32 xid;
+ __be16 secs;
+ __be16 flags;
+ __be32 ciaddr;
+ __be32 yiaddr;
+ __be32 siaddr;
+ __be32 giaddr;
u_int8_t chaddr[16];
u_int8_t sname[64];
u_int8_t file[128];
- u_int32_t cookie;
+ __be32 cookie;
u_int8_t options[308]; /* 312 - cookie */
};
@@ -1178,21 +1174,16 @@ void *scdb_findEntry(struct adapter *priv, unsigned char *macAddr,
unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
struct nat25_network_db_entry *db;
int hash;
- /* unsigned long irqL; */
- /* _enter_critical_bh(&priv->br_ext_lock, &irqL); */
__nat25_generate_ipv4_network_addr(networkAddr, (unsigned int *)ipAddr);
hash = __nat25_network_hash(networkAddr);
db = priv->nethash[hash];
while (db != NULL) {
- if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
- /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */
+ if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN))
return (void *)db;
- }
db = db->next_hash;
}
- /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */
return NULL;
}
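The rtw_br_ext.c hunks above annotate on-wire fields as __be16/__be32 and convert them with be16_to_cpu()/htons() instead of treating them as plain host integers. A minimal sketch of the idea with an invented wire header (not the driver's real structures):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical big-endian on-wire header. */
struct example_wire_hdr {
	__be16	tag_len;	/* network byte order on the wire */
	__be32	session_id;
};

static u32 example_total_len(const struct example_wire_hdr *hdr)
{
	/* Convert explicitly before doing host-order arithmetic. */
	return be16_to_cpu(hdr->tag_len) + sizeof(*hdr);
}

static void example_set_len(struct example_wire_hdr *hdr, u16 len)
{
	hdr->tag_len = cpu_to_be16(len);	/* equivalent to htons() here */
}

With the fields typed this way, a sparse build (make C=1, typically with CF=-D__CHECK_ENDIAN__ on kernels of this vintage) warns whenever a big-endian value is used as a plain integer.
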
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index f45f4ed..82fe8c4 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -38,9 +38,9 @@ int _rtw_init_cmd_priv (struct cmd_priv *pcmdpriv)
_func_enter_;
- _rtw_init_sema(&(pcmdpriv->cmd_queue_sema), 0);
- /* _rtw_init_sema(&(pcmdpriv->cmd_done_sema), 0); */
- _rtw_init_sema(&(pcmdpriv->terminate_cmdthread_sema), 0);
+ sema_init(&(pcmdpriv->cmd_queue_sema), 0);
+ /* sema_init(&(pcmdpriv->cmd_done_sema), 0); */
+ sema_init(&(pcmdpriv->terminate_cmdthread_sema), 0);
_rtw_init_queue(&(pcmdpriv->cmd_queue));
@@ -84,7 +84,7 @@ int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
_func_enter_;
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
- ATOMIC_SET(&pevtpriv->event_seq, 0);
+ atomic_set(&pevtpriv->event_seq, 0);
pevtpriv->evt_done_cnt = 0;
_init_workitem(&pevtpriv->c2h_wk, c2h_wk_callback, NULL);
@@ -104,7 +104,7 @@ _func_enter_;
_cancel_workitem_sync(&pevtpriv->c2h_wk);
while (pevtpriv->c2h_wk_alive)
- rtw_msleep_os(10);
+ msleep(10);
while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) {
void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue);
@@ -121,10 +121,6 @@ void _rtw_free_cmd_priv (struct cmd_priv *pcmdpriv)
_func_enter_;
if (pcmdpriv) {
- _rtw_spinlock_free(&(pcmdpriv->cmd_queue.lock));
- _rtw_free_sema(&(pcmdpriv->cmd_queue_sema));
- _rtw_free_sema(&(pcmdpriv->terminate_cmdthread_sema));
-
if (pcmdpriv->cmd_allocated_buf)
kfree(pcmdpriv->cmd_allocated_buf);
@@ -153,13 +149,11 @@ _func_enter_;
if (obj == NULL)
goto exit;
- /* _enter_critical_bh(&queue->lock, &irqL); */
- _enter_critical(&queue->lock, &irqL);
+ spin_lock_irqsave(&queue->lock, irqL);
rtw_list_insert_tail(&obj->list, &queue->queue);
- /* _exit_critical_bh(&queue->lock, &irqL); */
- _exit_critical(&queue->lock, &irqL);
+ spin_unlock_irqrestore(&queue->lock, irqL);
exit:
@@ -175,8 +169,7 @@ struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue)
_func_enter_;
- /* _enter_critical_bh(&(queue->lock), &irqL); */
- _enter_critical(&queue->lock, &irqL);
+ spin_lock_irqsave(&queue->lock, irqL);
if (rtw_is_list_empty(&(queue->queue))) {
obj = NULL;
} else {
@@ -184,8 +177,7 @@ _func_enter_;
rtw_list_delete(&obj->list);
}
- /* _exit_critical_bh(&(queue->lock), &irqL); */
- _exit_critical(&queue->lock, &irqL);
+ spin_unlock_irqrestore(&queue->lock, irqL);
_func_exit_;
@@ -262,7 +254,7 @@ _func_enter_;
res = _rtw_enqueue_cmd(&pcmdpriv->cmd_queue, cmd_obj);
if (res == _SUCCESS)
- _rtw_up_sema(&pcmdpriv->cmd_queue_sema);
+ up(&pcmdpriv->cmd_queue_sema);
exit:
@@ -287,7 +279,7 @@ void rtw_cmd_clr_isr(struct cmd_priv *pcmdpriv)
{
_func_enter_;
pcmdpriv->cmd_done_cnt++;
- /* _rtw_up_sema(&(pcmdpriv->cmd_done_sema)); */
+ /* up(&(pcmdpriv->cmd_done_sema)); */
_func_exit_;
}
@@ -330,7 +322,7 @@ _func_enter_;
pcmdbuf = pcmdpriv->cmd_buf;
pcmdpriv->cmdthd_running = true;
- _rtw_up_sema(&pcmdpriv->terminate_cmdthread_sema);
+ up(&pcmdpriv->terminate_cmdthread_sema);
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("start r871x rtw_cmd_thread !!!!\n"));
@@ -416,11 +408,11 @@ post_process:
rtw_free_cmd_obj(pcmd);
} while (1);
- _rtw_up_sema(&pcmdpriv->terminate_cmdthread_sema);
+ up(&pcmdpriv->terminate_cmdthread_sema);
_func_exit_;
- thread_exit();
+ complete_and_exit(NULL, 0);
}
u8 rtw_setstandby_cmd(struct adapter *padapter, uint action)
@@ -534,7 +526,7 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
if (res == _SUCCESS) {
- pmlmepriv->scan_start_time = rtw_get_current_time();
+ pmlmepriv->scan_start_time = jiffies;
_set_timer(&pmlmepriv->scan_to_timer, SCANNING_TIMEOUT);
@@ -1722,7 +1714,7 @@ _func_enter_;
break;
case LPS_CTRL_SPECIAL_PACKET:
/* DBG_88E("LPS_CTRL_SPECIAL_PACKET\n"); */
- pwrpriv->DelayLPSLastTimeStamp = rtw_get_current_time();
+ pwrpriv->DelayLPSLastTimeStamp = jiffies;
LPS_Leave(padapter);
break;
case LPS_CTRL_LEAVE:
@@ -1971,7 +1963,7 @@ static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
while (!val) {
- rtw_msleep_os(100);
+ msleep(100);
cnt++;
@@ -2200,15 +2192,14 @@ _func_exit_;
}
void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
- unsigned long irqL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
_func_enter_;
if (pcmd->res != H2C_SUCCESS) {
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
set_fwstate(pmlmepriv, _FW_LINKED);
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ***Error: disconnect_cmd_callback Fail ***\n."));
@@ -2246,7 +2237,6 @@ _func_exit_;
void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
- unsigned long irqL;
u8 timer_cancelled;
struct sta_info *psta = NULL;
struct wlan_network *pwlan = NULL;
@@ -2263,7 +2253,7 @@ _func_enter_;
_cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
psta = rtw_get_stainfo(&padapter->stapriv, pnetwork->MacAddress);
@@ -2277,18 +2267,16 @@ _func_enter_;
rtw_indicate_connect(padapter);
} else {
- unsigned long irqL;
-
pwlan = _rtw_alloc_network(pmlmepriv);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
if (pwlan == NULL) {
pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
if (pwlan == NULL) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n Error: can't get pwlan in rtw_joinbss_event_callback\n"));
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto createbss_cmd_fail;
}
- pwlan->last_scanned = rtw_get_current_time();
+ pwlan->last_scanned = jiffies;
} else {
rtw_list_insert_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue);
}
@@ -2300,13 +2288,13 @@ _func_enter_;
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
/* we will set _FW_LINKED when there is one more sat to join us (rtw_stassoc_event_callback) */
}
createbss_cmd_fail:
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
rtw_free_cmd_obj(pcmd);
@@ -2332,7 +2320,6 @@ _func_exit_;
void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
- unsigned long irqL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct set_assocsta_parm *passocsta_parm = (struct set_assocsta_parm *)(pcmd->parmbuf);
@@ -2349,13 +2336,13 @@ _func_enter_;
psta->aid = passocsta_rsp->cam_id;
psta->mac_id = passocsta_rsp->cam_id;
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) && (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true))
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
set_fwstate(pmlmepriv, _FW_LINKED);
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
exit:
rtw_free_cmd_obj(pcmd);
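In rtw_cmd.c the _rtw_init_sema()/_rtw_up_sema() wrappers become plain sema_init()/up(), with no explicit free step, and the command thread exits through complete_and_exit(). A minimal sketch of the producer/consumer wakeup that such a command queue relies on, with hypothetical names:

#include <linux/semaphore.h>

/* Hypothetical command queue woken by a counting semaphore. */
struct example_cmd_priv {
	struct semaphore cmd_queue_sema;
};

static void example_cmd_init(struct example_cmd_priv *cp)
{
	sema_init(&cp->cmd_queue_sema, 0);	/* starts "empty" */
}

static void example_cmd_enqueue(struct example_cmd_priv *cp)
{
	/* ...queue the command object under its own lock, then... */
	up(&cp->cmd_queue_sema);		/* wake the command thread */
}

static int example_cmd_thread_wait(struct example_cmd_priv *cp)
{
	/* Sleeps until a producer calls up(); interruptible by signals. */
	return down_interruptible(&cp->cmd_queue_sema);
}
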
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index 0fe5f5d..af32041 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -835,7 +835,6 @@ int proc_get_all_sta_info(char *page, char **start,
off_t offset, int count,
int *eof, void *data)
{
- unsigned long irqL;
struct sta_info *psta;
struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
@@ -848,7 +847,7 @@ int proc_get_all_sta_info(char *page, char **start,
len += snprintf(page + len, count - len, "sta_dz_bitmap=0x%x, tim_bitmap=0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap);
- _enter_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
for (i = 0; i < NUM_STA; i++) {
phead = &(pstapriv->sta_hash[i]);
@@ -882,7 +881,7 @@ int proc_get_all_sta_info(char *page, char **start,
}
}
}
- _exit_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
*eof = 1;
return len;
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 806f56f..6149e3a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -204,7 +204,7 @@ ReadEFuseByte(
/* This fix the problem that Efuse read error in high temperature condition. */
/* Designer says that there shall be some delay after ready bit is set, or the */
/* result will always stay on last data we read. */
- rtw_udelay_os(50);
+ udelay(50);
value32 = rtw_read32(Adapter, EFUSE_CTRL);
*pbuf = (u8)(value32 & 0xff);
diff --git a/drivers/staging/rtl8188eu/core/rtw_io.c b/drivers/staging/rtl8188eu/core/rtw_io.c
index 10c9c65..ff0398f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_io.c
+++ b/drivers/staging/rtl8188eu/core/rtw_io.c
@@ -205,9 +205,9 @@ void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
_func_enter_;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
- RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
- ("rtw_read_mem:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
- adapter->bDriverStopped, adapter->bSurpriseRemoved));
+ RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
+ ("rtw_read_mem:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
+ adapter->bDriverStopped, adapter->bSurpriseRemoved));
return;
}
_read_mem = pintfhdl->io_ops._read_mem;
@@ -239,9 +239,9 @@ void _rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
_func_enter_;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
- RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
- ("rtw_read_port:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
- adapter->bDriverStopped, adapter->bSurpriseRemoved));
+ RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
+ ("rtw_read_port:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
+ adapter->bDriverStopped, adapter->bSurpriseRemoved));
return;
}
@@ -296,7 +296,7 @@ u32 _rtw_write_port_and_wait(struct adapter *adapter, u32 addr, u32 cnt, u8 *pme
if (ret == _SUCCESS)
ret = rtw_sctx_wait(&sctx);
- return ret;
+ return ret;
}
void _rtw_write_port_cancel(struct adapter *adapter)
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index 193f641..e25b39b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -68,7 +68,6 @@ _func_exit_;
u8 rtw_do_join(struct adapter *padapter)
{
- unsigned long irqL;
struct list_head *plist, *phead;
u8 *pibss = NULL;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -77,7 +76,7 @@ u8 rtw_do_join(struct adapter *padapter)
_func_enter_;
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -92,7 +91,7 @@ _func_enter_;
pmlmepriv->to_join = true;
if (_rtw_queue_empty(queue)) {
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
/* when set_ssid/set_bssid for rtw_do_join(), but scanning queue is empty */
@@ -116,7 +115,7 @@ _func_enter_;
} else {
int select_ret;
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
select_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
if (select_ret == _SUCCESS) {
pmlmepriv->to_join = false;
@@ -178,7 +177,6 @@ _func_exit_;
u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
{
- unsigned long irqL;
u8 status = _SUCCESS;
u32 cur_time = 0;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -195,7 +193,7 @@ _func_enter_;
goto exit;
}
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
DBG_88E("Set BSSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
@@ -233,7 +231,7 @@ handle_tkip_countermeasure:
/* should we add something here...? */
if (padapter->securitypriv.btkip_countermeasure) {
- cur_time = rtw_get_current_time();
+ cur_time = jiffies;
if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ) {
padapter->securitypriv.btkip_countermeasure = false;
@@ -253,7 +251,7 @@ handle_tkip_countermeasure:
status = rtw_do_join(padapter);
release_mlme_lock:
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
exit:
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
@@ -266,7 +264,6 @@ _func_exit_;
u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
{
- unsigned long irqL;
u8 status = _SUCCESS;
u32 cur_time = 0;
@@ -285,7 +282,7 @@ _func_enter_;
goto exit;
}
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
DBG_88E("Set SSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
@@ -346,7 +343,7 @@ _func_enter_;
handle_tkip_countermeasure:
if (padapter->securitypriv.btkip_countermeasure) {
- cur_time = rtw_get_current_time();
+ cur_time = jiffies;
if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ) {
padapter->securitypriv.btkip_countermeasure = false;
@@ -367,7 +364,7 @@ handle_tkip_countermeasure:
}
release_mlme_lock:
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
exit:
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
@@ -379,7 +376,6 @@ _func_exit_;
u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
enum ndis_802_11_network_infra networktype)
{
- unsigned long irqL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *cur_network = &pmlmepriv->cur_network;
enum ndis_802_11_network_infra *pold_state = &(cur_network->network.InfrastructureMode);
@@ -391,7 +387,7 @@ _func_enter_;
*pold_state, networktype, get_fwstate(pmlmepriv)));
if (*pold_state != networktype) {
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, (" change mode!"));
/* DBG_88E("change mode, old_mode =%d, new_mode =%d, fw_state = 0x%x\n", *pold_state, networktype, get_fwstate(pmlmepriv)); */
@@ -439,7 +435,7 @@ _func_enter_;
case Ndis802_11InfrastructureMax:
break;
}
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
}
_func_exit_;
@@ -450,12 +446,11 @@ _func_exit_;
u8 rtw_set_802_11_disassociate(struct adapter *padapter)
{
- unsigned long irqL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
_func_enter_;
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
@@ -467,7 +462,7 @@ _func_enter_;
rtw_pwr_wakeup(padapter);
}
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
_func_exit_;
@@ -476,7 +471,6 @@ _func_exit_;
u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_ssid *pssid, int ssid_max_num)
{
- unsigned long irqL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 res = true;
@@ -512,11 +506,11 @@ _func_enter_;
return _SUCCESS;
}
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
res = rtw_sitesurvey_cmd(padapter, pssid, ssid_max_num, NULL, 0);
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
}
exit:
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index ac3535d..c738230 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -54,7 +54,7 @@ _func_enter_;
pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: pasive. Maybe someday we should rename this varable to "active_mode" (Jeff) */
- _rtw_spinlock_init(&(pmlmepriv->lock));
+ spin_lock_init(&(pmlmepriv->lock));
_rtw_init_queue(&(pmlmepriv->free_bss_pool));
_rtw_init_queue(&(pmlmepriv->scanned_queue));
@@ -93,13 +93,6 @@ _func_exit_;
return res;
}
-static void rtw_mfree_mlme_priv_lock (struct mlme_priv *pmlmepriv)
-{
- _rtw_spinlock_free(&pmlmepriv->lock);
- _rtw_spinlock_free(&(pmlmepriv->free_bss_pool.lock));
- _rtw_spinlock_free(&(pmlmepriv->scanned_queue.lock));
-}
-
#if defined (CONFIG_88EU_AP_MODE)
static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
{
@@ -136,8 +129,6 @@ _func_enter_;
rtw_free_mlme_priv_ie_data(pmlmepriv);
if (pmlmepriv) {
- rtw_mfree_mlme_priv_lock (pmlmepriv);
-
if (pmlmepriv->free_bss_buf) {
rtw_vmfree(pmlmepriv->free_bss_buf, MAX_BSS_CNT * sizeof(struct wlan_network));
}
@@ -147,18 +138,16 @@ _func_exit_;
int _rtw_enqueue_network(struct __queue *queue, struct wlan_network *pnetwork)
{
- unsigned long irql;
-
_func_enter_;
if (pnetwork == NULL)
goto exit;
- _enter_critical_bh(&queue->lock, &irql);
+ spin_lock_bh(&queue->lock);
rtw_list_insert_tail(&pnetwork->list, &queue->queue);
- _exit_critical_bh(&queue->lock, &irql);
+ spin_unlock_bh(&queue->lock);
exit:
@@ -169,13 +158,11 @@ _func_exit_;
struct wlan_network *_rtw_dequeue_network(struct __queue *queue)
{
- unsigned long irql;
-
struct wlan_network *pnetwork;
_func_enter_;
- _enter_critical_bh(&queue->lock, &irql);
+ spin_lock_bh(&queue->lock);
if (_rtw_queue_empty(queue)) {
pnetwork = NULL;
@@ -185,7 +172,7 @@ _func_enter_;
rtw_list_delete(&(pnetwork->list));
}
- _exit_critical_bh(&queue->lock, &irql);
+ spin_unlock_bh(&queue->lock);
_func_exit_;
@@ -194,14 +181,13 @@ _func_exit_;
struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */
{
- unsigned long irql;
struct wlan_network *pnetwork;
struct __queue *free_queue = &pmlmepriv->free_bss_pool;
struct list_head *plist = NULL;
_func_enter_;
- _enter_critical_bh(&free_queue->lock, &irql);
+ spin_lock_bh(&free_queue->lock);
if (_rtw_queue_empty(free_queue) == true) {
pnetwork = NULL;
@@ -216,14 +202,14 @@ _func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("_rtw_alloc_network: ptr=%p\n", plist));
pnetwork->network_type = 0;
pnetwork->fixed = false;
- pnetwork->last_scanned = rtw_get_current_time();
+ pnetwork->last_scanned = jiffies;
pnetwork->aid = 0;
pnetwork->join_res = 0;
pmlmepriv->num_of_scanned++;
exit:
- _exit_critical_bh(&free_queue->lock, &irql);
+ spin_unlock_bh(&free_queue->lock);
_func_exit_;
@@ -234,7 +220,6 @@ void _rtw_free_network(struct mlme_priv *pmlmepriv , struct wlan_network *pnetwo
{
u32 curr_time, delta_time;
u32 lifetime = SCANQUEUE_LIFETIME;
- unsigned long irql;
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
_func_enter_;
@@ -244,7 +229,7 @@ _func_enter_;
if (pnetwork->fixed)
goto exit;
- curr_time = rtw_get_current_time();
+ curr_time = jiffies;
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)))
lifetime = 1;
@@ -253,11 +238,11 @@ _func_enter_;
if (delta_time < lifetime)/* unit:sec */
goto exit;
}
- _enter_critical_bh(&free_queue->lock, &irql);
+ spin_lock_bh(&free_queue->lock);
rtw_list_delete(&(pnetwork->list));
rtw_list_insert_tail(&(pnetwork->list), &(free_queue->queue));
pmlmepriv->num_of_scanned--;
- _exit_critical_bh(&free_queue->lock, &irql);
+ spin_unlock_bh(&free_queue->lock);
exit:
_func_exit_;
@@ -315,7 +300,6 @@ _func_exit_;
void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
{
- unsigned long irql;
struct list_head *phead, *plist;
struct wlan_network *pnetwork;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -324,7 +308,7 @@ void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
_func_enter_;
- _enter_critical_bh(&scanned_queue->lock, &irql);
+ spin_lock_bh(&scanned_queue->lock);
phead = get_list_head(scanned_queue);
plist = get_next(phead);
@@ -336,7 +320,7 @@ _func_enter_;
_rtw_free_network(pmlmepriv, pnetwork, isfreeall);
}
- _exit_critical_bh(&scanned_queue->lock, &irql);
+ spin_unlock_bh(&scanned_queue->lock);
_func_exit_;
}
@@ -361,7 +345,7 @@ _func_exit_;
void rtw_generate_random_ibss(u8 *pibss)
{
- u32 curtime = rtw_get_current_time();
+ u32 curtime = jiffies;
_func_enter_;
pibss[0] = 0x02; /* in ad-hoc mode bit1 must set to 1 */
@@ -592,7 +576,6 @@ Caller must hold pmlmepriv->lock first.
*/
void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *target)
{
- unsigned long irql;
struct list_head *plist, *phead;
u32 bssid_ex_sz;
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
@@ -602,7 +585,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
_func_enter_;
- _enter_critical_bh(&queue->lock, &irql);
+ spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
plist = get_next(phead);
@@ -630,7 +613,7 @@ _func_enter_;
memcpy(&(pnetwork->network), target, get_wlan_bssid_ex_sz(target));
/* variable initialize */
pnetwork->fixed = false;
- pnetwork->last_scanned = rtw_get_current_time();
+ pnetwork->last_scanned = jiffies;
pnetwork->network_type = 0;
pnetwork->aid = 0;
@@ -654,7 +637,7 @@ _func_enter_;
rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &(target->PhyInfo.Optimum_antenna));
memcpy(&(pnetwork->network), target, bssid_ex_sz);
- pnetwork->last_scanned = rtw_get_current_time();
+ pnetwork->last_scanned = jiffies;
/* bss info not receiving from the right channel */
if (pnetwork->network.PhyInfo.SignalQuality == 101)
@@ -668,7 +651,7 @@ _func_enter_;
*/
bool update_ie = true;
- pnetwork->last_scanned = rtw_get_current_time();
+ pnetwork->last_scanned = jiffies;
/* target.Reserved[0]== 1, means that scanned network is a bcn frame. */
if ((pnetwork->network.IELength > target->IELength) && (target->Reserved[0] == 1))
@@ -678,7 +661,7 @@ _func_enter_;
}
exit:
- _exit_critical_bh(&queue->lock, &irql);
+ spin_unlock_bh(&queue->lock);
_func_exit_;
}
@@ -754,7 +737,6 @@ _func_exit_;
void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
{
- unsigned long irql;
u32 len;
struct wlan_bssid_ex *pnetwork;
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
@@ -770,23 +752,22 @@ _func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n****rtw_survey_event_callback: return a wrong bss ***\n"));
return;
}
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
/* update IBSS_network 's timestamp */
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) == true) {
if (_rtw_memcmp(&(pmlmepriv->cur_network.network.MacAddress), pnetwork->MacAddress, ETH_ALEN)) {
struct wlan_network *ibss_wlan = NULL;
- unsigned long irql;
memcpy(pmlmepriv->cur_network.network.IEs, pnetwork->IEs, 8);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
ibss_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->MacAddress);
if (ibss_wlan) {
memcpy(ibss_wlan->network.IEs , pnetwork->IEs, 8);
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto exit;
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
}
}
@@ -799,7 +780,7 @@ _func_enter_;
exit:
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
_func_exit_;
@@ -810,12 +791,11 @@ _func_exit_;
void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
{
- unsigned long irql;
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
struct mlme_ext_priv *pmlmeext;
_func_enter_;
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
if (pmlmepriv->wps_probe_req_ie) {
pmlmepriv->wps_probe_req_ie_len = 0;
@@ -894,7 +874,7 @@ _func_enter_;
indicate_wx_scan_complete_event(adapter);
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
p2p_ps_wk_cmd(adapter, P2P_PS_SCAN_DONE, 0);
@@ -917,7 +897,6 @@ void rtw_fwdbg_event_callback(struct adapter *adapter , u8 *pbuf)
static void free_scanqueue(struct mlme_priv *pmlmepriv)
{
- unsigned long irql, irql0;
struct __queue *free_queue = &pmlmepriv->free_bss_pool;
struct __queue *scan_queue = &pmlmepriv->scanned_queue;
struct list_head *plist, *phead, *ptemp;
@@ -925,8 +904,8 @@ static void free_scanqueue(struct mlme_priv *pmlmepriv)
_func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+free_scanqueue\n"));
- _enter_critical_bh(&scan_queue->lock, &irql0);
- _enter_critical_bh(&free_queue->lock, &irql);
+ spin_lock_bh(&scan_queue->lock);
+ spin_lock_bh(&free_queue->lock);
phead = get_list_head(scan_queue);
plist = get_next(phead);
@@ -939,8 +918,8 @@ _func_enter_;
pmlmepriv->num_of_scanned--;
}
- _exit_critical_bh(&free_queue->lock, &irql);
- _exit_critical_bh(&scan_queue->lock, &irql0);
+ spin_unlock_bh(&free_queue->lock);
+ spin_unlock_bh(&scan_queue->lock);
_func_exit_;
}
@@ -950,7 +929,6 @@ _func_exit_;
*/
void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
{
- unsigned long irql;
struct wlan_network *pwlan = NULL;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
struct sta_priv *pstapriv = &adapter->stapriv;
@@ -968,9 +946,9 @@ _func_enter_;
psta = rtw_get_stainfo(&adapter->stapriv, tgt_network->network.MacAddress);
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(adapter, psta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
}
if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE)) {
@@ -979,15 +957,15 @@ _func_enter_;
rtw_free_all_stainfo(adapter);
psta = rtw_get_bcmc_stainfo(adapter);
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(adapter, psta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
rtw_init_bcmc_stainfo(adapter);
}
if (lock_scanned_queue)
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
if (pwlan)
@@ -999,7 +977,7 @@ _func_enter_;
rtw_free_network_nolock(pmlmepriv, pwlan);
if (lock_scanned_queue)
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
pmlmepriv->key_mask = 0;
_func_exit_;
}
@@ -1075,14 +1053,14 @@ void rtw_scan_abort(struct adapter *adapter)
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
struct mlme_ext_priv *pmlmeext = &(adapter->mlmeextpriv);
- start = rtw_get_current_time();
+ start = jiffies;
pmlmeext->scan_abort = true;
while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) &&
rtw_get_passing_time_ms(start) <= 200) {
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
break;
DBG_88E(FUNC_NDEV_FMT"fw_state=_FW_UNDER_SURVEY!\n", FUNC_NDEV_ARG(adapter->pnetdev));
- rtw_msleep_os(20);
+ msleep(20);
}
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
if (!adapter->bDriverStopped && !adapter->bSurpriseRemoved)
@@ -1219,7 +1197,6 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
{
- unsigned long irql, irql2;
u8 timer_cancelled;
struct sta_info *ptarget_sta = NULL, *pcur_sta = NULL;
struct sta_priv *pstapriv = &adapter->stapriv;
@@ -1249,12 +1226,12 @@ _func_enter_;
goto ignore_nolock;
}
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\nrtw_joinbss_event_callback!! _enter_critical\n"));
if (pnetwork->join_res > 0) {
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
/* s1. find ptarget_wlan */
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
@@ -1267,9 +1244,9 @@ _func_enter_;
pcur_sta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
if (pcur_sta) {
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(adapter, pcur_sta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
}
ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress);
@@ -1291,7 +1268,7 @@ _func_enter_;
rtw_joinbss_update_network(adapter, ptarget_wlan, pnetwork);
} else {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't find ptarget_wlan when joinbss_event callback\n"));
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto ignore_joinbss_callback;
}
@@ -1301,7 +1278,7 @@ _func_enter_;
ptarget_sta = rtw_joinbss_update_stainfo(adapter, pnetwork);
if (ptarget_sta == NULL) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't update stainfo when joinbss_event callback\n"));
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto ignore_joinbss_callback;
}
}
@@ -1321,11 +1298,11 @@ _func_enter_;
} else {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_joinbss_event_callback err: fw_state:%x", get_fwstate(pmlmepriv)));
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto ignore_joinbss_callback;
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
} else if (pnetwork->join_res == -4) {
rtw_reset_securitypriv(adapter);
@@ -1341,7 +1318,7 @@ _func_enter_;
}
ignore_joinbss_callback:
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
ignore_nolock:
_func_exit_;
}
@@ -1405,7 +1382,6 @@ void rtw_stassoc_hw_rpt(struct adapter *adapter, struct sta_info *psta)
void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
{
- unsigned long irql;
struct sta_info *psta;
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
struct stassoc_event *pstassoc = (struct stassoc_event *)pbuf;
@@ -1449,20 +1425,20 @@ _func_enter_;
if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm;
psta->ieee8021x_blocked = false;
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))) {
if (adapter->stapriv.asoc_sta_count == 2) {
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
if (ptarget_wlan)
ptarget_wlan->fixed = true;
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
/* a sta + bc/mc_stainfo (not Ibss_stainfo) */
rtw_indicate_connect(adapter);
}
}
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
mlmeext_sta_add_event_callback(adapter, psta);
exit:
_func_exit_;
@@ -1470,7 +1446,6 @@ _func_exit_;
void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
{
- unsigned long irql, irql2;
int mac_id = -1;
struct sta_info *psta;
struct wlan_network *pwlan = NULL;
@@ -1503,7 +1478,7 @@ _func_enter_;
mlmeext_sta_del_event_callback(adapter);
- _enter_critical_bh(&pmlmepriv->lock, &irql2);
+ spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
if (pmlmepriv->to_roaming > 0)
@@ -1518,31 +1493,31 @@ _func_enter_;
rtw_free_assoc_resources(adapter, 1);
rtw_indicate_disconnect(adapter);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
/* remove the network entry in scanned_queue */
pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
if (pwlan) {
pwlan->fixed = false;
rtw_free_network_nolock(pmlmepriv, pwlan);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
_rtw_roaming(adapter, tgt_network);
}
if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(adapter, psta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
if (adapter->stapriv.asoc_sta_count == 1) { /* a sta + bc/mc_stainfo (not Ibss_stainfo) */
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
/* free old ibss network */
pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
if (pwlan) {
pwlan->fixed = false;
rtw_free_network_nolock(pmlmepriv, pwlan);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
/* re-create ibss */
pdev_network = &(adapter->registrypriv.dev_network);
pibss = adapter->registrypriv.dev_network.MacAddress;
@@ -1565,7 +1540,7 @@ _func_enter_;
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error=>stadel_event_callback: rtw_createbss_cmd status FAIL***\n "));
}
}
- _exit_critical_bh(&pmlmepriv->lock, &irql2);
+ spin_unlock_bh(&pmlmepriv->lock);
_func_exit_;
}
@@ -1582,7 +1557,6 @@ _func_exit_;
*/
void _rtw_join_timeout_handler (struct adapter *adapter)
{
- unsigned long irql;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
int do_join_r;
@@ -1594,7 +1568,7 @@ _func_enter_;
return;
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
if (pmlmepriv->to_roaming > 0) { /* join timeout caused by roaming */
while (1) {
@@ -1617,7 +1591,7 @@ _func_enter_;
rtw_indicate_disconnect(adapter);
free_scanqueue(pmlmepriv);/* */
}
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
_func_exit_;
}
@@ -1627,13 +1601,12 @@ _func_exit_;
*/
void rtw_scan_timeout_handler (struct adapter *adapter)
{
- unsigned long irql;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
DBG_88E(FUNC_ADPT_FMT" fw_state=%x\n", FUNC_ADPT_ARG(adapter), get_fwstate(pmlmepriv));
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
rtw_indicate_scan_done(adapter, true);
}
@@ -1761,7 +1734,6 @@ pmlmepriv->lock
int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
{
- unsigned long irql;
int ret;
struct list_head *phead;
struct adapter *adapter;
@@ -1772,7 +1744,7 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
_func_enter_;
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
adapter = (struct adapter *)pmlmepriv->nic_hdl;
pmlmepriv->pscanned = get_next(phead);
@@ -1819,7 +1791,7 @@ _func_enter_;
ret = rtw_joinbss_cmd(adapter, candidate);
exit:
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
_func_exit_;
@@ -2394,12 +2366,11 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
void rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
{
- unsigned long irql;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
_rtw_roaming(padapter, tgt_network);
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
}
void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
{
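The rtw_mlme.c hunks above also switch the driver's rtw_get_current_time()/rtw_msleep_os() wrappers to raw jiffies and msleep(). A minimal sketch of a bounded wait loop in that style (the 200 ms budget and the condition callback are illustrative only):

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/delay.h>

/* Hypothetical poll loop: give a condition up to 200 ms to clear. */
static bool example_wait_scan_done(bool (*still_scanning)(void))
{
	unsigned long start = jiffies;

	while (still_scanning()) {
		if (time_after(jiffies, start + msecs_to_jiffies(200)))
			return false;	/* timed out */
		msleep(20);		/* sleep, don't busy-wait */
	}
	return true;
}
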
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 7ab5ff0..6f7e415 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -221,7 +221,7 @@ static void init_mlme_ext_priv_value(struct adapter *padapter)
_12M_RATE_, _24M_RATE_, 0xff,
};
- ATOMIC_SET(&pmlmeext->event_seq, 0);
+ atomic_set(&pmlmeext->event_seq, 0);
pmlmeext->mgnt_seq = 0;/* reset to zero when disconnect at client mode */
pmlmeext->cur_channel = padapter->registrypriv.channel;
@@ -756,7 +756,6 @@ _END_ONBEACON_:
unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_AP_MODE
- unsigned long irqL;
unsigned int auth_mode, ie_len;
u16 seq;
unsigned char *sa, *p;
@@ -817,24 +816,24 @@ unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
pstat->state = WIFI_FW_AUTH_NULL;
pstat->auth_seq = 0;
} else {
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
if (!rtw_is_list_empty(&pstat->asoc_list)) {
rtw_list_delete(&pstat->asoc_list);
pstapriv->asoc_list_cnt--;
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
if (seq == 1) {
/* TODO: STA re_auth and auth timeout */
}
}
- _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->auth_list_lock);
if (rtw_is_list_empty(&pstat->auth_list)) {
rtw_list_insert_tail(&pstat->auth_list, &pstapriv->auth_list);
pstapriv->auth_list_cnt++;
}
- _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->auth_list_lock);
if (pstat->auth_seq == 0)
pstat->expire_to = pstapriv->auth_to;
@@ -1005,7 +1004,6 @@ authclnt_fail:
unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_AP_MODE
- unsigned long irqL;
u16 capab_info;
struct rtw_ieee802_11_elems elems;
struct sta_info *pstat;
@@ -1408,20 +1406,20 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
pstat->state &= (~WIFI_FW_ASSOC_STATE);
pstat->state |= WIFI_FW_ASSOC_SUCCESS;
- _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->auth_list_lock);
if (!rtw_is_list_empty(&pstat->auth_list)) {
rtw_list_delete(&pstat->auth_list);
pstapriv->auth_list_cnt--;
}
- _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->auth_list_lock);
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
if (rtw_is_list_empty(&pstat->asoc_list)) {
pstat->expire_to = pstapriv->expire_to;
rtw_list_insert_tail(&pstat->asoc_list, &pstapriv->asoc_list);
pstapriv->asoc_list_cnt++;
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
/* now the station is qualified to join our BSS... */
if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (_STATS_SUCCESSFUL_ == status)) {
@@ -1590,7 +1588,6 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
#ifdef CONFIG_88EU_AP_MODE
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- unsigned long irqL;
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1601,13 +1598,13 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
if (psta) {
u8 updated = 0;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
if (!rtw_is_list_empty(&psta->asoc_list)) {
rtw_list_delete(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
updated = ap_free_sta(padapter, psta, false, reason);
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
associated_clients_update(padapter, updated);
}
@@ -1654,14 +1651,9 @@ unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
#ifdef CONFIG_88EU_AP_MODE
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- unsigned long irqL;
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
- /* _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL); */
- /* rtw_free_stainfo(padapter, psta); */
- /* _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL); */
-
DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
reason, GetAddr2Ptr(pframe));
@@ -1669,13 +1661,13 @@ unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
if (psta) {
u8 updated = 0;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
if (!rtw_is_list_empty(&psta->asoc_list)) {
rtw_list_delete(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
updated = ap_free_sta(padapter, psta, false, reason);
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
associated_clients_update(padapter, updated);
}
@@ -3826,7 +3818,7 @@ int issue_probereq_p2p_ex(struct adapter *adapter, u8 *da, int try_cnt, int wait
{
int ret;
int i = 0;
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
do {
ret = _issue_probereq_p2p(adapter, da, wait_ms > 0 ? true : false);
@@ -3837,7 +3829,7 @@ int issue_probereq_p2p_ex(struct adapter *adapter, u8 *da, int try_cnt, int wait
break;
if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- rtw_msleep_os(wait_ms);
+ msleep(wait_ms);
} while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
if (ret != _FAIL) {
@@ -4419,7 +4411,7 @@ s32 dump_mgntframe_and_wait(struct adapter *padapter, struct xmit_frame *pmgntfr
if (ret == _SUCCESS)
ret = rtw_sctx_wait(&sctx);
- return ret;
+ return ret;
}
s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmgntframe)
@@ -4487,9 +4479,6 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
__le16 *fctrl;
unsigned int rate_len;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
-#if defined(CONFIG_88EU_AP_MODE)
- unsigned long irqL;
-#endif /* if defined (CONFIG_88EU_AP_MODE) */
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -4505,7 +4494,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
return;
}
#if defined (CONFIG_88EU_AP_MODE)
- _enter_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+ spin_lock_bh(&pmlmepriv->bcn_update_lock);
#endif /* if defined (CONFIG_88EU_AP_MODE) */
/* update attribute */
@@ -4690,7 +4679,7 @@ _issue_bcn:
#if defined (CONFIG_88EU_AP_MODE)
pmlmepriv->update_bcn = false;
- _exit_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->bcn_update_lock);
#endif /* if defined (CONFIG_88EU_AP_MODE) */
if ((pattrib->pktlen + TXDESC_SIZE) > 512) {
@@ -4972,7 +4961,7 @@ int issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid,
{
int ret;
int i = 0;
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
do {
ret = _issue_probereq(padapter, pssid, da, wait_ms > 0 ? true : false);
@@ -4983,7 +4972,7 @@ int issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid,
break;
if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- rtw_msleep_os(wait_ms);
+ msleep(wait_ms);
} while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
@@ -5693,7 +5682,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
{
int ret;
int i = 0;
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -5710,7 +5699,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
break;
if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- rtw_msleep_os(wait_ms);
+ msleep(wait_ms);
} while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
if (ret != _FAIL) {
@@ -5816,7 +5805,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
{
int ret;
int i = 0;
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -5833,7 +5822,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
break;
if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- rtw_msleep_os(wait_ms);
+ msleep(wait_ms);
} while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
if (ret != _FAIL) {
@@ -5934,7 +5923,7 @@ int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int
{
int ret;
int i = 0;
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
do {
ret = _issue_deauth(padapter, da, reason, wait_ms > 0 ? true : false);
@@ -5945,7 +5934,7 @@ int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int
break;
if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- rtw_msleep_os(wait_ms);
+ msleep(wait_ms);
} while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
if (ret != _FAIL) {
@@ -6156,7 +6145,6 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
static void issue_action_BSSCoexistPacket(struct adapter *padapter)
{
- unsigned long irqL;
struct list_head *plist, *phead;
unsigned char category, action;
struct xmit_frame *pmgntframe;
@@ -6231,7 +6219,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
if (pmlmepriv->num_sta_no_ht > 0) {
int i;
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -6261,7 +6249,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
ICS[0][0] = 1;
}
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
for (i = 0; i < 8; i++) {
if (ICS[i][0] == 1) {
@@ -6338,14 +6326,14 @@ unsigned int send_beacon(struct adapter *padapter)
int issue = 0;
int poll = 0;
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
rtw_hal_set_hwreg(padapter, HW_VAR_BCN_VALID, NULL);
do {
issue_beacon(padapter, 100);
issue++;
do {
- rtw_yield_os();
+ yield();
rtw_hal_get_hwreg(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
poll++;
} while ((poll%10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
@@ -6435,7 +6423,7 @@ void site_survey(struct adapter *padapter)
if (pmlmeext->sitesurvey_res.ssid[i].SsidLength) {
/* todo: to issue two probe req??? */
issue_probereq(padapter, &(pmlmeext->sitesurvey_res.ssid[i]), NULL);
- /* rtw_msleep_os(SURVEY_TO>>1); */
+ /* msleep(SURVEY_TO>>1); */
issue_probereq(padapter, &(pmlmeext->sitesurvey_res.ssid[i]), NULL);
}
}
@@ -6443,7 +6431,7 @@ void site_survey(struct adapter *padapter)
if (pmlmeext->sitesurvey_res.scan_mode == SCAN_ACTIVE) {
/* todo: to issue two probe req??? */
issue_probereq(padapter, NULL, NULL);
- /* rtw_msleep_os(SURVEY_TO>>1); */
+ /* msleep(SURVEY_TO>>1); */
issue_probereq(padapter, NULL, NULL);
}
}
@@ -7082,7 +7070,7 @@ void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct survey_event);
pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
- pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
psurvey_evt = (struct survey_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
@@ -7134,7 +7122,7 @@ void report_surveydone_event(struct adapter *padapter)
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct surveydone_event);
pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
- pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
psurveydone_evt = (struct surveydone_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
@@ -7180,7 +7168,7 @@ void report_join_res(struct adapter *padapter, int res)
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct joinbss_event);
pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
- pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
pjoinbss_evt = (struct joinbss_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
memcpy((unsigned char *)(&(pjoinbss_evt->network.network)), &(pmlmeinfo->network), sizeof(struct wlan_bssid_ex));
@@ -7233,7 +7221,7 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct stadel_event);
pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
- pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
pdel_sta_evt = (struct stadel_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
memcpy((unsigned char *)(&(pdel_sta_evt->macaddr)), MacAddr, ETH_ALEN);
@@ -7288,7 +7276,7 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct stassoc_event);
pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
- pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
padd_sta_evt = (struct stassoc_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
memcpy((unsigned char *)(&(padd_sta_evt->macaddr)), MacAddr, ETH_ALEN);
@@ -8334,7 +8322,7 @@ u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
goto _abort_event_;
}
- ATOMIC_INC(&pevt_priv->event_seq);
+ atomic_inc(&pevt_priv->event_seq);
peventbuf += 2;
@@ -8365,7 +8353,6 @@ u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
}
#ifdef CONFIG_88EU_AP_MODE
else { /* tx bc/mc frames after update TIM */
- unsigned long irqL;
struct sta_info *psta_bmc;
struct list_head *xmitframe_plist, *xmitframe_phead;
struct xmit_frame *pxmitframe = NULL;
@@ -8377,8 +8364,8 @@ u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
return H2C_SUCCESS;
if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) {
- rtw_msleep_os(10);/* 10ms, ATIM(HIQ) Windows */
- _enter_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+ msleep(10);/* 10ms, ATIM(HIQ) Windows */
+ spin_lock_bh(&psta_bmc->sleep_q.lock);
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
xmitframe_plist = get_next(xmitframe_phead);
@@ -8400,12 +8387,12 @@ u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
pxmitframe->attrib.qsel = 0x11;/* HIQ */
- _exit_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+ spin_unlock_bh(&psta_bmc->sleep_q.lock);
if (rtw_hal_xmit(padapter, pxmitframe))
rtw_os_xmit_complete(padapter, pxmitframe);
- _enter_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+ spin_lock_bh(&psta_bmc->sleep_q.lock);
}
- _exit_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+ spin_unlock_bh(&psta_bmc->sleep_q.lock);
}
}
#endif
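
Aside (not part of the patch): the rtw_mlme_ext.c hunks also retire the driver's ATOMIC_SET()/ATOMIC_INC()/ATOMIC_INC_RETURN() macros in favour of the kernel's native atomic_t operations on the event sequence counter. A hedged sketch of that usage; the counter name below is a placeholder.

/* Sketch only: atomic_t sequence counter as used after the conversion. */
#include <linux/atomic.h>

static atomic_t demo_event_seq = ATOMIC_INIT(0);

static int demo_next_seq(void)
{
	/* increments and returns the new value as a single atomic operation */
	return atomic_inc_return(&demo_event_seq);
}

static void demo_reset_seq(void)
{
	atomic_set(&demo_event_seq, 0);	/* plain atomic store; no memory ordering implied */
}
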
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp.c b/drivers/staging/rtl8188eu/core/rtw_mp.c
index 9832dcb..6451efd 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mp.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mp.c
@@ -323,10 +323,7 @@ s32 mp_start_test(struct adapter *padapter)
struct sta_info *psta;
u32 length;
u8 val8;
-
- unsigned long irqL;
s32 res = _SUCCESS;
-
struct mp_priv *pmppriv = &padapter->mppriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *tgt_network = &pmlmepriv->cur_network;
@@ -379,7 +376,7 @@ s32 mp_start_test(struct adapter *padapter)
else
bssid.Length = length;
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true)
goto end_of_mp_start_test;
@@ -420,7 +417,7 @@ s32 mp_start_test(struct adapter *padapter)
end_of_mp_start_test:
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
if (res == _SUCCESS) {
/* set MSR to WIFI_FW_ADHOC_STATE */
@@ -439,11 +436,9 @@ void mp_stop_test(struct adapter *padapter)
struct wlan_network *tgt_network = &pmlmepriv->cur_network;
struct sta_info *psta;
- unsigned long irqL;
-
if (pmppriv->mode == MP_ON) {
pmppriv->bSetTxPower = 0;
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == false)
goto end_of_mp_stop_test;
@@ -465,7 +460,7 @@ void mp_stop_test(struct adapter *padapter)
end_of_mp_stop_test:
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
}
}
@@ -614,7 +609,7 @@ static int mp_xmit_packet_thread(void *context)
padapter->bDriverStopped) {
goto exit;
} else {
- rtw_msleep_os(1);
+ msleep(1);
continue;
}
}
@@ -643,7 +638,7 @@ exit:
pmptx->pallocated_buf = NULL;
pmptx->stop = 1;
- thread_exit();
+ complete_and_exit(NULL, 0);
}
void fill_txdesc_for_mp(struct adapter *padapter, struct tx_desc *ptxdesc)
@@ -863,11 +858,11 @@ static u32 rtw_GetPSDData(struct adapter *pAdapter, u32 point)
psd_val |= point;
rtw_write32(pAdapter, 0x808, psd_val);
- rtw_mdelay_os(1);
+ mdelay(1);
psd_val |= 0x00400000;
rtw_write32(pAdapter, 0x808, psd_val);
- rtw_mdelay_os(1);
+ mdelay(1);
psd_val = rtw_read32(pAdapter, 0x8B4);
psd_val &= 0x0000FFFF;
@@ -920,7 +915,7 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
i++;
}
- rtw_msleep_os(100);
+ msleep(100);
return strlen(data)+1;
}
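
Aside (not part of the patch): in rtw_mp.c the rtw_msleep_os()/rtw_mdelay_os() wrappers become msleep()/mdelay(). The two kernel helpers are not interchangeable: msleep() sleeps (process context only, and may overshoot the requested time), while mdelay() busy-waits and is usable in atomic context at the cost of burning CPU. A short sketch:

/* Sketch only: the behavioural difference between the converted delay calls. */
#include <linux/delay.h>

static void demo_wait_for_hw(void)
{
	mdelay(1);	/* busy-waits ~1 ms; allowed under a spinlock */
	msleep(100);	/* sleeps at least 100 ms; must not hold a spinlock */
}
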
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c b/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
index f06312c..edcd8a5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
@@ -690,7 +690,7 @@ _func_enter_;
if (pmp_priv->tx.stop == 0) {
pmp_priv->tx.stop = 1;
DBG_88E("%s: pkt tx is running...\n", __func__);
- rtw_msleep_os(5);
+ msleep(5);
}
pmp_priv->tx.stop = 0;
pmp_priv->tx.count = 1;
@@ -725,7 +725,7 @@ _func_enter_;
if (pmp_priv->tx.stop == 0) {
pmp_priv->tx.stop = 1;
DBG_88E("%s: pkt tx is running...\n", __func__);
- rtw_msleep_os(5);
+ msleep(5);
}
pmp_priv->tx.stop = 0;
pmp_priv->tx.count = 1;
@@ -760,7 +760,7 @@ _func_enter_;
if (pmp_priv->tx.stop == 0) {
pmp_priv->tx.stop = 1;
DBG_88E("%s: pkt tx is running...\n", __func__);
- rtw_msleep_os(5);
+ msleep(5);
}
pmp_priv->tx.stop = 0;
pmp_priv->tx.count = 1;
diff --git a/drivers/staging/rtl8188eu/core/rtw_p2p.c b/drivers/staging/rtl8188eu/core/rtw_p2p.c
index f46cab1..6e8c06e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_p2p.c
+++ b/drivers/staging/rtl8188eu/core/rtw_p2p.c
@@ -40,7 +40,6 @@ static int rtw_p2p_is_channel_list_ok(u8 desired_ch, u8 *ch_list, u8 ch_cnt)
static u32 go_add_group_info_attr(struct wifidirect_info *pwdinfo, u8 *pbuf)
{
- unsigned long irqL;
struct list_head *phead, *plist;
u32 len = 0;
u16 attr_len = 0;
@@ -56,7 +55,7 @@ static u32 go_add_group_info_attr(struct wifidirect_info *pwdinfo, u8 *pbuf)
pstart = pdata_attr;
pcur = pdata_attr;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
@@ -120,7 +119,7 @@ static u32 go_add_group_info_attr(struct wifidirect_info *pwdinfo, u8 *pbuf)
pstart = pcur;
}
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
if (attr_len > 0)
len = rtw_set_p2p_attr_content(pbuf, P2P_ATTR_GROUP_INFO, attr_len, pdata_attr);
@@ -977,10 +976,9 @@ u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
_rtw_memcmp(pwdinfo->p2p_group_ssid, groupid+ETH_ALEN, pwdinfo->p2p_group_ssid_len)) {
attr_contentlen = 0;
if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_ID, dev_addr, &attr_contentlen)) {
- unsigned long irqL;
struct list_head *phead, *plist;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
@@ -1000,7 +998,7 @@ u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
status = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
}
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
} else {
status = P2P_STATUS_FAIL_INVALID_PARAM;
}
@@ -1497,9 +1495,8 @@ u8 process_p2p_presence_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
static void find_phase_handler(struct adapter *padapter)
{
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_802_11_ssid ssid;
- unsigned long irqL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
_func_enter_;
@@ -1509,10 +1506,9 @@ _func_enter_;
rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH);
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
-
-
+ spin_lock_bh(&pmlmepriv->lock);
+ rtw_sitesurvey_cmd(padapter, &ssid, 1, NULL, 0);
+ spin_unlock_bh(&pmlmepriv->lock);
_func_exit_;
}
@@ -1833,13 +1829,12 @@ static void pre_tx_scan_timer_process(void *FunctionContext)
{
struct adapter *adapter = (struct adapter *)FunctionContext;
struct wifidirect_info *pwdinfo = &adapter->wdinfo;
- unsigned long irqL;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
return;
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) {
if (pwdinfo->tx_prov_disc_info.benable) { /* the provision discovery request frame is trigger to send or not */
@@ -1857,7 +1852,7 @@ static void pre_tx_scan_timer_process(void *FunctionContext)
DBG_88E("[%s] p2p_state is %d, ignore!!\n", __func__, rtw_p2p_state(pwdinfo));
}
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
}
static void find_phase_timer_process(void *FunctionContext)
@@ -1967,7 +1962,7 @@ void init_wifidirect_info(struct adapter *padapter, enum P2P_ROLE role)
rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_NONE);
- pwdinfo->listen_dwell = (u8) ((rtw_get_current_time() % 3) + 1);
+ pwdinfo->listen_dwell = (u8) ((jiffies % 3) + 1);
_rtw_memset(&pwdinfo->tx_prov_disc_info, 0x00, sizeof(struct tx_provdisc_req_info));
pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_NONE;
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index b45461f..b5db22c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -122,7 +122,7 @@ static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
bool ret = false;
- if (adapter->pwrctrlpriv.ips_deny_time >= rtw_get_current_time())
+ if (adapter->pwrctrlpriv.ips_deny_time >= jiffies)
goto exit;
if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR) ||
@@ -285,7 +285,7 @@ static u8 PS_RDY_CHECK(struct adapter *padapter)
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- curr_time = rtw_get_current_time();
+ curr_time = jiffies;
delta_time = curr_time - pwrpriv->DelayLPSLastTimeStamp;
if (delta_time < LPS_DELAY_TIME)
@@ -379,7 +379,7 @@ s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
s32 err = 0;
- start_time = rtw_get_current_time();
+ start_time = jiffies;
while (1) {
rtw_hal_get_hwreg(padapter, HW_VAR_FWLPS_RF_ON, &bAwake);
if (bAwake)
@@ -396,7 +396,7 @@ s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
DBG_88E("%s: Wait for FW LPS leave more than %u ms!!!\n", __func__, delay_ms);
break;
}
- rtw_usleep_os(100);
+ msleep(1);
}
return err;
@@ -522,17 +522,6 @@ _func_enter_;
_func_exit_;
}
-void rtw_free_pwrctrl_priv(struct adapter *adapter)
-{
- struct pwrctrl_priv *pwrctrlpriv = &adapter->pwrctrlpriv;
-
-_func_enter_;
-
- _free_pwrlock(&pwrctrlpriv->lock);
-
-_func_exit_;
-}
-
u8 rtw_interface_ps_func(struct adapter *padapter, enum hal_intf_ps_func efunc_id, u8 *val)
{
u8 bResult = true;
@@ -545,7 +534,7 @@ u8 rtw_interface_ps_func(struct adapter *padapter, enum hal_intf_ps_func efunc_i
inline void rtw_set_ips_deny(struct adapter *padapter, u32 ms)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- pwrpriv->ips_deny_time = rtw_get_current_time() + rtw_ms_to_systime(ms);
+ pwrpriv->ips_deny_time = jiffies + rtw_ms_to_systime(ms);
}
/*
@@ -561,15 +550,15 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int ret = _SUCCESS;
- if (pwrpriv->ips_deny_time < rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms))
- pwrpriv->ips_deny_time = rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms);
+ if (pwrpriv->ips_deny_time < jiffies + rtw_ms_to_systime(ips_deffer_ms))
+ pwrpriv->ips_deny_time = jiffies + rtw_ms_to_systime(ips_deffer_ms);
{
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
if (pwrpriv->ps_processing) {
DBG_88E("%s wait ps_processing...\n", __func__);
while (pwrpriv->ps_processing && rtw_get_passing_time_ms(start) <= 3000)
- rtw_msleep_os(10);
+ msleep(10);
if (pwrpriv->ps_processing)
DBG_88E("%s wait ps_processing timeout\n", __func__);
else
@@ -616,8 +605,8 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
}
exit:
- if (pwrpriv->ips_deny_time < rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms))
- pwrpriv->ips_deny_time = rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms);
+ if (pwrpriv->ips_deny_time < jiffies + rtw_ms_to_systime(ips_deffer_ms))
+ pwrpriv->ips_deny_time = jiffies + rtw_ms_to_systime(ips_deffer_ms);
return ret;
}
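
Aside (not part of the patch): the rtw_pwrctrl.c hunks replace rtw_get_current_time() with jiffies for the IPS-deny and LPS deadlines while keeping the existing raw ">="/"<" comparisons. The wrap-safe form for such deadline checks is time_after()/time_before(); a sketch with placeholder names:

/* Sketch only: wrap-safe jiffies deadline handling. */
#include <linux/jiffies.h>

static unsigned long demo_deny_until;

static void demo_set_deny(unsigned int ms)
{
	demo_deny_until = jiffies + msecs_to_jiffies(ms);
}

static bool demo_deny_expired(void)
{
	/* time_after() copes with jiffies wrap-around, unlike a raw comparison */
	return time_after(jiffies, demo_deny_until);
}
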
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 9f0f30f7..c9c1806 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -49,7 +49,7 @@ _func_enter_;
_rtw_memset((u8 *)psta_recvpriv, 0, sizeof (struct sta_recv_priv));
- _rtw_spinlock_init(&psta_recvpriv->lock);
+ spin_lock_init(&psta_recvpriv->lock);
_rtw_init_queue(&psta_recvpriv->defrag_q);
@@ -65,7 +65,7 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
int res = _SUCCESS;
_func_enter_;
- _rtw_spinlock_init(&precvpriv->lock);
+ spin_lock_init(&precvpriv->lock);
_rtw_init_queue(&precvpriv->free_recv_queue);
_rtw_init_queue(&precvpriv->recv_pending_queue);
@@ -102,7 +102,7 @@ _func_enter_;
}
precvpriv->rx_pending_cnt = 1;
- _rtw_init_sema(&precvpriv->allrxreturnevt, 0);
+ sema_init(&precvpriv->allrxreturnevt, 0);
res = rtw_hal_init_recv_priv(padapter);
@@ -118,15 +118,6 @@ _func_exit_;
return res;
}
-static void rtw_mfree_recv_priv_lock(struct recv_priv *precvpriv)
-{
- _rtw_spinlock_free(&precvpriv->lock);
- _rtw_spinlock_free(&precvpriv->free_recv_queue.lock);
- _rtw_spinlock_free(&precvpriv->recv_pending_queue.lock);
-
- _rtw_spinlock_free(&precvpriv->free_recv_buf_queue.lock);
-}
-
void _rtw_free_recv_priv (struct recv_priv *precvpriv)
{
struct adapter *padapter = precvpriv->adapter;
@@ -135,8 +126,6 @@ _func_enter_;
rtw_free_uc_swdec_pending_queue(padapter);
- rtw_mfree_recv_priv_lock(precvpriv);
-
rtw_os_recv_resource_free(precvpriv);
if (precvpriv->pallocated_frame_buf) {
@@ -181,14 +170,13 @@ _func_exit_;
union recv_frame *rtw_alloc_recvframe (struct __queue *pfree_recv_queue)
{
- unsigned long irqL;
union recv_frame *precvframe;
- _enter_critical_bh(&pfree_recv_queue->lock, &irqL);
+ spin_lock_bh(&pfree_recv_queue->lock);
precvframe = _rtw_alloc_recvframe(pfree_recv_queue);
- _exit_critical_bh(&pfree_recv_queue->lock, &irqL);
+ spin_unlock_bh(&pfree_recv_queue->lock);
return precvframe;
}
@@ -203,7 +191,6 @@ void rtw_init_recvframe(union recv_frame *precvframe, struct recv_priv *precvpri
int rtw_free_recvframe(union recv_frame *precvframe, struct __queue *pfree_recv_queue)
{
- unsigned long irqL;
struct adapter *padapter;
struct recv_priv *precvpriv;
@@ -217,7 +204,7 @@ _func_enter_;
precvframe->u.hdr.pkt = NULL;
}
- _enter_critical_bh(&pfree_recv_queue->lock, &irqL);
+ spin_lock_bh(&pfree_recv_queue->lock);
rtw_list_delete(&(precvframe->u.hdr.list));
@@ -230,7 +217,7 @@ _func_enter_;
precvpriv->free_recvframe_cnt++;
}
- _exit_critical_bh(&pfree_recv_queue->lock, &irqL);
+ spin_unlock_bh(&pfree_recv_queue->lock);
_func_exit_;
@@ -260,11 +247,10 @@ _func_exit_;
int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
{
int ret;
- unsigned long irqL;
- _enter_critical_bh(&queue->lock, &irqL);
+ spin_lock_bh(&queue->lock);
ret = _rtw_enqueue_recvframe(precvframe, queue);
- _exit_critical_bh(&queue->lock, &irqL);
+ spin_unlock_bh(&queue->lock);
return ret;
}
@@ -316,14 +302,12 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
int rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queue)
{
- unsigned long irqL;
-
- _enter_critical_bh(&queue->lock, &irqL);
+ spin_lock_bh(&queue->lock);
rtw_list_delete(&precvbuf->list);
rtw_list_insert_head(&precvbuf->list, get_list_head(queue));
- _exit_critical_bh(&queue->lock, &irqL);
+ spin_unlock_bh(&queue->lock);
return _SUCCESS;
}
@@ -331,12 +315,12 @@ int rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queue
int rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue)
{
unsigned long irqL;
- _enter_critical_ex(&queue->lock, &irqL);
+ spin_lock_irqsave(&queue->lock, irqL);
rtw_list_delete(&precvbuf->list);
rtw_list_insert_tail(&precvbuf->list, get_list_head(queue));
- _exit_critical_ex(&queue->lock, &irqL);
+ spin_unlock_irqrestore(&queue->lock, irqL);
return _SUCCESS;
}
@@ -346,7 +330,7 @@ struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue)
struct recv_buf *precvbuf;
struct list_head *plist, *phead;
- _enter_critical_ex(&queue->lock, &irqL);
+ spin_lock_irqsave(&queue->lock, irqL);
if (_rtw_queue_empty(queue)) {
precvbuf = NULL;
@@ -360,7 +344,7 @@ struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue)
rtw_list_delete(&precvbuf->list);
}
- _exit_critical_ex(&queue->lock, &irqL);
+ spin_unlock_irqrestore(&queue->lock, irqL);
return precvbuf;
}
@@ -1108,11 +1092,10 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
}
if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) {
- unsigned long irqL;
struct list_head *xmitframe_plist, *xmitframe_phead;
struct xmit_frame *pxmitframe = NULL;
- _enter_critical_bh(&psta->sleep_q.lock, &irqL);
+ spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
xmitframe_plist = get_next(xmitframe_phead);
@@ -1133,10 +1116,10 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
pxmitframe->attrib.triggered = 1;
- _exit_critical_bh(&psta->sleep_q.lock, &irqL);
+ spin_unlock_bh(&psta->sleep_q.lock);
if (rtw_hal_xmit(padapter, pxmitframe) == true)
rtw_os_xmit_complete(padapter, pxmitframe);
- _enter_critical_bh(&psta->sleep_q.lock, &irqL);
+ spin_lock_bh(&psta->sleep_q.lock);
if (psta->sleepq_len == 0) {
pstapriv->tim_bitmap &= ~BIT(psta->aid);
@@ -1165,7 +1148,7 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
}
}
- _exit_critical_bh(&psta->sleep_q.lock, &irqL);
+ spin_unlock_bh(&psta->sleep_q.lock);
}
}
@@ -1943,7 +1926,6 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *prframe)
{
- unsigned long irql;
int retval = _SUCCESS;
struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
struct recv_reorder_ctrl *preorder_ctrl = prframe->u.hdr.preorder_ctrl;
@@ -1984,7 +1966,7 @@ static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *
}
}
- _enter_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_lock_bh(&ppending_recvframe_queue->lock);
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
("recv_indicatepkt_reorder: indicate=%d seq=%d\n",
@@ -1994,7 +1976,7 @@ static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *
if (!check_indicate_seq(preorder_ctrl, pattrib->seq_num)) {
rtw_recv_indicatepkt(padapter, prframe);
- _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
goto _success_exit;
}
@@ -2016,9 +1998,9 @@ static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *
/* recv_indicatepkts_in_order(padapter, preorder_ctrl, true); */
if (recv_indicatepkts_in_order(padapter, preorder_ctrl, false)) {
_set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
- _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
} else {
- _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
_cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
}
@@ -2028,14 +2010,13 @@ _success_exit:
_err_exit:
- _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
return _FAIL;
}
void rtw_reordering_ctrl_timeout_handler(void *pcontext)
{
- unsigned long irql;
struct recv_reorder_ctrl *preorder_ctrl = (struct recv_reorder_ctrl *)pcontext;
struct adapter *padapter = preorder_ctrl->padapter;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
@@ -2043,12 +2024,12 @@ void rtw_reordering_ctrl_timeout_handler(void *pcontext)
if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
return;
- _enter_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_lock_bh(&ppending_recvframe_queue->lock);
if (recv_indicatepkts_in_order(padapter, preorder_ctrl, true) == true)
_set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
- _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
}
static int process_recv_indicatepkts(struct adapter *padapter, union recv_frame *prframe)
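
Aside (not part of the patch): rtw_recv.c keeps the saved-flags form where the old code used _enter_critical_ex()/_exit_critical_ex(), converting those call sites (the recv_buf queue helpers) to spin_lock_irqsave()/spin_unlock_irqrestore(), while the softirq-only paths use spin_lock_bh() as elsewhere. A sketch of the irqsave variant with placeholder types:

/* Sketch only: irqsave locking for a queue that may be reached from IRQ context. */
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_queue {
	spinlock_t lock;
	struct list_head head;
};

static void demo_queue_init(struct demo_queue *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->head);
}

static void demo_enqueue(struct demo_queue *q, struct list_head *item)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);	/* disables local IRQs, saves prior state */
	list_add_tail(item, &q->head);
	spin_unlock_irqrestore(&q->lock, flags);
}
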
diff --git a/drivers/staging/rtl8188eu/core/rtw_sreset.c b/drivers/staging/rtl8188eu/core/rtw_sreset.c
index 298f754..ee20d4a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sreset.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sreset.c
@@ -25,7 +25,7 @@ void sreset_init_value(struct adapter *padapter)
struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
struct sreset_priv *psrtpriv = &pHalData->srestpriv;
- _rtw_mutex_init(&psrtpriv->silentreset_mutex);
+ mutex_init(&psrtpriv->silentreset_mutex);
psrtpriv->silent_reset_inprogress = false;
psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
psrtpriv->last_tx_time = 0;
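
Aside (not part of the patch): here, and again in rtw_xmit.c below, _rtw_mutex_init()/_rtw_mutex_free() become mutex_init()/mutex_destroy(). A sketch of that pairing with a placeholder context struct:

/* Sketch only: dynamic mutex setup/teardown as used by the converted code. */
#include <linux/mutex.h>

struct demo_ctx {
	struct mutex lock;
};

static void demo_ctx_init(struct demo_ctx *c)
{
	mutex_init(&c->lock);
}

static void demo_ctx_fini(struct demo_ctx *c)
{
	mutex_destroy(&c->lock);	/* debugging aid; a no-op unless CONFIG_DEBUG_MUTEXES is set */
}
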
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index cd3c9a7..02e1e1f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -31,7 +31,7 @@ static void _rtw_init_stainfo(struct sta_info *psta)
_func_enter_;
_rtw_memset((u8 *)psta, 0, sizeof (struct sta_info));
- _rtw_spinlock_init(&psta->lock);
+ spin_lock_init(&psta->lock);
_rtw_init_listhead(&psta->list);
_rtw_init_listhead(&psta->hash_list);
_rtw_init_queue(&psta->sleep_q);
@@ -89,7 +89,7 @@ _func_enter_;
_rtw_init_queue(&pstapriv->free_sta_queue);
- _rtw_spinlock_init(&pstapriv->sta_hash_lock);
+ spin_lock_init(&pstapriv->sta_hash_lock);
pstapriv->asoc_sta_count = 0;
_rtw_init_queue(&pstapriv->sleep_q);
@@ -114,8 +114,8 @@ _func_enter_;
_rtw_init_listhead(&pstapriv->asoc_list);
_rtw_init_listhead(&pstapriv->auth_list);
- _rtw_spinlock_init(&pstapriv->asoc_list_lock);
- _rtw_spinlock_init(&pstapriv->auth_list_lock);
+ spin_lock_init(&pstapriv->asoc_list_lock);
+ spin_lock_init(&pstapriv->auth_list_lock);
pstapriv->asoc_list_cnt = 0;
pstapriv->auth_list_cnt = 0;
@@ -148,56 +148,15 @@ inline struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int
return (struct sta_info *)(stapriv->pstainfo_buf + offset * sizeof(struct sta_info));
}
-void _rtw_free_sta_xmit_priv_lock(struct sta_xmit_priv *psta_xmitpriv);
-void _rtw_free_sta_xmit_priv_lock(struct sta_xmit_priv *psta_xmitpriv)
-{
-_func_enter_;
-
- _rtw_spinlock_free(&psta_xmitpriv->lock);
-
- _rtw_spinlock_free(&(psta_xmitpriv->be_q.sta_pending.lock));
- _rtw_spinlock_free(&(psta_xmitpriv->bk_q.sta_pending.lock));
- _rtw_spinlock_free(&(psta_xmitpriv->vi_q.sta_pending.lock));
- _rtw_spinlock_free(&(psta_xmitpriv->vo_q.sta_pending.lock));
-_func_exit_;
-}
-
-static void _rtw_free_sta_recv_priv_lock(struct sta_recv_priv *psta_recvpriv)
-{
-_func_enter_;
-
- _rtw_spinlock_free(&psta_recvpriv->lock);
-
- _rtw_spinlock_free(&(psta_recvpriv->defrag_q.lock));
-
-_func_exit_;
-}
-
-void rtw_mfree_stainfo(struct sta_info *psta);
-void rtw_mfree_stainfo(struct sta_info *psta)
-{
-_func_enter_;
-
- if (&psta->lock != NULL)
- _rtw_spinlock_free(&psta->lock);
-
- _rtw_free_sta_xmit_priv_lock(&psta->sta_xmitpriv);
- _rtw_free_sta_recv_priv_lock(&psta->sta_recvpriv);
-
-_func_exit_;
-}
-
/* this function is used to free the memory of lock || sema for all stainfos */
-void rtw_mfree_all_stainfo(struct sta_priv *pstapriv);
-void rtw_mfree_all_stainfo(struct sta_priv *pstapriv)
+static void rtw_mfree_all_stainfo(struct sta_priv *pstapriv)
{
- unsigned long irql;
struct list_head *plist, *phead;
struct sta_info *psta = NULL;
_func_enter_;
- _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
phead = get_list_head(&pstapriv->free_sta_queue);
plist = get_next(phead);
@@ -205,39 +164,20 @@ _func_enter_;
while ((rtw_end_of_queue_search(phead, plist)) == false) {
psta = LIST_CONTAINOR(plist, struct sta_info , list);
plist = get_next(plist);
-
- rtw_mfree_stainfo(psta);
}
- _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
_func_exit_;
}
static void rtw_mfree_sta_priv_lock(struct sta_priv *pstapriv)
{
-#ifdef CONFIG_88EU_AP_MODE
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
-#endif
-
rtw_mfree_all_stainfo(pstapriv); /* be done before free sta_hash_lock */
-
- _rtw_spinlock_free(&pstapriv->free_sta_queue.lock);
-
- _rtw_spinlock_free(&pstapriv->sta_hash_lock);
- _rtw_spinlock_free(&pstapriv->wakeup_q.lock);
- _rtw_spinlock_free(&pstapriv->sleep_q.lock);
-
-#ifdef CONFIG_88EU_AP_MODE
- _rtw_spinlock_free(&pstapriv->asoc_list_lock);
- _rtw_spinlock_free(&pstapriv->auth_list_lock);
- _rtw_spinlock_free(&pacl_list->acl_node_q.lock);
-#endif
}
u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
{
- unsigned long irql;
struct list_head *phead, *plist;
struct sta_info *psta = NULL;
struct recv_reorder_ctrl *preorder_ctrl;
@@ -246,7 +186,7 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
_func_enter_;
if (pstapriv) {
/* delete all reordering_ctrl_timer */
- _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
for (index = 0; index < NUM_STA; index++) {
phead = &(pstapriv->sta_hash[index]);
plist = get_next(phead);
@@ -262,7 +202,7 @@ _func_enter_;
}
}
}
- _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
/*===============================*/
rtw_mfree_sta_priv_lock(pstapriv);
@@ -277,7 +217,6 @@ _func_exit_;
struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
- unsigned long irql, irql2;
s32 index;
struct list_head *phash_list;
struct sta_info *psta;
@@ -290,15 +229,15 @@ _func_enter_;
pfree_sta_queue = &pstapriv->free_sta_queue;
- _enter_critical_bh(&(pfree_sta_queue->lock), &irql);
+ spin_lock_bh(&(pfree_sta_queue->lock));
if (_rtw_queue_empty(pfree_sta_queue) == true) {
- _exit_critical_bh(&(pfree_sta_queue->lock), &irql);
+ spin_unlock_bh(&pfree_sta_queue->lock);
psta = NULL;
} else {
psta = LIST_CONTAINOR(get_next(&pfree_sta_queue->queue), struct sta_info, list);
rtw_list_delete(&(psta->list));
- _exit_critical_bh(&(pfree_sta_queue->lock), &irql);
+ spin_unlock_bh(&pfree_sta_queue->lock);
_rtw_init_stainfo(psta);
memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
index = wifi_mac_hash(hwaddr);
@@ -310,13 +249,13 @@ _func_enter_;
}
phash_list = &(pstapriv->sta_hash[index]);
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_list_insert_tail(&psta->hash_list, phash_list);
pstapriv->asoc_sta_count++;
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
/* Commented by Albert 2009/08/13 */
/* For the SMC router, the sequence number of first packet of WPS handshake will be 0. */
@@ -368,7 +307,6 @@ _func_exit_;
u32 rtw_free_stainfo(struct adapter *padapter , struct sta_info *psta)
{
int i;
- unsigned long irql0;
struct __queue *pfree_sta_queue;
struct recv_reorder_ctrl *preorder_ctrl;
struct sta_xmit_priv *pstaxmitpriv;
@@ -384,7 +322,7 @@ _func_enter_;
pstaxmitpriv = &psta->sta_xmitpriv;
- _enter_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_lock_bh(&pxmitpriv->lock);
rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
psta->sleepq_len = 0;
@@ -405,7 +343,7 @@ _func_enter_;
rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending));
- _exit_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_unlock_bh(&pxmitpriv->lock);
rtw_list_delete(&psta->hash_list);
RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("\n free number_%d stainfo with hwaddr=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", pstapriv->asoc_sta_count , psta->hwaddr[0], psta->hwaddr[1], psta->hwaddr[2], psta->hwaddr[3], psta->hwaddr[4], psta->hwaddr[5]));
@@ -419,7 +357,6 @@ _func_enter_;
/* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
for (i = 0; i < 16; i++) {
- unsigned long irql;
struct list_head *phead, *plist;
union recv_frame *prframe;
struct __queue *ppending_recvframe_queue;
@@ -431,7 +368,7 @@ _func_enter_;
ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
- _enter_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_lock_bh(&ppending_recvframe_queue->lock);
phead = get_list_head(ppending_recvframe_queue);
plist = get_next(phead);
@@ -446,7 +383,7 @@ _func_enter_;
rtw_free_recvframe(prframe, pfree_recv_queue);
}
- _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
}
if (!(psta->state & WIFI_AP_STATE))
@@ -454,12 +391,12 @@ _func_enter_;
#ifdef CONFIG_88EU_AP_MODE
- _enter_critical_bh(&pstapriv->auth_list_lock, &irql0);
+ spin_lock_bh(&pstapriv->auth_list_lock);
if (!rtw_is_list_empty(&psta->auth_list)) {
rtw_list_delete(&psta->auth_list);
pstapriv->auth_list_cnt--;
}
- _exit_critical_bh(&pstapriv->auth_list_lock, &irql0);
+ spin_unlock_bh(&pstapriv->auth_list_lock);
psta->expire_to = 0;
@@ -485,9 +422,9 @@ _func_enter_;
#endif /* CONFIG_88EU_AP_MODE */
- _enter_critical_bh(&(pfree_sta_queue->lock), &irql0);
+ spin_lock_bh(&(pfree_sta_queue->lock));
rtw_list_insert_tail(&psta->list, get_list_head(pfree_sta_queue));
- _exit_critical_bh(&(pfree_sta_queue->lock), &irql0);
+ spin_unlock_bh(&pfree_sta_queue->lock);
exit:
@@ -499,7 +436,6 @@ _func_exit_;
/* free all stainfo which in sta_hash[all] */
void rtw_free_all_stainfo(struct adapter *padapter)
{
- unsigned long irql;
struct list_head *plist, *phead;
s32 index;
struct sta_info *psta = NULL;
@@ -511,7 +447,7 @@ _func_enter_;
if (pstapriv->asoc_sta_count == 1)
goto exit;
- _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
for (index = 0; index < NUM_STA; index++) {
phead = &(pstapriv->sta_hash[index]);
@@ -527,7 +463,7 @@ _func_enter_;
}
}
- _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
exit:
@@ -537,7 +473,6 @@ _func_exit_;
/* any station allocated can be searched by hash list */
struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
- unsigned long irql;
struct list_head *plist, *phead;
struct sta_info *psta = NULL;
u32 index;
@@ -556,7 +491,7 @@ _func_enter_;
index = wifi_mac_hash(addr);
- _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
phead = &(pstapriv->sta_hash[index]);
plist = get_next(phead);
@@ -572,7 +507,7 @@ _func_enter_;
plist = get_next(plist);
}
- _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
_func_exit_;
return psta;
}
@@ -617,7 +552,6 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
{
u8 res = true;
#ifdef CONFIG_88EU_AP_MODE
- unsigned long irql;
struct list_head *plist, *phead;
struct rtw_wlan_acl_node *paclnode;
u8 match = false;
@@ -625,7 +559,7 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
- _enter_critical_bh(&(pacl_node_q->lock), &irql);
+ spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
plist = get_next(phead);
while ((!rtw_end_of_queue_search(phead, plist))) {
@@ -639,7 +573,7 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
}
}
}
- _exit_critical_bh(&(pacl_node_q->lock), &irql);
+ spin_unlock_bh(&pacl_node_q->lock);
if (pacl_list->mode == 1)/* accept unless in deny list */
res = (match) ? false : true;
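
Aside (not part of the patch): rtw_sta_mgt.c, like rtw_recv.c and rtw_xmit.c, swaps _rtw_spinlock_init() for spin_lock_init() and deletes the matching lock-freeing helpers outright: a kernel spinlock needs no teardown, so rtw_mfree_stainfo() and the other *_lock free routines become dead code. A sketch with a placeholder struct:

/* Sketch only: spinlock initialisation; there is no corresponding "free" step. */
#include <linux/spinlock.h>

struct demo_sta {
	spinlock_t lock;
};

static void demo_sta_init(struct demo_sta *s)
{
	spin_lock_init(&s->lock);	/* runtime init for a dynamically placed lock */
	/* no spin_lock_destroy() exists -- nothing to release on teardown */
}
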
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index a594e51..24182fb 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -43,7 +43,7 @@ void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
{
_func_enter_;
_rtw_memset((unsigned char *)psta_xmitpriv, 0, sizeof (struct sta_xmit_priv));
- _rtw_spinlock_init(&psta_xmitpriv->lock);
+ spin_lock_init(&psta_xmitpriv->lock);
_init_txservq(&psta_xmitpriv->be_q);
_init_txservq(&psta_xmitpriv->bk_q);
_init_txservq(&psta_xmitpriv->vi_q);
@@ -67,9 +67,9 @@ _func_enter_;
/* We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc(). */
- _rtw_spinlock_init(&pxmitpriv->lock);
- _rtw_init_sema(&pxmitpriv->xmit_sema, 0);
- _rtw_init_sema(&pxmitpriv->terminate_xmitthread_sema, 0);
+ spin_lock_init(&pxmitpriv->lock);
+ sema_init(&pxmitpriv->xmit_sema, 0);
+ sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
/*
Please insert all the queue initializaiton using _rtw_init_queue below
@@ -153,7 +153,7 @@ _func_enter_;
/* Tx buf allocation may fail sometimes, so sleep and retry. */
res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
if (res == _FAIL) {
- rtw_msleep_os(10);
+ msleep(10);
res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
if (res == _FAIL) {
goto exit;
@@ -210,7 +210,7 @@ _func_enter_;
pxmitpriv->txirp_cnt = 1;
- _rtw_init_sema(&(pxmitpriv->tx_retevt), 0);
+ sema_init(&(pxmitpriv->tx_retevt), 0);
/* per AC pending irp */
pxmitpriv->beq_cnt = 0;
@@ -219,7 +219,7 @@ _func_enter_;
pxmitpriv->voq_cnt = 0;
pxmitpriv->ack_tx = false;
- _rtw_mutex_init(&pxmitpriv->ack_tx_mutex);
+ mutex_init(&pxmitpriv->ack_tx_mutex);
rtw_sctx_init(&pxmitpriv->ack_tx_ops, 0);
rtw_hal_init_xmit_priv(padapter);
@@ -231,23 +231,6 @@ _func_exit_;
return res;
}
-static void rtw_mfree_xmit_priv_lock (struct xmit_priv *pxmitpriv)
-{
- _rtw_spinlock_free(&pxmitpriv->lock);
- _rtw_free_sema(&pxmitpriv->xmit_sema);
- _rtw_free_sema(&pxmitpriv->terminate_xmitthread_sema);
-
- _rtw_spinlock_free(&pxmitpriv->be_pending.lock);
- _rtw_spinlock_free(&pxmitpriv->bk_pending.lock);
- _rtw_spinlock_free(&pxmitpriv->vi_pending.lock);
- _rtw_spinlock_free(&pxmitpriv->vo_pending.lock);
- _rtw_spinlock_free(&pxmitpriv->bm_pending.lock);
-
- _rtw_spinlock_free(&pxmitpriv->free_xmit_queue.lock);
- _rtw_spinlock_free(&pxmitpriv->free_xmitbuf_queue.lock);
- _rtw_spinlock_free(&pxmitpriv->pending_xmitbuf_queue.lock);
-}
-
void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
{
int i;
@@ -261,8 +244,6 @@ void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
rtw_hal_free_xmit_priv(padapter);
- rtw_mfree_xmit_priv_lock(pxmitpriv);
-
if (pxmitpriv->pxmit_frame_buf == NULL)
goto out;
@@ -284,8 +265,6 @@ void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
rtw_vmfree(pxmitpriv->pallocated_xmitbuf, NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
/* free xmit extension buff */
- _rtw_spinlock_free(&pxmitpriv->free_xmit_extbuf_queue.lock);
-
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
for (i = 0; i < num_xmit_extbuf; i++) {
rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
@@ -298,7 +277,7 @@ void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
rtw_free_hwxmits(padapter);
- _rtw_mutex_free(&pxmitpriv->ack_tx_mutex);
+ mutex_destroy(&pxmitpriv->ack_tx_mutex);
out:
@@ -685,7 +664,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
_func_enter_;
- hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);;
+ hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
if (pattrib->encrypt == _TKIP_) {/* if (psecuritypriv->dot11PrivacyAlgrthm == _TKIP_PRIVACY_) */
/* encode mic code */
@@ -704,7 +683,7 @@ _func_enter_;
} else {
if (_rtw_memcmp(&stainfo->dot11tkiptxmickey.skey[0], null_key, 16) == true) {
/* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */
- /* rtw_msleep_os(10); */
+ /* msleep(10); */
return _FAIL;
}
/* start to calculate the mic code */
@@ -827,7 +806,7 @@ s32 rtw_make_wlanhdr (struct adapter *padapter , u8 *hdr, struct pkt_attrib *pat
u8 qos_option = false;
int res = _SUCCESS;
- u16 *fctrl = &pwlanhdr->frame_ctl;
+ __le16 *fctrl = &pwlanhdr->frame_ctl;
struct sta_info *psta;
@@ -1273,7 +1252,7 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
_func_enter_;
- _enter_critical(&pfree_queue->lock, &irql);
+ spin_lock_irqsave(&pfree_queue->lock, irql);
if (_rtw_queue_empty(pfree_queue) == true) {
pxmitbuf = NULL;
@@ -1299,7 +1278,7 @@ _func_enter_;
}
}
- _exit_critical(&pfree_queue->lock, &irql);
+ spin_unlock_irqrestore(&pfree_queue->lock, irql);
_func_exit_;
@@ -1316,14 +1295,14 @@ _func_enter_;
if (pxmitbuf == NULL)
return _FAIL;
- _enter_critical(&pfree_queue->lock, &irql);
+ spin_lock_irqsave(&pfree_queue->lock, irql);
rtw_list_delete(&pxmitbuf->list);
rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(pfree_queue));
pxmitpriv->free_xmit_extbuf_cnt++;
- _exit_critical(&pfree_queue->lock, &irql);
+ spin_unlock_irqrestore(&pfree_queue->lock, irql);
_func_exit_;
@@ -1341,7 +1320,7 @@ _func_enter_;
/* DBG_88E("+rtw_alloc_xmitbuf\n"); */
- _enter_critical(&pfree_xmitbuf_queue->lock, &irql);
+ spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irql);
if (_rtw_queue_empty(pfree_xmitbuf_queue) == true) {
pxmitbuf = NULL;
@@ -1363,7 +1342,7 @@ _func_enter_;
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
}
}
- _exit_critical(&pfree_xmitbuf_queue->lock, &irql);
+ spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql);
_func_exit_;
@@ -1387,14 +1366,14 @@ _func_enter_;
if (pxmitbuf->ext_tag) {
rtw_free_xmitbuf_ext(pxmitpriv, pxmitbuf);
} else {
- _enter_critical(&pfree_xmitbuf_queue->lock, &irql);
+ spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irql);
rtw_list_delete(&pxmitbuf->list);
rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(pfree_xmitbuf_queue));
pxmitpriv->free_xmitbuf_cnt++;
- _exit_critical(&pfree_xmitbuf_queue->lock, &irql);
+ spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql);
}
_func_exit_;
@@ -1422,14 +1401,13 @@ struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pf
pfree_xmit_queue
*/
- unsigned long irql;
struct xmit_frame *pxframe = NULL;
struct list_head *plist, *phead;
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
_func_enter_;
- _enter_critical_bh(&pfree_xmit_queue->lock, &irql);
+ spin_lock_bh(&pfree_xmit_queue->lock);
if (_rtw_queue_empty(pfree_xmit_queue) == true) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe:%d\n", pxmitpriv->free_xmitframe_cnt));
@@ -1464,7 +1442,7 @@ _func_enter_;
pxframe->ack_report = 0;
}
- _exit_critical_bh(&pfree_xmit_queue->lock, &irql);
+ spin_unlock_bh(&pfree_xmit_queue->lock);
_func_exit_;
@@ -1473,7 +1451,6 @@ _func_exit_;
s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe)
{
- unsigned long irql;
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
struct adapter *padapter = pxmitpriv->adapter;
struct sk_buff *pndis_pkt = NULL;
@@ -1485,7 +1462,7 @@ _func_enter_;
goto exit;
}
- _enter_critical_bh(&pfree_xmit_queue->lock, &irql);
+ spin_lock_bh(&pfree_xmit_queue->lock);
rtw_list_delete(&pxmitframe->list);
@@ -1499,7 +1476,7 @@ _func_enter_;
pxmitpriv->free_xmitframe_cnt++;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_debug_, ("rtw_free_xmitframe():free_xmitframe_cnt=%d\n", pxmitpriv->free_xmitframe_cnt));
- _exit_critical_bh(&pfree_xmit_queue->lock, &irql);
+ spin_unlock_bh(&pfree_xmit_queue->lock);
if (pndis_pkt)
rtw_os_pkt_complete(padapter, pndis_pkt);
@@ -1513,13 +1490,12 @@ _func_exit_;
void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pframequeue)
{
- unsigned long irql;
struct list_head *plist, *phead;
struct xmit_frame *pxmitframe;
_func_enter_;
- _enter_critical_bh(&(pframequeue->lock), &irql);
+ spin_lock_bh(&(pframequeue->lock));
phead = get_list_head(pframequeue);
plist = get_next(phead);
@@ -1531,7 +1507,7 @@ _func_enter_;
rtw_free_xmitframe(pxmitpriv, pxmitframe);
}
- _exit_critical_bh(&(pframequeue->lock), &irql);
+ spin_unlock_bh(&(pframequeue->lock));
_func_exit_;
}
@@ -1570,7 +1546,6 @@ static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, str
struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i, int entry)
{
- unsigned long irql0;
struct list_head *sta_plist, *sta_phead;
struct hw_xmit *phwxmit;
struct tx_servq *ptxservq = NULL;
@@ -1591,7 +1566,7 @@ _func_enter_;
inx[j] = pxmitpriv->wmm_para_seq[j];
}
- _enter_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_lock_bh(&pxmitpriv->lock);
for (i = 0; i < entry; i++) {
phwxmit = phwxmit_i + inx[i];
@@ -1619,7 +1594,7 @@ _func_enter_;
}
}
exit:
- _exit_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_unlock_bh(&pxmitpriv->lock);
_func_exit_;
return pxmitframe;
}
@@ -1668,7 +1643,6 @@ _func_exit_;
*/
s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
- /* unsigned long irql0; */
u8 ac_index;
struct sta_info *psta;
struct tx_servq *ptxservq;
@@ -1754,7 +1728,6 @@ _func_exit_;
static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
- unsigned long irql;
int res, is_vlan_tag = 0, i, do_nat25 = 1;
unsigned short vlan_hdr = 0;
void *br_port = NULL;
@@ -1762,7 +1735,7 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
rcu_read_lock();
br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
rcu_read_unlock();
- _enter_critical_bh(&padapter->br_ext_lock, &irql);
+ spin_lock_bh(&padapter->br_ext_lock);
if (!(skb->data[0] & 1) && br_port &&
memcmp(skb->data+MACADDRLEN, padapter->br_mac, MACADDRLEN) &&
*((__be16 *)(skb->data+MACADDRLEN*2)) != __constant_htons(ETH_P_8021Q) &&
@@ -1770,7 +1743,7 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
!memcmp(padapter->scdb_mac, skb->data+MACADDRLEN, MACADDRLEN) && padapter->scdb_entry) {
memcpy(skb->data+MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
padapter->scdb_entry->ageing_timer = jiffies;
- _exit_critical_bh(&padapter->br_ext_lock, &irql);
+ spin_unlock_bh(&padapter->br_ext_lock);
} else {
if (*((__be16 *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_8021Q)) {
is_vlan_tag = 1;
@@ -1803,7 +1776,7 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
}
}
}
- _exit_critical_bh(&padapter->br_ext_lock, &irql);
+ spin_unlock_bh(&padapter->br_ext_lock);
if (do_nat25) {
if (nat25_db_handle(padapter, skb, NAT25_CHECK) == 0) {
struct sk_buff *newskb;
@@ -1930,9 +1903,6 @@ static void do_queue_select(struct adapter *padapter, struct pkt_attrib *pattrib
*/
s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
{
-#ifdef CONFIG_88EU_AP_MODE
- unsigned long irql0;
-#endif
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct xmit_frame *pxmitframe = NULL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1972,12 +1942,12 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
do_queue_select(padapter, &pxmitframe->attrib);
#ifdef CONFIG_88EU_AP_MODE
- _enter_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_lock_bh(&pxmitpriv->lock);
if (xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe)) {
- _exit_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_unlock_bh(&pxmitpriv->lock);
return 1;
}
- _exit_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_unlock_bh(&pxmitpriv->lock);
#endif
if (rtw_hal_xmit(padapter, pxmitframe) == false)
@@ -1990,7 +1960,6 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
- unsigned long irql;
int ret = false;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -2016,7 +1985,7 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
}
if (bmcst) {
- _enter_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_lock_bh(&psta->sleep_q.lock);
if (pstapriv->sta_dz_bitmap) {/* if any one sta is in ps mode */
rtw_list_delete(&pxmitframe->list);
@@ -2033,12 +2002,12 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
ret = true;
}
- _exit_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_unlock_bh(&psta->sleep_q.lock);
return ret;
}
- _enter_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_lock_bh(&psta->sleep_q.lock);
if (psta->state&WIFI_SLEEP_STATE) {
u8 wmmps_ac = 0;
@@ -2086,7 +2055,7 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
}
}
- _exit_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_unlock_bh(&psta->sleep_q.lock);
return ret;
}
@@ -2121,7 +2090,6 @@ static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struc
void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
{
- unsigned long irql0;
struct sta_info *psta_bmc;
struct sta_xmit_priv *pstaxmitpriv;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -2132,7 +2100,7 @@ void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
/* for BC/MC Frames */
psta_bmc = rtw_get_bcmc_stainfo(padapter);
- _enter_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_lock_bh(&pxmitpriv->lock);
psta->state |= WIFI_SLEEP_STATE;
@@ -2155,19 +2123,18 @@ void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending);
rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending));
- _exit_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_unlock_bh(&pxmitpriv->lock);
}
void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
{
- unsigned long irql;
u8 update_mask = 0, wmmps_ac = 0;
struct sta_info *psta_bmc;
struct list_head *xmitframe_plist, *xmitframe_phead;
struct xmit_frame *pxmitframe = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- _enter_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
xmitframe_plist = get_next(xmitframe_phead);
@@ -2218,10 +2185,10 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
pxmitframe->attrib.triggered = 1;
- _exit_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_unlock_bh(&psta->sleep_q.lock);
if (rtw_hal_xmit(padapter, pxmitframe))
rtw_os_xmit_complete(padapter, pxmitframe);
- _enter_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_lock_bh(&psta->sleep_q.lock);
}
if (psta->sleepq_len == 0) {
@@ -2240,7 +2207,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
pstapriv->sta_dz_bitmap &= ~BIT(psta->aid);
}
- _exit_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_unlock_bh(&psta->sleep_q.lock);
/* for BC/MC Frames */
psta_bmc = rtw_get_bcmc_stainfo(padapter);
@@ -2248,7 +2215,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
return;
if ((pstapriv->sta_dz_bitmap&0xfffe) == 0x0) { /* no any sta in ps mode */
- _enter_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+ spin_lock_bh(&psta_bmc->sleep_q.lock);
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
xmitframe_plist = get_next(xmitframe_phead);
@@ -2268,10 +2235,10 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
pxmitframe->attrib.triggered = 1;
- _exit_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+ spin_unlock_bh(&psta_bmc->sleep_q.lock);
if (rtw_hal_xmit(padapter, pxmitframe))
rtw_os_xmit_complete(padapter, pxmitframe);
- _enter_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+ spin_lock_bh(&psta_bmc->sleep_q.lock);
}
if (psta_bmc->sleepq_len == 0) {
@@ -2281,7 +2248,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
update_mask |= BIT(1);
}
- _exit_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+ spin_unlock_bh(&psta_bmc->sleep_q.lock);
}
if (update_mask)
@@ -2290,13 +2257,12 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta)
{
- unsigned long irql;
u8 wmmps_ac = 0;
struct list_head *xmitframe_plist, *xmitframe_phead;
struct xmit_frame *pxmitframe = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- _enter_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
xmitframe_plist = get_next(xmitframe_phead);
@@ -2355,7 +2321,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
}
}
- _exit_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_unlock_bh(&psta->sleep_q.lock);
}
#endif
@@ -2363,7 +2329,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms)
{
sctx->timeout_ms = timeout_ms;
- sctx->submit_time = rtw_get_current_time();
+ sctx->submit_time = jiffies;
init_completion(&sctx->done);
sctx->status = RTW_SCTX_SUBMITTED;
}
@@ -2424,7 +2390,7 @@ int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms)
{
struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
- pack_tx_ops->submit_time = rtw_get_current_time();
+ pack_tx_ops->submit_time = jiffies;
pack_tx_ops->timeout_ms = timeout_ms;
pack_tx_ops->status = RTW_SCTX_SUBMITTED;
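
The rtw_xmit.c hunks above are one mechanical conversion: the driver's _enter_critical_bh()/_exit_critical_bh() wrappers, together with the "unsigned long irql" local they forced every caller to declare, become plain spin_lock_bh()/spin_unlock_bh(), and rtw_get_current_time() becomes a direct read of jiffies. A minimal sketch of the resulting shape (the struct and function names below are illustrative, not taken from the driver):

#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct demo_queue {
	spinlock_t lock;		/* protects the pending list and timestamp */
	unsigned long submit_time;	/* in jiffies */
};

/* spin_lock_bh() only disables bottom halves, so no saved-flags
 * variable is needed -- which is why every "unsigned long irql"
 * local could be deleted along with the wrapper macros. */
static void demo_mark_submitted(struct demo_queue *q)
{
	spin_lock_bh(&q->lock);
	q->submit_time = jiffies;	/* was rtw_get_current_time() */
	spin_unlock_bh(&q->lock);
}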
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
index aaa2617..3df33bc 100644
--- a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
@@ -529,9 +529,7 @@ ODM_RASupport_Init(
{
ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("=====>ODM_RASupport_Init()\n"));
- /* 2012/02/14 MH Be noticed, the init must be after IC type is recognized!!!!! */
- if (dm_odm->SupportICType == ODM_RTL8188E)
- dm_odm->RaSupport88E = true;
+ dm_odm->RaSupport88E = true;
}
int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 macid)
diff --git a/drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c
index 480c810..17c6411 100644
--- a/drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c
+++ b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c
@@ -211,7 +211,7 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
else if (v1 == 0xf9)
rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
else
- rtw_IOL_append_WRF_cmd(pxmit_frame, ODM_RF_PATH_A, (u16)v1, v2, bRFRegOffsetMask);
+ rtw_IOL_append_WRF_cmd(pxmit_frame, RF_PATH_A, (u16)v1, v2, bRFRegOffsetMask);
} else {
odm_ConfigRF_RadioA_8188E(pDM_Odm, v1, v2);
}
@@ -247,7 +247,7 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
else if (v1 == 0xf9)
rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
else
- rtw_IOL_append_WRF_cmd(pxmit_frame, ODM_RF_PATH_A, (u16)v1, v2, bRFRegOffsetMask);
+ rtw_IOL_append_WRF_cmd(pxmit_frame, RF_PATH_A, (u16)v1, v2, bRFRegOffsetMask);
} else {
odm_ConfigRF_RadioA_8188E(pDM_Odm, v1, v2);
}
diff --git a/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c b/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
index 8a7947d..15e8e3f 100644
--- a/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
+++ b/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
@@ -173,7 +173,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
("===>dm_TXPowerTrackingCallback_ThermalMeter_8188E txpowercontrol %d\n",
dm_odm->RFCalibrateInfo.TxPowerTrackControl));
- ThermalValue = (u8)ODM_GetRFReg(dm_odm, RF_PATH_A, RF_T_METER_88E, 0xfc00); /* 0x42: RF Reg[15:10] 88E */
+ ThermalValue = (u8)PHY_QueryRFReg(Adapter, RF_PATH_A, RF_T_METER_88E, 0xfc00); /* 0x42: RF Reg[15:10] 88E */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("Readback Thermal Meter = 0x%x pre thermal meter 0x%x EEPROMthermalmeter 0x%x\n",
@@ -186,7 +186,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
if (ThermalValue) {
/* Query OFDM path A default setting */
- ele_D = ODM_GetBBReg(dm_odm, rOFDM0_XATxIQImbalance, bMaskDWord)&bMaskOFDM_D;
+ ele_D = PHY_QueryBBReg(Adapter, rOFDM0_XATxIQImbalance, bMaskDWord)&bMaskOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { /* find the index */
if (ele_D == (OFDMSwingTable[i]&bMaskOFDM_D)) {
OFDM_index_old[0] = (u8)i;
@@ -200,7 +200,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
/* Query OFDM path B default setting */
if (is2t) {
- ele_D = ODM_GetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord)&bMaskOFDM_D;
+ ele_D = PHY_QueryBBReg(Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord)&bMaskOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { /* find the index */
if (ele_D == (OFDMSwingTable[i]&bMaskOFDM_D)) {
OFDM_index_old[1] = (u8)i;
@@ -428,17 +428,17 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
/* wtite new elements A, C, D to regC88 and regC9C, element B is always 0 */
value32 = (ele_D<<22) | ((ele_C&0x3F)<<16) | ele_A;
- ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
+ PHY_SetBBReg(Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
value32 = (ele_C&0x000003C0)>>6;
- ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, bMaskH4Bits, value32);
+ PHY_SetBBReg(Adapter, rOFDM0_XDTxAFE, bMaskH4Bits, value32);
value32 = ((X * ele_D)>>7)&0x01;
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT28, value32);
+ PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold, BIT28, value32);
} else {
- ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord, OFDMSwingTable[(u8)OFDM_index[1]]);
- ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, bMaskH4Bits, 0x00);
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT28, 0x00);
+ PHY_SetBBReg(Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, OFDMSwingTable[(u8)OFDM_index[1]]);
+ PHY_SetBBReg(Adapter, rOFDM0_XDTxAFE, bMaskH4Bits, 0x00);
+ PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold, BIT28, 0x00);
}
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
@@ -449,8 +449,8 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
- ODM_GetBBReg(dm_odm, 0xc80, bMaskDWord), ODM_GetBBReg(dm_odm,
- 0xc94, bMaskDWord), ODM_GetRFReg(dm_odm, RF_PATH_A, 0x24, bRFRegOffsetMask)));
+ PHY_QueryBBReg(Adapter, 0xc80, bMaskDWord), PHY_QueryBBReg(Adapter,
+ 0xc94, bMaskDWord), PHY_QueryRFReg(Adapter, RF_PATH_A, 0x24, bRFRegOffsetMask)));
}
}
@@ -485,33 +485,33 @@ phy_PathA_IQK_8188E(struct adapter *adapt, bool configPathB)
/* 1 Tx IQK */
/* path-A IQK setting */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A IQK setting!\n"));
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
- ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x8214032a);
- ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
+ PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
+ PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
+ PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x8214032a);
+ PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
/* LO calibration setting */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n"));
- ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
+ PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
/* One shot, path A LOK & IQK */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E));
/* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
- ODM_delay_ms(IQK_DELAY_TIME_88E);
+ mdelay(IQK_DELAY_TIME_88E);
/* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regeac = PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regeac));
- regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE94 = PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x\n", regE94));
- regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ regE9C = PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe9c = 0x%x\n", regE9C));
- regEA4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord);
+ regEA4 = PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_A_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x\n", regEA4));
if (!(regeac & BIT28) &&
@@ -533,51 +533,51 @@ phy_PathA_RxIQK(struct adapter *adapt, bool configPathB)
/* 1 Get TXIMR setting */
/* modify RXIQK mode table */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table!\n"));
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf117B);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf117B);
/* PA,PAD off */
- ODM_SetRFReg(dm_odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x980);
- ODM_SetRFReg(dm_odm, RF_PATH_A, 0x56, bRFRegOffsetMask, 0x51000);
+ PHY_SetRFReg(adapt, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x980);
+ PHY_SetRFReg(adapt, RF_PATH_A, 0x56, bRFRegOffsetMask, 0x51000);
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
/* IQK setting */
- ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, 0x01007c00);
- ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x81004800);
+ PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, 0x01007c00);
+ PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x81004800);
/* path-A IQK setting */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
- ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x82160c1f);
- ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
+ PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
+ PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
+ PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x82160c1f);
+ PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
/* LO calibration setting */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n"));
- ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
+ PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
/* One shot, path A LOK & IQK */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("Delay %d ms for One shot, path A LOK & IQK.\n",
IQK_DELAY_TIME_88E));
- ODM_delay_ms(IQK_DELAY_TIME_88E);
+ mdelay(IQK_DELAY_TIME_88E);
/* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regeac = PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xeac = 0x%x\n", regeac));
- regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE94 = PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xe94 = 0x%x\n", regE94));
- regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ regE9C = PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xe9c = 0x%x\n", regE9C));
@@ -589,55 +589,55 @@ phy_PathA_RxIQK(struct adapter *adapt, bool configPathB)
return result;
u4tmp = 0x80007C00 | (regE94&0x3FF0000) | ((regE9C&0x3FF0000) >> 16);
- ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, u4tmp);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe40 = 0x%x u4tmp = 0x%x\n", ODM_GetBBReg(dm_odm, rTx_IQK, bMaskDWord), u4tmp));
+ PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, u4tmp);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe40 = 0x%x u4tmp = 0x%x\n", PHY_QueryBBReg(adapt, rTx_IQK, bMaskDWord), u4tmp));
/* 1 RX IQK */
/* modify RXIQK mode table */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table 2!\n"));
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7ffa);
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7ffa);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
/* IQK setting */
- ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x01004800);
+ PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x01004800);
/* path-A IQK setting */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
- ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x82160c05);
- ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160c1f);
+ PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
+ PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
+ PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x82160c05);
+ PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160c1f);
/* LO calibration setting */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n"));
- ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
+ PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
/* One shot, path A LOK & IQK */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E));
/* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
- ODM_delay_ms(IQK_DELAY_TIME_88E);
+ mdelay(IQK_DELAY_TIME_88E);
/* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regeac = PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regeac));
- regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE94 = PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x\n", regE94));
- regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ regE9C = PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe9c = 0x%x\n", regE9C));
- regEA4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord);
+ regEA4 = PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_A_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x\n", regEA4));
/* reload RF 0xdf */
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x180);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ PHY_SetRFReg(adapt, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x180);
if (!(regeac & BIT27) && /* if Tx is OK, check whether Rx is OK */
(((regEA4 & 0x03FF0000)>>16) != 0x132) &&
@@ -660,29 +660,29 @@ phy_PathB_IQK_8188E(struct adapter *adapt)
/* One shot, path B LOK & IQK */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
- ODM_SetBBReg(dm_odm, rIQK_AGC_Cont, bMaskDWord, 0x00000002);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Cont, bMaskDWord, 0x00000000);
+ PHY_SetBBReg(adapt, rIQK_AGC_Cont, bMaskDWord, 0x00000002);
+ PHY_SetBBReg(adapt, rIQK_AGC_Cont, bMaskDWord, 0x00000000);
/* delay x ms */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("Delay %d ms for One shot, path B LOK & IQK.\n",
IQK_DELAY_TIME_88E));
- ODM_delay_ms(IQK_DELAY_TIME_88E);
+ mdelay(IQK_DELAY_TIME_88E);
/* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regeac = PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xeac = 0x%x\n", regeac));
- regeb4 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord);
+ regeb4 = PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_B, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xeb4 = 0x%x\n", regeb4));
- regebc = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord);
+ regebc = PHY_QueryBBReg(adapt, rTx_Power_After_IQK_B, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xebc = 0x%x\n", regebc));
- regec4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_B_2, bMaskDWord);
+ regec4 = PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_B_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xec4 = 0x%x\n", regec4));
- regecc = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_B_2, bMaskDWord);
+ regecc = PHY_QueryBBReg(adapt, rRx_Power_After_IQK_B_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xecc = 0x%x\n", regecc));
@@ -715,7 +715,7 @@ static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u
if (final_candidate == 0xFF) {
return;
} else if (iqkok) {
- Oldval_0 = (ODM_GetBBReg(dm_odm, rOFDM0_XATxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
+ Oldval_0 = (PHY_QueryBBReg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
X = result[final_candidate][0];
if ((X & 0x00000200) != 0)
@@ -724,9 +724,9 @@ static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("X = 0x%x, TX0_A = 0x%x, Oldval_0 0x%x\n",
X, TX0_A, Oldval_0));
- ODM_SetBBReg(dm_odm, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
+ PHY_SetBBReg(adapt, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(31), ((X * Oldval_0>>7) & 0x1));
+ PHY_SetBBReg(adapt, rOFDM0_ECCAThreshold, BIT(31), ((X * Oldval_0>>7) & 0x1));
Y = result[final_candidate][1];
if ((Y & 0x00000200) != 0)
@@ -734,10 +734,10 @@ static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u
TX0_C = (Y * Oldval_0) >> 8;
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Y = 0x%x, TX = 0x%x\n", Y, TX0_C));
- ODM_SetBBReg(dm_odm, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C&0x3C0)>>6));
- ODM_SetBBReg(dm_odm, rOFDM0_XATxIQImbalance, 0x003F0000, (TX0_C&0x3F));
+ PHY_SetBBReg(adapt, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C&0x3C0)>>6));
+ PHY_SetBBReg(adapt, rOFDM0_XATxIQImbalance, 0x003F0000, (TX0_C&0x3F));
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(29), ((Y * Oldval_0>>7) & 0x1));
+ PHY_SetBBReg(adapt, rOFDM0_ECCAThreshold, BIT(29), ((Y * Oldval_0>>7) & 0x1));
if (txonly) {
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("patha_fill_iqk only Tx OK\n"));
@@ -745,13 +745,13 @@ static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u
}
reg = result[final_candidate][2];
- ODM_SetBBReg(dm_odm, rOFDM0_XARxIQImbalance, 0x3FF, reg);
+ PHY_SetBBReg(adapt, rOFDM0_XARxIQImbalance, 0x3FF, reg);
reg = result[final_candidate][3] & 0x3F;
- ODM_SetBBReg(dm_odm, rOFDM0_XARxIQImbalance, 0xFC00, reg);
+ PHY_SetBBReg(adapt, rOFDM0_XARxIQImbalance, 0xFC00, reg);
reg = (result[final_candidate][3] >> 6) & 0xF;
- ODM_SetBBReg(dm_odm, rOFDM0_RxIQExtAnta, 0xF0000000, reg);
+ PHY_SetBBReg(adapt, rOFDM0_RxIQExtAnta, 0xF0000000, reg);
}
}
@@ -768,16 +768,16 @@ static void pathb_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u
if (final_candidate == 0xFF) {
return;
} else if (iqkok) {
- Oldval_1 = (ODM_GetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
+ Oldval_1 = (PHY_QueryBBReg(adapt, rOFDM0_XBTxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
X = result[final_candidate][4];
if ((X & 0x00000200) != 0)
X = X | 0xFFFFFC00;
TX1_A = (X * Oldval_1) >> 8;
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("X = 0x%x, TX1_A = 0x%x\n", X, TX1_A));
- ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, 0x3FF, TX1_A);
+ PHY_SetBBReg(adapt, rOFDM0_XBTxIQImbalance, 0x3FF, TX1_A);
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(27), ((X * Oldval_1>>7) & 0x1));
+ PHY_SetBBReg(adapt, rOFDM0_ECCAThreshold, BIT(27), ((X * Oldval_1>>7) & 0x1));
Y = result[final_candidate][5];
if ((Y & 0x00000200) != 0)
@@ -785,22 +785,22 @@ static void pathb_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u
TX1_C = (Y * Oldval_1) >> 8;
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Y = 0x%x, TX1_C = 0x%x\n", Y, TX1_C));
- ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, 0xF0000000, ((TX1_C&0x3C0)>>6));
- ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, 0x003F0000, (TX1_C&0x3F));
+ PHY_SetBBReg(adapt, rOFDM0_XDTxAFE, 0xF0000000, ((TX1_C&0x3C0)>>6));
+ PHY_SetBBReg(adapt, rOFDM0_XBTxIQImbalance, 0x003F0000, (TX1_C&0x3F));
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(25), ((Y * Oldval_1>>7) & 0x1));
+ PHY_SetBBReg(adapt, rOFDM0_ECCAThreshold, BIT(25), ((Y * Oldval_1>>7) & 0x1));
if (txonly)
return;
reg = result[final_candidate][6];
- ODM_SetBBReg(dm_odm, rOFDM0_XBRxIQImbalance, 0x3FF, reg);
+ PHY_SetBBReg(adapt, rOFDM0_XBRxIQImbalance, 0x3FF, reg);
reg = result[final_candidate][7] & 0x3F;
- ODM_SetBBReg(dm_odm, rOFDM0_XBRxIQImbalance, 0xFC00, reg);
+ PHY_SetBBReg(adapt, rOFDM0_XBRxIQImbalance, 0xFC00, reg);
reg = (result[final_candidate][7] >> 6) & 0xF;
- ODM_SetBBReg(dm_odm, rOFDM0_AGCRSSITable, 0x0000F000, reg);
+ PHY_SetBBReg(adapt, rOFDM0_AGCRSSITable, 0x0000F000, reg);
}
}
@@ -824,7 +824,7 @@ void _PHY_SaveADDARegisters(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save ADDA parameters.\n"));
for (i = 0; i < RegisterNum; i++) {
- ADDABackup[i] = ODM_GetBBReg(dm_odm, ADDAReg[i], bMaskDWord);
+ ADDABackup[i] = PHY_QueryBBReg(adapt, ADDAReg[i], bMaskDWord);
}
}
@@ -852,7 +852,7 @@ static void reload_adda_reg(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Reload ADDA power saving parameters !\n"));
for (i = 0; i < RegiesterNum; i++)
- ODM_SetBBReg(dm_odm, ADDAReg[i], bMaskDWord, ADDABackup[i]);
+ PHY_SetBBReg(adapt, ADDAReg[i], bMaskDWord, ADDABackup[i]);
}
static void
@@ -890,13 +890,13 @@ _PHY_PathADDAOn(
pathOn = isPathAOn ? 0x04db25a4 : 0x0b1b25a4;
if (!is2t) {
pathOn = 0x0bdb25a0;
- ODM_SetBBReg(dm_odm, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
+ PHY_SetBBReg(adapt, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
} else {
- ODM_SetBBReg(dm_odm, ADDAReg[0], bMaskDWord, pathOn);
+ PHY_SetBBReg(adapt, ADDAReg[0], bMaskDWord, pathOn);
}
for (i = 1; i < IQK_ADDA_REG_NUM; i++)
- ODM_SetBBReg(dm_odm, ADDAReg[i], bMaskDWord, pathOn);
+ PHY_SetBBReg(adapt, ADDAReg[i], bMaskDWord, pathOn);
}
void
@@ -930,9 +930,9 @@ _PHY_PathAStandBy(
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A standby mode!\n"));
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x0);
- ODM_SetBBReg(dm_odm, 0x840, bMaskDWord, 0x00010000);
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x0);
+ PHY_SetBBReg(adapt, 0x840, bMaskDWord, 0x00010000);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
}
static void _PHY_PIModeSwitch(
@@ -947,8 +947,8 @@ static void _PHY_PIModeSwitch(
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("BB Switch to %s mode!\n", (PIMode ? "PI" : "SI")));
mode = PIMode ? 0x01000100 : 0x01000000;
- ODM_SetBBReg(dm_odm, rFPGA0_XA_HSSIParameter1, bMaskDWord, mode);
- ODM_SetBBReg(dm_odm, rFPGA0_XB_HSSIParameter1, bMaskDWord, mode);
+ PHY_SetBBReg(adapt, rFPGA0_XA_HSSIParameter1, bMaskDWord, mode);
+ PHY_SetBBReg(adapt, rFPGA0_XB_HSSIParameter1, bMaskDWord, mode);
}
static bool phy_SimularityCompare_8188E(
@@ -1097,7 +1097,7 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
_PHY_PathADDAOn(adapt, ADDA_REG, true, is2t);
if (t == 0)
- dm_odm->RFCalibrateInfo.bRfPiEnable = (u8)ODM_GetBBReg(dm_odm, rFPGA0_XA_HSSIParameter1, BIT(8));
+ dm_odm->RFCalibrateInfo.bRfPiEnable = (u8)PHY_QueryBBReg(adapt, rFPGA0_XA_HSSIParameter1, BIT(8));
if (!dm_odm->RFCalibrateInfo.bRfPiEnable) {
/* Switch BB to PI mode to do IQ Calibration. */
@@ -1105,19 +1105,19 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
}
/* BB setting */
- ODM_SetBBReg(dm_odm, rFPGA0_RFMOD, BIT24, 0x00);
- ODM_SetBBReg(dm_odm, rOFDM0_TRxPathEnable, bMaskDWord, 0x03a05600);
- ODM_SetBBReg(dm_odm, rOFDM0_TRMuxPar, bMaskDWord, 0x000800e4);
- ODM_SetBBReg(dm_odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22204000);
+ PHY_SetBBReg(adapt, rFPGA0_RFMOD, BIT24, 0x00);
+ PHY_SetBBReg(adapt, rOFDM0_TRxPathEnable, bMaskDWord, 0x03a05600);
+ PHY_SetBBReg(adapt, rOFDM0_TRMuxPar, bMaskDWord, 0x000800e4);
+ PHY_SetBBReg(adapt, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22204000);
- ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFInterfaceSW, BIT10, 0x01);
- ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFInterfaceSW, BIT26, 0x01);
- ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT10, 0x00);
- ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT10, 0x00);
+ PHY_SetBBReg(adapt, rFPGA0_XAB_RFInterfaceSW, BIT10, 0x01);
+ PHY_SetBBReg(adapt, rFPGA0_XAB_RFInterfaceSW, BIT26, 0x01);
+ PHY_SetBBReg(adapt, rFPGA0_XA_RFInterfaceOE, BIT10, 0x00);
+ PHY_SetBBReg(adapt, rFPGA0_XB_RFInterfaceOE, BIT10, 0x00);
if (is2t) {
- ODM_SetBBReg(dm_odm, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00010000);
- ODM_SetBBReg(dm_odm, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00010000);
+ PHY_SetBBReg(adapt, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00010000);
+ PHY_SetBBReg(adapt, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00010000);
}
/* MAC settings */
@@ -1125,23 +1125,23 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
/* Page B init */
/* AP or IQK */
- ODM_SetBBReg(dm_odm, rConfig_AntA, bMaskDWord, 0x0f600000);
+ PHY_SetBBReg(adapt, rConfig_AntA, bMaskDWord, 0x0f600000);
if (is2t)
- ODM_SetBBReg(dm_odm, rConfig_AntB, bMaskDWord, 0x0f600000);
+ PHY_SetBBReg(adapt, rConfig_AntB, bMaskDWord, 0x0f600000);
/* IQ calibration setting */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK setting!\n"));
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
- ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, 0x01007c00);
- ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x81004800);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, 0x01007c00);
+ PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x81004800);
for (i = 0; i < retryCount; i++) {
PathAOK = phy_PathA_IQK_8188E(adapt, is2t);
if (PathAOK == 0x01) {
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Tx IQK Success!!\n"));
- result[t][0] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16;
- result[t][1] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ result[t][0] = (PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ result[t][1] = (PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16;
break;
}
}
@@ -1150,8 +1150,8 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
PathAOK = phy_PathA_RxIQK(adapt, is2t);
if (PathAOK == 0x03) {
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK Success!!\n"));
- result[t][2] = (ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
- result[t][3] = (ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][2] = (PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][3] = (PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
break;
} else {
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK Fail!!\n"));
@@ -1172,15 +1172,15 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
PathBOK = phy_PathB_IQK_8188E(adapt);
if (PathBOK == 0x03) {
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK Success!!\n"));
- result[t][4] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16;
- result[t][5] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16;
- result[t][6] = (ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_B_2, bMaskDWord)&0x3FF0000)>>16;
- result[t][7] = (ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_B_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][4] = (PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][5] = (PHY_QueryBBReg(adapt, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][6] = (PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_B_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][7] = (PHY_QueryBBReg(adapt, rRx_Power_After_IQK_B_2, bMaskDWord)&0x3FF0000)>>16;
break;
} else if (i == (retryCount - 1) && PathBOK == 0x01) { /* Tx IQK OK */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Only Tx IQK Success!!\n"));
- result[t][4] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16;
- result[t][5] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][4] = (PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][5] = (PHY_QueryBBReg(adapt, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16;
}
}
@@ -1191,7 +1191,7 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
/* Back to BB mode, load original value */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:Back to BB mode, load original value!\n"));
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0);
if (t != 0) {
if (!dm_odm->RFCalibrateInfo.bRfPiEnable) {
@@ -1208,13 +1208,13 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
reload_adda_reg(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
/* Restore RX initial gain */
- ODM_SetBBReg(dm_odm, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00032ed3);
+ PHY_SetBBReg(adapt, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00032ed3);
if (is2t)
- ODM_SetBBReg(dm_odm, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00032ed3);
+ PHY_SetBBReg(adapt, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00032ed3);
/* load 0xe30 IQC default value */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
}
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_IQCalibrate_8188E() <==\n"));
}
@@ -1245,31 +1245,31 @@ static void phy_LCCalibrate_8188E(struct adapter *adapt, bool is2t)
/* 2. Set RF mode = standby mode */
/* Path-A */
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMask12Bits, (RF_Amode&0x8FFFF)|0x10000);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits, (RF_Amode&0x8FFFF)|0x10000);
/* Path-B */
if (is2t)
- ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMask12Bits, (RF_Bmode&0x8FFFF)|0x10000);
+ PHY_SetRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits, (RF_Bmode&0x8FFFF)|0x10000);
}
/* 3. Read RF reg18 */
LC_Cal = PHY_QueryRFReg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits);
/* 4. Set LC calibration begin bit15 */
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal|0x08000);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal|0x08000);
- ODM_sleep_ms(100);
+ msleep(100);
/* Restore original situation */
if ((tmpreg&0x70) != 0) {
/* Deal with continuous TX case */
/* Path-A */
ODM_Write1Byte(dm_odm, 0xd03, tmpreg);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
/* Path-B */
if (is2t)
- ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
+ PHY_SetRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
} else {
/* Deal with Packet TX case */
ODM_Write1Byte(dm_odm, REG_TXPAUSE, 0x00);
@@ -1447,7 +1447,7 @@ void PHY_LCCalibrate_8188E(struct adapter *adapt)
return;
while (*(dm_odm->pbScanInProcess) && timecount < timeout) {
- ODM_delay_ms(50);
+ mdelay(50);
timecount += 50;
}
@@ -1475,19 +1475,19 @@ static void phy_setrfpathswitch_8188e(struct adapter *adapt, bool main, bool is2
u8 u1btmp;
u1btmp = ODM_Read1Byte(dm_odm, REG_LEDCFG2) | BIT7;
ODM_Write1Byte(dm_odm, REG_LEDCFG2, u1btmp);
- ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFParameter, BIT13, 0x01);
+ PHY_SetBBReg(adapt, rFPGA0_XAB_RFParameter, BIT13, 0x01);
}
if (is2t) { /* 92C */
if (main)
- ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6, 0x1); /* 92C_Path_A */
+ PHY_SetBBReg(adapt, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6, 0x1); /* 92C_Path_A */
else
- ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6, 0x2); /* BT */
+ PHY_SetBBReg(adapt, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6, 0x2); /* BT */
} else { /* 88C */
if (main)
- ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9, 0x2); /* Main */
+ PHY_SetBBReg(adapt, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9, 0x2); /* Main */
else
- ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9, 0x1); /* Aux */
+ PHY_SetBBReg(adapt, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9, 0x1); /* Aux */
}
}
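
The calibration hunks above apply the same idea to register access: every ODM_GetBBReg/ODM_SetBBReg/ODM_GetRFReg/ODM_SetRFReg call routed through dm_odm is rewritten as the HAL accessor PHY_QueryBBReg/PHY_SetBBReg/PHY_QueryRFReg/PHY_SetRFReg taking the adapter directly, and ODM_delay_ms()/ODM_sleep_ms() become mdelay()/msleep(). A rough sketch of the before/after call shape, assuming the driver's own HAL headers for struct adapter, the PHY_* accessors and bMaskDWord (the read-modify-write helper itself is illustrative):

#include <linux/delay.h>

/* before:  val = ODM_GetBBReg(dm_odm, reg, bMaskDWord);
 *          ODM_SetBBReg(dm_odm, reg, bMaskDWord, val | bits);
 *          ODM_delay_ms(1);
 */
static void demo_bb_set_bits(struct adapter *adapt, u32 reg, u32 bits)
{
	u32 val;

	/* after: the PHY_* accessors are called on the adapter itself */
	val = PHY_QueryBBReg(adapt, reg, bMaskDWord);
	PHY_SetBBReg(adapt, reg, bMaskDWord, val | bits);
	mdelay(1);	/* was ODM_delay_ms(1) */
}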
diff --git a/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c
index 5700dbc..50f9513 100644
--- a/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c
+++ b/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c
@@ -100,7 +100,7 @@ u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers,
if (value == (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd)))
poll_bit = true;
else
- rtw_udelay_os(10);
+ udelay(10);
if (poll_count++ > max_poll_count) {
DBG_88E("Fail to polling Offset[%#x]\n", offset);
@@ -111,9 +111,9 @@ u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers,
case PWR_CMD_DELAY:
RT_TRACE(_module_hal_init_c_ , _drv_info_, ("HalPwrSeqCmdParsing: PWR_CMD_DELAY\n"));
if (GET_PWR_CFG_VALUE(pwrcfgcmd) == PWRSEQ_DELAY_US)
- rtw_udelay_os(GET_PWR_CFG_OFFSET(pwrcfgcmd));
+ udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd));
else
- rtw_udelay_os(GET_PWR_CFG_OFFSET(pwrcfgcmd)*1000);
+ udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd)*1000);
break;
case PWR_CMD_END:
/* When this command is parsed, end the process */
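
The HalPwrSeqCmd.c hunk above maps the power-sequence delay commands straight onto udelay(): microsecond entries delay as-is, millisecond entries are multiplied by 1000 first. A tiny sketch of that dispatch, assuming the driver's PWRSEQ_DELAY_US encoding (the helper name is illustrative; for millisecond-scale busy waits mdelay() is the usual kernel choice):

#include <linux/delay.h>

static void demo_pwrseq_delay(u8 unit, u32 amount)
{
	if (unit == PWRSEQ_DELAY_US)
		udelay(amount);		/* microsecond entry */
	else
		udelay(amount * 1000);	/* millisecond entry, as in the hunk */
}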
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 285475f..3555ffa 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -182,22 +182,16 @@ void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
odm_DIGInit(pDM_Odm);
odm_RateAdaptiveMaskInit(pDM_Odm);
- if (pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) {
- ;
- } else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
- odm_PrimaryCCA_Init(pDM_Odm); /* Gary */
- odm_DynamicBBPowerSavingInit(pDM_Odm);
- odm_DynamicTxPowerInit(pDM_Odm);
- odm_TXPowerTrackingInit(pDM_Odm);
- ODM_EdcaTurboInit(pDM_Odm);
- ODM_RAInfo_Init_all(pDM_Odm);
- if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
- odm_InitHybridAntDiv(pDM_Odm);
- else if (pDM_Odm->AntDivType == CGCS_RX_SW_ANTDIV)
- odm_SwAntDivInit(pDM_Odm);
- }
+ odm_PrimaryCCA_Init(pDM_Odm); /* Gary */
+ odm_DynamicBBPowerSavingInit(pDM_Odm);
+ odm_DynamicTxPowerInit(pDM_Odm);
+ odm_TXPowerTrackingInit(pDM_Odm);
+ ODM_EdcaTurboInit(pDM_Odm);
+ ODM_RAInfo_Init_all(pDM_Odm);
+ if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
+ odm_InitHybridAntDiv(pDM_Odm);
}
/* 2011/09/20 MH This is the entry pointer for all team to execute HW out source DM. */
@@ -206,27 +200,14 @@ void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
{
/* 2012.05.03 Luke: For all IC series */
- odm_GlobalAdapterCheck();
odm_CmnInfoHook_Debug(pDM_Odm);
odm_CmnInfoUpdate_Debug(pDM_Odm);
odm_CommonInfoSelfUpdate(pDM_Odm);
odm_FalseAlarmCounterStatistics(pDM_Odm);
odm_RSSIMonitorCheck(pDM_Odm);
- /* For CE Platform(SPRD or Tablet) */
- /* 8723A or 8189ES platform */
- /* NeilChen--2012--08--24-- */
/* Fix Leave LPS issue */
- if ((pDM_Odm->Adapter->pwrctrlpriv.pwr_mode != PS_MODE_ACTIVE) &&/* in LPS mode */
- ((pDM_Odm->SupportICType & (ODM_RTL8723A)) ||
- (pDM_Odm->SupportICType & (ODM_RTL8188E) &&
- ((pDM_Odm->SupportInterface == ODM_ITRF_SDIO))))) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("----Step1: odm_DIG is in LPS mode\n"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("---Step2: 8723AS is in LPS mode\n"));
- odm_DIGbyRSSI_LPS(pDM_Odm);
- } else {
- odm_DIG(pDM_Odm);
- }
+ odm_DIG(pDM_Odm);
odm_CCKPacketDetectionThresh(pDM_Odm);
if (*(pDM_Odm->pbPowerSaving))
@@ -240,17 +221,10 @@ void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
(pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
(pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
odm_HwAntDiv(pDM_Odm);
- else if (pDM_Odm->AntDivType == CGCS_RX_SW_ANTDIV)
- odm_SwAntDivChkAntSwitch(pDM_Odm, SWAW_STEP_PEAK);
-
- if (pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) {
- ;
- } else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
- ODM_TXPowerTrackingCheck(pDM_Odm);
- odm_EdcaTurboCheck(pDM_Odm);
- odm_DynamicTxPower(pDM_Odm);
- }
- odm_dtc(pDM_Odm);
+
+ ODM_TXPowerTrackingCheck(pDM_Odm);
+ odm_EdcaTurboCheck(pDM_Odm);
+ odm_DynamicTxPower(pDM_Odm);
}
/* Init /.. Fixed HW value. Only init time. */
@@ -457,12 +431,10 @@ void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value)
void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
{
- pDM_Odm->bCckHighPower = (bool) ODM_GetBBReg(pDM_Odm, 0x824, BIT9);
- pDM_Odm->RFPathRxEnable = (u8) ODM_GetBBReg(pDM_Odm, 0xc04, 0x0F);
- if (pDM_Odm->SupportICType & (ODM_RTL8192C|ODM_RTL8192D))
- pDM_Odm->AntDivType = CG_TRX_HW_ANTDIV;
- if (pDM_Odm->SupportICType & (ODM_RTL8723A))
- pDM_Odm->AntDivType = CGCS_RX_SW_ANTDIV;
+ struct adapter *adapter = pDM_Odm->Adapter;
+
+ pDM_Odm->bCckHighPower = (bool) PHY_QueryBBReg(adapter, 0x824, BIT9);
+ pDM_Odm->RFPathRxEnable = (u8) PHY_QueryBBReg(adapter, 0xc04, 0x0F);
ODM_InitDebugSetting(pDM_Odm);
}
@@ -526,9 +498,6 @@ void odm_CmnInfoHook_Debug(struct odm_dm_struct *pDM_Odm)
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbScanInProcess=%d\n", *(pDM_Odm->pbScanInProcess)));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbPowerSaving=%d\n", *(pDM_Odm->pbPowerSaving)));
-
- if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL))
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pOnePathCCA=%d\n", *(pDM_Odm->pOnePathCCA)));
}
void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm)
@@ -540,53 +509,17 @@ void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm)
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RSSI_Min=%d\n", pDM_Odm->RSSI_Min));
}
-static int getIGIForDiff(int value_IGI)
-{
- #define ONERCCA_LOW_TH 0x30
- #define ONERCCA_LOW_DIFF 8
-
- if (value_IGI < ONERCCA_LOW_TH) {
- if ((ONERCCA_LOW_TH - value_IGI) < ONERCCA_LOW_DIFF)
- return ONERCCA_LOW_TH;
- else
- return value_IGI + ONERCCA_LOW_DIFF;
- } else {
- return value_IGI;
- }
-}
-
void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI)
{
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+ struct adapter *adapter = pDM_Odm->Adapter;
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
("ODM_REG(IGI_A,pDM_Odm)=0x%x, ODM_BIT(IGI,pDM_Odm)=0x%x\n",
ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm)));
if (pDM_DigTable->CurIGValue != CurrentIGI) {
- if (pDM_Odm->SupportPlatform & (ODM_CE|ODM_MP)) {
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- if (pDM_Odm->SupportICType != ODM_RTL8188E)
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- } else if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL)) {
- switch (*(pDM_Odm->pOnePathCCA)) {
- case ODM_CCA_2R:
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- if (pDM_Odm->SupportICType != ODM_RTL8188E)
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- break;
- case ODM_CCA_1R_A:
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- if (pDM_Odm->SupportICType != ODM_RTL8188E)
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), getIGIForDiff(CurrentIGI));
- break;
- case ODM_CCA_1R_B:
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), getIGIForDiff(CurrentIGI));
- if (pDM_Odm->SupportICType != ODM_RTL8188E)
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- break;
- }
- }
+ PHY_SetBBReg(adapter, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("CurrentIGI(0x%02x).\n", CurrentIGI));
/* pDM_DigTable->PreIGValue = pDM_DigTable->CurIGValue; */
pDM_DigTable->CurIGValue = CurrentIGI;
@@ -607,9 +540,6 @@ void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm)
u8 bFwCurrentInPSMode = false;
u8 CurrentIGI = pDM_Odm->RSSI_Min;
- if (!(pDM_Odm->SupportICType & (ODM_RTL8723A | ODM_RTL8188E)))
- return;
-
CurrentIGI = CurrentIGI + RSSI_OFFSET_DIG;
bFwCurrentInPSMode = pAdapter->pwrctrlpriv.bFwCurrentInPSMode;
@@ -646,9 +576,10 @@ void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm)
void odm_DIGInit(struct odm_dm_struct *pDM_Odm)
{
+ struct adapter *adapter = pDM_Odm->Adapter;
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
- pDM_DigTable->CurIGValue = (u8) ODM_GetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm));
+ pDM_DigTable->CurIGValue = (u8) PHY_QueryBBReg(adapter, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm));
pDM_DigTable->RssiLowThresh = DM_DIG_THRESH_LOW;
pDM_DigTable->RssiHighThresh = DM_DIG_THRESH_HIGH;
pDM_DigTable->FALowThresh = DM_false_ALARM_THRESH_LOW;
@@ -705,102 +636,47 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
return;
}
- if (pDM_Odm->SupportICType == ODM_RTL8192D) {
- if (*(pDM_Odm->pMacPhyMode) == ODM_DMSP) {
- if (*(pDM_Odm->pbMasterOfDMSP)) {
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
- } else {
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_1;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_1);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_1);
- }
- } else {
- if (*(pDM_Odm->pBandType) == ODM_BAND_5G) {
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
- } else {
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_1;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_1);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_1);
- }
- }
- } else {
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
- }
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
+ FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
/* 1 Boundary Decision */
- if ((pDM_Odm->SupportICType & (ODM_RTL8192C|ODM_RTL8723A)) &&
- ((pDM_Odm->BoardType == ODM_BOARD_HIGHPWR) || pDM_Odm->ExtLNA)) {
- if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL)) {
- dm_dig_max = DM_DIG_MAX_AP_HP;
- dm_dig_min = DM_DIG_MIN_AP_HP;
- } else {
- dm_dig_max = DM_DIG_MAX_NIC_HP;
- dm_dig_min = DM_DIG_MIN_NIC_HP;
- }
- DIG_MaxOfMin = DM_DIG_MAX_AP_HP;
- } else {
- if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL)) {
- dm_dig_max = DM_DIG_MAX_AP;
- dm_dig_min = DM_DIG_MIN_AP;
- DIG_MaxOfMin = dm_dig_max;
- } else {
- dm_dig_max = DM_DIG_MAX_NIC;
- dm_dig_min = DM_DIG_MIN_NIC;
- DIG_MaxOfMin = DM_DIG_MAX_AP;
- }
- }
+ dm_dig_max = DM_DIG_MAX_NIC;
+ dm_dig_min = DM_DIG_MIN_NIC;
+ DIG_MaxOfMin = DM_DIG_MAX_AP;
+
if (pDM_Odm->bLinked) {
- /* 2 8723A Series, offset need to be 10 */
- if (pDM_Odm->SupportICType == (ODM_RTL8723A)) {
- /* 2 Upper Bound */
- if ((pDM_Odm->RSSI_Min + 10) > DM_DIG_MAX_NIC)
- pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
- else if ((pDM_Odm->RSSI_Min + 10) < DM_DIG_MIN_NIC)
- pDM_DigTable->rx_gain_range_max = DM_DIG_MIN_NIC;
- else
- pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 10;
- /* 2 If BT is Concurrent, need to set Lower Bound */
- DIG_Dynamic_MIN = DM_DIG_MIN_NIC;
- } else {
- /* 2 Modify DIG upper bound */
- if ((pDM_Odm->RSSI_Min + 20) > dm_dig_max)
- pDM_DigTable->rx_gain_range_max = dm_dig_max;
- else if ((pDM_Odm->RSSI_Min + 20) < dm_dig_min)
- pDM_DigTable->rx_gain_range_max = dm_dig_min;
- else
- pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 20;
- /* 2 Modify DIG lower bound */
- if (pDM_Odm->bOneEntryOnly) {
- if (pDM_Odm->RSSI_Min < dm_dig_min)
- DIG_Dynamic_MIN = dm_dig_min;
- else if (pDM_Odm->RSSI_Min > DIG_MaxOfMin)
- DIG_Dynamic_MIN = DIG_MaxOfMin;
- else
- DIG_Dynamic_MIN = pDM_Odm->RSSI_Min;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG() : bOneEntryOnly=true, DIG_Dynamic_MIN=0x%x\n",
- DIG_Dynamic_MIN));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG() : pDM_Odm->RSSI_Min=%d\n",
- pDM_Odm->RSSI_Min));
- } else if ((pDM_Odm->SupportICType == ODM_RTL8188E) &&
- (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV)) {
- /* 1 Lower Bound for 88E AntDiv */
- if (pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) {
- DIG_Dynamic_MIN = (u8) pDM_DigTable->AntDiv_RSSI_max;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("odm_DIG(): pDM_DigTable->AntDiv_RSSI_max=%d\n",
- pDM_DigTable->AntDiv_RSSI_max));
- }
- } else {
+ /* 2 Modify DIG upper bound */
+ if ((pDM_Odm->RSSI_Min + 20) > dm_dig_max)
+ pDM_DigTable->rx_gain_range_max = dm_dig_max;
+ else if ((pDM_Odm->RSSI_Min + 20) < dm_dig_min)
+ pDM_DigTable->rx_gain_range_max = dm_dig_min;
+ else
+ pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 20;
+ /* 2 Modify DIG lower bound */
+ if (pDM_Odm->bOneEntryOnly) {
+ if (pDM_Odm->RSSI_Min < dm_dig_min)
DIG_Dynamic_MIN = dm_dig_min;
+ else if (pDM_Odm->RSSI_Min > DIG_MaxOfMin)
+ DIG_Dynamic_MIN = DIG_MaxOfMin;
+ else
+ DIG_Dynamic_MIN = pDM_Odm->RSSI_Min;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG() : bOneEntryOnly=true, DIG_Dynamic_MIN=0x%x\n",
+ DIG_Dynamic_MIN));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG() : pDM_Odm->RSSI_Min=%d\n",
+ pDM_Odm->RSSI_Min));
+ } else if (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV) {
+ /* 1 Lower Bound for 88E AntDiv */
+ if (pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) {
+ DIG_Dynamic_MIN = (u8) pDM_DigTable->AntDiv_RSSI_max;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("odm_DIG(): pDM_DigTable->AntDiv_RSSI_max=%d\n",
+ pDM_DigTable->AntDiv_RSSI_max));
}
+ } else {
+ DIG_Dynamic_MIN = dm_dig_min;
}
} else {
pDM_DigTable->rx_gain_range_max = dm_dig_max;
@@ -858,21 +734,12 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
CurrentIGI = pDM_Odm->RSSI_Min;
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("DIG: First Connect\n"));
} else {
- if (pDM_Odm->SupportICType == ODM_RTL8192D) {
- if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2_92D)
- CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
- else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1_92D)
- CurrentIGI = CurrentIGI + 1; /* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
- else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0_92D)
- CurrentIGI = CurrentIGI - 1;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
- } else {
- if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2)
- CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
- else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1)
- CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
- else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0)
- CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
- }
+ if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2)
+ CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
+ else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1)
+ CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
+ else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0)
+ CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
}
} else {
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): DIG BeforeLink\n"));
@@ -916,102 +783,69 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
{
+ struct adapter *adapter = pDM_Odm->Adapter;
u32 ret_value;
struct false_alarm_stats *FalseAlmCnt = &(pDM_Odm->FalseAlmCnt);
if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
return;
- if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
- /* hold ofdm counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT31, 1); /* hold page C counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT31, 1); /* hold page D counter */
-
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Fast_Fsync = (ret_value&0xffff);
- FalseAlmCnt->Cnt_SB_Search_fail = ((ret_value&0xffff0000)>>16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
- FalseAlmCnt->Cnt_OFDM_CCA = (ret_value&0xffff);
- FalseAlmCnt->Cnt_Parity_Fail = ((ret_value&0xffff0000)>>16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Rate_Illegal = (ret_value&0xffff);
- FalseAlmCnt->Cnt_Crc8_fail = ((ret_value&0xffff0000)>>16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Mcs_fail = (ret_value&0xffff);
-
- FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail + FalseAlmCnt->Cnt_Rate_Illegal +
- FalseAlmCnt->Cnt_Crc8_fail + FalseAlmCnt->Cnt_Mcs_fail +
- FalseAlmCnt->Cnt_Fast_Fsync + FalseAlmCnt->Cnt_SB_Search_fail;
-
- if (pDM_Odm->SupportICType == ODM_RTL8188E) {
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_SC_CNT_11N, bMaskDWord);
- FalseAlmCnt->Cnt_BW_LSC = (ret_value&0xffff);
- FalseAlmCnt->Cnt_BW_USC = ((ret_value&0xffff0000)>>16);
- }
-
- /* hold cck counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT12, 1);
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT14, 1);
-
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
- FalseAlmCnt->Cnt_Cck_fail = ret_value;
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
- FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff)<<8;
-
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
- FalseAlmCnt->Cnt_CCK_CCA = ((ret_value&0xFF)<<8) | ((ret_value&0xFF00)>>8);
-
- FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync +
- FalseAlmCnt->Cnt_SB_Search_fail +
- FalseAlmCnt->Cnt_Parity_Fail +
- FalseAlmCnt->Cnt_Rate_Illegal +
- FalseAlmCnt->Cnt_Crc8_fail +
- FalseAlmCnt->Cnt_Mcs_fail +
- FalseAlmCnt->Cnt_Cck_fail);
-
- FalseAlmCnt->Cnt_CCA_all = FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
-
- if (pDM_Odm->SupportICType >= ODM_RTL8723A) {
- /* reset false alarm counter registers */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTC_11N, BIT31, 1);
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTC_11N, BIT31, 0);
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT27, 1);
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT27, 0);
- /* update ofdm counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT31, 0); /* update page C counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT31, 0); /* update page D counter */
-
- /* reset CCK CCA counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT13|BIT12, 0);
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT13|BIT12, 2);
- /* reset CCK FA counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT15|BIT14, 0);
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT15|BIT14, 2);
- }
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Enter odm_FalseAlarmCounterStatistics\n"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Cnt_Fast_Fsync=%d, Cnt_SB_Search_fail=%d\n",
- FalseAlmCnt->Cnt_Fast_Fsync, FalseAlmCnt->Cnt_SB_Search_fail));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Cnt_Parity_Fail=%d, Cnt_Rate_Illegal=%d\n",
- FalseAlmCnt->Cnt_Parity_Fail, FalseAlmCnt->Cnt_Rate_Illegal));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Cnt_Crc8_fail=%d, Cnt_Mcs_fail=%d\n",
- FalseAlmCnt->Cnt_Crc8_fail, FalseAlmCnt->Cnt_Mcs_fail));
- } else { /* FOR ODM_IC_11AC_SERIES */
- /* read OFDM FA counter */
- FalseAlmCnt->Cnt_Ofdm_fail = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_11AC, bMaskLWord);
- FalseAlmCnt->Cnt_Cck_fail = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_11AC, bMaskLWord);
- FalseAlmCnt->Cnt_all = FalseAlmCnt->Cnt_Ofdm_fail + FalseAlmCnt->Cnt_Cck_fail;
-
- /* reset OFDM FA coutner */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RST_11AC, BIT17, 1);
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RST_11AC, BIT17, 0);
- /* reset CCK FA counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11AC, BIT15, 0);
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11AC, BIT15, 1);
- }
+ /* hold ofdm counter */
+ PHY_SetBBReg(adapter, ODM_REG_OFDM_FA_HOLDC_11N, BIT31, 1); /* hold page C counter */
+ PHY_SetBBReg(adapter, ODM_REG_OFDM_FA_RSTD_11N, BIT31, 1); /* hold page D counter */
+
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Fast_Fsync = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_SB_Search_fail = ((ret_value&0xffff0000)>>16);
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_OFDM_CCA = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_Parity_Fail = ((ret_value&0xffff0000)>>16);
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Rate_Illegal = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_Crc8_fail = ((ret_value&0xffff0000)>>16);
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Mcs_fail = (ret_value&0xffff);
+
+ FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail + FalseAlmCnt->Cnt_Rate_Illegal +
+ FalseAlmCnt->Cnt_Crc8_fail + FalseAlmCnt->Cnt_Mcs_fail +
+ FalseAlmCnt->Cnt_Fast_Fsync + FalseAlmCnt->Cnt_SB_Search_fail;
+
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_SC_CNT_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_BW_LSC = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_BW_USC = ((ret_value&0xffff0000)>>16);
+
+ /* hold cck counter */
+ PHY_SetBBReg(adapter, ODM_REG_CCK_FA_RST_11N, BIT12, 1);
+ PHY_SetBBReg(adapter, ODM_REG_CCK_FA_RST_11N, BIT14, 1);
+
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
+ FalseAlmCnt->Cnt_Cck_fail = ret_value;
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
+ FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff)<<8;
+
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_CCK_CCA = ((ret_value&0xFF)<<8) | ((ret_value&0xFF00)>>8);
+
+ FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync +
+ FalseAlmCnt->Cnt_SB_Search_fail +
+ FalseAlmCnt->Cnt_Parity_Fail +
+ FalseAlmCnt->Cnt_Rate_Illegal +
+ FalseAlmCnt->Cnt_Crc8_fail +
+ FalseAlmCnt->Cnt_Mcs_fail +
+ FalseAlmCnt->Cnt_Cck_fail);
+
+ FalseAlmCnt->Cnt_CCA_all = FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Enter odm_FalseAlarmCounterStatistics\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
+ ("Cnt_Fast_Fsync=%d, Cnt_SB_Search_fail=%d\n",
+ FalseAlmCnt->Cnt_Fast_Fsync, FalseAlmCnt->Cnt_SB_Search_fail));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
+ ("Cnt_Parity_Fail=%d, Cnt_Rate_Illegal=%d\n",
+ FalseAlmCnt->Cnt_Parity_Fail, FalseAlmCnt->Cnt_Rate_Illegal));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
+ ("Cnt_Crc8_fail=%d, Cnt_Mcs_fail=%d\n",
+ FalseAlmCnt->Cnt_Crc8_fail, FalseAlmCnt->Cnt_Mcs_fail));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Cck_fail=%d\n", FalseAlmCnt->Cnt_Cck_fail));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Ofdm_fail=%d\n", FalseAlmCnt->Cnt_Ofdm_fail));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Total False Alarm=%d\n", FalseAlmCnt->Cnt_all));
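The statistics hunk above assembles Cnt_Cck_fail from two byte-wide reads (low byte from ODM_REG_CCK_FA_LSB_11N, high byte from ODM_REG_CCK_FA_MSB_11N) and then folds it, together with the six OFDM sub-counters, into Cnt_all. A minimal standalone sketch of that bookkeeping, with made-up register byte values:

#include <stdio.h>
#include <stdint.h>

/* Assemble the 16-bit CCK false-alarm count from its two register bytes. */
static uint32_t cck_fa(uint32_t lsb_byte, uint32_t msb_byte)
{
	return (lsb_byte & 0xff) | ((msb_byte & 0xff) << 8);
}

int main(void)
{
	/* Example values only; real counts come from the baseband registers. */
	uint32_t ofdm_sub[6] = { 3, 1, 7, 2, 5, 4 }; /* fast_fsync, sb_search, parity, rate, crc8, mcs */
	uint32_t cnt_cck_fail = cck_fa(0x34, 0x12);  /* 0x1234 */
	uint32_t cnt_all = cnt_cck_fail;
	int i;

	for (i = 0; i < 6; i++)
		cnt_all += ofdm_sub[i];
	printf("Cnt_Cck_fail=%u Cnt_all=%u\n", (unsigned)cnt_cck_fail, (unsigned)cnt_all);
	return 0;
}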
@@ -1077,26 +911,11 @@ void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm)
void odm_DynamicBBPowerSaving(struct odm_dm_struct *pDM_Odm)
{
- if ((pDM_Odm->SupportICType != ODM_RTL8192C) && (pDM_Odm->SupportICType != ODM_RTL8723A))
- return;
- if (!(pDM_Odm->SupportAbility & ODM_BB_PWR_SAVE))
- return;
- if (!(pDM_Odm->SupportPlatform & (ODM_MP|ODM_CE)))
- return;
-
- /* 1 2.Power Saving for 92C */
- if ((pDM_Odm->SupportICType == ODM_RTL8192C) && (pDM_Odm->RFType == ODM_2T2R)) {
- odm_1R_CCA(pDM_Odm);
- } else {
- /* 20100628 Joseph: Turn off BB power save for 88CE because it makesthroughput unstable. */
- /* 20100831 Joseph: Turn ON BB power save again after modifying AGC delay from 900ns ot 600ns. */
- /* 1 3.Power Saving for 88C */
- ODM_RF_Saving(pDM_Odm, false);
- }
}
void odm_1R_CCA(struct odm_dm_struct *pDM_Odm)
{
+ struct adapter *adapter = pDM_Odm->Adapter;
struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
if (pDM_Odm->RSSI_Min != 0xFF) {
@@ -1118,11 +937,11 @@ void odm_1R_CCA(struct odm_dm_struct *pDM_Odm)
if (pDM_PSTable->PreCCAState != pDM_PSTable->CurCCAState) {
if (pDM_PSTable->CurCCAState == CCA_1R) {
if (pDM_Odm->RFType == ODM_2T2R)
- ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x13);
+ PHY_SetBBReg(adapter, 0xc04, bMaskByte0, 0x13);
else
- ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x23);
+ PHY_SetBBReg(adapter, 0xc04, bMaskByte0, 0x23);
} else {
- ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x33);
+ PHY_SetBBReg(adapter, 0xc04, bMaskByte0, 0x33);
}
pDM_PSTable->PreCCAState = pDM_PSTable->CurCCAState;
}
@@ -1130,6 +949,7 @@ void odm_1R_CCA(struct odm_dm_struct *pDM_Odm)
void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
{
+ struct adapter *adapter = pDM_Odm->Adapter;
struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
u8 Rssi_Up_bound = 30;
u8 Rssi_Low_bound = 25;
@@ -1139,10 +959,10 @@ void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
Rssi_Low_bound = 45;
}
if (pDM_PSTable->initialize == 0) {
- pDM_PSTable->Reg874 = (ODM_GetBBReg(pDM_Odm, 0x874, bMaskDWord)&0x1CC000)>>14;
- pDM_PSTable->RegC70 = (ODM_GetBBReg(pDM_Odm, 0xc70, bMaskDWord)&BIT3)>>3;
- pDM_PSTable->Reg85C = (ODM_GetBBReg(pDM_Odm, 0x85c, bMaskDWord)&0xFF000000)>>24;
- pDM_PSTable->RegA74 = (ODM_GetBBReg(pDM_Odm, 0xa74, bMaskDWord)&0xF000)>>12;
+ pDM_PSTable->Reg874 = (PHY_QueryBBReg(adapter, 0x874, bMaskDWord)&0x1CC000)>>14;
+ pDM_PSTable->RegC70 = (PHY_QueryBBReg(adapter, 0xc70, bMaskDWord)&BIT3)>>3;
+ pDM_PSTable->Reg85C = (PHY_QueryBBReg(adapter, 0x85c, bMaskDWord)&0xFF000000)>>24;
+ pDM_PSTable->RegA74 = (PHY_QueryBBReg(adapter, 0xa74, bMaskDWord)&0xF000)>>12;
pDM_PSTable->initialize = 1;
}
@@ -1168,26 +988,19 @@ void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
if (pDM_PSTable->PreRFState != pDM_PSTable->CurRFState) {
if (pDM_PSTable->CurRFState == RF_Save) {
- /* <tynli_note> 8723 RSSI report will be wrong. Set 0x874[5]=1 when enter BB power saving mode. */
- /* Suggested by SD3 Yu-Nan. 2011.01.20. */
- if (pDM_Odm->SupportICType == ODM_RTL8723A)
- ODM_SetBBReg(pDM_Odm, 0x874 , BIT5, 0x1); /* Reg874[5]=1b'1 */
- ODM_SetBBReg(pDM_Odm, 0x874 , 0x1C0000, 0x2); /* Reg874[20:18]=3'b010 */
- ODM_SetBBReg(pDM_Odm, 0xc70, BIT3, 0); /* RegC70[3]=1'b0 */
- ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, 0x63); /* Reg85C[31:24]=0x63 */
- ODM_SetBBReg(pDM_Odm, 0x874, 0xC000, 0x2); /* Reg874[15:14]=2'b10 */
- ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, 0x3); /* RegA75[7:4]=0x3 */
- ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x0); /* Reg818[28]=1'b0 */
- ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x1); /* Reg818[28]=1'b1 */
+			PHY_SetBBReg(adapter, 0x874, 0x1C0000, 0x2); /* Reg874[20:18]=3'b010 */
+ PHY_SetBBReg(adapter, 0xc70, BIT3, 0); /* RegC70[3]=1'b0 */
+ PHY_SetBBReg(adapter, 0x85c, 0xFF000000, 0x63); /* Reg85C[31:24]=0x63 */
+ PHY_SetBBReg(adapter, 0x874, 0xC000, 0x2); /* Reg874[15:14]=2'b10 */
+ PHY_SetBBReg(adapter, 0xa74, 0xF000, 0x3); /* RegA75[7:4]=0x3 */
+ PHY_SetBBReg(adapter, 0x818, BIT28, 0x0); /* Reg818[28]=1'b0 */
+ PHY_SetBBReg(adapter, 0x818, BIT28, 0x1); /* Reg818[28]=1'b1 */
} else {
- ODM_SetBBReg(pDM_Odm, 0x874 , 0x1CC000, pDM_PSTable->Reg874);
- ODM_SetBBReg(pDM_Odm, 0xc70, BIT3, pDM_PSTable->RegC70);
- ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, pDM_PSTable->Reg85C);
- ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, pDM_PSTable->RegA74);
- ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x0);
-
- if (pDM_Odm->SupportICType == ODM_RTL8723A)
- ODM_SetBBReg(pDM_Odm, 0x874, BIT5, 0x0); /* Reg874[5]=1b'0 */
+			PHY_SetBBReg(adapter, 0x874, 0x1CC000, pDM_PSTable->Reg874);
+ PHY_SetBBReg(adapter, 0xc70, BIT3, pDM_PSTable->RegC70);
+ PHY_SetBBReg(adapter, 0x85c, 0xFF000000, pDM_PSTable->Reg85C);
+ PHY_SetBBReg(adapter, 0xa74, 0xF000, pDM_PSTable->RegA74);
+ PHY_SetBBReg(adapter, 0x818, BIT28, 0x0);
}
pDM_PSTable->PreRFState = pDM_PSTable->CurRFState;
}
@@ -1316,18 +1129,7 @@ void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm)
	/* at the same time. In stage 2/3, we need to provide a universal interface and merge all */
/* HW dynamic mechanism. */
/* */
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- odm_RefreshRateAdaptiveMaskMP(pDM_Odm);
- break;
- case ODM_CE:
- odm_RefreshRateAdaptiveMaskCE(pDM_Odm);
- break;
- case ODM_AP:
- case ODM_ADSL:
- odm_RefreshRateAdaptiveMaskAPADSL(pDM_Odm);
- break;
- }
+ odm_RefreshRateAdaptiveMaskCE(pDM_Odm);
}
void odm_RefreshRateAdaptiveMaskMP(struct odm_dm_struct *pDM_Odm)
@@ -1440,32 +1242,11 @@ void odm_DynamicTxPower(struct odm_dm_struct *pDM_Odm)
	/* 2011/09/29 MH In HW integration first stage, we provide 4 different handles to operate */
	/* at the same time. In stage 2/3, we need to provide a universal interface and merge all */
/* HW dynamic mechanism. */
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- case ODM_CE:
- odm_DynamicTxPowerNIC(pDM_Odm);
- break;
- case ODM_AP:
- odm_DynamicTxPowerAP(pDM_Odm);
- break;
- case ODM_ADSL:
- break;
- }
+ odm_DynamicTxPowerNIC(pDM_Odm);
}
void odm_DynamicTxPowerNIC(struct odm_dm_struct *pDM_Odm)
{
- if (!(pDM_Odm->SupportAbility & ODM_BB_DYNAMIC_TXPWR))
- return;
-
- if (pDM_Odm->SupportICType == ODM_RTL8188E) {
- /* ??? */
- /* This part need to be redefined. */
- }
-}
-
-void odm_DynamicTxPowerAP(struct odm_dm_struct *pDM_Odm)
-{
}
/* 3============================================================ */
@@ -1482,27 +1263,9 @@ void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm)
	/* at the same time. In stage 2/3, we need to provide a universal interface and merge all */
/* HW dynamic mechanism. */
/* */
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- odm_RSSIMonitorCheckMP(pDM_Odm);
- break;
- case ODM_CE:
- odm_RSSIMonitorCheckCE(pDM_Odm);
- break;
- case ODM_AP:
- odm_RSSIMonitorCheckAP(pDM_Odm);
- break;
- case ODM_ADSL:
- /* odm_DIGAP(pDM_Odm); */
- break;
- }
-
+ odm_RSSIMonitorCheckCE(pDM_Odm);
} /* odm_RSSIMonitorCheck */
-void odm_RSSIMonitorCheckMP(struct odm_dm_struct *pDM_Odm)
-{
-}
-
static void FindMinimumRSSI(struct adapter *pAdapter)
{
struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
@@ -1575,28 +1338,6 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
ODM_CmnInfoUpdate(&pHalData->odmpriv , ODM_CMNINFO_RSSI_MIN, pdmpriv->MinUndecoratedPWDBForDM);
}
-void odm_RSSIMonitorCheckAP(struct odm_dm_struct *pDM_Odm)
-{
-}
-
-void ODM_InitAllTimers(struct odm_dm_struct *pDM_Odm)
-{
- ODM_InitializeTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer,
- (void *)odm_SwAntDivChkAntSwitchCallback, NULL, "SwAntennaSwitchTimer");
-}
-
-void ODM_CancelAllTimers(struct odm_dm_struct *pDM_Odm)
-{
- ODM_CancelTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer);
-}
-
-void ODM_ReleaseAllTimers(struct odm_dm_struct *pDM_Odm)
-{
- ODM_ReleaseTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer);
-
- ODM_ReleaseTimer(pDM_Odm, &pDM_Odm->FastAntTrainingTimer);
-}
-
/* 3============================================================ */
/* 3 Tx Power Tracking */
/* 3============================================================ */
@@ -1623,19 +1364,7 @@ void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
	/* 2011/09/29 MH In HW integration first stage, we provide 4 different handles to operate */
	/* at the same time. In stage 2/3, we need to provide a universal interface and merge all */
/* HW dynamic mechanism. */
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- odm_TXPowerTrackingCheckMP(pDM_Odm);
- break;
- case ODM_CE:
- odm_TXPowerTrackingCheckCE(pDM_Odm);
- break;
- case ODM_AP:
- odm_TXPowerTrackingCheckAP(pDM_Odm);
- break;
- case ODM_ADSL:
- break;
- }
+ odm_TXPowerTrackingCheckCE(pDM_Odm);
}
void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm)
@@ -1656,14 +1385,6 @@ void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm)
}
}
-void odm_TXPowerTrackingCheckMP(struct odm_dm_struct *pDM_Odm)
-{
-}
-
-void odm_TXPowerTrackingCheckAP(struct odm_dm_struct *pDM_Odm)
-{
-}
-
/* antenna mapping info */
/* 1: right-side antenna */
/* 2/0: left-side antenna */
@@ -1675,21 +1396,6 @@ void odm_TXPowerTrackingCheckAP(struct odm_dm_struct *pDM_Odm)
/* 3============================================================ */
/* 3 SW Antenna Diversity */
/* 3============================================================ */
-void odm_SwAntDivInit(struct odm_dm_struct *pDM_Odm)
-{
-}
-
-void ODM_SwAntDivChkPerPktRssi(struct odm_dm_struct *pDM_Odm, u8 StationID, struct odm_phy_status_info *pPhyInfo)
-{
-}
-
-void odm_SwAntDivChkAntSwitch(struct odm_dm_struct *pDM_Odm, u8 Step)
-{
-}
-
-void ODM_SwAntDivRestAfterLink(struct odm_dm_struct *pDM_Odm)
-{
-}
void odm_SwAntDivChkAntSwitchCallback(void *FunctionContext)
{
@@ -1706,31 +1412,7 @@ void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm)
return;
}
- if (pDM_Odm->SupportICType & (ODM_RTL8192C | ODM_RTL8192D))
- ;
- else if (pDM_Odm->SupportICType == ODM_RTL8188E)
- ODM_AntennaDiversityInit_88E(pDM_Odm);
-}
-
-void ODM_AntselStatistics_88C(struct odm_dm_struct *pDM_Odm, u8 MacId, u32 PWDBAll, bool isCCKrate)
-{
- struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
-
- if (pDM_SWAT_Table->antsel == 1) {
- if (isCCKrate) {
- pDM_SWAT_Table->CCK_Ant1_Cnt[MacId]++;
- } else {
- pDM_SWAT_Table->OFDM_Ant1_Cnt[MacId]++;
- pDM_SWAT_Table->RSSI_Ant1_Sum[MacId] += PWDBAll;
- }
- } else {
- if (isCCKrate) {
- pDM_SWAT_Table->CCK_Ant2_Cnt[MacId]++;
- } else {
- pDM_SWAT_Table->OFDM_Ant2_Cnt[MacId]++;
- pDM_SWAT_Table->RSSI_Ant2_Sum[MacId] += PWDBAll;
- }
- }
+ ODM_AntennaDiversityInit_88E(pDM_Odm);
}
void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
@@ -1740,8 +1422,7 @@ void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
return;
}
- if (pDM_Odm->SupportICType == ODM_RTL8188E)
- ODM_AntennaDiversity_88E(pDM_Odm);
+ ODM_AntennaDiversity_88E(pDM_Odm);
}
/* EDCA Turbo */
@@ -1768,16 +1449,7 @@ void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
if (!(pDM_Odm->SupportAbility & ODM_MAC_EDCA_TURBO))
return;
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- break;
- case ODM_CE:
- odm_EdcaTurboCheckCE(pDM_Odm);
- break;
- case ODM_AP:
- case ODM_ADSL:
- break;
- }
+ odm_EdcaTurboCheckCE(pDM_Odm);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("<========================odm_EdcaTurboCheck\n"));
} /* odm_CheckEdcaTurbo */
@@ -1855,29 +1527,6 @@ dm_CheckEdcaTurbo_EXIT:
precvpriv->last_rx_bytes = precvpriv->rx_bytes;
}
-/* need to ODM CE Platform */
-/* move to here for ANT detection mechanism using */
-
-u32 GetPSDData(struct odm_dm_struct *pDM_Odm, unsigned int point, u8 initial_gain_psd)
-{
- u32 psd_report;
-
- /* Set DCO frequency index, offset=(40MHz/SamplePts)*point */
- ODM_SetBBReg(pDM_Odm, 0x808, 0x3FF, point);
-
- /* Start PSD calculation, Reg808[22]=0->1 */
- ODM_SetBBReg(pDM_Odm, 0x808, BIT22, 1);
- /* Need to wait for HW PSD report */
- ODM_StallExecution(30);
- ODM_SetBBReg(pDM_Odm, 0x808, BIT22, 0);
- /* Read PSD report, Reg8B4[15:0] */
- psd_report = ODM_GetBBReg(pDM_Odm, 0x8B4, bMaskDWord) & 0x0000FFFF;
-
- psd_report = (u32) (ConvertTo_dB(psd_report))+(u32)(initial_gain_psd-0x1c);
-
- return psd_report;
-}
-
u32 ConvertTo_dB(u32 Value)
{
u8 i;
@@ -1902,270 +1551,3 @@ u32 ConvertTo_dB(u32 Value)
return dB;
}
-
-/* 2011/09/22 MH Add for 92D global spin lock utilization. */
-void odm_GlobalAdapterCheck(void)
-{
-} /* odm_GlobalAdapterCheck */
-
-/* Description: */
-/* Set Single/Dual Antenna default setting for products that do not do detection in advance. */
-/* Added by Joseph, 2012.03.22 */
-void ODM_SingleDualAntennaDefaultSetting(struct odm_dm_struct *pDM_Odm)
-{
- struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
-
- pDM_SWAT_Table->ANTA_ON = true;
- pDM_SWAT_Table->ANTB_ON = true;
-}
-
-
-/* 2 8723A ANT DETECT */
-
-static void odm_PHY_SaveAFERegisters(struct odm_dm_struct *pDM_Odm, u32 *AFEReg, u32 *AFEBackup, u32 RegisterNum)
-{
- u32 i;
-
- /* RTPRINT(FINIT, INIT_IQK, ("Save ADDA parameters.\n")); */
- for (i = 0; i < RegisterNum; i++)
- AFEBackup[i] = ODM_GetBBReg(pDM_Odm, AFEReg[i], bMaskDWord);
-}
-
-static void odm_PHY_ReloadAFERegisters(struct odm_dm_struct *pDM_Odm, u32 *AFEReg, u32 *AFEBackup, u32 RegiesterNum)
-{
- u32 i;
-
- for (i = 0; i < RegiesterNum; i++)
- ODM_SetBBReg(pDM_Odm, AFEReg[i], bMaskDWord, AFEBackup[i]);
-}
-
-/* 2 8723A ANT DETECT */
-/* Description: */
-/* Implement IQK single tone for RF DPK loopback and BB PSD scanning. */
-/* This function is cooperated with BB team Neil. */
-bool ODM_SingleDualAntennaDetection(struct odm_dm_struct *pDM_Odm, u8 mode)
-{
- struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
- u32 CurrentChannel, RfLoopReg;
- u8 n;
- u32 Reg88c, Regc08, Reg874, Regc50;
- u8 initial_gain = 0x5a;
- u32 PSD_report_tmp;
- u32 AntA_report = 0x0, AntB_report = 0x0, AntO_report = 0x0;
- bool bResult = true;
- u32 AFE_Backup[16];
- u32 AFE_REG_8723A[16] = {
- rRx_Wait_CCA, rTx_CCK_RFON,
- rTx_CCK_BBON, rTx_OFDM_RFON,
- rTx_OFDM_BBON, rTx_To_Rx,
- rTx_To_Tx, rRx_CCK,
- rRx_OFDM, rRx_Wait_RIFS,
- rRx_TO_Rx, rStandby,
- rSleep, rPMPD_ANAEN,
- rFPGA0_XCD_SwitchControl, rBlue_Tooth};
-
- if (!(pDM_Odm->SupportICType & (ODM_RTL8723A|ODM_RTL8192C)))
- return bResult;
-
- if (!(pDM_Odm->SupportAbility&ODM_BB_ANT_DIV))
- return bResult;
-
- if (pDM_Odm->SupportICType == ODM_RTL8192C) {
- /* Which path in ADC/DAC is turnned on for PSD: both I/Q */
- ODM_SetBBReg(pDM_Odm, 0x808, BIT10|BIT11, 0x3);
- /* Ageraged number: 8 */
- ODM_SetBBReg(pDM_Odm, 0x808, BIT12|BIT13, 0x1);
- /* pts = 128; */
- ODM_SetBBReg(pDM_Odm, 0x808, BIT14|BIT15, 0x0);
- }
-
- /* 1 Backup Current RF/BB Settings */
-
- CurrentChannel = ODM_GetRFReg(pDM_Odm, RF_PATH_A, ODM_CHANNEL, bRFRegOffsetMask);
- RfLoopReg = ODM_GetRFReg(pDM_Odm, RF_PATH_A, 0x00, bRFRegOffsetMask);
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, ODM_DPDT, Antenna_A); /* change to Antenna A */
- /* Step 1: USE IQK to transmitter single tone */
-
- ODM_StallExecution(10);
-
- /* Store A Path Register 88c, c08, 874, c50 */
- Reg88c = ODM_GetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord);
- Regc08 = ODM_GetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord);
- Reg874 = ODM_GetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord);
- Regc50 = ODM_GetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, bMaskDWord);
-
- /* Store AFE Registers */
- odm_PHY_SaveAFERegisters(pDM_Odm, AFE_REG_8723A, AFE_Backup, 16);
-
- /* Set PSD 128 pts */
- ODM_SetBBReg(pDM_Odm, rFPGA0_PSDFunction, BIT14|BIT15, 0x0); /* 128 pts */
-
- /* To SET CH1 to do */
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, ODM_CHANNEL, bRFRegOffsetMask, 0x01); /* Channel 1 */
-
- /* AFE all on step */
- ODM_SetBBReg(pDM_Odm, rRx_Wait_CCA, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_CCK_RFON, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_CCK_BBON, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_OFDM_RFON, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_OFDM_BBON, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_To_Rx, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_To_Tx, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rRx_CCK, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rRx_OFDM, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rRx_Wait_RIFS, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rRx_TO_Rx, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rStandby, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rSleep, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rPMPD_ANAEN, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_SwitchControl, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rBlue_Tooth, bMaskDWord, 0x6FDB25A4);
-
- /* 3 wire Disable */
- ODM_SetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord, 0xCCF000C0);
-
- /* BB IQK Setting */
- ODM_SetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord, 0x000800E4);
- ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22208000);
-
- /* IQK setting tone@ 4.34Mhz */
- ODM_SetBBReg(pDM_Odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008C1C);
- ODM_SetBBReg(pDM_Odm, rTx_IQK, bMaskDWord, 0x01007c00);
-
-
- /* Page B init */
- ODM_SetBBReg(pDM_Odm, rConfig_AntA, bMaskDWord, 0x00080000);
- ODM_SetBBReg(pDM_Odm, rConfig_AntA, bMaskDWord, 0x0f600000);
- ODM_SetBBReg(pDM_Odm, rRx_IQK, bMaskDWord, 0x01004800);
- ODM_SetBBReg(pDM_Odm, rRx_IQK_Tone_A, bMaskDWord, 0x10008c1f);
- ODM_SetBBReg(pDM_Odm, rTx_IQK_PI_A, bMaskDWord, 0x82150008);
- ODM_SetBBReg(pDM_Odm, rRx_IQK_PI_A, bMaskDWord, 0x28150008);
- ODM_SetBBReg(pDM_Odm, rIQK_AGC_Rsp, bMaskDWord, 0x001028d0);
-
- /* RF loop Setting */
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x0, 0xFFFFF, 0x50008);
-
- /* IQK Single tone start */
- ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
- ODM_SetBBReg(pDM_Odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
- ODM_StallExecution(1000);
- PSD_report_tmp = 0x0;
-
- for (n = 0; n < 2; n++) {
- PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
- if (PSD_report_tmp > AntA_report)
- AntA_report = PSD_report_tmp;
- }
-
- PSD_report_tmp = 0x0;
-
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_B); /* change to Antenna B */
- ODM_StallExecution(10);
-
-
- for (n = 0; n < 2; n++) {
- PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
- if (PSD_report_tmp > AntB_report)
- AntB_report = PSD_report_tmp;
- }
-
- /* change to open case */
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, 0); /* change to Ant A and B all open case */
- ODM_StallExecution(10);
-
- for (n = 0; n < 2; n++) {
- PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
- if (PSD_report_tmp > AntO_report)
- AntO_report = PSD_report_tmp;
- }
-
- /* Close IQK Single Tone function */
- ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- PSD_report_tmp = 0x0;
-
- /* 1 Return to antanna A */
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_A);
- ODM_SetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord, Reg88c);
- ODM_SetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord, Regc08);
- ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, Reg874);
- ODM_SetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, 0x7F, 0x40);
- ODM_SetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, bMaskDWord, Regc50);
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, CurrentChannel);
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x00, bRFRegOffsetMask, RfLoopReg);
-
- /* Reload AFE Registers */
- odm_PHY_ReloadAFERegisters(pDM_Odm, AFE_REG_8723A, AFE_Backup, 16);
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_A[%d]= %d\n", 2416, AntA_report));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_B[%d]= %d\n", 2416, AntB_report));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_O[%d]= %d\n", 2416, AntO_report));
-
-
- if (pDM_Odm->SupportICType == ODM_RTL8723A) {
- /* 2 Test Ant B based on Ant A is ON */
- if (mode == ANTTESTB) {
- if (AntA_report >= 100) {
- if (AntB_report > (AntA_report+1)) {
- pDM_SWAT_Table->ANTB_ON = false;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Single Antenna A\n"));
- } else {
- pDM_SWAT_Table->ANTB_ON = true;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Dual Antenna is A and B\n"));
- }
- } else {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Need to check again\n"));
- pDM_SWAT_Table->ANTB_ON = false; /* Set Antenna B off as default */
- bResult = false;
- }
- } else if (mode == ANTTESTALL) {
- /* 2 Test Ant A and B based on DPDT Open */
- if ((AntO_report >= 100)&(AntO_report < 118)) {
- if (AntA_report > (AntO_report+1)) {
- pDM_SWAT_Table->ANTA_ON = false;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant A is OFF"));
- } else {
- pDM_SWAT_Table->ANTA_ON = true;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant A is ON"));
- }
-
- if (AntB_report > (AntO_report+2)) {
- pDM_SWAT_Table->ANTB_ON = false;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant B is OFF"));
- } else {
- pDM_SWAT_Table->ANTB_ON = true;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant B is ON"));
- }
- }
- }
- } else if (pDM_Odm->SupportICType == ODM_RTL8192C) {
- if (AntA_report >= 100) {
- if (AntB_report > (AntA_report+2)) {
- pDM_SWAT_Table->ANTA_ON = false;
- pDM_SWAT_Table->ANTB_ON = true;
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_B);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Single Antenna B\n"));
- } else if (AntA_report > (AntB_report+2)) {
- pDM_SWAT_Table->ANTA_ON = true;
- pDM_SWAT_Table->ANTB_ON = false;
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_A);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Single Antenna A\n"));
- } else {
- pDM_SWAT_Table->ANTA_ON = true;
- pDM_SWAT_Table->ANTB_ON = true;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("ODM_SingleDualAntennaDetection(): Dual Antenna\n"));
- }
- } else {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Need to check again\n"));
- pDM_SWAT_Table->ANTA_ON = true; /* Set Antenna A on as default */
- pDM_SWAT_Table->ANTB_ON = false; /* Set Antenna B off as default */
- bResult = false;
- }
- }
- return bResult;
-}
-
-/* Justin: According to the current RRSI to adjust Response Frame TX power, 2012/11/05 */
-void odm_dtc(struct odm_dm_struct *pDM_Odm)
-{
-}
diff --git a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
index 19c509a..a755df3 100644
--- a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
@@ -39,61 +39,32 @@ static u8 odm_QueryRxPwrPercentage(s8 AntPower)
/* 2012/01/12 MH Move some signal strength smoothing methods to the MP HAL layer. */
/* If other SW teams do not support the feature, remove this section? */
-static s32 odm_sig_patch_lenove(struct odm_dm_struct *dm_odm, s32 CurrSig)
-{
- return 0;
-}
-
-static s32 odm_sig_patch_netcore(struct odm_dm_struct *dm_odm, s32 CurrSig)
-{
- return 0;
-}
-
static s32 odm_SignalScaleMapping_92CSeries(struct odm_dm_struct *dm_odm, s32 CurrSig)
{
s32 RetSig = 0;
- if ((dm_odm->SupportInterface == ODM_ITRF_USB) ||
- (dm_odm->SupportInterface == ODM_ITRF_SDIO)) {
- if (CurrSig >= 51 && CurrSig <= 100)
- RetSig = 100;
- else if (CurrSig >= 41 && CurrSig <= 50)
- RetSig = 80 + ((CurrSig - 40)*2);
- else if (CurrSig >= 31 && CurrSig <= 40)
- RetSig = 66 + (CurrSig - 30);
- else if (CurrSig >= 21 && CurrSig <= 30)
- RetSig = 54 + (CurrSig - 20);
- else if (CurrSig >= 10 && CurrSig <= 20)
- RetSig = 42 + (((CurrSig - 10) * 2) / 3);
- else if (CurrSig >= 5 && CurrSig <= 9)
- RetSig = 22 + (((CurrSig - 5) * 3) / 2);
- else if (CurrSig >= 1 && CurrSig <= 4)
- RetSig = 6 + (((CurrSig - 1) * 3) / 2);
- else
- RetSig = CurrSig;
- }
+ if (CurrSig >= 51 && CurrSig <= 100)
+ RetSig = 100;
+ else if (CurrSig >= 41 && CurrSig <= 50)
+ RetSig = 80 + ((CurrSig - 40)*2);
+ else if (CurrSig >= 31 && CurrSig <= 40)
+ RetSig = 66 + (CurrSig - 30);
+ else if (CurrSig >= 21 && CurrSig <= 30)
+ RetSig = 54 + (CurrSig - 20);
+ else if (CurrSig >= 10 && CurrSig <= 20)
+ RetSig = 42 + (((CurrSig - 10) * 2) / 3);
+ else if (CurrSig >= 5 && CurrSig <= 9)
+ RetSig = 22 + (((CurrSig - 5) * 3) / 2);
+ else if (CurrSig >= 1 && CurrSig <= 4)
+ RetSig = 6 + (((CurrSig - 1) * 3) / 2);
+ else
+ RetSig = CurrSig;
return RetSig;
}
static s32 odm_SignalScaleMapping(struct odm_dm_struct *dm_odm, s32 CurrSig)
{
- if ((dm_odm->SupportPlatform == ODM_MP) &&
- (dm_odm->SupportInterface != ODM_ITRF_PCIE) && /* USB & SDIO */
- (dm_odm->PatchID == 10))
- return odm_sig_patch_netcore(dm_odm, CurrSig);
- else if ((dm_odm->SupportPlatform == ODM_MP) &&
- (dm_odm->SupportInterface == ODM_ITRF_PCIE) &&
- (dm_odm->PatchID == 19))
- return odm_sig_patch_lenove(dm_odm, CurrSig);
- else
- return odm_SignalScaleMapping_92CSeries(dm_odm, CurrSig);
-}
-
-/* pMgntInfo->CustomerID == RT_CID_819x_Lenovo */
-static u8 odm_SQ_process_patch_RT_CID_819x_Lenovo(struct odm_dm_struct *dm_odm,
- u8 isCCKrate, u8 PWDB_ALL, u8 path, u8 RSSI)
-{
- return 0;
+ return odm_SignalScaleMapping_92CSeries(dm_odm, CurrSig);
}
static u8 odm_EVMdbToPercentage(s8 Value)
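With the platform-specific patches dropped, odm_SignalScaleMapping() above now always uses the 92C-series piecewise mapping. A minimal standalone copy of that mapping (same break points and slopes as the added lines; illustrative only, for inspecting the curve):

#include <stdio.h>

static int scale_signal(int sig)
{
	/* Piecewise-linear stretch of a 0..100 raw signal toward the top of the scale. */
	if (sig >= 51 && sig <= 100)
		return 100;
	else if (sig >= 41 && sig <= 50)
		return 80 + (sig - 40) * 2;
	else if (sig >= 31 && sig <= 40)
		return 66 + (sig - 30);
	else if (sig >= 21 && sig <= 30)
		return 54 + (sig - 20);
	else if (sig >= 10 && sig <= 20)
		return 42 + ((sig - 10) * 2) / 3;
	else if (sig >= 5 && sig <= 9)
		return 22 + ((sig - 5) * 3) / 2;
	else if (sig >= 1 && sig <= 4)
		return 6 + ((sig - 1) * 3) / 2;
	return sig;
}

int main(void)
{
	int s;

	for (s = 0; s <= 60; s += 10)
		printf("raw %2d -> scaled %3d\n", s, scale_signal(s));
	return 0;
}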
@@ -135,11 +106,10 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
- pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_A] = -1;
- pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_B] = -1;
+ pPhyInfo->RxMIMOSignalQuality[RF_PATH_A] = -1;
+ pPhyInfo->RxMIMOSignalQuality[RF_PATH_B] = -1;
if (isCCKrate) {
- u8 report;
u8 cck_agc_rpt;
dm_odm->PhyDbgInfo.NumQryPhyStatusCCK++;
@@ -153,113 +123,51 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
	/* 2011.11.28 LukeLee: 88E uses a different LNA & VGA gain table */
/* The RSSI formula should be modified according to the gain table */
/* In 88E, cck_highpwr is always set to 1 */
- if (dm_odm->SupportICType & (ODM_RTL8188E|ODM_RTL8812)) {
- LNA_idx = ((cck_agc_rpt & 0xE0) >> 5);
- VGA_idx = (cck_agc_rpt & 0x1F);
- switch (LNA_idx) {
- case 7:
- if (VGA_idx <= 27)
- rx_pwr_all = -100 + 2*(27-VGA_idx); /* VGA_idx = 27~2 */
- else
- rx_pwr_all = -100;
- break;
- case 6:
- rx_pwr_all = -48 + 2*(2-VGA_idx); /* VGA_idx = 2~0 */
- break;
- case 5:
- rx_pwr_all = -42 + 2*(7-VGA_idx); /* VGA_idx = 7~5 */
- break;
- case 4:
- rx_pwr_all = -36 + 2*(7-VGA_idx); /* VGA_idx = 7~4 */
- break;
- case 3:
- rx_pwr_all = -24 + 2*(7-VGA_idx); /* VGA_idx = 7~0 */
- break;
- case 2:
- if (cck_highpwr)
- rx_pwr_all = -12 + 2*(5-VGA_idx); /* VGA_idx = 5~0 */
- else
- rx_pwr_all = -6 + 2*(5-VGA_idx);
- break;
- case 1:
- rx_pwr_all = 8-2*VGA_idx;
- break;
- case 0:
- rx_pwr_all = 14-2*VGA_idx;
- break;
- default:
- break;
- }
- rx_pwr_all += 6;
- PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
- if (!cck_highpwr) {
- if (PWDB_ALL >= 80)
- PWDB_ALL = ((PWDB_ALL-80)<<1)+((PWDB_ALL-80)>>1)+80;
- else if ((PWDB_ALL <= 78) && (PWDB_ALL >= 20))
- PWDB_ALL += 3;
- if (PWDB_ALL > 100)
- PWDB_ALL = 100;
- }
- } else {
- if (!cck_highpwr) {
- report = (cck_agc_rpt & 0xc0)>>6;
- switch (report) {
- /* 03312009 modified by cosa */
- /* Modify the RF RNA gain value to -40, -20, -2, 14 by Jenyu's suggestion */
- /* Note: different RF with the different RNA gain. */
- case 0x3:
- rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
- break;
- case 0x2:
- rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
- break;
- case 0x1:
- rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
- break;
- case 0x0:
- rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
- break;
- }
- } else {
- report = (cck_agc_rpt & 0x60)>>5;
- switch (report) {
- case 0x3:
- rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f)<<1) ;
- break;
- case 0x2:
- rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f)<<1);
- break;
- case 0x1:
- rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f)<<1);
- break;
- case 0x0:
- rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f)<<1);
- break;
- }
- }
-
- PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
-
- /* Modification for ext-LNA board */
- if (dm_odm->BoardType == ODM_BOARD_HIGHPWR) {
- if ((cck_agc_rpt>>7) == 0) {
- PWDB_ALL = (PWDB_ALL > 94) ? 100 : (PWDB_ALL+6);
- } else {
- if (PWDB_ALL > 38)
- PWDB_ALL -= 16;
- else
- PWDB_ALL = (PWDB_ALL <= 16) ? (PWDB_ALL>>2) : (PWDB_ALL-12);
- }
-
- /* CCK modification */
- if (PWDB_ALL > 25 && PWDB_ALL <= 60)
- PWDB_ALL += 6;
- } else {/* Modification for int-LNA board */
- if (PWDB_ALL > 99)
- PWDB_ALL -= 8;
- else if (PWDB_ALL > 50 && PWDB_ALL <= 68)
- PWDB_ALL += 4;
- }
+ LNA_idx = ((cck_agc_rpt & 0xE0) >> 5);
+ VGA_idx = (cck_agc_rpt & 0x1F);
+ switch (LNA_idx) {
+ case 7:
+ if (VGA_idx <= 27)
+ rx_pwr_all = -100 + 2*(27-VGA_idx); /* VGA_idx = 27~2 */
+ else
+ rx_pwr_all = -100;
+ break;
+ case 6:
+ rx_pwr_all = -48 + 2*(2-VGA_idx); /* VGA_idx = 2~0 */
+ break;
+ case 5:
+ rx_pwr_all = -42 + 2*(7-VGA_idx); /* VGA_idx = 7~5 */
+ break;
+ case 4:
+ rx_pwr_all = -36 + 2*(7-VGA_idx); /* VGA_idx = 7~4 */
+ break;
+ case 3:
+ rx_pwr_all = -24 + 2*(7-VGA_idx); /* VGA_idx = 7~0 */
+ break;
+ case 2:
+ if (cck_highpwr)
+ rx_pwr_all = -12 + 2*(5-VGA_idx); /* VGA_idx = 5~0 */
+ else
+ rx_pwr_all = -6 + 2*(5-VGA_idx);
+ break;
+ case 1:
+ rx_pwr_all = 8-2*VGA_idx;
+ break;
+ case 0:
+ rx_pwr_all = 14-2*VGA_idx;
+ break;
+ default:
+ break;
+ }
+ rx_pwr_all += 6;
+ PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+ if (!cck_highpwr) {
+ if (PWDB_ALL >= 80)
+ PWDB_ALL = ((PWDB_ALL-80)<<1)+((PWDB_ALL-80)>>1)+80;
+ else if ((PWDB_ALL <= 78) && (PWDB_ALL >= 20))
+ PWDB_ALL += 3;
+ if (PWDB_ALL > 100)
+ PWDB_ALL = 100;
}
pPhyInfo->RxPWDBAll = PWDB_ALL;
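The unconditional 88E branch kept above decodes the CCK AGC report byte as an LNA index (bits 7:5) and a VGA index (bits 4:0) and maps the pair to a receive power in dBm; the driver then adds 6 dB and converts to a percentage. A minimal standalone re-expression of that table (illustrative only, not the driver's API):

#include <stdio.h>

static int cck_rx_pwr_dbm(unsigned char cck_agc_rpt, int cck_highpwr)
{
	int lna = (cck_agc_rpt & 0xE0) >> 5;
	int vga = cck_agc_rpt & 0x1F;
	int pwr = -100;

	switch (lna) {
	case 7: pwr = (vga <= 27) ? -100 + 2 * (27 - vga) : -100; break;
	case 6: pwr = -48 + 2 * (2 - vga); break;
	case 5: pwr = -42 + 2 * (7 - vga); break;
	case 4: pwr = -36 + 2 * (7 - vga); break;
	case 3: pwr = -24 + 2 * (7 - vga); break;
	case 2: pwr = cck_highpwr ? -12 + 2 * (5 - vga) : -6 + 2 * (5 - vga); break;
	case 1: pwr = 8 - 2 * vga; break;
	case 0: pwr = 14 - 2 * vga; break;
	default: break;
	}
	return pwr + 6; /* the hunk adds 6 dB before odm_QueryRxPwrPercentage() */
}

int main(void)
{
	/* LNA index 7, VGA index 10: -100 + 2*17 + 6 = -60 dBm */
	printf("rx_pwr = %d dBm\n", cck_rx_pwr_dbm((7 << 5) | 10, 1));
	return 0;
}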
@@ -269,9 +177,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
if (pPktinfo->bPacketMatchBSSID) {
u8 SQ, SQ_rpt;
- if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
- SQ = odm_SQ_process_patch_RT_CID_819x_Lenovo(dm_odm, isCCKrate, PWDB_ALL, 0, 0);
- } else if (pPhyInfo->RxPWDBAll > 40 && !dm_odm->bInHctTest) {
+ if (pPhyInfo->RxPWDBAll > 40 && !dm_odm->bInHctTest) {
SQ = 100;
} else {
SQ_rpt = pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all;
@@ -284,15 +190,15 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
SQ = ((64-SQ_rpt) * 100) / 44;
}
pPhyInfo->SignalQuality = SQ;
- pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_A] = SQ;
- pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_B] = -1;
+ pPhyInfo->RxMIMOSignalQuality[RF_PATH_A] = SQ;
+ pPhyInfo->RxMIMOSignalQuality[RF_PATH_B] = -1;
}
} else { /* is OFDM rate */
dm_odm->PhyDbgInfo.NumQryPhyStatusOFDM++;
/* (1)Get RSSI for HT rate */
- for (i = ODM_RF_PATH_A; i < ODM_RF_PATH_MAX; i++) {
+ for (i = RF_PATH_A; i < RF_PATH_MAX; i++) {
/* 2008/01/30 MH we will judge RF RX path now. */
if (dm_odm->RFPathRxEnable & BIT(i))
rf_rx_num++;
@@ -321,14 +227,6 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
/* Get Rx snr value in DB */
pPhyInfo->RxSNR[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
-
- /* Record Signal Strength for next packet */
- if (pPktinfo->bPacketMatchBSSID) {
- if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
- if (i == ODM_RF_PATH_A)
- pPhyInfo->SignalQuality = odm_SQ_process_patch_RT_CID_819x_Lenovo(dm_odm, isCCKrate, PWDB_ALL, i, RSSI);
- }
- }
}
	/* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110;
@@ -341,26 +239,22 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
pPhyInfo->RxPower = rx_pwr_all;
pPhyInfo->RecvSignalPower = rx_pwr_all;
- if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
- /* do nothing */
- } else {
- /* (3)EVM of HT rate */
- if (pPktinfo->Rate >= DESC92C_RATEMCS8 && pPktinfo->Rate <= DESC92C_RATEMCS15)
- Max_spatial_stream = 2; /* both spatial stream make sense */
- else
- Max_spatial_stream = 1; /* only spatial stream 1 makes sense */
-
- for (i = 0; i < Max_spatial_stream; i++) {
- /* Do not use shift operation like "rx_evmX >>= 1" because the compilor of free build environment */
- /* fill most significant bit to "zero" when doing shifting operation which may change a negative */
- /* value to positive one, then the dbm value (which is supposed to be negative) is not correct anymore. */
- EVM = odm_EVMdbToPercentage((pPhyStaRpt->stream_rxevm[i])); /* dbm */
-
- if (pPktinfo->bPacketMatchBSSID) {
- if (i == ODM_RF_PATH_A) /* Fill value in RFD, Get the first spatial stream only */
- pPhyInfo->SignalQuality = (u8)(EVM & 0xff);
- pPhyInfo->RxMIMOSignalQuality[i] = (u8)(EVM & 0xff);
- }
+ /* (3)EVM of HT rate */
+ if (pPktinfo->Rate >= DESC92C_RATEMCS8 && pPktinfo->Rate <= DESC92C_RATEMCS15)
+ Max_spatial_stream = 2; /* both spatial stream make sense */
+ else
+ Max_spatial_stream = 1; /* only spatial stream 1 makes sense */
+
+ for (i = 0; i < Max_spatial_stream; i++) {
+		/* Do not use a shift operation like "rx_evmX >>= 1" because the compiler of the free build */
+		/* environment fills the most significant bit with zero when shifting, which may turn a negative */
+		/* value into a positive one; the dBm value (which should be negative) would then be wrong. */
+ EVM = odm_EVMdbToPercentage((pPhyStaRpt->stream_rxevm[i])); /* dbm */
+
+ if (pPktinfo->bPacketMatchBSSID) {
+ if (i == RF_PATH_A) /* Fill value in RFD, Get the first spatial stream only */
+ pPhyInfo->SignalQuality = (u8)(EVM & 0xff);
+ pPhyInfo->RxMIMOSignalQuality[i] = (u8)(EVM & 0xff);
}
}
}
@@ -396,6 +290,8 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
u32 OFDM_pkt = 0;
u32 Weighting = 0;
struct sta_info *pEntry;
+ u8 antsel_tr_mux;
+ struct fast_ant_train *pDM_FatTable = &dm_odm->DM_FatTable;
if (pPktinfo->StationID == 0xFF)
return;
@@ -408,27 +304,23 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
/* Smart Antenna Debug Message------------------ */
- if (dm_odm->SupportICType == ODM_RTL8188E) {
- u8 antsel_tr_mux;
- struct fast_ant_train *pDM_FatTable = &dm_odm->DM_FatTable;
-
- if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV) {
- if (pDM_FatTable->FAT_State == FAT_TRAINING_STATE) {
- if (pPktinfo->bPacketToSelf) {
- antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2<<2) |
- (pDM_FatTable->antsel_rx_keep_1<<1) |
- pDM_FatTable->antsel_rx_keep_0;
- pDM_FatTable->antSumRSSI[antsel_tr_mux] += pPhyInfo->RxPWDBAll;
- pDM_FatTable->antRSSIcnt[antsel_tr_mux]++;
- }
- }
- } else if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
- if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
+
+ if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV) {
+ if (pDM_FatTable->FAT_State == FAT_TRAINING_STATE) {
+ if (pPktinfo->bPacketToSelf) {
antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2<<2) |
- (pDM_FatTable->antsel_rx_keep_1<<1) | pDM_FatTable->antsel_rx_keep_0;
- ODM_AntselStatistics_88E(dm_odm, antsel_tr_mux, pPktinfo->StationID, pPhyInfo->RxPWDBAll);
+ (pDM_FatTable->antsel_rx_keep_1<<1) |
+ pDM_FatTable->antsel_rx_keep_0;
+ pDM_FatTable->antSumRSSI[antsel_tr_mux] += pPhyInfo->RxPWDBAll;
+ pDM_FatTable->antRSSIcnt[antsel_tr_mux]++;
}
}
+ } else if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
+ if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
+ antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2<<2) |
+ (pDM_FatTable->antsel_rx_keep_1<<1) | pDM_FatTable->antsel_rx_keep_0;
+ ODM_AntselStatistics_88E(dm_odm, antsel_tr_mux, pPktinfo->StationID, pPhyInfo->RxPWDBAll);
+ }
}
/* Smart Antenna Debug Message------------------ */
@@ -438,15 +330,15 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
if (!isCCKrate) { /* ofdm rate */
- if (pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B] == 0) {
- RSSI_Ave = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ if (pPhyInfo->RxMIMOSignalStrength[RF_PATH_B] == 0) {
+ RSSI_Ave = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
} else {
- if (pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A] > pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B]) {
- RSSI_max = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
- RSSI_min = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B];
+ if (pPhyInfo->RxMIMOSignalStrength[RF_PATH_A] > pPhyInfo->RxMIMOSignalStrength[RF_PATH_B]) {
+ RSSI_max = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
+ RSSI_min = pPhyInfo->RxMIMOSignalStrength[RF_PATH_B];
} else {
- RSSI_max = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B];
- RSSI_min = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ RSSI_max = pPhyInfo->RxMIMOSignalStrength[RF_PATH_B];
+ RSSI_min = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
}
if ((RSSI_max - RSSI_min) < 3)
RSSI_Ave = RSSI_max;
@@ -531,9 +423,7 @@ static void ODM_PhyStatusQuery_92CSeries(struct odm_dm_struct *dm_odm,
odm_RxPhyStatus92CSeries_Parsing(dm_odm, pPhyInfo, pPhyStatus,
pPktinfo);
if (dm_odm->RSSI_test) {
- /* Select the packets to do RSSI checking for antenna switching. */
- if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon)
- ODM_SwAntDivChkPerPktRssi(dm_odm, pPktinfo->StationID, pPhyInfo);
+ ;/* Select the packets to do RSSI checking for antenna switching. */
} else {
odm_Process_RSSIForDM(dm_odm, pPhyInfo, pPktinfo);
}
@@ -555,16 +445,14 @@ void ODM_MacStatusQuery(struct odm_dm_struct *dm_odm, u8 *mac_stat,
}
enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *dm_odm,
- enum ODM_RF_RADIO_PATH content,
- enum ODM_RF_RADIO_PATH rfpath)
+ enum rf_radio_path content,
+ enum rf_radio_path rfpath)
{
ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("===>ODM_ConfigRFWithHeaderFile\n"));
- if (dm_odm->SupportICType == ODM_RTL8188E) {
- if (rfpath == ODM_RF_PATH_A)
- READ_AND_CONFIG(8188E, _RadioA_1T_);
- ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, (" ===> ODM_ConfigRFWithHeaderFile() Radio_A:Rtl8188ERadioA_1TArray\n"));
- ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, (" ===> ODM_ConfigRFWithHeaderFile() Radio_B:Rtl8188ERadioB_1TArray\n"));
- }
+ if (rfpath == RF_PATH_A)
+ READ_AND_CONFIG(8188E, _RadioA_1T_);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, (" ===> ODM_ConfigRFWithHeaderFile() Radio_A:Rtl8188ERadioA_1TArray\n"));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, (" ===> ODM_ConfigRFWithHeaderFile() Radio_B:Rtl8188ERadioB_1TArray\n"));
ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("ODM_ConfigRFWithHeaderFile: Radio No %x\n", rfpath));
return HAL_STATUS_SUCCESS;
@@ -573,16 +461,14 @@ enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *dm_odm,
enum HAL_STATUS ODM_ConfigBBWithHeaderFile(struct odm_dm_struct *dm_odm,
enum odm_bb_config_type config_tp)
{
- if (dm_odm->SupportICType == ODM_RTL8188E) {
- if (config_tp == CONFIG_BB_PHY_REG) {
- READ_AND_CONFIG(8188E, _PHY_REG_1T_);
- } else if (config_tp == CONFIG_BB_AGC_TAB) {
- READ_AND_CONFIG(8188E, _AGC_TAB_1T_);
- } else if (config_tp == CONFIG_BB_PHY_REG_PG) {
- READ_AND_CONFIG(8188E, _PHY_REG_PG_);
- ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD,
- (" ===> phy_ConfigBBWithHeaderFile() agc:Rtl8188EPHY_REG_PGArray\n"));
- }
+ if (config_tp == CONFIG_BB_PHY_REG) {
+ READ_AND_CONFIG(8188E, _PHY_REG_1T_);
+ } else if (config_tp == CONFIG_BB_AGC_TAB) {
+ READ_AND_CONFIG(8188E, _AGC_TAB_1T_);
+ } else if (config_tp == CONFIG_BB_PHY_REG_PG) {
+ READ_AND_CONFIG(8188E, _PHY_REG_PG_);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ (" ===> phy_ConfigBBWithHeaderFile() agc:Rtl8188EPHY_REG_PGArray\n"));
}
return HAL_STATUS_SUCCESS;
}
@@ -590,7 +476,6 @@ enum HAL_STATUS ODM_ConfigBBWithHeaderFile(struct odm_dm_struct *dm_odm,
enum HAL_STATUS ODM_ConfigMACWithHeaderFile(struct odm_dm_struct *dm_odm)
{
u8 result = HAL_STATUS_SUCCESS;
- if (dm_odm->SupportICType == ODM_RTL8188E)
- result = READ_AND_CONFIG(8188E, _MAC_REG_);
+ result = READ_AND_CONFIG(8188E, _MAC_REG_);
return result;
}
diff --git a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
index 58410f3..323eb93 100644
--- a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
+++ b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
@@ -34,73 +34,76 @@ void ODM_DIG_LowerBound_88E(struct odm_dm_struct *dm_odm)
static void odm_RX_HWAntDivInit(struct odm_dm_struct *dm_odm)
{
+ struct adapter *adapter = dm_odm->Adapter;
u32 value32;
if (*(dm_odm->mp_mode) == 1) {
dm_odm->AntDivType = CGCS_RX_SW_ANTDIV;
- ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 0); /* disable HW AntDiv */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* 1:CG, 0:CS */
+ PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, BIT7, 0); /* disable HW AntDiv */
+ PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* 1:CG, 0:CS */
return;
}
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("odm_RX_HWAntDivInit()\n"));
/* MAC Setting */
- value32 = ODM_GetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
- ODM_SetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ value32 = PHY_QueryBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
+ PHY_SetBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
/* Pin Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_PIN_CTRL_11N, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT22, 1); /* Regb2c[22]=1'b0 disable CS/CG switch */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
+ PHY_SetBBReg(adapter, ODM_REG_PIN_CTRL_11N, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT22, 1); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
/* OFDM Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
+ PHY_SetBBReg(adapter, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
/* CCK Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_BB_PWR_SAV4_11N, BIT7, 1); /* Fix CCK PHY status report issue */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT4, 1); /* CCK complete HW AntDiv within 64 samples */
+ PHY_SetBBReg(adapter, ODM_REG_BB_PWR_SAV4_11N, BIT7, 1); /* Fix CCK PHY status report issue */
+ PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT4, 1); /* CCK complete HW AntDiv within 64 samples */
ODM_UpdateRxIdleAnt_88E(dm_odm, MAIN_ANT);
- ODM_SetBBReg(dm_odm, ODM_REG_ANT_MAPPING1_11N, 0xFFFF, 0x0201); /* antenna mapping table */
+ PHY_SetBBReg(adapter, ODM_REG_ANT_MAPPING1_11N, 0xFFFF, 0x0201); /* antenna mapping table */
}
static void odm_TRX_HWAntDivInit(struct odm_dm_struct *dm_odm)
{
+ struct adapter *adapter = dm_odm->Adapter;
u32 value32;
if (*(dm_odm->mp_mode) == 1) {
dm_odm->AntDivType = CGCS_RX_SW_ANTDIV;
- ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 0); /* disable HW AntDiv */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, 0); /* Default RX (0/1) */
+ PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, BIT7, 0); /* disable HW AntDiv */
+ PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, 0); /* Default RX (0/1) */
return;
}
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("odm_TRX_HWAntDivInit()\n"));
/* MAC Setting */
- value32 = ODM_GetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
- ODM_SetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ value32 = PHY_QueryBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
+ PHY_SetBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
/* Pin Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_PIN_CTRL_11N, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT22, 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
+ PHY_SetBBReg(adapter, ODM_REG_PIN_CTRL_11N, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT22, 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
/* OFDM Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
+ PHY_SetBBReg(adapter, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
/* CCK Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_BB_PWR_SAV4_11N, BIT7, 1); /* Fix CCK PHY status report issue */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT4, 1); /* CCK complete HW AntDiv within 64 samples */
+ PHY_SetBBReg(adapter, ODM_REG_BB_PWR_SAV4_11N, BIT7, 1); /* Fix CCK PHY status report issue */
+ PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT4, 1); /* CCK complete HW AntDiv within 64 samples */
/* Tx Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT21, 0); /* Reg80c[21]=1'b0 from TX Reg */
+ PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT21, 0); /* Reg80c[21]=1'b0 from TX Reg */
ODM_UpdateRxIdleAnt_88E(dm_odm, MAIN_ANT);
/* antenna mapping table */
if (!dm_odm->bIsMPChip) { /* testchip */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_DEFUALT_A_11N, BIT10|BIT9|BIT8, 1); /* Reg858[10:8]=3'b001 */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_DEFUALT_A_11N, BIT13|BIT12|BIT11, 2); /* Reg858[13:11]=3'b010 */
+ PHY_SetBBReg(adapter, ODM_REG_RX_DEFUALT_A_11N, BIT10|BIT9|BIT8, 1); /* Reg858[10:8]=3'b001 */
+ PHY_SetBBReg(adapter, ODM_REG_RX_DEFUALT_A_11N, BIT13|BIT12|BIT11, 2); /* Reg858[13:11]=3'b010 */
} else { /* MPchip */
- ODM_SetBBReg(dm_odm, ODM_REG_ANT_MAPPING1_11N, bMaskDWord, 0x0201); /* Reg914=3'b010, Reg915=3'b001 */
+ PHY_SetBBReg(adapter, ODM_REG_ANT_MAPPING1_11N, bMaskDWord, 0x0201); /* Reg914=3'b010, Reg915=3'b001 */
}
}
static void odm_FastAntTrainingInit(struct odm_dm_struct *dm_odm)
{
+ struct adapter *adapter = dm_odm->Adapter;
u32 value32, i;
struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
u32 AntCombination = 2;
@@ -122,68 +125,65 @@ static void odm_FastAntTrainingInit(struct odm_dm_struct *dm_odm)
dm_fat_tbl->FAT_State = FAT_NORMAL_STATE;
/* MAC Setting */
- value32 = ODM_GetMACReg(dm_odm, 0x4c, bMaskDWord);
- ODM_SetMACReg(dm_odm, 0x4c, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
- value32 = ODM_GetMACReg(dm_odm, 0x7B4, bMaskDWord);
- ODM_SetMACReg(dm_odm, 0x7b4, bMaskDWord, value32|(BIT16|BIT17)); /* Reg7B4[16]=1 enable antenna training, Reg7B4[17]=1 enable A2 match */
+ value32 = PHY_QueryBBReg(adapter, 0x4c, bMaskDWord);
+ PHY_SetBBReg(adapter, 0x4c, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ value32 = PHY_QueryBBReg(adapter, 0x7B4, bMaskDWord);
+ PHY_SetBBReg(adapter, 0x7b4, bMaskDWord, value32|(BIT16|BIT17)); /* Reg7B4[16]=1 enable antenna training, Reg7B4[17]=1 enable A2 match */
/* Match MAC ADDR */
- ODM_SetMACReg(dm_odm, 0x7b4, 0xFFFF, 0);
- ODM_SetMACReg(dm_odm, 0x7b0, bMaskDWord, 0);
+ PHY_SetBBReg(adapter, 0x7b4, 0xFFFF, 0);
+ PHY_SetBBReg(adapter, 0x7b0, bMaskDWord, 0);
- ODM_SetBBReg(dm_odm, 0x870, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- ODM_SetBBReg(dm_odm, 0x864, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
- ODM_SetBBReg(dm_odm, 0xb2c, BIT22, 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
- ODM_SetBBReg(dm_odm, 0xb2c, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
- ODM_SetBBReg(dm_odm, 0xca4, bMaskDWord, 0x000000a0);
+ PHY_SetBBReg(adapter, 0x870, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ PHY_SetBBReg(adapter, 0x864, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ PHY_SetBBReg(adapter, 0xb2c, BIT22, 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ PHY_SetBBReg(adapter, 0xb2c, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
+ PHY_SetBBReg(adapter, 0xca4, bMaskDWord, 0x000000a0);
/* antenna mapping table */
if (AntCombination == 2) {
if (!dm_odm->bIsMPChip) { /* testchip */
- ODM_SetBBReg(dm_odm, 0x858, BIT10|BIT9|BIT8, 1); /* Reg858[10:8]=3'b001 */
- ODM_SetBBReg(dm_odm, 0x858, BIT13|BIT12|BIT11, 2); /* Reg858[13:11]=3'b010 */
+ PHY_SetBBReg(adapter, 0x858, BIT10|BIT9|BIT8, 1); /* Reg858[10:8]=3'b001 */
+ PHY_SetBBReg(adapter, 0x858, BIT13|BIT12|BIT11, 2); /* Reg858[13:11]=3'b010 */
} else { /* MPchip */
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte0, 1);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte1, 2);
+ PHY_SetBBReg(adapter, 0x914, bMaskByte0, 1);
+ PHY_SetBBReg(adapter, 0x914, bMaskByte1, 2);
}
} else if (AntCombination == 7) {
if (!dm_odm->bIsMPChip) { /* testchip */
- ODM_SetBBReg(dm_odm, 0x858, BIT10|BIT9|BIT8, 0); /* Reg858[10:8]=3'b000 */
- ODM_SetBBReg(dm_odm, 0x858, BIT13|BIT12|BIT11, 1); /* Reg858[13:11]=3'b001 */
- ODM_SetBBReg(dm_odm, 0x878, BIT16, 0);
- ODM_SetBBReg(dm_odm, 0x858, BIT15|BIT14, 2); /* Reg878[0],Reg858[14:15])=3'b010 */
- ODM_SetBBReg(dm_odm, 0x878, BIT19|BIT18|BIT17, 3);/* Reg878[3:1]=3b'011 */
- ODM_SetBBReg(dm_odm, 0x878, BIT22|BIT21|BIT20, 4);/* Reg878[6:4]=3b'100 */
- ODM_SetBBReg(dm_odm, 0x878, BIT25|BIT24|BIT23, 5);/* Reg878[9:7]=3b'101 */
- ODM_SetBBReg(dm_odm, 0x878, BIT28|BIT27|BIT26, 6);/* Reg878[12:10]=3b'110 */
- ODM_SetBBReg(dm_odm, 0x878, BIT31|BIT30|BIT29, 7);/* Reg878[15:13]=3b'111 */
+ PHY_SetBBReg(adapter, 0x858, BIT10|BIT9|BIT8, 0); /* Reg858[10:8]=3'b000 */
+ PHY_SetBBReg(adapter, 0x858, BIT13|BIT12|BIT11, 1); /* Reg858[13:11]=3'b001 */
+ PHY_SetBBReg(adapter, 0x878, BIT16, 0);
+ PHY_SetBBReg(adapter, 0x858, BIT15|BIT14, 2); /* Reg878[0],Reg858[14:15])=3'b010 */
+ PHY_SetBBReg(adapter, 0x878, BIT19|BIT18|BIT17, 3);/* Reg878[3:1]=3b'011 */
+ PHY_SetBBReg(adapter, 0x878, BIT22|BIT21|BIT20, 4);/* Reg878[6:4]=3b'100 */
+ PHY_SetBBReg(adapter, 0x878, BIT25|BIT24|BIT23, 5);/* Reg878[9:7]=3b'101 */
+ PHY_SetBBReg(adapter, 0x878, BIT28|BIT27|BIT26, 6);/* Reg878[12:10]=3b'110 */
+ PHY_SetBBReg(adapter, 0x878, BIT31|BIT30|BIT29, 7);/* Reg878[15:13]=3b'111 */
} else { /* MPchip */
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte0, 0);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte1, 1);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte2, 2);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte3, 3);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte0, 4);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte1, 5);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte2, 6);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte3, 7);
+ PHY_SetBBReg(adapter, 0x914, bMaskByte0, 0);
+ PHY_SetBBReg(adapter, 0x914, bMaskByte1, 1);
+ PHY_SetBBReg(adapter, 0x914, bMaskByte2, 2);
+ PHY_SetBBReg(adapter, 0x914, bMaskByte3, 3);
+ PHY_SetBBReg(adapter, 0x918, bMaskByte0, 4);
+ PHY_SetBBReg(adapter, 0x918, bMaskByte1, 5);
+ PHY_SetBBReg(adapter, 0x918, bMaskByte2, 6);
+ PHY_SetBBReg(adapter, 0x918, bMaskByte3, 7);
}
}
/* Default Ant Setting when no fast training */
- ODM_SetBBReg(dm_odm, 0x80c, BIT21, 1); /* Reg80c[21]=1'b1 from TX Info */
- ODM_SetBBReg(dm_odm, 0x864, BIT5|BIT4|BIT3, 0); /* Default RX */
- ODM_SetBBReg(dm_odm, 0x864, BIT8|BIT7|BIT6, 1); /* Optional RX */
+ PHY_SetBBReg(adapter, 0x80c, BIT21, 1); /* Reg80c[21]=1'b1 from TX Info */
+ PHY_SetBBReg(adapter, 0x864, BIT5|BIT4|BIT3, 0); /* Default RX */
+ PHY_SetBBReg(adapter, 0x864, BIT8|BIT7|BIT6, 1); /* Optional RX */
	/* Enter Training state */
- ODM_SetBBReg(dm_odm, 0x864, BIT2|BIT1|BIT0, (AntCombination-1)); /* Reg864[2:0]=3'd6 ant combination=reg864[2:0]+1 */
- ODM_SetBBReg(dm_odm, 0xc50, BIT7, 1); /* RegC50[7]=1'b1 enable HW AntDiv */
+ PHY_SetBBReg(adapter, 0x864, BIT2|BIT1|BIT0, (AntCombination-1)); /* Reg864[2:0]=3'd6 ant combination=reg864[2:0]+1 */
+ PHY_SetBBReg(adapter, 0xc50, BIT7, 1); /* RegC50[7]=1'b1 enable HW AntDiv */
}
void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *dm_odm)
{
- if (dm_odm->SupportICType != ODM_RTL8188E)
- return;
-
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("dm_odm->AntDivType=%d\n", dm_odm->AntDivType));
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("dm_odm->bIsMPChip=%s\n", (dm_odm->bIsMPChip ? "true" : "false")));
@@ -198,6 +198,7 @@ void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *dm_odm)
void ODM_UpdateRxIdleAnt_88E(struct odm_dm_struct *dm_odm, u8 Ant)
{
struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ struct adapter *adapter = dm_odm->Adapter;
u32 DefaultAnt, OptionalAnt;
if (dm_fat_tbl->RxIdleAnt != Ant) {
@@ -211,13 +212,13 @@ void ODM_UpdateRxIdleAnt_88E(struct odm_dm_struct *dm_odm, u8 Ant)
}
if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, DefaultAnt); /* Default RX */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT8|BIT7|BIT6, OptionalAnt); /* Optional RX */
- ODM_SetBBReg(dm_odm, ODM_REG_ANTSEL_CTRL_11N, BIT14|BIT13|BIT12, DefaultAnt); /* Default TX */
- ODM_SetMACReg(dm_odm, ODM_REG_RESP_TX_11N, BIT6|BIT7, DefaultAnt); /* Resp Tx */
+ PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, DefaultAnt); /* Default RX */
+ PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT8|BIT7|BIT6, OptionalAnt); /* Optional RX */
+ PHY_SetBBReg(adapter, ODM_REG_ANTSEL_CTRL_11N, BIT14|BIT13|BIT12, DefaultAnt); /* Default TX */
+ PHY_SetBBReg(adapter, ODM_REG_RESP_TX_11N, BIT6|BIT7, DefaultAnt); /* Resp Tx */
} else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, DefaultAnt); /* Default RX */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT8|BIT7|BIT6, OptionalAnt); /* Optional RX */
+ PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, DefaultAnt); /* Default RX */
+ PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT8|BIT7|BIT6, OptionalAnt); /* Optional RX */
}
}
dm_fat_tbl->RxIdleAnt = Ant;
@@ -343,16 +344,18 @@ static void odm_HWAntDiv(struct odm_dm_struct *dm_odm)
void ODM_AntennaDiversity_88E(struct odm_dm_struct *dm_odm)
{
struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
- if ((dm_odm->SupportICType != ODM_RTL8188E) || (!(dm_odm->SupportAbility & ODM_BB_ANT_DIV)))
+ struct adapter *adapter = dm_odm->Adapter;
+
+ if (!(dm_odm->SupportAbility & ODM_BB_ANT_DIV))
return;
if (!dm_odm->bLinked) {
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_AntennaDiversity_88E(): No Link.\n"));
if (dm_fat_tbl->bBecomeLinked) {
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Need to Turn off HW AntDiv\n"));
- ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 0); /* RegC50[7]=1'b1 enable HW AntDiv */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT15, 0); /* Enable CCK AntDiv */
+ PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, BIT7, 0); /* RegC50[7]=1'b1 enable HW AntDiv */
+ PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT15, 0); /* Enable CCK AntDiv */
if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT21, 0); /* Reg80c[21]=1'b0 from TX Reg */
+ PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT21, 0); /* Reg80c[21]=1'b0 from TX Reg */
dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
}
return;
@@ -360,10 +363,10 @@ void ODM_AntennaDiversity_88E(struct odm_dm_struct *dm_odm)
if (!dm_fat_tbl->bBecomeLinked) {
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Need to Turn on HW AntDiv\n"));
/* Because HW AntDiv is disabled before Link, we enable HW AntDiv after link */
- ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 1); /* RegC50[7]=1'b1 enable HW AntDiv */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT15, 1); /* Enable CCK AntDiv */
+ PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, BIT7, 1); /* RegC50[7]=1'b1 enable HW AntDiv */
+ PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT15, 1); /* Enable CCK AntDiv */
if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT21, 1); /* Reg80c[21]=1'b1 from TX Info */
+ PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT21, 1); /* Reg80c[21]=1'b1 from TX Info */
dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
}
}
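
The conversion in this file is mechanical: ODM_SetBBReg() (its removed body appears in odm_interface.c later in this patch) did nothing but look up pDM_Odm->Adapter and forward to PHY_SetBBReg(), so each caller now fetches the adapter pointer once and calls the HAL helper directly. A minimal sketch of the before/after shape, assuming the usual rtl8188eu driver context (the function names here are illustrative only):

/* Before: register writes funnelled through the ODM wrapper layer. */
static void antdiv_enable_old(struct odm_dm_struct *dm_odm)
{
	ODM_SetBBReg(dm_odm, 0xc50, BIT7, 1);	/* wrapper dereferenced dm_odm->Adapter internally */
}

/* After: resolve the adapter once and call the HAL PHY accessor directly. */
static void antdiv_enable_new(struct odm_dm_struct *dm_odm)
{
	struct adapter *adapter = dm_odm->Adapter;

	PHY_SetBBReg(adapter, 0xc50, BIT7, 1);	/* RegC50[7]=1'b1 enable HW AntDiv */
}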
diff --git a/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c b/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
index 18c0533..6193d9f 100644
--- a/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
+++ b/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
@@ -21,25 +21,27 @@
#include "odm_precomp.h"
void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Data, enum ODM_RF_RADIO_PATH RF_PATH,
+ u32 Data, enum rf_radio_path RF_PATH,
u32 RegAddr)
{
- if (Addr == 0xffe) {
- ODM_sleep_ms(50);
+ struct adapter *adapter = pDM_Odm->Adapter;
+
+ if (Addr == 0xffe) {
+ msleep(50);
} else if (Addr == 0xfd) {
- ODM_delay_ms(5);
+ mdelay(5);
} else if (Addr == 0xfc) {
- ODM_delay_ms(1);
+ mdelay(1);
} else if (Addr == 0xfb) {
- ODM_delay_us(50);
+ udelay(50);
} else if (Addr == 0xfa) {
- ODM_delay_us(5);
+ udelay(5);
} else if (Addr == 0xf9) {
- ODM_delay_us(1);
+ udelay(1);
} else {
- ODM_SetRFReg(pDM_Odm, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
+ PHY_SetRFReg(adapter, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
/* Add 1us delay between BB/RF register setting. */
- ODM_delay_us(1);
+ udelay(1);
}
}
@@ -48,7 +50,7 @@ void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data
u32 content = 0x1000; /* RF_Content: radioa_txt */
u32 maskforPhySet = (u32)(content&0xE000);
- odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, ODM_RF_PATH_A, Addr|maskforPhySet);
+ odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, RF_PATH_A, Addr|maskforPhySet);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> ODM_ConfigRFWithHeaderFile: [RadioA] %08X %08X\n", Addr, Data));
}
@@ -57,7 +59,7 @@ void odm_ConfigRF_RadioB_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data
u32 content = 0x1001; /* RF_Content: radiob_txt */
u32 maskforPhySet = (u32)(content&0xE000);
- odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, ODM_RF_PATH_B, Addr|maskforPhySet);
+ odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, RF_PATH_B, Addr|maskforPhySet);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> ODM_ConfigRFWithHeaderFile: [RadioB] %08X %08X\n", Addr, Data));
}
@@ -70,9 +72,11 @@ void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data)
void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
{
- ODM_SetBBReg(pDM_Odm, Addr, Bitmask, Data);
+ struct adapter *adapter = pDM_Odm->Adapter;
+
+ PHY_SetBBReg(adapter, Addr, Bitmask, Data);
/* Add 1us delay between BB/RF register setting. */
- ODM_delay_us(1);
+ udelay(1);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE,
("===> ODM_ConfigBBWithHeaderFile: [AGC_TAB] %08X %08X\n",
@@ -83,17 +87,17 @@ void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
u32 Bitmask, u32 Data)
{
if (Addr == 0xfe) {
- ODM_sleep_ms(50);
+ msleep(50);
} else if (Addr == 0xfd) {
- ODM_delay_ms(5);
+ mdelay(5);
} else if (Addr == 0xfc) {
- ODM_delay_ms(1);
+ mdelay(1);
} else if (Addr == 0xfb) {
- ODM_delay_us(50);
+ udelay(50);
} else if (Addr == 0xfa) {
- ODM_delay_us(5);
+ udelay(5);
} else if (Addr == 0xf9) {
- ODM_delay_us(1);
+ udelay(1);
} else{
ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
("===> @@@@@@@ ODM_ConfigBBWithHeaderFile: [PHY_REG] %08X %08X %08X\n",
@@ -104,25 +108,27 @@ void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
{
+ struct adapter *adapter = pDM_Odm->Adapter;
+
if (Addr == 0xfe) {
- ODM_sleep_ms(50);
+ msleep(50);
} else if (Addr == 0xfd) {
- ODM_delay_ms(5);
+ mdelay(5);
} else if (Addr == 0xfc) {
- ODM_delay_ms(1);
+ mdelay(1);
} else if (Addr == 0xfb) {
- ODM_delay_us(50);
+ udelay(50);
} else if (Addr == 0xfa) {
- ODM_delay_us(5);
+ udelay(5);
} else if (Addr == 0xf9) {
- ODM_delay_us(1);
+ udelay(1);
} else {
if (Addr == 0xa24)
pDM_Odm->RFCalibrateInfo.RegA24 = Data;
- ODM_SetBBReg(pDM_Odm, Addr, Bitmask, Data);
+ PHY_SetBBReg(adapter, Addr, Bitmask, Data);
/* Add 1us delay between BB/RF register setting. */
- ODM_delay_us(1);
+ udelay(1);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE,
("===> ODM_ConfigBBWithHeaderFile: [PHY_REG] %08X %08X\n",
Addr, Data));
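
The pseudo-addresses 0xffe/0xfd/0xfc/0xfb/0xfa/0xf9 in the config tables encode delays rather than real register offsets; only the delay wrappers change, not the semantics. ODM_sleep_ms() wrapped rtw_msleep_os() and the ODM_delay_* helpers wrapped the busy-wait variants (see their removed bodies in odm_interface.c below), so they map one-to-one onto the standard kernel primitives. A condensed sketch of the mapping, assuming <linux/delay.h> is reachable through the driver headers:

#include <linux/delay.h>

static void config_delay_sketch(u32 addr)
{
	if (addr == 0xffe)
		msleep(50);	/* sleeping delay, was ODM_sleep_ms(50) */
	else if (addr == 0xfd)
		mdelay(5);	/* millisecond busy-wait, was ODM_delay_ms(5) */
	else if (addr == 0xf9)
		udelay(1);	/* microsecond busy-wait, was ODM_delay_us(1) */
}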
diff --git a/drivers/staging/rtl8188eu/hal/odm_interface.c b/drivers/staging/rtl8188eu/hal/odm_interface.c
index 59ad5bf..3cd6821 100644
--- a/drivers/staging/rtl8188eu/hal/odm_interface.c
+++ b/drivers/staging/rtl8188eu/hal/odm_interface.c
@@ -57,42 +57,6 @@ void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data)
rtw_write32(Adapter, RegAddr, Data);
}
-void ODM_SetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- PHY_SetBBReg(Adapter, RegAddr, BitMask, Data);
-}
-
-u32 ODM_GetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return PHY_QueryBBReg(Adapter, RegAddr, BitMask);
-}
-
-void ODM_SetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- PHY_SetBBReg(Adapter, RegAddr, BitMask, Data);
-}
-
-u32 ODM_GetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return PHY_QueryBBReg(Adapter, RegAddr, BitMask);
-}
-
-void ODM_SetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath, u32 RegAddr, u32 BitMask, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- PHY_SetRFReg(Adapter, (enum rf_radio_path)eRFPath, RegAddr, BitMask, Data);
-}
-
-u32 ODM_GetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath, u32 RegAddr, u32 BitMask)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return PHY_QueryRFReg(Adapter, (enum rf_radio_path)eRFPath, RegAddr, BitMask);
-}
-
/* ODM Memory relative API. */
void ODM_AllocateMemory(struct odm_dm_struct *pDM_Odm, void **pPtr, u32 length)
{
@@ -110,68 +74,6 @@ s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2, u
return _rtw_memcmp(pBuf1, pBuf2, length);
}
-/* ODM MISC relative API. */
-void ODM_AcquireSpinLock(struct odm_dm_struct *pDM_Odm, enum RT_SPINLOCK_TYPE type)
-{
-}
-
-void ODM_ReleaseSpinLock(struct odm_dm_struct *pDM_Odm, enum RT_SPINLOCK_TYPE type)
-{
-}
-
-/* Work item relative API. FOr MP driver only~! */
-void ODM_InitializeWorkItem(struct odm_dm_struct *pDM_Odm, void *pRtWorkItem,
- RT_WORKITEM_CALL_BACK RtWorkItemCallback,
- void *pContext, const char *szID)
-{
-}
-
-void ODM_StartWorkItem(void *pRtWorkItem)
-{
-}
-
-void ODM_StopWorkItem(void *pRtWorkItem)
-{
-}
-
-void ODM_FreeWorkItem(void *pRtWorkItem)
-{
-}
-
-void ODM_ScheduleWorkItem(void *pRtWorkItem)
-{
-}
-
-void ODM_IsWorkItemScheduled(void *pRtWorkItem)
-{
-}
-
-/* ODM Timer relative API. */
-void ODM_StallExecution(u32 usDelay)
-{
- rtw_udelay_os(usDelay);
-}
-
-void ODM_delay_ms(u32 ms)
-{
- rtw_mdelay_os(ms);
-}
-
-void ODM_delay_us(u32 us)
-{
- rtw_udelay_os(us);
-}
-
-void ODM_sleep_ms(u32 ms)
-{
- rtw_msleep_os(ms);
-}
-
-void ODM_sleep_us(u32 us)
-{
- rtw_usleep_os(us);
-}
-
void ODM_SetTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer, u32 msDelay)
{
_set_timer(pTimer, msDelay); /* ms */
@@ -190,10 +92,6 @@ void ODM_CancelTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer)
_cancel_timer_ex(pTimer);
}
-void ODM_ReleaseTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer)
-{
-}
-
/* ODM FW relative API. */
u32 ODM_FillH2CCmd(u8 *pH2CBuffer, u32 H2CBufferLen, u32 CmdNum,
u32 *pElementID, u32 *pCmdLen,
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 8be2ad7..ca0a708 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -655,8 +655,8 @@ _func_enter_;
SetFwRsvdPagePkt(adapt, false);
DLBcnCount++;
do {
- rtw_yield_os();
- /* rtw_mdelay_os(10); */
+ yield();
+ /* mdelay(10); */
/* check rsvd page download OK. */
rtw_hal_get_hwreg(adapt, HW_VAR_BCN_VALID, (u8 *)(&bcn_valid));
poll++;
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index ec0028d..4c934e2 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -63,11 +63,6 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_PLATFORM, ODM_CE);
- if (Adapter->interface_type == RTW_GSPI)
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_INTERFACE, ODM_ITRF_SDIO);
- else
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_INTERFACE, Adapter->interface_type);/* RTL871X_HCI_TYPE */
-
ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_IC_TYPE, ODM_RTL8188E);
fab_ver = ODM_TSMC;
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 52b3fba..5921db8 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -60,7 +60,7 @@ static s32 iol_execute(struct adapter *padapter, u8 control)
reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0);
rtw_write8(padapter, REG_HMEBOX_E0, reg_0x88|control);
- start = rtw_get_current_time();
+ start = jiffies;
while ((reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0)) & control &&
(passing_time = rtw_get_passing_time_ms(start)) < 1000) {
;
@@ -238,11 +238,11 @@ static void efuse_read_phymap_from_txpktbuf(
rtw_write16(adapter, REG_PKTBUF_DBG_ADDR, dbg_addr+i);
rtw_write8(adapter, REG_TXPKTBUF_DBG, 0);
- start = rtw_get_current_time();
+ start = jiffies;
while (!(reg_0x143 = rtw_read8(adapter, REG_TXPKTBUF_DBG)) &&
(passing_time = rtw_get_passing_time_ms(start)) < 1000) {
DBG_88E("%s polling reg_0x143:0x%02x, reg_0x106:0x%02x\n", __func__, reg_0x143, rtw_read8(adapter, 0x106));
- rtw_usleep_os(100);
+ msleep(1);
}
lo32 = rtw_read32(adapter, REG_PKTBUF_DBG_DATA_L);
@@ -372,7 +372,7 @@ void rtw_IOL_cmd_tx_pkt_buf_dump(struct adapter *Adapter, int data_len)
if (pbuf) {
for (addr = 0; addr < data_cnts; addr++) {
rtw_write32(Adapter, 0x140, addr);
- rtw_usleep_os(2);
+ msleep(1);
loop = 0;
do {
rstatus = (reg_140 = rtw_read32(Adapter, REG_PKTBUF_DBG_CTRL)&BIT24);
@@ -383,7 +383,7 @@ void rtw_IOL_cmd_tx_pkt_buf_dump(struct adapter *Adapter, int data_len)
fifo_data = rtw_read32(Adapter, REG_PKTBUF_DBG_DATA_H);
memcpy(pbuf+(addr*8+4), &fifo_data, 4);
}
- rtw_usleep_os(2);
+ msleep(1);
} while (!rstatus && (loop++ < 10));
}
rtw_IOL_cmd_buf_dump(Adapter, data_len, pbuf);
@@ -574,7 +574,7 @@ static s32 _FWFreeToGo(struct adapter *padapter)
DBG_88E("%s: Polling FW ready success!! REG_MCUFWDL:0x%08x\n", __func__, value32);
return _SUCCESS;
}
- rtw_udelay_os(5);
+ udelay(5);
} while (counter++ < POLLING_READY_TIMEOUT_COUNT);
DBG_88E("%s: Polling FW ready fail!! REG_MCUFWDL:0x%08x\n", __func__, value32);
@@ -660,7 +660,7 @@ s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
}
_FWDownloadEnable(padapter, true);
- fwdl_start_time = rtw_get_current_time();
+ fwdl_start_time = jiffies;
while (1) {
/* reset the FWDL chksum */
rtw_write8(padapter, REG_MCUFWDL, rtw_read8(padapter, REG_MCUFWDL) | FWDL_ChkSum_rpt);
@@ -2304,7 +2304,7 @@ void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool
if (registry_par->antdiv_cfg == 2) { /* 2:By EFUSE */
pHalData->AntDivCfg = (PROMContent[EEPROM_RF_BOARD_OPTION_88E]&0x18)>>3;
if (PROMContent[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
- pHalData->AntDivCfg = (EEPROM_DEFAULT_BOARD_OPTION&0x18)>>3;;
+ pHalData->AntDivCfg = (EEPROM_DEFAULT_BOARD_OPTION&0x18)>>3;
} else {
pHalData->AntDivCfg = registry_par->antdiv_cfg; /* 0:OFF , 1:ON, 2:By EFUSE */
}
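
rtw_get_current_time() was a trivial wrapper around jiffies (its declaration is dropped from osdep_service.h later in this patch), so the polling loops simply read jiffies directly and keep measuring elapsed time with rtw_get_passing_time_ms(). An equivalent loop written with plain jiffies arithmetic would look roughly like this (a sketch only; the helper and register names are taken from iol_execute() above):

#include <linux/jiffies.h>

/* Poll REG_HMEBOX_E0 until 'control' clears or roughly 1000 ms elapse. */
static bool iol_poll_sketch(struct adapter *padapter, u8 control)
{
	unsigned long start = jiffies;

	while (rtw_read8(padapter, REG_HMEBOX_E0) & control) {
		if (jiffies_to_msecs(jiffies - start) >= 1000)
			return false;	/* timed out, mirrors the 1000 ms bound above */
	}
	return true;
}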
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c b/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
index e97ba02..3d0e6c9 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
@@ -581,7 +581,7 @@ u8 Hal_ReadRFThermalMeter(struct adapter *pAdapter)
void Hal_GetThermalMeter(struct adapter *pAdapter, u8 *value)
{
Hal_TriggerRFThermalMeter(pAdapter);
- rtw_msleep_os(1000);
+ msleep(1000);
*value = Hal_ReadRFThermalMeter(pAdapter);
}
@@ -614,7 +614,7 @@ void Hal_SetSingleCarrierTx(struct adapter *pAdapter, u8 bStart)
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
- rtw_msleep_os(10);
+ msleep(10);
/* BB Reset */
write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
@@ -652,29 +652,27 @@ void Hal_SetSingleToneTx(struct adapter *pAdapter, u8 bStart)
/* Start Single Tone. */
RT_TRACE(_module_mp_, _drv_alert_, ("SetSingleToneTx: test start\n"));
/* <20120326, Kordan> To amplify the power of tone for Xtal calibration. (asked by Edlu) */
- if (IS_HARDWARE_TYPE_8188E(pAdapter)) {
- reg58 = PHY_QueryRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask);
- reg58 &= 0xFFFFFFF0;
- reg58 += 2;
- PHY_SetRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask, reg58);
- }
+ reg58 = PHY_QueryRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask);
+ reg58 &= 0xFFFFFFF0;
+ reg58 += 2;
+ PHY_SetRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask, reg58);
PHY_SetBBReg(pAdapter, rFPGA0_RFMOD, bCCKEn, 0x0);
PHY_SetBBReg(pAdapter, rFPGA0_RFMOD, bOFDMEn, 0x0);
if (is92C) {
_write_rfreg(pAdapter, RF_PATH_A, 0x21, BIT19, 0x01);
- rtw_usleep_os(100);
+ msleep(1);
if (rfPath == RF_PATH_A)
write_rfreg(pAdapter, RF_PATH_B, 0x00, 0x10000); /* PAD all on. */
else if (rfPath == RF_PATH_B)
write_rfreg(pAdapter, RF_PATH_A, 0x00, 0x10000); /* PAD all on. */
write_rfreg(pAdapter, rfPath, 0x00, 0x2001f); /* PAD all on. */
- rtw_usleep_os(100);
+ msleep(1);
} else {
write_rfreg(pAdapter, rfPath, 0x21, 0xd4000);
- rtw_usleep_os(100);
+ msleep(1);
write_rfreg(pAdapter, rfPath, 0x00, 0x2001f); /* PAD all on. */
- rtw_usleep_os(100);
+ msleep(1);
}
/* for dynamic set Power index. */
@@ -687,24 +685,22 @@ void Hal_SetSingleToneTx(struct adapter *pAdapter, u8 bStart)
/* <20120326, Kordan> To amplify the power of tone for Xtal calibration. (asked by Edlu) */
/* <20120326, Kordan> Only in single tone mode. (asked by Edlu) */
- if (IS_HARDWARE_TYPE_8188E(pAdapter)) {
- reg58 = PHY_QueryRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask);
- reg58 &= 0xFFFFFFF0;
- PHY_SetRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask, reg58);
- }
+ reg58 = PHY_QueryRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask);
+ reg58 &= 0xFFFFFFF0;
+ PHY_SetRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask, reg58);
write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, 0x1);
write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
if (is92C) {
_write_rfreg(pAdapter, RF_PATH_A, 0x21, BIT19, 0x00);
- rtw_usleep_os(100);
+ msleep(1);
write_rfreg(pAdapter, RF_PATH_A, 0x00, 0x32d75); /* PAD all on. */
write_rfreg(pAdapter, RF_PATH_B, 0x00, 0x32d75); /* PAD all on. */
- rtw_usleep_os(100);
+ msleep(1);
} else {
write_rfreg(pAdapter, rfPath, 0x21, 0x54000);
- rtw_usleep_os(100);
+ msleep(1);
write_rfreg(pAdapter, rfPath, 0x00, 0x30000); /* PAD all on. */
- rtw_usleep_os(100);
+ msleep(1);
}
/* Stop for dynamic set Power index. */
@@ -832,7 +828,7 @@ void Hal_SetOFDMContinuousTx(struct adapter *pAdapter, u8 bStart)
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
/* Delay 10 ms */
- rtw_msleep_os(10);
+ msleep(10);
/* BB Reset */
write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c
index 68bb96d..8079fc6 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c
@@ -190,12 +190,12 @@ phy_RFSerialRead(
tmplong2 = (tmplong2 & (~bLSSIReadAddress)) | (NewOffset<<23) | bLSSIReadEdge; /* T65 RF */
PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord, tmplong&(~bLSSIReadEdge));
- rtw_udelay_os(10);/* PlatformStallExecution(10); */
+ udelay(10);/* PlatformStallExecution(10); */
PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord, tmplong2);
- rtw_udelay_os(100);/* PlatformStallExecution(100); */
+ udelay(100);/* PlatformStallExecution(100); */
- rtw_udelay_os(10);/* PlatformStallExecution(10); */
+ udelay(10);/* PlatformStallExecution(10); */
if (eRFPath == RF_PATH_A)
RfPiEnable = (u8)PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter1, BIT8);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c
index 299e03e..b1cb5c4 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c
@@ -502,27 +502,27 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
}
/*----Set RF_ENV enable----*/
PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
- rtw_udelay_os(1);/* PlatformStallExecution(1); */
+ udelay(1);/* PlatformStallExecution(1); */
/*----Set RF_ENV output high----*/
PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
- rtw_udelay_os(1);/* PlatformStallExecution(1); */
+ udelay(1);/* PlatformStallExecution(1); */
/* Set bit number of Address and Data for RF register */
PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
- rtw_udelay_os(1);/* PlatformStallExecution(1); */
+ udelay(1);/* PlatformStallExecution(1); */
PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
- rtw_udelay_os(1);/* PlatformStallExecution(1); */
+ udelay(1);/* PlatformStallExecution(1); */
	/*----Initialize RF from configuration file----*/
switch (eRFPath) {
case RF_PATH_A:
- if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum ODM_RF_RADIO_PATH)eRFPath, (enum ODM_RF_RADIO_PATH)eRFPath))
+ if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum rf_radio_path)eRFPath, (enum rf_radio_path)eRFPath))
rtStatus = _FAIL;
break;
case RF_PATH_B:
- if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum ODM_RF_RADIO_PATH)eRFPath, (enum ODM_RF_RADIO_PATH)eRFPath))
+ if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum rf_radio_path)eRFPath, (enum rf_radio_path)eRFPath))
rtStatus = _FAIL;
break;
case RF_PATH_C:
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
index 05e2475..511f61c 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -86,7 +86,7 @@ void update_recvframe_attrib_88e(union recv_frame *precvframe, struct recv_stat
pattrib = &precvframe->u.hdr.attrib;
_rtw_memset(pattrib, 0, sizeof(struct rx_pkt_attrib));
- pattrib->crc_err = (u8)((le32_to_cpu(report.rxdw0) >> 14) & 0x1);;/* u8)prxreport->crc32; */
+ pattrib->crc_err = (u8)((le32_to_cpu(report.rxdw0) >> 14) & 0x1);/* u8)prxreport->crc32; */
/* update rx report to recv_frame attribute */
pattrib->pkt_rpt_type = (u8)((le32_to_cpu(report.rxdw3) >> 14) & 0x3);/* prxreport->rpt_sel; */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c b/drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c
index 96d698e..047b534 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c
@@ -43,7 +43,7 @@ void rtl8188e_sreset_xmit_status_check(struct adapter *padapter)
rtl8188e_silentreset_for_specific_platform(padapter);
}
/* total xmit irp = 4 */
- current_time = rtw_get_current_time();
+ current_time = jiffies;
if (0 == pxmitpriv->free_xmitbuf_cnt) {
diff_time = jiffies_to_msecs(current_time - psrtpriv->last_tx_time);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index 0f47b89..17c94f4 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -75,7 +75,7 @@ int rtl8188eu_init_recv_priv(struct adapter *padapter)
for (i = 0; i < NR_RECVBUFF; i++) {
_rtw_init_listhead(&precvbuf->list);
- _rtw_spinlock_init(&precvbuf->recvbuf_lock);
+ spin_lock_init(&precvbuf->recvbuf_lock);
precvbuf->alloc_sz = MAX_RECVBUF_SZ;
res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf);
if (res == _FAIL)
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index 8f43f49..6fb6a46 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -445,7 +445,6 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
struct sta_info *psta = NULL;
struct tx_servq *ptxservq = NULL;
- unsigned long irql;
struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;
u32 pbuf; /* next pkt address */
@@ -535,7 +534,7 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
phwxmit = pxmitpriv->hwxmits + 2;
break;
}
- _enter_critical_bh(&pxmitpriv->lock, &irql);
+ spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&ptxservq->sta_pending);
xmitframe_plist = get_next(xmitframe_phead);
@@ -591,7 +590,7 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
if (_rtw_queue_empty(&ptxservq->sta_pending) == true)
rtw_list_delete(&ptxservq->tx_pending);
- _exit_critical_bh(&pxmitpriv->lock, &irql);
+ spin_unlock_bh(&pxmitpriv->lock);
if ((pfirstframe->attrib.ether_type != 0x0806) &&
(pfirstframe->attrib.ether_type != 0x888e) &&
(pfirstframe->attrib.ether_type != 0x88b4) &&
@@ -641,14 +640,13 @@ static s32 xmitframe_direct(struct adapter *adapt, struct xmit_frame *pxmitframe
*/
static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
- unsigned long irql;
s32 res;
struct xmit_buf *pxmitbuf = NULL;
struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
- _enter_critical_bh(&pxmitpriv->lock, &irql);
+ spin_lock_bh(&pxmitpriv->lock);
if (rtw_txframes_sta_ac_pending(adapt, pattrib) > 0)
goto enqueue;
@@ -660,7 +658,7 @@ static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
if (pxmitbuf == NULL)
goto enqueue;
- _exit_critical_bh(&pxmitpriv->lock, &irql);
+ spin_unlock_bh(&pxmitpriv->lock);
pxmitframe->pxmitbuf = pxmitbuf;
pxmitframe->buf_addr = pxmitbuf->pbuf;
@@ -675,7 +673,7 @@ static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
enqueue:
res = rtw_xmitframe_enqueue(adapt, pxmitframe);
- _exit_critical_bh(&pxmitpriv->lock, &irql);
+ spin_unlock_bh(&pxmitpriv->lock);
if (res != _SUCCESS) {
RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("pre_xmitframe: enqueue xmitframe fail\n"));
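
_enter_critical_bh()/_exit_critical_bh() took an IRQ-flags pointer only for symmetry with the irqsave variants and never used it (their removed inline bodies appear in osdep_service.h further down), which is why the conversion also drops the now-dead "unsigned long irql" locals. A minimal sketch of the resulting shape, modelled on the enqueue path of pre_xmitframe() above:

static s32 enqueue_under_lock_sketch(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	s32 res;

	/* spin_lock_bh() only disables bottom halves on the local CPU and
	 * saves no IRQ flags, so no extra state needs to live on the stack. */
	spin_lock_bh(&pxmitpriv->lock);
	res = rtw_xmitframe_enqueue(adapt, pxmitframe);
	spin_unlock_bh(&pxmitpriv->lock);

	return res;
}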
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index cca9732..b24ad49 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -705,7 +705,7 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- u32 init_start_time = rtw_get_current_time();
+ u32 init_start_time = jiffies;
#define HAL_INIT_PROFILE_TAG(stage) do {} while (0)
@@ -1251,7 +1251,7 @@ static void _ReadRFType(struct adapter *Adapter)
static int _ReadAdapterInfo8188EU(struct adapter *Adapter)
{
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
MSG_88E("====> %s\n", __func__);
@@ -1894,7 +1894,7 @@ _func_enter_;
/* RQPN Load 0 */
rtw_write16(Adapter, REG_RQPN_NPQ, 0x0);
rtw_write32(Adapter, REG_RQPN, 0x80000000);
- rtw_mdelay_os(10);
+ mdelay(10);
}
}
break;
diff --git a/drivers/staging/rtl8188eu/hal/usb_ops_linux.c b/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
index 787763e..31ae21a 100644
--- a/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
@@ -723,6 +723,5 @@ void rtl8188eu_set_intf_ops(struct _io_ops *pops)
void rtl8188eu_set_hw_type(struct adapter *adapt)
{
adapt->chip_type = RTL8188E;
- adapt->HardwareType = HARDWARE_TYPE_RTL8188EU;
DBG_88E("CHIP TYPE: RTL8188E\n");
}
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h b/drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h
index 20d0b3e..aebf1d3 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h
@@ -160,7 +160,7 @@
#define RTL8188E_TRANS_END \
/* format */ \
/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,0, PWR_CMD_END, 0, 0}, /* */
+ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, PWR_CMD_END, 0, 0}, /* */
extern struct wl_pwr_cfg rtl8188E_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS+RTL8188E_TRANS_END_STEPS];
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index ad073c8..a492a1c 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -191,7 +191,7 @@ struct dvobj_priv {
struct usb_interface *pusbintf;
struct usb_device *pusbdev;
- ATOMIC_T continual_urb_error;
+ atomic_t continual_urb_error;
};
static inline struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
@@ -225,8 +225,6 @@ struct adapter {
int pid[3];/* process id from UI, 0:wps, 1:hostapd, 2:dhcpcd */
int bDongle;/* build-in module or external dongle */
u16 chip_type;
- u16 HardwareType;
- u16 interface_type;/* USB,SDIO,SPI,PCI */
struct dvobj_priv *dvobj;
struct mlme_priv mlmepriv;
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 439c3c9..c274b34 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -286,39 +286,10 @@ enum rt_eeprom_type {
#define RF_CHANGE_BY_SW BIT31
enum hardware_type {
- HARDWARE_TYPE_RTL8180,
- HARDWARE_TYPE_RTL8185,
- HARDWARE_TYPE_RTL8187,
- HARDWARE_TYPE_RTL8188,
- HARDWARE_TYPE_RTL8190P,
- HARDWARE_TYPE_RTL8192E,
- HARDWARE_TYPE_RTL819xU,
- HARDWARE_TYPE_RTL8192SE,
- HARDWARE_TYPE_RTL8192SU,
- HARDWARE_TYPE_RTL8192CE,
- HARDWARE_TYPE_RTL8192CU,
- HARDWARE_TYPE_RTL8192DE,
- HARDWARE_TYPE_RTL8192DU,
- HARDWARE_TYPE_RTL8723AE,
- HARDWARE_TYPE_RTL8723AU,
- HARDWARE_TYPE_RTL8723AS,
- HARDWARE_TYPE_RTL8188EE,
HARDWARE_TYPE_RTL8188EU,
- HARDWARE_TYPE_RTL8188ES,
HARDWARE_TYPE_MAX,
};
-/* RTL8188E Series */
-#define IS_HARDWARE_TYPE_8188EE(_Adapter) \
-(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188EE)
-#define IS_HARDWARE_TYPE_8188EU(_Adapter) \
-(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188EU)
-#define IS_HARDWARE_TYPE_8188ES(_Adapter) \
-(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188ES)
-#define IS_HARDWARE_TYPE_8188E(_Adapter) \
-(IS_HARDWARE_TYPE_8188EE(_Adapter) || IS_HARDWARE_TYPE_8188EU(_Adapter) || \
- IS_HARDWARE_TYPE_8188ES(_Adapter))
-
#define GET_EEPROM_EFUSE_PRIV(adapter) (&adapter->eeprompriv)
#define is_boot_from_eeprom(adapter) (adapter->eeprompriv.EepromOrEfuse)
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index eaa4bc1..9d1a79c 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -151,7 +151,7 @@ struct rtl_ps {
int Rssi_val_min;
u8 initialize;
- u32 Reg874,RegC70,Reg85C,RegA74;
+ u32 Reg874, RegC70, Reg85C, RegA74;
};
@@ -454,29 +454,7 @@ enum odm_ability_def {
ODM_RF_CALIBRATION = BIT26,
};
-/* ODM_CMNINFO_INTERFACE */
-enum odm_interface_def {
- ODM_ITRF_PCIE = 0x1,
- ODM_ITRF_USB = 0x2,
- ODM_ITRF_SDIO = 0x4,
- ODM_ITRF_ALL = 0x7,
-};
-
-/* ODM_CMNINFO_IC_TYPE */
-enum odm_ic_type {
- ODM_RTL8192S = BIT0,
- ODM_RTL8192C = BIT1,
- ODM_RTL8192D = BIT2,
- ODM_RTL8723A = BIT3,
- ODM_RTL8188E = BIT4,
- ODM_RTL8812 = BIT5,
- ODM_RTL8821 = BIT6,
-};
-
-#define ODM_IC_11N_SERIES \
- (ODM_RTL8192S | ODM_RTL8192C | ODM_RTL8192D | \
- ODM_RTL8723A | ODM_RTL8188E)
-#define ODM_IC_11AC_SERIES (ODM_RTL8812)
+#define ODM_RTL8188E BIT4
/* ODM_CMNINFO_CUT_VER */
enum odm_cut_version {
@@ -950,13 +928,6 @@ struct odm_dm_struct {
#define ODM_RF_PATH_MAX 3
-enum ODM_RF_RADIO_PATH {
- ODM_RF_PATH_A = 0, /* Radio Path A */
- ODM_RF_PATH_B = 1, /* Radio Path B */
- ODM_RF_PATH_C = 2, /* Radio Path C */
- ODM_RF_PATH_D = 3, /* Radio Path D */
-};
-
enum ODM_RF_CONTENT {
odm_radioa_txt = 0x1000,
odm_radiob_txt = 0x1001,
@@ -1128,69 +1099,28 @@ extern u8 CCKSwingTable_Ch14 [CCK_TABLE_SIZE][8];
#define SWAW_STEP_PEAK 0
#define SWAW_STEP_DETERMINE 1
-void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI);
-void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres);
-
-void ODM_SetAntenna(struct odm_dm_struct *pDM_Odm, u8 Antenna);
-
-
+#define dm_CheckTXPowerTracking ODM_TXPowerTrackingCheck
#define dm_RF_Saving ODM_RF_Saving
-void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal);
-
-#define SwAntDivRestAfterLink ODM_SwAntDivRestAfterLink
-void ODM_SwAntDivRestAfterLink(struct odm_dm_struct *pDM_Odm);
-#define dm_CheckTXPowerTracking ODM_TXPowerTrackingCheck
+void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal);
void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm);
-
+void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm);
+void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres);
bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI,
bool bForceUpdate, u8 *pRATRState);
-
-#define dm_SWAW_RSSI_Check ODM_SwAntDivChkPerPktRssi
-void ODM_SwAntDivChkPerPktRssi(struct odm_dm_struct *pDM_Odm, u8 StationID,
- struct odm_phy_status_info *pPhyInfo);
-
u32 ConvertTo_dB(u32 Value);
-
-u32 GetPSDData(struct odm_dm_struct *pDM_Odm, unsigned int point,
- u8 initial_gain_psd);
-
-void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm);
-
u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid,
u32 ra_mask, u8 rssi_level);
-
-void ODM_DMInit(struct odm_dm_struct *pDM_Odm);
-
-void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm);
-
void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm,
enum odm_common_info_def CmnInfo, u32 Value);
-
+void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value);
void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm,
enum odm_common_info_def CmnInfo, void *pValue);
-
void ODM_CmnInfoPtrArrayHook(struct odm_dm_struct *pDM_Odm,
enum odm_common_info_def CmnInfo,
u16 Index, void *pValue);
-
-void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value);
-
-void ODM_InitAllTimers(struct odm_dm_struct *pDM_Odm);
-
-void ODM_CancelAllTimers(struct odm_dm_struct *pDM_Odm);
-
-void ODM_ReleaseAllTimers(struct odm_dm_struct *pDM_Odm);
-
-void ODM_ResetIQKResult(struct odm_dm_struct *pDM_Odm);
-
-void ODM_AntselStatistics_88C(struct odm_dm_struct *pDM_Odm, u8 MacId,
- u32 PWDBAll, bool isCCKrate);
-
-void ODM_SingleDualAntennaDefaultSetting(struct odm_dm_struct *pDM_Odm);
-
-bool ODM_SingleDualAntennaDetection(struct odm_dm_struct *pDM_Odm, u8 mode);
-
-void odm_dtc(struct odm_dm_struct *pDM_Odm);
+void ODM_DMInit(struct odm_dm_struct *pDM_Odm);
+void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm);
+void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI);
#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_HWConfig.h b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
index df52722..49e7e16 100644
--- a/drivers/staging/rtl8188eu/include/odm_HWConfig.h
+++ b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
@@ -121,8 +121,8 @@ void ODM_MacStatusQuery(struct odm_dm_struct *pDM_Odm,
bool bPacketBeacon);
enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *pDM_Odm,
- enum ODM_RF_RADIO_PATH Content,
- enum ODM_RF_RADIO_PATH eRFPath);
+ enum rf_radio_path Content,
+ enum rf_radio_path eRFPath);
enum HAL_STATUS ODM_ConfigBBWithHeaderFile(struct odm_dm_struct *pDM_Odm,
enum odm_bb_config_type ConfigType);
diff --git a/drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h b/drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h
index 727e6b2..f2bf7a0 100644
--- a/drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h
+++ b/drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h
@@ -21,7 +21,7 @@
#define __INC_ODM_REGCONFIG_H_8188E
void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data,
- enum ODM_RF_RADIO_PATH RF_PATH, u32 RegAddr);
+ enum rf_radio_path RF_PATH, u32 RegAddr);
void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm,
u32 Addr, u32 Data);
diff --git a/drivers/staging/rtl8188eu/include/odm_debug.h b/drivers/staging/rtl8188eu/include/odm_debug.h
index 622f4c1..e8c4cab 100644
--- a/drivers/staging/rtl8188eu/include/odm_debug.h
+++ b/drivers/staging/rtl8188eu/include/odm_debug.h
@@ -94,18 +94,7 @@
#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt) \
if (((comp) & pDM_Odm->DebugComponents) && \
(level <= pDM_Odm->DebugLevel)) { \
- if (pDM_Odm->SupportICType == ODM_RTL8192C) \
- DbgPrint("[ODM-92C] "); \
- else if (pDM_Odm->SupportICType == ODM_RTL8192D) \
- DbgPrint("[ODM-92D] "); \
- else if (pDM_Odm->SupportICType == ODM_RTL8723A) \
- DbgPrint("[ODM-8723A] "); \
- else if (pDM_Odm->SupportICType == ODM_RTL8188E) \
- DbgPrint("[ODM-8188E] "); \
- else if (pDM_Odm->SupportICType == ODM_RTL8812) \
- DbgPrint("[ODM-8812] "); \
- else if (pDM_Odm->SupportICType == ODM_RTL8821) \
- DbgPrint("[ODM-8821] "); \
+ DbgPrint("[ODM-8188E] "); \
RT_PRINTK fmt; \
}
@@ -136,7 +125,7 @@
DbgPrint(title_str); \
DbgPrint(" "); \
for (__i = 0; __i < 6; __i++) \
- DbgPrint("%02X%s", __ptr[__i], (__i==5)?"":"-");\
+ DbgPrint("%02X%s", __ptr[__i], (__i == 5)?"":"-");\
DbgPrint("\n"); \
}
diff --git a/drivers/staging/rtl8188eu/include/odm_interface.h b/drivers/staging/rtl8188eu/include/odm_interface.h
index e5c8704..a50eae3 100644
--- a/drivers/staging/rtl8188eu/include/odm_interface.h
+++ b/drivers/staging/rtl8188eu/include/odm_interface.h
@@ -51,7 +51,7 @@ ODM_REG(DIG,_pDM_Odm)
#define _cat(_name, _ic_type, _func) \
( \
- ((_ic_type) & ODM_IC_11N_SERIES) ? _func##_11N(_name) : \
+ (_ic_type) ? _func##_11N(_name) : \
_func##_11AC(_name) \
)
@@ -64,7 +64,7 @@ ODM_REG(DIG,_pDM_Odm)
enum odm_h2c_cmd {
ODM_H2C_RSSI_REPORT = 0,
- ODM_H2C_PSD_RESULT= 1,
+ ODM_H2C_PSD_RESULT = 1,
ODM_H2C_PathDiv = 2,
ODM_MAX_H2CCMD
};
@@ -89,22 +89,6 @@ void ODM_Write2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u16 Data);
void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data);
-void ODM_SetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr,
- u32 BitMask, u32 Data);
-
-u32 ODM_GetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask);
-
-void ODM_SetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr,
- u32 BitMask, u32 Data);
-
-u32 ODM_GetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask);
-
-void ODM_SetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath,
- u32 RegAddr, u32 BitMask, u32 Data);
-
-u32 ODM_GetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath,
- u32 RegAddr, u32 BitMask);
-
/* Memory Relative Function. */
void ODM_AllocateMemory(struct odm_dm_struct *pDM_Odm, void **pPtr, u32 length);
void ODM_FreeMemory(struct odm_dm_struct *pDM_Odm, void *pPtr, u32 length);
@@ -112,39 +96,7 @@ void ODM_FreeMemory(struct odm_dm_struct *pDM_Odm, void *pPtr, u32 length);
s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2,
u32 length);
-/* ODM MISC-spin lock relative API. */
-void ODM_AcquireSpinLock(struct odm_dm_struct *pDM_Odm,
- enum RT_SPINLOCK_TYPE type);
-
-void ODM_ReleaseSpinLock(struct odm_dm_struct *pDM_Odm,
- enum RT_SPINLOCK_TYPE type);
-
-/* ODM MISC-workitem relative API. */
-void ODM_InitializeWorkItem(struct odm_dm_struct *pDM_Odm, void *pRtWorkItem,
- RT_WORKITEM_CALL_BACK RtWorkItemCallback,
- void *pContext, const char *szID);
-
-void ODM_StartWorkItem(void *pRtWorkItem);
-
-void ODM_StopWorkItem(void *pRtWorkItem);
-
-void ODM_FreeWorkItem(void *pRtWorkItem);
-
-void ODM_ScheduleWorkItem(void *pRtWorkItem);
-
-void ODM_IsWorkItemScheduled(void *pRtWorkItem);
-
/* ODM Timer relative API. */
-void ODM_StallExecution(u32 usDelay);
-
-void ODM_delay_ms(u32 ms);
-
-void ODM_delay_us(u32 us);
-
-void ODM_sleep_ms(u32 ms);
-
-void ODM_sleep_us(u32 us);
-
void ODM_SetTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer,
u32 msDelay);
@@ -154,8 +106,6 @@ void ODM_InitializeTimer(struct odm_dm_struct *pDM_Odm,
void ODM_CancelTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer);
-void ODM_ReleaseTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer);
-
/* ODM FW relative API. */
u32 ODM_FillH2CCmd(u8 *pH2CBuffer, u32 H2CBufferLen, u32 CmdNum,
u32 *pElementID, u32 *pCmdLen, u8 **pCmbBuffer,
diff --git a/drivers/staging/rtl8188eu/include/odm_precomp.h b/drivers/staging/rtl8188eu/include/odm_precomp.h
index d1d95f4..6e6a656 100644
--- a/drivers/staging/rtl8188eu/include/odm_precomp.h
+++ b/drivers/staging/rtl8188eu/include/odm_precomp.h
@@ -64,7 +64,6 @@ void odm_DynamicTxPowerInit(struct odm_dm_struct *pDM_Odm);
void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm);
void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm);
void odm_SwAntDivInit_NIC(struct odm_dm_struct *pDM_Odm);
-void odm_GlobalAdapterCheck(void);
void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm);
void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm);
void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm);
@@ -76,22 +75,16 @@ void odm_SwAntDivChkAntSwitch(struct odm_dm_struct *pDM_Odm, u8 Step);
void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm);
void odm_DynamicTxPower(struct odm_dm_struct *pDM_Odm);
void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm);
-void odm_SwAntDivInit(struct odm_dm_struct *pDM_Odm);
void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm);
void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm);
void odm_1R_CCA(struct odm_dm_struct *pDM_Odm);
void odm_RefreshRateAdaptiveMaskCE(struct odm_dm_struct *pDM_Odm);
void odm_RefreshRateAdaptiveMaskAPADSL(struct odm_dm_struct *pDM_Odm);
void odm_DynamicTxPowerNIC(struct odm_dm_struct *pDM_Odm);
-void odm_DynamicTxPowerAP(struct odm_dm_struct *pDM_Odm);
-void odm_RSSIMonitorCheckMP(struct odm_dm_struct *pDM_Odm);
void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm);
-void odm_RSSIMonitorCheckAP(struct odm_dm_struct *pDM_Odm);
void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm);
void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm);
void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm);
-void odm_TXPowerTrackingCheckMP(struct odm_dm_struct *pDM_Odm);
-void odm_TXPowerTrackingCheckAP(struct odm_dm_struct *pDM_Odm);
void odm_SwAntDivChkAntSwitchCallback(void *FunctionContext);
void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm);
void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm);
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 36523ed..7956f0c 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -24,13 +24,12 @@
#define _FAIL 0
#define _SUCCESS 1
-#define RTW_RX_HANDLED 2
+#define RTW_RX_HANDLED 2
#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
@@ -63,8 +62,6 @@ struct __queue {
spinlock_t lock;
};
-#define thread_exit() complete_and_exit(NULL, 0)
-
static inline struct list_head *get_next(struct list_head *list)
{
return list->next;
@@ -72,45 +69,15 @@ static inline struct list_head *get_next(struct list_head *list)
static inline struct list_head *get_list_head(struct __queue *queue)
{
- return (&(queue->queue));
+ return &(queue->queue);
}
#define LIST_CONTAINOR(ptr, type, member) \
- ((type *)((char *)(ptr)-(size_t)(&((type *)0)->member)))
-
-
-static inline void _enter_critical(spinlock_t *plock, unsigned long *pirqL)
-{
- spin_lock_irqsave(plock, *pirqL);
-}
-
-static inline void _exit_critical(spinlock_t *plock, unsigned long *pirqL)
-{
- spin_unlock_irqrestore(plock, *pirqL);
-}
-
-static inline void _enter_critical_ex(spinlock_t *plock, unsigned long *pirqL)
-{
- spin_lock_irqsave(plock, *pirqL);
-}
-
-static inline void _exit_critical_ex(spinlock_t *plock, unsigned long *pirqL)
-{
- spin_unlock_irqrestore(plock, *pirqL);
-}
+ ((type *)((char *)(ptr)-(size_t)(&((type *)0)->member)))
-static inline void _enter_critical_bh(spinlock_t *plock, unsigned long *pirqL)
-{
- spin_lock_bh(plock);
-}
-
-static inline void _exit_critical_bh(spinlock_t *plock, unsigned long *pirqL)
-{
- spin_unlock_bh(plock);
-}
-
-static inline int _enter_critical_mutex(struct mutex *pmutex, unsigned long *pirqL)
+static inline int _enter_critical_mutex(struct mutex *pmutex,
+ unsigned long *pirqL)
{
int ret;
@@ -119,7 +86,8 @@ static inline int _enter_critical_mutex(struct mutex *pmutex, unsigned long *pir
}
-static inline void _exit_critical_mutex(struct mutex *pmutex, unsigned long *pirqL)
+static inline void _exit_critical_mutex(struct mutex *pmutex,
+ unsigned long *pirqL)
{
mutex_unlock(pmutex);
}
@@ -129,29 +97,33 @@ static inline void rtw_list_delete(struct list_head *plist)
list_del_init(plist);
}
-static inline void _init_timer(struct timer_list *ptimer,struct net_device *nic_hdl,void *pfunc,void* cntx)
+static inline void _init_timer(struct timer_list *ptimer,
+ struct net_device *nic_hdl,
+ void *pfunc, void *cntx)
{
ptimer->function = pfunc;
ptimer->data = (unsigned long)cntx;
init_timer(ptimer);
}
-static inline void _set_timer(struct timer_list *ptimer,u32 delay_time)
+static inline void _set_timer(struct timer_list *ptimer, u32 delay_time)
{
mod_timer(ptimer , (jiffies+(delay_time*HZ/1000)));
}
-static inline void _cancel_timer(struct timer_list *ptimer,u8 *bcancelled)
+static inline void _cancel_timer(struct timer_list *ptimer, u8 *bcancelled)
{
del_timer_sync(ptimer);
- *bcancelled= true;/* true ==1; false==0 */
+ *bcancelled = true;/* true ==1; false==0 */
}
#define RTW_TIMER_HDL_ARGS void *FunctionContext
#define RTW_TIMER_HDL_NAME(name) rtw_##name##_timer_hdl
-#define RTW_DECLARE_TIMER_HDL(name) void RTW_TIMER_HDL_NAME(name)(RTW_TIMER_HDL_ARGS)
+#define RTW_DECLARE_TIMER_HDL(name) \
+ void RTW_TIMER_HDL_NAME(name)(RTW_TIMER_HDL_ARGS)
-static inline void _init_workitem(struct work_struct *pwork, void *pfunc, void * cntx)
+static inline void _init_workitem(struct work_struct *pwork, void *pfunc,
+ void *cntx)
{
INIT_WORK(pwork, pfunc);
}
@@ -165,23 +137,6 @@ static inline void _cancel_workitem_sync(struct work_struct *pwork)
{
cancel_work_sync(pwork);
}
-/* */
-/* Global Mutex: can only be used at PASSIVE level. */
-/* */
-
-#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter) \
-{ \
- while (atomic_inc_return((atomic_t *)&(_MutexCounter)) != 1)\
- { \
- atomic_dec((atomic_t *)&(_MutexCounter)); \
- msleep(10); \
- } \
-}
-
-#define RELEASE_GLOBAL_MUTEX(_MutexCounter) \
-{ \
- atomic_dec((atomic_t *)&(_MutexCounter)); \
-}
static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
{
@@ -207,7 +162,7 @@ static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
}
#ifndef BIT
- #define BIT(x) ( 1 << (x))
+ #define BIT(x) (1 << (x))
#endif
#define BIT0 0x00000001
@@ -301,20 +256,13 @@ void rtw_list_insert_head(struct list_head *plist, struct list_head *phead);
void rtw_list_insert_tail(struct list_head *plist, struct list_head *phead);
void rtw_list_delete(struct list_head *plist);
-void _rtw_init_sema(struct semaphore *sema, int init_val);
-void _rtw_free_sema(struct semaphore *sema);
-void _rtw_up_sema(struct semaphore *sema);
u32 _rtw_down_sema(struct semaphore *sema);
-void _rtw_mutex_init(struct mutex *pmutex);
-void _rtw_mutex_free(struct mutex *pmutex);
-void _rtw_spinlock_init(spinlock_t *plock);
-void _rtw_spinlock_free(spinlock_t *plock);
void _rtw_init_queue(struct __queue *pqueue);
u32 _rtw_queue_empty(struct __queue *pqueue);
-u32 rtw_end_of_queue_search(struct list_head *queue, struct list_head *pelement);
+u32 rtw_end_of_queue_search(struct list_head *queue,
+ struct list_head *pelement);
-u32 rtw_get_current_time(void);
u32 rtw_systime_to_ms(u32 systime);
u32 rtw_ms_to_systime(u32 ms);
s32 rtw_get_passing_time_ms(u32 start);
@@ -322,32 +270,21 @@ s32 rtw_get_time_interval_ms(u32 start, u32 end);
void rtw_sleep_schedulable(int ms);
-void rtw_msleep_os(int ms);
-void rtw_usleep_os(int us);
-
u32 rtw_atoi(u8 *s);
-void rtw_mdelay_os(int ms);
-void rtw_udelay_os(int us);
-
-void rtw_yield_os(void);
-
static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
{
return del_timer_sync(ptimer);
}
-static __inline void thread_enter(char *name)
+static inline void thread_enter(char *name)
{
-#ifdef daemonize
- daemonize("%s", name);
-#endif
allow_signal(SIGTERM);
}
static inline void flush_signals_thread(void)
{
- if (signal_pending (current))
+ if (signal_pending(current))
flush_signals(current);
}
@@ -357,13 +294,13 @@ static inline int res_to_status(int res)
}
#define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r))
-#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0: 1)) << 2)
+#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0 : 1)) << 2)
static inline u32 _RND4(u32 sz)
{
u32 val;
- val = ((sz >> 2) + ((sz & 3) ? 1: 0)) << 2;
+ val = ((sz >> 2) + ((sz & 3) ? 1 : 0)) << 2;
return val;
}
@@ -371,7 +308,7 @@ static inline u32 _RND8(u32 sz)
{
u32 val;
- val = ((sz >> 3) + ((sz & 7) ? 1: 0)) << 3;
+ val = ((sz >> 3) + ((sz & 7) ? 1 : 0)) << 3;
return val;
}
@@ -379,7 +316,7 @@ static inline u32 _RND128(u32 sz)
{
u32 val;
- val = ((sz >> 7) + ((sz & 127) ? 1: 0)) << 7;
+ val = ((sz >> 7) + ((sz & 127) ? 1 : 0)) << 7;
return val;
}
@@ -387,7 +324,7 @@ static inline u32 _RND256(u32 sz)
{
u32 val;
- val = ((sz >> 8) + ((sz & 255) ? 1: 0)) << 8;
+ val = ((sz >> 8) + ((sz & 255) ? 1 : 0)) << 8;
return val;
}
@@ -395,7 +332,7 @@ static inline u32 _RND512(u32 sz)
{
u32 val;
- val = ((sz >> 9) + ((sz & 511) ? 1: 0)) << 9;
+ val = ((sz >> 9) + ((sz & 511) ? 1 : 0)) << 9;
return val;
}
@@ -404,32 +341,14 @@ static inline u32 bitshift(u32 bitmask)
u32 i;
for (i = 0; i <= 31; i++)
- if (((bitmask>>i) & 0x1) == 1) break;
+ if (((bitmask>>i) & 0x1) == 1)
+ break;
return i;
}
/* limitation of path length */
#define PATH_LENGTH_MAX PATH_MAX
-void rtw_suspend_lock_init(void);
-void rtw_suspend_lock_uninit(void);
-void rtw_lock_suspend(void);
-void rtw_unlock_suspend(void);
-
-/* Atomic integer operations */
-#define ATOMIC_T atomic_t
-
-void ATOMIC_SET(ATOMIC_T *v, int i);
-int ATOMIC_READ(ATOMIC_T *v);
-void ATOMIC_ADD(ATOMIC_T *v, int i);
-void ATOMIC_SUB(ATOMIC_T *v, int i);
-void ATOMIC_INC(ATOMIC_T *v);
-void ATOMIC_DEC(ATOMIC_T *v);
-int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i);
-int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i);
-int ATOMIC_INC_RETURN(ATOMIC_T *v);
-int ATOMIC_DEC_RETURN(ATOMIC_T *v);
-
struct rtw_netdev_priv_indicator {
void *priv;
u32 sizeof_priv;
@@ -451,7 +370,7 @@ void rtw_free_netdev(struct net_device *netdev);
#define FUNC_ADPT_FMT "%s(%s)"
#define FUNC_ADPT_ARG(adapter) __func__, adapter->pnetdev->name
-#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)),(sig), 1)
+#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
u64 rtw_modular64(u64 x, u64 y);
u64 rtw_division64(u64 x, u64 y);
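
With the ATOMIC_T alias and its accessor prototypes gone, fields such as continual_urb_error in struct dvobj_priv (see the drv_types.h hunk above) are plain atomic_t and use the standard <linux/atomic.h> operations directly. A short sketch of the direct usage; the function names and the threshold value are illustrative and not taken from this patch:

#include <linux/atomic.h>

/* Count a consecutive USB URB error; report true once too many pile up. */
static bool urb_error_tick_sketch(struct dvobj_priv *dvobj)
{
	int errors = atomic_inc_return(&dvobj->continual_urb_error);	/* was ATOMIC_INC_RETURN() */

	return errors > 4;	/* illustrative threshold only */
}

static void urb_error_clear_sketch(struct dvobj_priv *dvobj)
{
	atomic_set(&dvobj->continual_urb_error, 0);	/* was ATOMIC_SET() */
}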
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 555c801..161f1e5 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -456,9 +456,9 @@ void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo,
bool AutoLoadFail);
void Hal_EfuseParseCustomerID88E(struct adapter *padapter, u8 *hwinfo,
bool AutoLoadFail);
-void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter,u8 *PROMContent,
+void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent,
bool AutoLoadFail);
-void Hal_ReadThermalMeter_88E(struct adapter * dapter, u8 *PROMContent,
+void Hal_ReadThermalMeter_88E(struct adapter *dapter, u8 *PROMContent,
bool AutoloadFail);
void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo,
bool AutoLoadFail);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index 02ccb40..a8facf0 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -58,11 +58,11 @@ enum rx_packet_type {
#define INTERRUPT_MSG_FORMAT_LEN 60
void rtl8188eu_init_recvbuf(struct adapter *padapter, struct recv_buf *buf);
s32 rtl8188eu_init_recv_priv(struct adapter *padapter);
-void rtl8188eu_free_recv_priv(struct adapter * padapter);
-void rtl8188eu_recv_hdl(struct adapter * padapter, struct recv_buf *precvbuf);
+void rtl8188eu_free_recv_priv(struct adapter *padapter);
+void rtl8188eu_recv_hdl(struct adapter *padapter, struct recv_buf *precvbuf);
void rtl8188eu_recv_tasklet(void *priv);
void rtl8188e_query_rx_phy_status(union recv_frame *fr, struct phy_stat *phy);
-void rtl8188e_process_phy_info(struct adapter * padapter, void *prframe);
+void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe);
void update_recvframe_phyinfo_88e(union recv_frame *fra, struct phy_stat *phy);
void update_recvframe_attrib_88e(union recv_frame *fra, struct recv_stat *stat);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
index c12c56b9..2c33eb3 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -68,7 +68,7 @@
#define DISABLE_TRXPKT_BUF_ACCESS 0x0
-/* 0x0000h ~ 0x00FFh System Configuration */
+/* 0x0000h ~ 0x00FFh System Configuration */
#define REG_SYS_ISO_CTRL 0x0000
#define REG_SYS_FUNC_EN 0x0002
#define REG_APS_FSMCO 0x0004
@@ -142,7 +142,7 @@
#define REG_MAC_PHY_CTRL_NORMAL 0x00f8
-/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
#define REG_CR 0x0100
#define REG_PBP 0x0104
#define REG_PKT_BUFF_ACCESS_CTRL 0x0106
@@ -188,7 +188,7 @@
#define REG_LLT_INIT 0x01E0
-/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
#define REG_RQPN 0x0200
#define REG_FIFOPAGE 0x0204
#define REG_TDECTRL 0x0208
@@ -196,12 +196,12 @@
#define REG_TXDMA_STATUS 0x0210
#define REG_RQPN_NPQ 0x0214
-/* 0x0280h ~ 0x02FFh RXDMA Configuration */
+/* 0x0280h ~ 0x02FFh RXDMA Configuration */
#define REG_RXDMA_AGG_PG_TH 0x0280
#define REG_RXPKT_NUM 0x0284
#define REG_RXDMA_STATUS 0x0288
-/* 0x0300h ~ 0x03FFh PCIe */
+/* 0x0300h ~ 0x03FFh PCIe */
#define REG_PCIE_CTRL_REG 0x0300
#define REG_INT_MIG 0x0304 /* Interrupt Migration */
#define REG_BCNQ_DESA 0x0308 /* TX Beacon Descr Address */
@@ -222,7 +222,7 @@
#define REG_PCIE_HISR 0x03A0
/* spec version 11 */
-/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
#define REG_VOQ_INFORMATION 0x0400
#define REG_VIQ_INFORMATION 0x0404
#define REG_BEQ_INFORMATION 0x0408
@@ -276,7 +276,7 @@
#define REG_TX_RPT_TIME 0x04F0 /* 2 byte */
#define REG_DUMMY 0x04FC
-/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
#define REG_EDCA_VO_PARAM 0x0500
#define REG_EDCA_VI_PARAM 0x0504
#define REG_EDCA_BE_PARAM 0x0508
@@ -294,16 +294,16 @@
#define REG_DIS_TXREQ_CLR 0x0523
#define REG_RD_CTRL 0x0524
/* Format for offset 540h-542h: */
-/* [3:0]: TBTT prohibit setup in unit of 32us. The time for HW getting
+/* [3:0]: TBTT prohibit setup in unit of 32us. The time for HW getting
* beacon content before TBTT. */
-/* [7:4]: Reserved. */
-/* [19:8]: TBTT prohibit hold in unit of 32us. The time for HW holding
+/* [7:4]: Reserved. */
+/* [19:8]: TBTT prohibit hold in unit of 32us. The time for HW holding
* to send the beacon packet. */
-/* [23:20]: Reserved */
+/* [23:20]: Reserved */
/* Description: */
-/* | */
+/* | */
/* |<--Setup--|--Hold------------>| */
-/* --------------|---------------------- */
+/* --------------|---------------------- */
/* | */
/* TBTT */
/* Note: We cannot update beacon content to HW or send any AC packets during
@@ -335,7 +335,7 @@
#define REG_FW_RESET_TSF_CNT_0 0x05FD
#define REG_FW_BCN_DIS_CNT 0x05FE
-/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
#define REG_APSD_CTRL 0x0600
#define REG_BWOPMODE 0x0603
#define REG_TCR 0x0604
@@ -382,7 +382,7 @@
#define _RXERR_RPT_SEL(type) ((type) << 28)
/* Note: */
-/* The NAV upper value is very important to WiFi 11n 5.2.3 NAV test.
+/* The NAV upper value is very important to WiFi 11n 5.2.3 NAV test.
* The default value is always too small, but the WiFi TestPlan test
* by 25,000 microseconds of NAV through sending CTS in the air.
* We must update this value greater than 25,000 microseconds to pass
@@ -422,7 +422,7 @@
#define REG_MACID1 0x0700
#define REG_BSSID1 0x0708
-/* 0xFE00h ~ 0xFE55h USB Configuration */
+/* 0xFE00h ~ 0xFE55h USB Configuration */
#define REG_USB_INFO 0xFE17
#define REG_USB_SPECIAL_OPTION 0xFE55
#define REG_USB_DMA_AGG_TO 0xFE5B
@@ -689,13 +689,13 @@ Current IOREG MAP
0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
0x2000h ~ 0x3FFFh 8051 FW Download Region (8196 Bytes)
*/
-/* 8192C (TXPAUSE) transmission pause (Offset 0x522, 8 bits) */
+/* 8192C (TXPAUSE) transmission pause (Offset 0x522, 8 bits) */
/* Note: */
-/* The bits of stopping AC(VO/VI/BE/BK) queue in datasheet
+/* The bits of stopping AC(VO/VI/BE/BK) queue in datasheet
* RTL8192S/RTL8192C are wrong, */
-/* the correct arragement is VO - Bit0, VI - Bit1, BE - Bit2,
+/* the correct arrangement is VO - Bit0, VI - Bit1, BE - Bit2,
* and BK - Bit3. */
-/* 8723 and 88E may be not correct either in the earlier version. */
+/* 8723 and 88E may be not correct either in the earlier version. */
#define StopBecon BIT6
#define StopHigh BIT5
#define StopMgt BIT4
@@ -733,7 +733,7 @@ Current IOREG MAP
#define RCR_MXDMA_OFFSET 8
#define RCR_FIFO_OFFSET 13
-/* 0xFE00h ~ 0xFE55h USB Configuration */
+/* 0xFE00h ~ 0xFE55h USB Configuration */
#define REG_USB_INFO 0xFE17
#define REG_USB_SPECIAL_OPTION 0xFE55
#define REG_USB_DMA_AGG_TO 0xFE5B
@@ -743,7 +743,7 @@ Current IOREG MAP
#define REG_USB_HRPWM 0xFE58
#define REG_USB_HCPWM 0xFE57
/* 8192C Regsiter Bit and Content definition */
-/* 0x0000h ~ 0x00FFh System Configuration */
+/* 0x0000h ~ 0x00FFh System Configuration */
/* 2 SYS_ISO_CTRL */
#define ISO_MD2PP BIT(0)
@@ -914,7 +914,7 @@ Current IOREG MAP
/* 2SYS_CFG */
#define RTL_ID BIT(23) /* TestChip ID, 1:Test(RLE); 0:MP(RL) */
-/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
/* 2 Function Enable Registers */
/* 2 CR */
@@ -975,9 +975,9 @@ Current IOREG MAP
#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
-#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8 )
-#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6 )
-#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4 )
+#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8)
+#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6)
+#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4)
#define QUEUE_LOW 1
#define QUEUE_NORMAL 2
@@ -995,7 +995,7 @@ Current IOREG MAP
#define _LLT_OP(x) (((x) & 0x3) << 30)
#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
-/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
/* 2RQPN */
#define _HPQ(x) ((x) & 0xFF)
#define _LPQ(x) (((x) & 0xFF) << 8)
@@ -1019,7 +1019,7 @@ Current IOREG MAP
/* 2 TXDMA_OFFSET_CHK */
#define DROP_DATA_EN BIT(9)
-/* 0x0280h ~ 0x028Bh RX DMA Configuration */
+/* 0x0280h ~ 0x028Bh RX DMA Configuration */
/* REG_RXDMA_CONTROL, 0x0286h */
@@ -1028,7 +1028,7 @@ Current IOREG MAP
#define RXDMA_IDLE BIT(17)
#define RW_RELEASE_EN BIT(18)
-/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
/* 2 FWHW_TXQ_CTRL */
#define EN_AMPDU_RTY_NEW BIT(7)
@@ -1040,7 +1040,7 @@ Current IOREG MAP
#define RETRY_LIMIT_SHORT_SHIFT 8
#define RETRY_LIMIT_LONG_SHIFT 0
-/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
/* 2 EDCA setting */
#define AC_PARAM_TXOP_LIMIT_OFFSET 16
@@ -1071,7 +1071,7 @@ Current IOREG MAP
#define AcmHw_ViqStatus BIT(5)
#define AcmHw_VoqStatus BIT(6)
-/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
/* 2APSD_CTRL */
#define APSDOFF BIT(6)
#define APSDOFF_STATUS BIT(7)
@@ -1128,7 +1128,7 @@ Current IOREG MAP
#define SCR_TXBCUSEDK BIT(6) /* Force Tx Bcast pkt Use Default Key */
#define SCR_RXBCUSEDK BIT(7) /* Force Rx Bcast pkt Use Default Key */
-/* RTL8188E SDIO Configuration */
+/* RTL8188E SDIO Configuration */
/* I/O bus domain address mapping */
#define SDIO_LOCAL_BASE 0x10250000
@@ -1264,7 +1264,7 @@ Current IOREG MAP
#define SDIO_TX_FREE_PG_QUEUE 4
#define SDIO_TX_FIFO_PAGE_SZ 128
-/* 0xFE00h ~ 0xFE55h USB Configuration */
+/* 0xFE00h ~ 0xFE55h USB Configuration */
/* 2 USB Information (0xFE17) */
#define USB_IS_HIGH_SPEED 0
@@ -1331,7 +1331,7 @@ Current IOREG MAP
/* 8192C EEPROM/EFUSE share register definition. */
-/* EEPROM/Efuse PG Offset for 88EE/88EU/88ES */
+/* EEPROM/Efuse PG Offset for 88EE/88EU/88ES */
#define EEPROM_TX_PWR_INX_88E 0x10
#define EEPROM_ChannelPlan_88E 0xB8
@@ -1362,7 +1362,7 @@ Current IOREG MAP
/* RTL88ES */
#define EEPROM_MAC_ADDR_88ES 0x11A
-/* EEPROM/Efuse Value Type */
+/* EEPROM/Efuse Value Type */
#define EETYPE_TX_PWR 0x0
/* Default Value for EEPROM or EFUSE!!! */
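Illustrative sketch, not part of the patch: a standalone check of how the _TXDMA_*_MAP() shift macros touched above compose a queue-mapping value. The macro bodies and QUEUE_* values are copied from the hunk; the userspace wrapper exists only so the arithmetic can be run and verified.

	#include <stdio.h>
	#include <stdint.h>

	#define _TXDMA_BEQ_MAP(x)	(((x) & 0x3) << 8)
	#define _TXDMA_VIQ_MAP(x)	(((x) & 0x3) << 6)
	#define _TXDMA_VOQ_MAP(x)	(((x) & 0x3) << 4)
	#define QUEUE_LOW	1
	#define QUEUE_NORMAL	2

	int main(void)
	{
		/* BE queue -> low ring, VI and VO queues -> normal ring */
		uint16_t map = _TXDMA_BEQ_MAP(QUEUE_LOW) |
			       _TXDMA_VIQ_MAP(QUEUE_NORMAL) |
			       _TXDMA_VOQ_MAP(QUEUE_NORMAL);

		printf("queue map = 0x%04x\n", (unsigned int)map);	/* prints 0x01a0 */
		return 0;
	}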
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
index 8cafd7a..3d34702 100644
--- a/drivers/staging/rtl8188eu/include/rtw_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -69,7 +69,7 @@ struct evt_priv {
bool c2h_wk_alive;
struct rtw_cbuf *c2h_queue;
#define C2H_QUEUE_MAX_LEN 10
- ATOMIC_T event_seq;
+ atomic_t event_seq;
u8 *evt_buf; /* shall be non-paged, and 4 bytes aligned */
u8 *evt_allocated_buf;
u32 evt_done_cnt;
@@ -478,8 +478,7 @@ struct getrfintfs_parm {
u8 rfintfs;
};
-struct Tx_Beacon_param
-{
+struct Tx_Beacon_param {
struct wlan_bssid_ex network;
};
@@ -625,14 +624,14 @@ struct setratable_parm {
};
struct getratable_parm {
- uint rsvd;
+ uint rsvd;
};
struct getratable_rsp {
- u8 ss_ForceUp[NumRates];
- u8 ss_ULevel[NumRates];
- u8 ss_DLevel[NumRates];
- u8 count_judge[NumRates];
+ u8 ss_ForceUp[NumRates];
+ u8 ss_ULevel[NumRates];
+ u8 ss_DLevel[NumRates];
+ u8 count_judge[NumRates];
};
/* to get TX,RX retry count */
@@ -715,26 +714,22 @@ struct set_ch_parm {
};
/*H2C Handler index: 59 */
-struct SetChannelPlan_param
-{
+struct SetChannelPlan_param {
u8 channel_plan;
};
/*H2C Handler index: 60 */
-struct LedBlink_param
-{
+struct LedBlink_param {
struct LED_871x *pLed;
};
/*H2C Handler index: 61 */
-struct SetChannelSwitch_param
-{
+struct SetChannelSwitch_param {
u8 new_ch_no;
};
/*H2C Handler index: 62 */
-struct TDLSoption_param
-{
+struct TDLSoption_param {
u8 addr[ETH_ALEN];
u8 option;
};
@@ -763,52 +758,57 @@ Result:
#define H2C_CMD_OVERFLOW 0x06
#define H2C_RESERVED 0x07
-u8 rtw_setassocsta_cmd(struct adapter *padapter, u8 *mac_addr);
+u8 rtw_setassocsta_cmd(struct adapter *padapter, u8 *mac_addr);
u8 rtw_setstandby_cmd(struct adapter *padapter, uint action);
-u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
+u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
int ssid_num, struct rtw_ieee80211_channel *ch,
int ch_num);
-u8 rtw_createbss_cmd(struct adapter *padapter);
-u8 rtw_createbss_cmd_ex(struct adapter *padapter, unsigned char *pbss,
- unsigned int sz);
-u8 rtw_setphy_cmd(struct adapter *padapter, u8 modem, u8 ch);
+u8 rtw_createbss_cmd(struct adapter *padapter);
+u8 rtw_createbss_cmd_ex(struct adapter *padapter, unsigned char *pbss,
+ unsigned int sz);
+u8 rtw_setphy_cmd(struct adapter *padapter, u8 modem, u8 ch);
u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key);
-u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue);
-u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network* pnetwork);
-u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue);
-u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra networktype);
-u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset);
-u8 rtw_setbasicrate_cmd(struct adapter *padapter, u8 *rateset);
-u8 rtw_setbbreg_cmd(struct adapter * padapter, u8 offset, u8 val);
-u8 rtw_setrfreg_cmd(struct adapter * padapter, u8 offset, u32 val);
-u8 rtw_getbbreg_cmd(struct adapter * padapter, u8 offset, u8 * pval);
-u8 rtw_getrfreg_cmd(struct adapter * padapter, u8 offset, u8 * pval);
-u8 rtw_setrfintfs_cmd(struct adapter *padapter, u8 mode);
-u8 rtw_setrttbl_cmd(struct adapter *padapter, struct setratable_parm *prate_table);
-u8 rtw_getrttbl_cmd(struct adapter *padapter, struct getratable_rsp *pval);
-
-u8 rtw_gettssi_cmd(struct adapter *padapter, u8 offset,u8 *pval);
-u8 rtw_setfwdig_cmd(struct adapter*padapter, u8 type);
-u8 rtw_setfwra_cmd(struct adapter*padapter, u8 type);
-
-u8 rtw_addbareq_cmd(struct adapter*padapter, u8 tid, u8 *addr);
+u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry,
+ u8 enqueue);
+u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork);
+u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms,
+ bool enqueue);
+u8 rtw_setopmode_cmd(struct adapter *padapter,
+ enum ndis_802_11_network_infra networktype);
+u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset);
+u8 rtw_setbasicrate_cmd(struct adapter *padapter, u8 *rateset);
+u8 rtw_setbbreg_cmd(struct adapter *padapter, u8 offset, u8 val);
+u8 rtw_setrfreg_cmd(struct adapter *padapter, u8 offset, u32 val);
+u8 rtw_getbbreg_cmd(struct adapter *padapter, u8 offset, u8 *pval);
+u8 rtw_getrfreg_cmd(struct adapter *padapter, u8 offset, u8 *pval);
+u8 rtw_setrfintfs_cmd(struct adapter *padapter, u8 mode);
+u8 rtw_setrttbl_cmd(struct adapter *padapter,
+ struct setratable_parm *prate_table);
+u8 rtw_getrttbl_cmd(struct adapter *padapter, struct getratable_rsp *pval);
+
+u8 rtw_gettssi_cmd(struct adapter *padapter, u8 offset, u8 *pval);
+u8 rtw_setfwdig_cmd(struct adapter *padapter, u8 type);
+u8 rtw_setfwra_cmd(struct adapter *padapter, u8 type);
+
+u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr);
u8 rtw_dynamic_chk_wk_cmd(struct adapter *adapter);
-u8 rtw_lps_ctrl_wk_cmd(struct adapter*padapter, u8 lps_ctrl_type, u8 enqueue);
-u8 rtw_rpt_timer_cfg_cmd(struct adapter*padapter, u16 minRptTime);
+u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue);
+u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 minRptTime);
- u8 rtw_antenna_select_cmd(struct adapter*padapter, u8 antenna,u8 enqueue);
-u8 rtw_ps_cmd(struct adapter*padapter);
+u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue);
+u8 rtw_ps_cmd(struct adapter *padapter);
#ifdef CONFIG_88EU_AP_MODE
-u8 rtw_chk_hi_queue_cmd(struct adapter*padapter);
+u8 rtw_chk_hi_queue_cmd(struct adapter *padapter);
#endif
-u8 rtw_set_ch_cmd(struct adapter*padapter, u8 ch, u8 bw, u8 ch_offset, u8 enqueue);
-u8 rtw_set_chplan_cmd(struct adapter*padapter, u8 chplan, u8 enqueue);
-u8 rtw_led_blink_cmd(struct adapter*padapter, struct LED_871x * pLed);
-u8 rtw_set_csa_cmd(struct adapter*padapter, u8 new_ch_no);
+u8 rtw_set_ch_cmd(struct adapter *padapter, u8 ch, u8 bw, u8 ch_offset,
+ u8 enqueue);
+u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue);
+u8 rtw_led_blink_cmd(struct adapter *padapter, struct LED_871x *pLed);
+u8 rtw_set_csa_cmd(struct adapter *padapter, u8 new_ch_no);
u8 rtw_tdls_cmd(struct adapter *padapter, u8 *addr, u8 option);
u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt);
@@ -820,7 +820,7 @@ void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
void rtw_createbss_cmd_callback(struct adapter *adapt, struct cmd_obj *pcmd);
void rtw_getbbrfreg_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
-void rtw_readtssi_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
+void rtw_readtssi_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
void rtw_setstaKey_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
void rtw_setassocsta_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cm);
@@ -913,8 +913,7 @@ enum rtw_h2c_cmd {
#define _SetRFReg_CMD_ _Write_RFREG_CMD_
#ifdef _RTW_CMD_C_
-static struct _cmd_callback rtw_cmd_callback[] =
-{
+static struct _cmd_callback rtw_cmd_callback[] = {
{GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
{GEN_CMD_CODE(_Write_MACREG), NULL},
{GEN_CMD_CODE(_Read_BBREG), &rtw_getbbrfreg_cmdrsp_callback},
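Illustrative sketch, not part of the patch: with event_seq converted from the driver-private ATOMIC_T to a plain atomic_t, the standard kernel helpers can be used on it directly; next_event_seq() is a hypothetical helper name.

	#include <linux/atomic.h>

	static u8 next_event_seq(struct evt_priv *evtpriv)
	{
		/* atomically increment and return the new sequence number */
		return (u8)atomic_inc_return(&evtpriv->event_seq);
	}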
diff --git a/drivers/staging/rtl8188eu/include/rtw_eeprom.h b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
index b2672c3..904fea1 100644
--- a/drivers/staging/rtl8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
@@ -108,7 +108,7 @@ enum RT_CUSTOMER_ID {
RT_CID_CC_C = 38,
RT_CID_819x_Xavi = 39,
RT_CID_819x_FUNAI_TV = 40,
- RT_CID_819x_ALPHA_WD=41,
+ RT_CID_819x_ALPHA_WD = 41,
};
struct eeprom_priv {
diff --git a/drivers/staging/rtl8188eu/include/rtw_efuse.h b/drivers/staging/rtl8188eu/include/rtw_efuse.h
index cee6b5e..df51355 100644
--- a/drivers/staging/rtl8188eu/include/rtw_efuse.h
+++ b/drivers/staging/rtl8188eu/include/rtw_efuse.h
@@ -135,7 +135,7 @@ void EFUSE_GetEfuseDefinition(struct adapter *adapt, u8 type, u8 type1,
u8 efuse_OneByteRead(struct adapter *adapter, u16 addr, u8 *data, bool test);
u8 efuse_OneByteWrite(struct adapter *adapter, u16 addr, u8 data, bool test);
-void Efuse_PowerSwitch(struct adapter *adapt,u8 bWrite,u8 PwrState);
+void Efuse_PowerSwitch(struct adapter *adapt, u8 bWrite, u8 PwrState);
int Efuse_PgPacketRead(struct adapter *adapt, u8 offset, u8 *data, bool test);
int Efuse_PgPacketWrite(struct adapter *adapter, u8 offset, u8 word, u8 *data,
bool test);
diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
index eb6f0e5..3d1dfcc 100644
--- a/drivers/staging/rtl8188eu/include/rtw_io.h
+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
@@ -123,7 +123,7 @@ struct _io_ops {
u8 *pmem);
u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
u8 *pmem);
- u32 (*_write_scsi)(struct intf_hdl *pintfhdl,u32 cnt, u8 *pmem);
+ u32 (*_write_scsi)(struct intf_hdl *pintfhdl, u32 cnt, u8 *pmem);
void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
};
@@ -213,7 +213,7 @@ struct reg_protocol_wt {
u32 Value;
#else
/* DW1 */
- u32 Reserved1 :4;
+ u32 Reserved1:4;
u32 NumOfTrans:4;
u32 Reserved2:24;
/* DW2 */
@@ -254,7 +254,7 @@ struct io_priv {
};
uint ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
-void sync_ioreq_enqueue(struct io_req *preq,struct io_queue *ioqueue);
+void sync_ioreq_enqueue(struct io_req *preq, struct io_queue *ioqueue);
uint sync_ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue);
struct io_req *alloc_ioreq(struct io_queue *pio_q);
@@ -368,20 +368,20 @@ void free_io_queue(struct adapter *adapter);
void async_bus_io(struct io_queue *pio_q);
void bus_sync_io(struct io_queue *pio_q);
u32 _ioreq2rwmem(struct io_queue *pio_q);
-void dev_power_down(struct adapter * Adapter, u8 bpwrup);
-
-#define PlatformEFIOWrite1Byte(_a,_b,_c) \
- rtw_write8(_a,_b,_c)
-#define PlatformEFIOWrite2Byte(_a,_b,_c) \
- rtw_write16(_a,_b,_c)
-#define PlatformEFIOWrite4Byte(_a,_b,_c) \
- rtw_write32(_a,_b,_c)
-
-#define PlatformEFIORead1Byte(_a,_b) \
- rtw_read8(_a,_b)
-#define PlatformEFIORead2Byte(_a,_b) \
- rtw_read16(_a,_b)
-#define PlatformEFIORead4Byte(_a,_b) \
- rtw_read32(_a,_b)
+void dev_power_down(struct adapter *Adapter, u8 bpwrup);
+
+#define PlatformEFIOWrite1Byte(_a, _b, _c) \
+ rtw_write8(_a, _b, _c)
+#define PlatformEFIOWrite2Byte(_a, _b, _c) \
+ rtw_write16(_a, _b, _c)
+#define PlatformEFIOWrite4Byte(_a, _b, _c) \
+ rtw_write32(_a, _b, _c)
+
+#define PlatformEFIORead1Byte(_a, _b) \
+ rtw_read8(_a, _b)
+#define PlatformEFIORead2Byte(_a, _b) \
+ rtw_read16(_a, _b)
+#define PlatformEFIORead4Byte(_a, _b) \
+ rtw_read32(_a, _b)
#endif /* _RTL8711_IO_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
index 49efb23..187fe1f 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
@@ -28,10 +28,10 @@ typedef u8 NDIS_802_11_PMKID_VALUE[16];
u8 rtw_set_802_11_add_key(struct adapter *adapt, struct ndis_802_11_key *key);
u8 rtw_set_802_11_authentication_mode(struct adapter *adapt,
enum ndis_802_11_auth_mode authmode);
-u8 rtw_set_802_11_bssid(struct adapter*adapter, u8 *bssid);
+u8 rtw_set_802_11_bssid(struct adapter *adapter, u8 *bssid);
u8 rtw_set_802_11_add_wep(struct adapter *adapter, struct ndis_802_11_wep *wep);
u8 rtw_set_802_11_disassociate(struct adapter *adapter);
-u8 rtw_set_802_11_bssid_list_scan(struct adapter*adapter,
+u8 rtw_set_802_11_bssid_list_scan(struct adapter *adapter,
struct ndis_802_11_ssid *pssid,
int ssid_max_num);
u8 rtw_set_802_11_infrastructure_mode(struct adapter *adapter,
diff --git a/drivers/staging/rtl8188eu/include/rtw_iol.h b/drivers/staging/rtl8188eu/include/rtw_iol.h
index 6949922..ec0c6cb 100644
--- a/drivers/staging/rtl8188eu/include/rtw_iol.h
+++ b/drivers/staging/rtl8188eu/include/rtw_iol.h
@@ -70,15 +70,15 @@ int _rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr,
int _rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path,
u16 addr, u32 value, u32 mask);
#define rtw_IOL_append_WB_cmd(xmit_frame, addr, value, mask) \
- _rtw_IOL_append_WB_cmd((xmit_frame), (addr), (value) ,(mask))
+	_rtw_IOL_append_WB_cmd((xmit_frame), (addr), (value), (mask))
#define rtw_IOL_append_WW_cmd(xmit_frame, addr, value, mask) \
- _rtw_IOL_append_WW_cmd((xmit_frame), (addr), (value),(mask))
+ _rtw_IOL_append_WW_cmd((xmit_frame), (addr), (value), (mask))
#define rtw_IOL_append_WD_cmd(xmit_frame, addr, value, mask) \
_rtw_IOL_append_WD_cmd((xmit_frame), (addr), (value), (mask))
#define rtw_IOL_append_WRF_cmd(xmit_frame, rf_path, addr, value, mask) \
- _rtw_IOL_append_WRF_cmd((xmit_frame),(rf_path), (addr), (value), (mask))
+ _rtw_IOL_append_WRF_cmd((xmit_frame), (rf_path), (addr), (value), (mask))
u8 rtw_IOL_cmd_boundary_handle(struct xmit_frame *pxmit_frame);
-void rtw_IOL_cmd_buf_dump(struct adapter *Adapter,int buf_len,u8 *pbuf);
+void rtw_IOL_cmd_buf_dump(struct adapter *Adapter, int buf_len, u8 *pbuf);
#endif /* __RTW_IOL_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
index d0da4fd..0da4e27 100644
--- a/drivers/staging/rtl8188eu/include/rtw_led.h
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -147,7 +147,7 @@ struct LED_871x {
enum LED_STRATEGY_871x {
SW_LED_MODE0 = 0, /* SW control 1 LED via GPIO0. It is default option.*/
- SW_LED_MODE1= 1, /* 2 LEDs, through LED0 and LED1. For ALPHA. */
+ SW_LED_MODE1 = 1, /* 2 LEDs, through LED0 and LED1. For ALPHA. */
SW_LED_MODE2 = 2, /* SW control 1 LED via GPIO0, customized for AzWave
* 8187 minicard. */
SW_LED_MODE3 = 3, /* SW control 1 LED via GPIO0, customized for Sercomm
@@ -182,7 +182,7 @@ struct led_priv{
void BlinkTimerCallback(void *data);
void BlinkWorkItemCallback(struct work_struct *work);
-void ResetLedStatus(struct LED_871x * pLed);
+void ResetLedStatus(struct LED_871x *pLed);
void InitLed871x(struct adapter *padapter, struct LED_871x *pLed,
enum LED_PIN_871x LedPin);
@@ -190,7 +190,7 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed,
void DeInitLed871x(struct LED_871x *pLed);
/* hal... */
-void BlinkHandler(struct LED_871x * pLed);
+void BlinkHandler(struct LED_871x *pLed);
void SwLedOn(struct adapter *padapter, struct LED_871x *pLed);
void SwLedOff(struct adapter *padapter, struct LED_871x *pLed);
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 4a7143e..6cd988f 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -129,17 +129,17 @@ struct rt_link_detect {
struct profile_info {
u8 ssidlen;
- u8 ssid[ WLAN_SSID_MAXLEN ];
- u8 peermac[ ETH_ALEN ];
+ u8 ssid[WLAN_SSID_MAXLEN];
+ u8 peermac[ETH_ALEN];
};
struct tx_invite_req_info {
u8 token;
u8 benable;
- u8 go_ssid[ WLAN_SSID_MAXLEN ];
+ u8 go_ssid[WLAN_SSID_MAXLEN];
u8 ssidlen;
- u8 go_bssid[ ETH_ALEN ];
- u8 peer_macaddr[ ETH_ALEN ];
+ u8 go_bssid[ETH_ALEN];
+ u8 peer_macaddr[ETH_ALEN];
u8 operating_ch; /* This information will be set by using the
* p2p_set op_ch=x */
u8 peer_ch; /* The listen channel for peer P2P device */
@@ -182,9 +182,9 @@ struct tx_nego_req_info {
};
struct group_id_info {
- u8 go_device_addr[ ETH_ALEN ]; /* The GO's device address of
+ u8 go_device_addr[ETH_ALEN]; /* The GO's device address of
* this P2P group */
- u8 ssid[ WLAN_SSID_MAXLEN ]; /* The SSID of this P2P group */
+ u8 ssid[WLAN_SSID_MAXLEN]; /* The SSID of this P2P group */
};
struct scan_limit_info {
@@ -388,7 +388,7 @@ struct mlme_priv {
u8 *assoc_rsp;
u32 assoc_rsp_len;
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
/* Number of associated Non-ERP stations (i.e., stations using 802.11b
* in 802.11g BSS) */
int num_sta_non_erp;
@@ -472,7 +472,7 @@ void rtw_join_timeout_handler(void *FunctionContext);
void _rtw_scan_timeout_handler(void *FunctionContext);
void rtw_free_network_queue(struct adapter *adapter, u8 isfreeall);
int rtw_init_mlme_priv(struct adapter *adapter);
-void rtw_free_mlme_priv (struct mlme_priv *pmlmepriv);
+void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv);
int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv);
int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv,
int keyid, u8 set_tx);
@@ -508,7 +508,7 @@ static inline void set_fwstate(struct mlme_priv *pmlmepriv, int state)
{
pmlmepriv->fw_state |= state;
/* FOR HW integration */
- if (_FW_UNDER_SURVEY==state)
+ if (_FW_UNDER_SURVEY == state)
pmlmepriv->bScanInProcess = true;
}
@@ -516,7 +516,7 @@ static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, int state)
{
pmlmepriv->fw_state &= ~state;
/* FOR HW integration */
- if (_FW_UNDER_SURVEY==state)
+ if (_FW_UNDER_SURVEY == state)
pmlmepriv->bScanInProcess = false;
}
@@ -526,48 +526,38 @@ static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, int state)
*/
static inline void clr_fwstate(struct mlme_priv *pmlmepriv, int state)
{
- unsigned long irql;
-
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, state) == true)
pmlmepriv->fw_state ^= state;
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
}
static inline void clr_fwstate_ex(struct mlme_priv *pmlmepriv, int state)
{
- unsigned long irql;
-
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
_clr_fwstate_(pmlmepriv, state);
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
}
static inline void up_scanned_network(struct mlme_priv *pmlmepriv)
{
- unsigned long irql;
-
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
pmlmepriv->num_of_scanned++;
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
}
static inline void down_scanned_network(struct mlme_priv *pmlmepriv)
{
- unsigned long irql;
-
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
pmlmepriv->num_of_scanned--;
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
}
static inline void set_scanned_network_val(struct mlme_priv *pmlmepriv, int val)
{
- unsigned long irql;
-
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
pmlmepriv->num_of_scanned = val;
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
}
u16 rtw_get_capability(struct wlan_bssid_ex *bss);
@@ -582,7 +572,7 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue);
void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue);
void rtw_indicate_disconnect(struct adapter *adapter);
void rtw_indicate_connect(struct adapter *adapter);
-void rtw_indicate_scan_done( struct adapter *padapter, bool aborted);
+void rtw_indicate_scan_done(struct adapter *padapter, bool aborted);
void rtw_scan_abort(struct adapter *adapter);
int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
@@ -598,7 +588,7 @@ void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter);
void _rtw_join_timeout_handler(struct adapter *adapter);
void rtw_scan_timeout_handler(struct adapter *adapter);
- void rtw_dynamic_check_timer_handlder(struct adapter *adapter);
+void rtw_dynamic_check_timer_handlder(struct adapter *adapter);
#define rtw_is_scan_deny(adapter) false
#define rtw_clear_scan_deny(adapter) do {} while (0)
#define rtw_set_scan_deny_timer_hdl(adapter) do {} while (0)
@@ -615,7 +605,7 @@ int _rtw_enqueue_network(struct __queue *queue, struct wlan_network *pnetwork);
struct wlan_network *_rtw_dequeue_network(struct __queue *queue);
- struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv);
+struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv);
void _rtw_free_network(struct mlme_priv *pmlmepriv,
@@ -624,7 +614,7 @@ void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv,
struct wlan_network *pnetwork);
-struct wlan_network* _rtw_find_network(struct __queue *scanned_queue, u8 *addr);
+struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr);
void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall);
@@ -650,6 +640,6 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst);
void rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network);
void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network);
-void rtw_stassoc_hw_rpt(struct adapter *adapter,struct sta_info *psta);
+void rtw_stassoc_hw_rpt(struct adapter *adapter, struct sta_info *psta);
#endif /* __RTL871X_MLME_H_ */
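Illustrative sketch, not part of the patch: the dropped "irql" locals were dead weight because spin_lock_bh() takes no flags; it only disables bottom-half processing on the local CPU while holding the lock. Flags are needed only for the irqsave variant, when hard-IRQ context can also take the lock. Function names below are hypothetical.

	#include <linux/spinlock.h>

	static void count_scan_bh(struct mlme_priv *pmlmepriv)
	{
		spin_lock_bh(&pmlmepriv->lock);		/* exclude softirq/tasklet users */
		pmlmepriv->num_of_scanned++;
		spin_unlock_bh(&pmlmepriv->lock);
	}

	static void count_any_context(spinlock_t *lock, int *counter)
	{
		unsigned long flags;

		spin_lock_irqsave(lock, flags);		/* needed only if hard IRQs race */
		(*counter)++;
		spin_unlock_irqrestore(lock, flags);
	}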
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index b1bfa2e..f0c982d 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -241,7 +241,7 @@ struct mlme_handler {
struct action_handler {
unsigned int num;
- char* str;
+ char *str;
unsigned int (*func)(struct adapter *adapt, union recv_frame *frame);
};
@@ -401,7 +401,7 @@ struct p2p_oper_class_map {
struct mlme_ext_priv {
struct adapter *padapter;
u8 mlmeext_init;
- ATOMIC_T event_seq;
+ atomic_t event_seq;
u16 mgnt_seq;
unsigned char cur_channel;
@@ -484,7 +484,7 @@ void write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key);
void clear_cam_entry(struct adapter *padapter, u8 entry);
void invalidate_cam_all(struct adapter *padapter);
-void CAM_empty_entry(struct adapter * Adapter, u8 ucIndex);
+void CAM_empty_entry(struct adapter *Adapter, u8 ucIndex);
int allocate_fw_sta_entry(struct adapter *padapter);
void flush_all_cam_entry(struct adapter *padapter);
@@ -548,11 +548,11 @@ void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame
void report_surveydone_event(struct adapter *padapter);
void report_del_sta_event(struct adapter *padapter,
unsigned char *addr, unsigned short reason);
-void report_add_sta_event(struct adapter *padapter, unsigned char* addr,
+void report_add_sta_event(struct adapter *padapter, unsigned char *addr,
int cam_idx);
void beacon_timing_control(struct adapter *padapter);
-extern u8 set_tx_beacon_cmd(struct adapter*padapter);
+extern u8 set_tx_beacon_cmd(struct adapter *padapter);
unsigned int setup_beacon_frame(struct adapter *padapter,
unsigned char *beacon_frame);
void update_mgnt_tx_rate(struct adapter *padapter, u8 rate);
@@ -574,7 +574,7 @@ int issue_probereq_p2p_ex(struct adapter *adapter, u8 *da, int try_cnt,
int wait_ms);
void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr,
u8 dialogToken, u8 success);
-void issue_p2p_invitation_request(struct adapter *padapter, u8* raddr);
+void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr);
#endif /* CONFIG_88EU_P2P */
void issue_beacon(struct adapter *padapter, int timeout_ms);
void issue_probersp(struct adapter *padapter, unsigned char *da,
@@ -587,7 +587,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta,
void issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid,
u8 *da);
s32 issue_probereq_ex(struct adapter *adapter, struct ndis_802_11_ssid *pssid,
- u8* da, int try_cnt, int wait_ms);
+ u8 *da, int try_cnt, int wait_ms);
int issue_nulldata(struct adapter *padapter, unsigned char *da,
unsigned int power_mode, int try_cnt, int wait_ms);
int issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp.h b/drivers/staging/rtl8188eu/include/rtw_mp.h
index 59bdbb5..ffa299b 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mp.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mp.h
@@ -477,19 +477,19 @@ void Hal_SetChannel(struct adapter *pAdapter);
void Hal_SetAntennaPathPower(struct adapter *pAdapter);
s32 Hal_SetThermalMeter(struct adapter *pAdapter, u8 target_ther);
s32 Hal_SetPowerTracking(struct adapter *padapter, u8 enable);
-void Hal_GetPowerTracking(struct adapter *padapter, u8 * enable);
+void Hal_GetPowerTracking(struct adapter *padapter, u8 *enable);
void Hal_GetThermalMeter(struct adapter *pAdapter, u8 *value);
void Hal_mpt_SwitchRfSetting(struct adapter *pAdapter);
-void Hal_MPT_CCKTxPowerAdjust(struct adapter * Adapter, bool bInCH14);
+void Hal_MPT_CCKTxPowerAdjust(struct adapter *Adapter, bool bInCH14);
void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *pAdapter, bool beven);
-void Hal_SetCCKTxPower(struct adapter *pAdapter, u8 * TxPower);
-void Hal_SetOFDMTxPower(struct adapter *pAdapter, u8 * TxPower);
+void Hal_SetCCKTxPower(struct adapter *pAdapter, u8 *TxPower);
+void Hal_SetOFDMTxPower(struct adapter *pAdapter, u8 *TxPower);
void Hal_TriggerRFThermalMeter(struct adapter *pAdapter);
u8 Hal_ReadRFThermalMeter(struct adapter *pAdapter);
void Hal_SetCCKContinuousTx(struct adapter *pAdapter, u8 bStart);
void Hal_SetOFDMContinuousTx(struct adapter *pAdapter, u8 bStart);
void Hal_ProSetCrystalCap (struct adapter *pAdapter , u32 CrystalCapVal);
void _rtw_mp_xmit_priv(struct xmit_priv *pxmitpriv);
-void MP_PHY_SetRFPathSwitch(struct adapter *pAdapter ,bool bMain);
+void MP_PHY_SetRFPathSwitch(struct adapter *pAdapter, bool bMain);
#endif /* _RTW_MP_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h
index 494e90e..9388368 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h
@@ -278,7 +278,7 @@ struct eeprom_rw_param {
struct mp_ioctl_handler {
u32 paramsize;
- s32 (*handler)(struct oid_par_priv* poid_par_priv);
+ s32 (*handler)(struct oid_par_priv *poid_par_priv);
u32 oid;
};
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
index d4b8acb..4a0e9ff 100644
--- a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -99,12 +99,7 @@ struct reportpwrstate_parm {
static inline void _init_pwrlock(struct semaphore *plock)
{
- _rtw_init_sema(plock, 1);
-}
-
-static inline void _free_pwrlock(struct semaphore *plock)
-{
- _rtw_free_sema(plock);
+ sema_init(plock, 1);
}
static inline void _enter_pwrlock(struct semaphore *plock)
@@ -114,7 +109,7 @@ static inline void _enter_pwrlock(struct semaphore *plock)
static inline void _exit_pwrlock(struct semaphore *plock)
{
- _rtw_up_sema(plock);
+ up(plock);
}
#define LPS_DELAY_TIME 1*HZ /* 1 sec */
@@ -251,7 +246,6 @@ struct pwrctrl_priv {
(pwrctrl)->pwr_state_check_interval)
void rtw_init_pwrctrl_priv(struct adapter *adapter);
-void rtw_free_pwrctrl_priv(struct adapter *adapter);
void rtw_set_ps_mode(struct adapter *adapter, u8 ps_mode, u8 smart_ps,
u8 bcn_ant_mode);
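Illustrative sketch, not part of the patch: the pwrlock helpers now sit directly on the stock semaphore API, which is why the empty _free_pwrlock() wrapper can be deleted; pwrlock_demo() is a hypothetical name.

	#include <linux/errno.h>
	#include <linux/semaphore.h>

	static int pwrlock_demo(struct semaphore *plock)
	{
		sema_init(plock, 1);			/* binary semaphore, initially free */
		if (down_interruptible(plock))		/* sleeps; fails if a signal arrives */
			return -EINTR;
		/* ... power-state critical section ... */
		up(plock);
		return 0;
	}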
diff --git a/drivers/staging/rtl8188eu/include/rtw_security.h b/drivers/staging/rtl8188eu/include/rtw_security.h
index 23c7814..937cad8 100644
--- a/drivers/staging/rtl8188eu/include/rtw_security.h
+++ b/drivers/staging/rtl8188eu/include/rtw_security.h
@@ -354,7 +354,7 @@ static const unsigned long K[64] = {
#define RORc(x, y) \
(((((unsigned long)(x) & 0xFFFFFFFFUL) >> (unsigned long)((y)&31)) | \
((unsigned long)(x) << (unsigned long)(32-((y)&31)))) & 0xFFFFFFFFUL)
-#define Ch(x, y ,z) (z ^ (x & (y ^ z)))
+#define Ch(x, y, z) (z ^ (x & (y ^ z)))
#define Maj(x, y, z) (((x | y) & z) | (x & y))
#define S(x, n) RORc((x), (n))
#define R(x, n) (((x)&0xFFFFFFFFUL)>>(n))
diff --git a/drivers/staging/rtl8188eu/include/usb_ops.h b/drivers/staging/rtl8188eu/include/usb_ops.h
index df34237..7d33477 100644
--- a/drivers/staging/rtl8188eu/include/usb_ops.h
+++ b/drivers/staging/rtl8188eu/include/usb_ops.h
@@ -77,7 +77,7 @@ static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
{
int ret = false;
int value;
- value = ATOMIC_INC_RETURN(&dvobj->continual_urb_error);
+ value = atomic_inc_return(&dvobj->continual_urb_error);
if (value > MAX_CONTINUAL_URB_ERR) {
DBG_88E("[dvobj:%p][ERROR] continual_urb_error:%d > %d\n",
dvobj, value, MAX_CONTINUAL_URB_ERR);
@@ -91,7 +91,7 @@ static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
*/
static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
{
- ATOMIC_SET(&dvobj->continual_urb_error, 0);
+ atomic_set(&dvobj->continual_urb_error, 0);
}
#define USB_HIGH_SPEED_BULK_SIZE 512
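Illustrative sketch, not part of the patch: the same threshold-counter idiom in isolation. The read-modify-write must stay atomic because USB completion handling can run concurrently with the submitting context; too_many_errors() and clear_errors() are hypothetical names.

	#include <linux/atomic.h>
	#include <linux/types.h>

	static bool too_many_errors(atomic_t *counter, int limit)
	{
		return atomic_inc_return(counter) > limit;	/* increment and test atomically */
	}

	static void clear_errors(atomic_t *counter)
	{
		atomic_set(counter, 0);		/* reset after a successful transfer */
	}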
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 84e5199..2e7307f 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -1080,7 +1080,7 @@ enum P2P_PROTO_WK_ID {
P2P_PRE_TX_PROVDISC_PROCESS_WK = 2,
P2P_PRE_TX_NEGOREQ_PROCESS_WK = 3,
P2P_PRE_TX_INVITEREQ_PROCESS_WK = 4,
- P2P_AP_P2P_CH_SWITCH_PROCESS_WK =5,
+ P2P_AP_P2P_CH_SWITCH_PROCESS_WK = 5,
P2P_RO_CH_WK = 6,
};
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index ae54587..dec9925 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -1108,7 +1108,6 @@ static int rtw_wx_set_wap(struct net_device *dev,
union iwreq_data *awrq,
char *extra)
{
- unsigned long irqL;
uint ret = 0;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct sockaddr *temp = (struct sockaddr *)awrq;
@@ -1137,7 +1136,7 @@ static int rtw_wx_set_wap(struct net_device *dev,
}
authmode = padapter->securitypriv.ndisauthtype;
- _enter_critical_bh(&queue->lock, &irqL);
+ spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
pmlmepriv->pscanned = get_next(phead);
@@ -1156,14 +1155,14 @@ static int rtw_wx_set_wap(struct net_device *dev,
if ((!memcmp(dst_bssid, src_bssid, ETH_ALEN))) {
if (!rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode)) {
ret = -1;
- _exit_critical_bh(&queue->lock, &irqL);
+ spin_unlock_bh(&queue->lock);
goto exit;
}
break;
}
}
- _exit_critical_bh(&queue->lock, &irqL);
+ spin_unlock_bh(&queue->lock);
rtw_set_802_11_authentication_mode(padapter, authmode);
/* set_802_11_encryption_mode(padapter, padapter->securitypriv.ndisencryptstatus); */
@@ -1248,7 +1247,6 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
- unsigned long irqL;
#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
#endif /* CONFIG_88EU_P2P */
@@ -1321,11 +1319,11 @@ _func_enter_;
DBG_88E("IW_SCAN_THIS_ESSID, ssid =%s, len =%d\n", req->essid, req->essid_len);
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
_status = rtw_sitesurvey_cmd(padapter, ssid, 1, NULL, 0);
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
DBG_88E("rtw_wx_set_scan, req->scan_type == IW_SCAN_TYPE_PASSIVE\n");
}
@@ -1392,7 +1390,6 @@ _func_exit_;
static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
- unsigned long irqL;
struct list_head *plist, *phead;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -1434,13 +1431,13 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
wait_status = _FW_UNDER_SURVEY | _FW_UNDER_LINKING;
while (check_fwstate(pmlmepriv, wait_status)) {
- rtw_msleep_os(30);
+ msleep(30);
cnt++;
if (cnt > wait_for_surveydone)
break;
}
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -1463,7 +1460,7 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
wrqu->data.length = ev-extra;
wrqu->data.flags = 0;
@@ -1482,7 +1479,6 @@ static int rtw_wx_set_essid(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
- unsigned long irqL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *queue = &pmlmepriv->scanned_queue;
@@ -1532,7 +1528,7 @@ static int rtw_wx_set_essid(struct net_device *dev,
src_ssid = ndis_ssid.Ssid;
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("rtw_wx_set_essid: ssid =[%s]\n", src_ssid));
- _enter_critical_bh(&queue->lock, &irqL);
+ spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
pmlmepriv->pscanned = get_next(phead);
@@ -1566,14 +1562,14 @@ static int rtw_wx_set_essid(struct net_device *dev,
if (!rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode)) {
ret = -1;
- _exit_critical_bh(&queue->lock, &irqL);
+ spin_unlock_bh(&queue->lock);
goto exit;
}
break;
}
}
- _exit_critical_bh(&queue->lock, &irqL);
+ spin_unlock_bh(&queue->lock);
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
("set ssid: set_802_11_auth. mode =%d\n", authmode));
rtw_set_802_11_authentication_mode(padapter, authmode);
@@ -2574,7 +2570,6 @@ static int rtw_get_ap_info(struct net_device *dev,
{
int ret = 0;
u32 cnt = 0, wpa_ielen;
- unsigned long irqL;
struct list_head *plist, *phead;
unsigned char *pbuf;
u8 bssid[ETH_ALEN];
@@ -2593,7 +2588,7 @@ static int rtw_get_ap_info(struct net_device *dev,
}
while ((check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)))) {
- rtw_msleep_os(30);
+ msleep(30);
cnt++;
if (cnt > 100)
break;
@@ -2609,7 +2604,7 @@ static int rtw_get_ap_info(struct net_device *dev,
goto exit;
}
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -2622,7 +2617,7 @@ static int rtw_get_ap_info(struct net_device *dev,
if (hwaddr_aton_i(data, bssid)) {
DBG_88E("Invalid BSSID '%s'.\n", (u8 *)data);
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
return -EINVAL;
}
@@ -2646,7 +2641,7 @@ static int rtw_get_ap_info(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (pdata->length >= 34) {
if (copy_to_user(pdata->pointer+32, (u8 *)&pdata->flags, 1)) {
@@ -3091,7 +3086,6 @@ static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
int jj, kk;
u8 peerMACStr[17] = {0x00};
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long irqL;
struct list_head *plist, *phead;
struct __queue *queue = &(pmlmepriv->scanned_queue);
struct wlan_network *pnetwork = NULL;
@@ -3113,7 +3107,7 @@ static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3143,7 +3137,7 @@ static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch)
sprintf(attr_content_str, "\n\nM = 0000");
@@ -3163,7 +3157,6 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
int jj, kk;
u8 peerMACStr[17] = {0x00};
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long irqL;
struct list_head *plist, *phead;
struct __queue *queue = &(pmlmepriv->scanned_queue);
struct wlan_network *pnetwork = NULL;
@@ -3186,7 +3179,7 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3227,7 +3220,7 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch)
sprintf(go_devadd_str, "\n\ndev_add = NULL");
@@ -3250,7 +3243,6 @@ static int rtw_p2p_get_device_type(struct net_device *dev,
int jj, kk;
u8 peerMACStr[17] = {0x00};
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long irqL;
struct list_head *plist, *phead;
struct __queue *queue = &(pmlmepriv->scanned_queue);
struct wlan_network *pnetwork = NULL;
@@ -3271,7 +3263,7 @@ static int rtw_p2p_get_device_type(struct net_device *dev,
for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3308,7 +3300,7 @@ static int rtw_p2p_get_device_type(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch)
sprintf(dev_type_str, "\n\nN = 00");
@@ -3330,7 +3322,6 @@ static int rtw_p2p_get_device_name(struct net_device *dev,
int jj, kk;
u8 peerMACStr[17] = {0x00};
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long irqL;
struct list_head *plist, *phead;
struct __queue *queue = &(pmlmepriv->scanned_queue);
struct wlan_network *pnetwork = NULL;
@@ -3351,7 +3342,7 @@ static int rtw_p2p_get_device_name(struct net_device *dev,
for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3380,7 +3371,7 @@ static int rtw_p2p_get_device_name(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch)
sprintf(dev_name_str, "\n\nN = 0000");
@@ -3400,7 +3391,6 @@ static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
int jj, kk;
u8 peerMACStr[17] = {0x00};
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long irqL;
struct list_head *plist, *phead;
struct __queue *queue = &(pmlmepriv->scanned_queue);
struct wlan_network *pnetwork = NULL;
@@ -3423,7 +3413,7 @@ static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3455,7 +3445,7 @@ static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch) {
sprintf(inv_proc_str, "\nIP =-1");
@@ -3480,7 +3470,6 @@ static int rtw_p2p_connect(struct net_device *dev,
u8 peerMAC[ETH_ALEN] = {0x00};
int jj, kk;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long irqL;
struct list_head *plist, *phead;
struct __queue *queue = &(pmlmepriv->scanned_queue);
struct wlan_network *pnetwork = NULL;
@@ -3506,7 +3495,7 @@ static int rtw_p2p_connect(struct net_device *dev,
for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
peerMAC[jj] = key_2char2num(extra[kk], extra[kk + 1]);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3524,7 +3513,7 @@ static int rtw_p2p_connect(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (uintPeerChannel) {
_rtw_memset(&pwdinfo->nego_req_info, 0x00, sizeof(struct tx_nego_req_info));
@@ -3569,7 +3558,6 @@ static int rtw_p2p_invite_req(struct net_device *dev,
u8 attr_content[50] = {0x00};
u8 *p2pie;
uint p2pielen = 0, attr_contentlen = 0;
- unsigned long irqL;
struct tx_invite_req_info *pinvite_req_info = &pwdinfo->invitereq_info;
/* The input data contains two informations. */
@@ -3602,7 +3590,7 @@ static int rtw_p2p_invite_req(struct net_device *dev,
for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
pinvite_req_info->peer_macaddr[jj] = key_2char2num(extra[kk], extra[kk + 1]);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3639,7 +3627,7 @@ static int rtw_p2p_invite_req(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (uintPeerChannel) {
/* Store the GO's bssid */
@@ -3712,7 +3700,6 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
u8 attr_content[100] = {0x00};
u8 *p2pie;
uint p2pielen = 0, attr_contentlen = 0;
- unsigned long irqL;
/* The input data contains two informations. */
/* 1. First information is the MAC address which wants to issue the provisioning discovery request frame. */
@@ -3753,7 +3740,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
return ret;
}
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3799,7 +3786,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (uintPeerChannel) {
DBG_88E("[%s] peer channel: %d!\n", __func__, uintPeerChannel);
@@ -4132,7 +4119,6 @@ static int rtw_dbg_port(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- unsigned long irqL;
int ret = 0;
u8 major_cmd, minor_cmd;
u16 arg;
@@ -4448,7 +4434,7 @@ static int rtw_dbg_port(struct net_device *dev,
#ifdef CONFIG_88EU_AP_MODE
DBG_88E("sta_dz_bitmap = 0x%x, tim_bitmap = 0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap);
#endif
- _enter_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
for (i = 0; i < NUM_STA; i++) {
phead = &(pstapriv->sta_hash[i]);
@@ -4486,7 +4472,7 @@ static int rtw_dbg_port(struct net_device *dev,
}
}
}
- _exit_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
}
break;
case 0x0c:/* dump rx/tx packet */
@@ -5251,7 +5237,6 @@ static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
{
- unsigned long irqL;
int ret = 0;
struct sta_info *psta = NULL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
@@ -5271,13 +5256,13 @@ static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
psta = rtw_get_stainfo(pstapriv, param->sta_addr);
if (psta) {
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
if (!rtw_is_list_empty(&psta->asoc_list)) {
rtw_list_delete(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
associated_clients_update(padapter, updated);
psta = NULL;
} else {
@@ -7053,7 +7038,7 @@ static int rtw_mp_ctx(struct net_device *dev,
struct mp_priv *pmp_priv = &padapter->mppriv;
if (pmp_priv->tx.stop == 0) {
pmp_priv->tx.stop = 1;
- rtw_msleep_os(5);
+ msleep(5);
}
pmp_priv->tx.stop = 0;
pmp_priv->tx.count = 1;
@@ -7228,25 +7213,25 @@ static int rtw_mp_thermal(struct net_device *dev,
if (copy_from_user(extra, wrqu->pointer, wrqu->length))
return -EFAULT;
- bwrite = strncmp(extra, "write", 6); /* strncmp true is 0 */
+ bwrite = strncmp(extra, "write", 6); /* strncmp true is 0 */
- Hal_GetThermalMeter(padapter, &val);
+ Hal_GetThermalMeter(padapter, &val);
- if (bwrite == 0) {
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
- if (2 > max_available_size) {
- DBG_88E("no available efuse!\n");
- return -EFAULT;
- }
- if (rtw_efuse_map_write(padapter, addr, cnt, &val) == _FAIL) {
- DBG_88E("rtw_efuse_map_write error\n");
- return -EFAULT;
- } else {
- sprintf(extra, " efuse write ok :%d", val);
- }
- } else {
- sprintf(extra, "%d", val);
- }
+ if (bwrite == 0) {
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if (2 > max_available_size) {
+ DBG_88E("no available efuse!\n");
+ return -EFAULT;
+ }
+ if (rtw_efuse_map_write(padapter, addr, cnt, &val) == _FAIL) {
+ DBG_88E("rtw_efuse_map_write error\n");
+ return -EFAULT;
+ } else {
+ sprintf(extra, " efuse write ok :%d", val);
+ }
+ } else {
+ sprintf(extra, "%d", val);
+ }
wrqu->length = strlen(extra);
return 0;
@@ -7268,7 +7253,7 @@ static int rtw_mp_reset_stats(struct net_device *dev,
/* reset phy counter */
write_bbreg(padapter, 0xf14, BIT16, 0x1);
- rtw_msleep_os(10);
+ msleep(10);
write_bbreg(padapter, 0xf14, BIT16, 0x0);
return 0;
@@ -7545,7 +7530,7 @@ static int rtw_mp_get(struct net_device *dev,
break;
}
- rtw_msleep_os(10); /* delay 5ms for sending pkt before exit adb shell operation */
+ msleep(10); /* delay 5ms for sending pkt before exit adb shell operation */
return 0;
}
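Illustrative sketch, not part of the patch: the bounded-poll idiom used in rtw_wx_get_scan() and rtw_get_ap_info(), kept generic. msleep() may oversleep, so the bound is on iterations rather than exact wall-clock time; poll_fwstate_clear() is a hypothetical helper.

	#include <linux/delay.h>
	#include <linux/types.h>

	static bool poll_fwstate_clear(struct mlme_priv *pmlmepriv, int state, int max_polls)
	{
		int i;

		for (i = 0; i < max_polls; i++) {
			if (!check_fwstate(pmlmepriv, state))
				return true;	/* state bits cleared */
			msleep(30);		/* sleep roughly 30 ms between checks */
		}
		return false;			/* gave up after max_polls attempts */
	}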
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 17659bb..68f98fa 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -652,7 +652,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
return dscp >> 5;
}
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -761,7 +762,7 @@ void rtw_stop_drv_threads(struct adapter *padapter)
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_stop_drv_threads\n"));
/* Below is to termindate rtw_cmd_thread & event_thread... */
- _rtw_up_sema(&padapter->cmdpriv.cmd_queue_sema);
+ up(&padapter->cmdpriv.cmd_queue_sema);
if (padapter->cmdThread)
_rtw_down_sema(&padapter->cmdpriv.terminate_cmdthread_sema);
@@ -924,7 +925,7 @@ _func_enter_;
rtw_hal_sreset_init(padapter);
- _rtw_spinlock_init(&padapter->br_ext_lock);
+ spin_lock_init(&padapter->br_ext_lock);
exit:
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw\n"));
@@ -977,9 +978,6 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
}
#endif
-
- _rtw_spinlock_free(&padapter->br_ext_lock);
-
free_mlme_ext_priv(&padapter->mlmeextpriv);
rtw_free_cmd_priv(&padapter->cmdpriv);
@@ -993,8 +991,6 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
_rtw_free_recv_priv(&padapter->recvpriv);
- rtw_free_pwrctrl_priv(padapter);
-
rtw_hal_free_data(padapter);
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("<== rtw_free_drv_sw\n"));
@@ -1157,7 +1153,7 @@ netdev_open_error:
int rtw_ips_pwr_up(struct adapter *padapter)
{
int result;
- u32 start_time = rtw_get_current_time();
+ u32 start_time = jiffies;
DBG_88E("===> rtw_ips_pwr_up..............\n");
rtw_reset_drv_sw(padapter);
@@ -1171,7 +1167,7 @@ int rtw_ips_pwr_up(struct adapter *padapter)
void rtw_ips_pwr_down(struct adapter *padapter)
{
- u32 start_time = rtw_get_current_time();
+ u32 start_time = jiffies;
DBG_88E("===> rtw_ips_pwr_down...................\n");
padapter->bCardDisableWOHSM = true;
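Illustrative sketch, not part of the patch: rtw_select_queue() gains a third parameter above because the networking core extended the ndo_select_queue prototype with an accel_priv argument (used for layer-2 forwarding offload) in this kernel cycle. Below is a minimal callback matching the new three-argument prototype; demo_select_queue() is a hypothetical name.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static u16 demo_select_queue(struct net_device *dev, struct sk_buff *skb,
				     void *accel_priv)
	{
		/* accel_priv is unused by drivers without L2-forwarding offload */
		return skb->priority >= 4 ? 1 : 0;	/* crude two-queue split */
	}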
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index a1ae727..8c3b077 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -52,7 +52,7 @@ u32 rtw_atoi(u8 *s)
}
if (flag == 1)
num = num * -1;
- return num;
+ return num;
}
inline u8 *_rtw_vmalloc(u32 sz)
@@ -161,20 +161,6 @@ void rtw_list_insert_tail(struct list_head *plist, struct list_head *phead)
Caller must check if the list is empty before calling rtw_list_delete
*/
-void _rtw_init_sema(struct semaphore *sema, int init_val)
-{
- sema_init(sema, init_val);
-}
-
-void _rtw_free_sema(struct semaphore *sema)
-{
-}
-
-void _rtw_up_sema(struct semaphore *sema)
-{
- up(sema);
-}
-
u32 _rtw_down_sema(struct semaphore *sema)
{
if (down_interruptible(sema))
@@ -183,29 +169,10 @@ u32 _rtw_down_sema(struct semaphore *sema)
return _SUCCESS;
}
-void _rtw_mutex_init(struct mutex *pmutex)
-{
- mutex_init(pmutex);
-}
-
-void _rtw_mutex_free(struct mutex *pmutex)
-{
- mutex_destroy(pmutex);
-}
-
-void _rtw_spinlock_init(spinlock_t *plock)
-{
- spin_lock_init(plock);
-}
-
-void _rtw_spinlock_free(spinlock_t *plock)
-{
-}
-
void _rtw_init_queue(struct __queue *pqueue)
{
_rtw_init_listhead(&(pqueue->queue));
- _rtw_spinlock_init(&(pqueue->lock));
+ spin_lock_init(&(pqueue->lock));
}
u32 _rtw_queue_empty(struct __queue *pqueue)
@@ -221,11 +188,6 @@ u32 rtw_end_of_queue_search(struct list_head *head, struct list_head *plist)
return false;
}
-u32 rtw_get_current_time(void)
-{
- return jiffies;
-}
-
inline u32 rtw_systime_to_ms(u32 systime)
{
return systime * 1000 / HZ;
@@ -236,8 +198,7 @@ inline u32 rtw_ms_to_systime(u32 ms)
return ms * HZ / 1000;
}
-/* the input parameter start use the same unit as returned by
- * rtw_get_current_time */
+/* the input parameter start must be in jiffies */
inline s32 rtw_get_passing_time_ms(u32 start)
{
return rtw_systime_to_ms(jiffies-start);
@@ -260,102 +221,8 @@ void rtw_sleep_schedulable(int ms)
return;
}
-void rtw_msleep_os(int ms)
-{
- msleep((unsigned int)ms);
-}
-
-void rtw_usleep_os(int us)
-{
- if (1 < (us/1000))
- msleep(1);
- else
- msleep((us/1000) + 1);
-}
-
-void rtw_mdelay_os(int ms)
-{
- mdelay((unsigned long)ms);
-}
-
-void rtw_udelay_os(int us)
-{
- udelay((unsigned long)us);
-}
-
-void rtw_yield_os(void)
-{
- yield();
-}
-
#define RTW_SUSPEND_LOCK_NAME "rtw_wifi"
-inline void rtw_suspend_lock_init(void)
-{
-}
-
-inline void rtw_suspend_lock_uninit(void)
-{
-}
-
-inline void rtw_lock_suspend(void)
-{
-}
-
-inline void rtw_unlock_suspend(void)
-{
-}
-
-inline void ATOMIC_SET(ATOMIC_T *v, int i)
-{
- atomic_set(v, i);
-}
-
-inline int ATOMIC_READ(ATOMIC_T *v)
-{
- return atomic_read(v);
-}
-
-inline void ATOMIC_ADD(ATOMIC_T *v, int i)
-{
- atomic_add(i, v);
-}
-
-inline void ATOMIC_SUB(ATOMIC_T *v, int i)
-{
- atomic_sub(i, v);
-}
-
-inline void ATOMIC_INC(ATOMIC_T *v)
-{
- atomic_inc(v);
-}
-
-inline void ATOMIC_DEC(ATOMIC_T *v)
-{
- atomic_dec(v);
-}
-
-inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
-{
- return atomic_add_return(i, v);
-}
-
-inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
-{
- return atomic_sub_return(i, v);
-}
-
-inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
-{
- return atomic_inc_return(v);
-}
-
-inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
-{
- return atomic_dec_return(v);
-}
-
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
void *old_priv)
{
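[Editorial note] The hunks above remove one-line wrappers around semaphores, mutexes, spinlocks, atomics and the sleep/delay helpers; the rest of the patch converts their callers to the underlying kernel primitives. A sketch of those direct replacements, with invented identifiers (none of these names appear in the driver):

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(demo_mutex);		/* instead of _rtw_mutex_init() */
static DEFINE_SPINLOCK(demo_lock);		/* instead of _rtw_spinlock_init() */
static atomic_t demo_cnt = ATOMIC_INIT(0);	/* instead of ATOMIC_SET(v, 0) */

static void demo_replacements(void)
{
	mutex_lock(&demo_mutex);
	atomic_inc(&demo_cnt);			/* instead of ATOMIC_INC() */
	mutex_unlock(&demo_mutex);

	spin_lock(&demo_lock);
	spin_unlock(&demo_lock);

	msleep(10);				/* instead of rtw_msleep_os(10) */
}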
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index 3852ff4..2a18b32 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -90,16 +90,16 @@ void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
u32 cur_time = 0;
if (psecuritypriv->last_mic_err_time == 0) {
- psecuritypriv->last_mic_err_time = rtw_get_current_time();
+ psecuritypriv->last_mic_err_time = jiffies;
} else {
- cur_time = rtw_get_current_time();
+ cur_time = jiffies;
if (cur_time - psecuritypriv->last_mic_err_time < 60*HZ) {
psecuritypriv->btkip_countermeasure = true;
psecuritypriv->last_mic_err_time = 0;
psecuritypriv->btkip_countermeasure_time = cur_time;
} else {
- psecuritypriv->last_mic_err_time = rtw_get_current_time();
+ psecuritypriv->last_mic_err_time = jiffies;
}
}
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index 6cf71cc..a3c2bc5 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -159,7 +159,6 @@ int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
int bytes_written = 0;
struct android_wifi_priv_cmd priv_cmd;
- rtw_lock_suspend();
if (!ifr->ifr_data) {
ret = -EINVAL;
goto exit;
@@ -287,7 +286,6 @@ response:
ret = bytes_written;
}
exit:
- rtw_unlock_suspend();
kfree(command);
return ret;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 7d14779..0a341d6 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -71,7 +71,7 @@ struct rtw_usb_drv {
};
static struct rtw_usb_drv rtl8188e_usb_drv = {
- .usbdrv.name = (char *)"r8188eu",
+ .usbdrv.name = "r8188eu",
.usbdrv.probe = rtw_drv_init,
.usbdrv.disconnect = rtw_dev_remove,
.usbdrv.id_table = rtw_usb_id_tbl,
@@ -126,7 +126,7 @@ static u8 rtw_init_intf_priv(struct dvobj_priv *dvobj)
{
u8 rst = _SUCCESS;
- _rtw_mutex_init(&dvobj->usb_vendor_req_mutex);
+ mutex_init(&dvobj->usb_vendor_req_mutex);
dvobj->usb_alloc_vendor_req_buf = rtw_zmalloc(MAX_USB_IO_CTL_SIZE);
if (dvobj->usb_alloc_vendor_req_buf == NULL) {
@@ -144,7 +144,7 @@ static u8 rtw_deinit_intf_priv(struct dvobj_priv *dvobj)
u8 rst = _SUCCESS;
kfree(dvobj->usb_alloc_vendor_req_buf);
- _rtw_mutex_free(&dvobj->usb_vendor_req_mutex);
+ mutex_destroy(&dvobj->usb_vendor_req_mutex);
return rst;
}
@@ -240,7 +240,7 @@ _func_enter_;
}
/* 3 misc */
- _rtw_init_sema(&(pdvobjpriv->usb_suspend_sema), 0);
+ sema_init(&(pdvobjpriv->usb_suspend_sema), 0);
rtw_reset_continual_urb_error(pdvobjpriv);
usb_get_dev(pusbd);
@@ -504,7 +504,7 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
int ret = 0;
- u32 start_time = rtw_get_current_time();
+ u32 start_time = jiffies;
_func_enter_;
@@ -586,7 +586,7 @@ int rtw_resume_process(struct adapter *padapter)
struct net_device *pnetdev;
struct pwrctrl_priv *pwrpriv = NULL;
int ret = -1;
- u32 start_time = rtw_get_current_time();
+ u32 start_time = jiffies;
_func_enter_;
DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
@@ -657,7 +657,6 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
padapter->hw_init_mutex = &usb_drv->hw_init_mutex;
/* step 1-1., decide the chip_type via vid/pid */
- padapter->interface_type = RTW_USB;
chip_by_usb_id(padapter, pdid);
if (rtw_handle_dualmac(padapter, 1) != _SUCCESS)
@@ -865,11 +864,8 @@ static int __init rtw_drv_entry(void)
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_entry\n"));
DBG_88E(DRV_NAME " driver version=%s\n", DRIVERVERSION);
- DBG_88E("build time: %s %s\n", __DATE__, __TIME__);
- rtw_suspend_lock_init();
-
- _rtw_mutex_init(&usb_drv->hw_init_mutex);
+ mutex_init(&usb_drv->hw_init_mutex);
usb_drv->drv_registered = true;
return usb_register(&usb_drv->usbdrv);
@@ -880,12 +876,10 @@ static void __exit rtw_drv_halt(void)
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_halt\n"));
DBG_88E("+rtw_drv_halt\n");
- rtw_suspend_lock_uninit();
-
usb_drv->drv_registered = false;
usb_deregister(&usb_drv->usbdrv);
- _rtw_mutex_free(&usb_drv->hw_init_mutex);
+ mutex_destroy(&usb_drv->hw_init_mutex);
DBG_88E("-rtw_drv_halt\n");
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index 4c71e3b..7e3f2fa 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -146,7 +146,7 @@ _func_enter_;
}
haldata = GET_HAL_DATA(padapter);
- haldata->srestpriv.last_tx_complete_time = rtw_get_current_time();
+ haldata->srestpriv.last_tx_complete_time = jiffies;
check_completion:
rtw_sctx_done_err(&pxmitbuf->sctx,
@@ -186,7 +186,7 @@ _func_enter_;
goto exit;
}
- _enter_critical(&pxmitpriv->lock, &irqL);
+ spin_lock_irqsave(&pxmitpriv->lock, irqL);
switch (addr) {
case VO_QUEUE_INX:
@@ -213,7 +213,7 @@ _func_enter_;
break;
}
- _exit_critical(&pxmitpriv->lock, &irqL);
+ spin_unlock_irqrestore(&pxmitpriv->lock, irqL);
purb = pxmitbuf->pxmit_urb[0];
@@ -230,7 +230,7 @@ _func_enter_;
if (!status) {
struct hal_data_8188e *haldata = GET_HAL_DATA(padapter);
- haldata->srestpriv.last_tx_time = rtw_get_current_time();
+ haldata->srestpriv.last_tx_time = jiffies;
} else {
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR);
DBG_88E("usb_write_port, status =%d\n", status);
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 2e586c0..9005971 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -156,7 +156,6 @@ void rtw_os_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe)
void rtw_os_xmit_schedule(struct adapter *padapter)
{
- unsigned long irql;
struct xmit_priv *pxmitpriv;
if (!padapter)
@@ -164,12 +163,12 @@ void rtw_os_xmit_schedule(struct adapter *padapter)
pxmitpriv = &padapter->xmitpriv;
- _enter_critical_bh(&pxmitpriv->lock, &irql);
+ spin_lock_bh(&pxmitpriv->lock);
if (rtw_txframes_pending(padapter))
tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
- _exit_critical_bh(&pxmitpriv->lock, &irql);
+ spin_unlock_bh(&pxmitpriv->lock);
}
static void rtw_check_xmit_resource(struct adapter *padapter, struct sk_buff *pkt)
@@ -194,13 +193,12 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
{
struct sta_priv *pstapriv = &padapter->stapriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- unsigned long irql;
struct list_head *phead, *plist;
struct sk_buff *newskb;
struct sta_info *psta = NULL;
s32 res;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irql);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
@@ -230,12 +228,12 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
DBG_88E("%s-%d: skb_copy() failed!\n", __func__, __LINE__);
pxmitpriv->tx_drop++;
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irql);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
return false; /* Caller shall tx this multicast frame via normal way. */
}
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irql);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
dev_kfree_skb_any(skb);
return true;
}
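[Editorial note] The locking conversions in the files above follow one rule: the bottom-half variants drop the now-unused flags argument, while the IRQ-saving variant keeps it. A self-contained sketch of both patterns; the lock and functions are illustrative, not from the patch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_xmit_lock);

static void demo_bh_context(void)
{
	spin_lock_bh(&demo_xmit_lock);	/* was _enter_critical_bh(&lock, &irql) */
	/* ... */
	spin_unlock_bh(&demo_xmit_lock);	/* was _exit_critical_bh(&lock, &irql) */
}

static void demo_irq_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_xmit_lock, flags);	/* was _enter_critical() */
	/* ... */
	spin_unlock_irqrestore(&demo_xmit_lock, flags);
}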
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index 1260f10..eb33c51 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -144,7 +144,7 @@ void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
return;
}
- for (j = 0 ; j < pTriple->NumChnls; j++) {
+ for (j = 0; j < pTriple->NumChnls; j++) {
pDot11dInfo->channel_map[pTriple->FirstChnl + j] = 1;
pDot11dInfo->MaxTxPwrDbmList[pTriple->FirstChnl + j] =
pTriple->MaxTxPowerInDbm;
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index fb7683f..eeea5026 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -87,7 +87,10 @@ static inline void cpMacAddr(unsigned char *des, unsigned char *src)
#define CIE_WATCHDOG_TH 1
#define GET_CIE_WATCHDOG(__pIeeeDev) \
(GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog)
-#define RESET_CIE_WATCHDOG(__pIeeeDev) GET_CIE_WATCHDOG(__pIeeeDev) = 0
+static inline void RESET_CIE_WATCHDOG(struct rtllib_device *__pIeeeDev)
+{
+ GET_CIE_WATCHDOG(__pIeeeDev) = 0;
+}
#define UPDATE_CIE_WATCHDOG(__pIeeeDev) (++GET_CIE_WATCHDOG(__pIeeeDev))
#define IS_DOT11D_STATE_DONE(__pIeeeDev) \
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 2cace9a..4a35f9b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -30,7 +30,7 @@
#include "rtl_dm.h"
#include "rtl_wx.h"
-extern int WDCAPARA_ADD[];
+static int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI, EDCAPARA_VO};
void rtl8192e_start_beacon(struct net_device *dev)
{
@@ -193,11 +193,12 @@ void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val)
dm_init_edca_turbo(dev);
- u4bAcParam = ((((u32)(qos_parameters->tx_op_limit[pAcParam])) <<
+ u4bAcParam = (((le16_to_cpu(
+ qos_parameters->tx_op_limit[pAcParam])) <<
AC_PARAM_TXOP_LIMIT_OFFSET) |
- (((u32)(qos_parameters->cw_max[pAcParam])) <<
+ ((le16_to_cpu(qos_parameters->cw_max[pAcParam])) <<
AC_PARAM_ECW_MAX_OFFSET) |
- (((u32)(qos_parameters->cw_min[pAcParam])) <<
+ ((le16_to_cpu(qos_parameters->cw_min[pAcParam])) <<
AC_PARAM_ECW_MIN_OFFSET) |
(((u32)u1bAIFS) << AC_PARAM_AIFS_OFFSET));
@@ -1271,7 +1272,7 @@ void rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
pdesc->LastSeg = 1;
pdesc->TxBufferSize = skb->len;
- pdesc->TxBuffAddr = cpu_to_le32(mapping);
+ pdesc->TxBuffAddr = mapping;
}
void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
@@ -1301,7 +1302,7 @@ void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
entry_tmp->RATid = (u8)DESC_PACKET_TYPE_INIT;
}
entry->TxBufferSize = skb->len;
- entry->TxBuffAddr = cpu_to_le32(mapping);
+ entry->TxBuffAddr = mapping;
entry->OWN = 1;
}
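[Editorial note] Two endianness decisions are visible in this file: the QoS parameter fields are little-endian on the wire, so they are now read with le16_to_cpu() before being shifted into the register value, while the DMA descriptor address is programmed in host order, hence the dropped cpu_to_le32(). A small sketch of the __le16 accessor pattern, with an invented structure:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative only: accessing little-endian on-air fields from a
 * host-endian driver. This structure is not from the driver.
 */
struct demo_qos_param {
	__le16 tx_op_limit;
	__le16 cw_min;
};

static u32 demo_pack(const struct demo_qos_param *p, u32 aifs)
{
	return ((u32)le16_to_cpu(p->tx_op_limit) << 16) |
	       ((u32)le16_to_cpu(p->cw_min) << 8) |
	       aifs;
}

static void demo_store(struct demo_qos_param *p, u16 host_val)
{
	p->cw_min = cpu_to_le16(host_val);
}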
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 21e6ddd..5d6d304 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -1179,7 +1179,7 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev)
RT_TRACE(COMP_SWBW, "==>rtl8192_SetBWModeWorkItem() Switch to %s "
"bandwidth\n", priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz")
+ "20MHz" : "40MHz");
if (priv->rf_chip == RF_PSEUDO_11N) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index fa5603a..c46c65c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -28,7 +28,6 @@
#include "r8190P_rtl8256.h" /* RTL8225 Radio frontend */
#include "r8192E_cmdpkt.h"
-extern int hwwep;
void CamResetAllEntry(struct net_device *dev)
{
u32 ulcommand = 0;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index d93caca..c01abc2 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -562,8 +562,8 @@ void rtl8192_update_cap(struct net_device *dev, u16 cap)
}
static struct rtllib_qos_parameters def_qos_parameters = {
- {3, 3, 3, 3},
- {7, 7, 7, 7},
+ {cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3)},
+ {cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7)},
{2, 2, 2, 2},
{0, 0, 0, 0},
{0, 0, 0, 0}
@@ -585,8 +585,6 @@ static void rtl8192_update_beacon(void *data)
rtl8192_update_cap(dev, net->capability);
}
-int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI, EDCAPARA_VO};
-
static void rtl8192_qos_activate(void *data)
{
struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv,
@@ -1845,7 +1843,7 @@ static void rtl8192_free_tx_ring(struct net_device *dev, unsigned int prio)
struct tx_desc *entry = &ring->desc[ring->idx];
struct sk_buff *skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr),
+ pci_unmap_single(priv->pdev, entry->TxBuffAddr,
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
@@ -1950,7 +1948,7 @@ static void rtl8192_tx_isr(struct net_device *dev, int prio)
}
skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr),
+ pci_unmap_single(priv->pdev, entry->TxBuffAddr,
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
@@ -2011,7 +2009,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
fwinfo_size = sizeof(struct tx_fwinfo_8190pci);
header = (struct rtllib_hdr_1addr *)(((u8 *)skb->data) + fwinfo_size);
- fc = header->frame_ctl;
+ fc = le16_to_cpu(header->frame_ctl);
type = WLAN_FC_GET_TYPE(fc);
stype = WLAN_FC_GET_STYPE(fc);
pda_addr = header->addr1;
@@ -2101,7 +2099,7 @@ static short rtl8192_alloc_rx_desc_ring(struct net_device *dev)
dev_kfree_skb_any(skb);
return -1;
}
- entry->BufferAddress = cpu_to_le32(*mapping);
+ entry->BufferAddress = *mapping;
entry->Length = priv->rxbuffersize;
entry->OWN = 1;
@@ -2137,8 +2135,8 @@ static int rtl8192_alloc_tx_desc_ring(struct net_device *dev,
for (i = 0; i < entries; i++)
ring[i].NextDescAddress =
- cpu_to_le32((u32)dma + ((i + 1) % entries) *
- sizeof(*ring));
+ (u32)dma + ((i + 1) % entries) *
+ sizeof(*ring);
return 0;
}
@@ -2198,7 +2196,7 @@ void rtl8192_pci_resetdescring(struct net_device *dev)
__skb_dequeue(&ring->queue);
pci_unmap_single(priv->pdev,
- le32_to_cpu(entry->TxBuffAddr),
+ entry->TxBuffAddr,
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
@@ -2400,7 +2398,7 @@ static void rtl8192_rx_normal(struct net_device *dev)
}
}
done:
- pdesc->BufferAddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
+ pdesc->BufferAddress = *((dma_addr_t *)skb->cb);
pdesc->OWN = 1;
pdesc->Length = priv->rxbuffersize;
if (priv->rx_idx[rx_queue_idx] == priv->rxringcount-1)
@@ -2692,7 +2690,7 @@ out:
}
-irqreturn_t rtl8192_interrupt(int irq, void *netdev)
+static irqreturn_t rtl8192_interrupt(int irq, void *netdev)
{
struct net_device *dev = (struct net_device *) netdev;
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index b015bf6..35fc116 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/types.h>
@@ -188,6 +187,8 @@
#define MAX_RX_COUNT 64
#define MAX_TX_QUEUE_COUNT 9
+extern int hwwep;
+
enum RTL819x_PHY_PARAM {
RTL819X_PHY_MACPHY_REG = 0,
RTL819X_PHY_MACPHY_REG_PG = 1,
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 32fbbc9..adc6cc7 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -115,14 +115,14 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
if (ACT_ADDBARSP == type) {
RT_TRACE(COMP_DBG, "====>to send ADDBARSP\n");
- tmp = cpu_to_le16(StatusCode);
+ tmp = StatusCode;
memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
}
- tmp = cpu_to_le16(pBA->BaParamSet.shortData);
+ tmp = pBA->BaParamSet.shortData;
memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
- tmp = cpu_to_le16(pBA->BaTimeoutValue);
+ tmp = pBA->BaTimeoutValue;
memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
@@ -178,10 +178,10 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
*tag ++= ACT_CAT_BA;
*tag ++= ACT_DELBA;
- tmp = cpu_to_le16(DelbaParamSet.shortData);
+ tmp = DelbaParamSet.shortData;
memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
- tmp = cpu_to_le16(ReasonCode);
+ tmp = ReasonCode;
memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
diff --git a/drivers/staging/rtl8192e/rtl819x_Qos.h b/drivers/staging/rtl8192e/rtl819x_Qos.h
index 5ecd556..973342b 100644
--- a/drivers/staging/rtl8192e/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192e/rtl819x_Qos.h
@@ -201,43 +201,6 @@ enum qos_ie_source {
#define AC_PARAM_SIZE 4
-#define GET_WMM_AC_PARAM_AIFSN(_pStart) \
- ((u8)LE_BITS_TO_4BYTE(_pStart, 0, 4))
-#define SET_WMM_AC_PARAM_AIFSN(_pStart, _val) \
- SET_BITS_TO_LE_4BYTE(_pStart, 0, 4, _val)
-
-#define GET_WMM_AC_PARAM_ACM(_pStart) \
- ((u8)LE_BITS_TO_4BYTE(_pStart, 4, 1))
-#define SET_WMM_AC_PARAM_ACM(_pStart, _val) \
- SET_BITS_TO_LE_4BYTE(_pStart, 4, 1, _val)
-
-#define GET_WMM_AC_PARAM_ACI(_pStart) \
- ((u8)LE_BITS_TO_4BYTE(_pStart, 5, 2))
-#define SET_WMM_AC_PARAM_ACI(_pStart, _val) \
- SET_BITS_TO_LE_4BYTE(_pStart, 5, 2, _val)
-
-#define GET_WMM_AC_PARAM_ACI_AIFSN(_pStart) \
- ((u8)LE_BITS_TO_4BYTE(_pStart, 0, 8))
-#define SET_WMM_AC_PARAM_ACI_AIFSN(_pStart, _val) \
- SET_BITS_TO_LE_4BYTE(_pStart, 0, 8, _val)
-
-#define GET_WMM_AC_PARAM_ECWMIN(_pStart) \
- ((u8)LE_BITS_TO_4BYTE(_pStart, 8, 4))
-#define SET_WMM_AC_PARAM_ECWMIN(_pStart, _val) \
- SET_BITS_TO_LE_4BYTE(_pStart, 8, 4, _val)
-
-#define GET_WMM_AC_PARAM_ECWMAX(_pStart) \
- ((u8)LE_BITS_TO_4BYTE(_pStart, 12, 4))
-#define SET_WMM_AC_PARAM_ECWMAX(_pStart, _val) \
- SET_BITS_TO_LE_4BYTE(_pStart, 12, 4, _val)
-
-#define GET_WMM_AC_PARAM_TXOP_LIMIT(_pStart) \
- ((u8)LE_BITS_TO_4BYTE(_pStart, 16, 16))
-#define SET_WMM_AC_PARAM_TXOP_LIMIT(_pStart, _val) \
- SET_BITS_TO_LE_4BYTE(_pStart, 16, 16, _val)
-
-
-
#define WMM_PARAM_ELEMENT_SIZE (8+(4*AC_PARAM_SIZE))
enum qos_ele_subtype {
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 05ef49f..83f5f57 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -2896,7 +2896,7 @@ extern void HTConstructCapabilityElement(struct rtllib_device *ieee,
extern void HTConstructInfoElement(struct rtllib_device *ieee,
u8 *posHTInfo, u8 *len, u8 isEncrypt);
extern void HTConstructRT2RTAggElement(struct rtllib_device *ieee,
- u8 *posRT2RTAgg, u8* len);
+ u8 *posRT2RTAgg, u8 *len);
extern void HTOnAssocRsp(struct rtllib_device *ieee);
extern void HTInitializeHTInfo(struct rtllib_device *ieee);
extern void HTInitializeBssDesc(struct bss_ht *pBssHT);
diff --git a/drivers/staging/rtl8192e/rtllib_crypt.c b/drivers/staging/rtl8192e/rtllib_crypt.c
index 86152d0..60c0ced 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt.c
@@ -183,7 +183,7 @@ struct lib80211_crypto_ops *rtllib_get_crypto_ops(const char *name)
EXPORT_SYMBOL(rtllib_get_crypto_ops);
-static void * rtllib_crypt_null_init(int keyidx) { return (void *) 1; }
+static void *rtllib_crypt_null_init(int keyidx) { return (void *) 1; }
static void rtllib_crypt_null_deinit(void *priv) {}
static struct lib80211_crypto_ops rtllib_crypt_null = {
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
index e51cb49..5e5c76b 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -443,13 +443,13 @@ static struct lib80211_crypto_ops rtllib_crypt_ccmp = {
};
-int __init rtllib_crypto_ccmp_init(void)
+static int __init rtllib_crypto_ccmp_init(void)
{
return lib80211_register_crypto_ops(&rtllib_crypt_ccmp);
}
-void __exit rtllib_crypto_ccmp_exit(void)
+static void __exit rtllib_crypto_ccmp_exit(void)
{
lib80211_unregister_crypto_ops(&rtllib_crypt_ccmp);
}
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 5cfd73b..7b5366b 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -173,7 +173,7 @@ static inline u16 Mk16(u8 hi, u8 lo)
static inline u16 Mk16_le(u16 *v)
{
- return le16_to_cpu(*v);
+ return *v;
}
@@ -427,7 +427,7 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (net_ratelimit()) {
printk(KERN_DEBUG "TKIP: replay detected: STA="
" %pM previous TSC %08x%04x received "
- "TSC %08x%04x\n",hdr->addr2,
+ "TSC %08x%04x\n", hdr->addr2,
tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
}
tkey->dot11RSNAStatsTKIPReplays++;
@@ -752,13 +752,13 @@ static struct lib80211_crypto_ops rtllib_crypt_tkip = {
};
-int __init rtllib_crypto_tkip_init(void)
+static int __init rtllib_crypto_tkip_init(void)
{
return lib80211_register_crypto_ops(&rtllib_crypt_tkip);
}
-void __exit rtllib_crypto_tkip_exit(void)
+static void __exit rtllib_crypto_tkip_exit(void)
{
lib80211_unregister_crypto_ops(&rtllib_crypt_tkip);
}
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index c4df6e0..b0e5f1f 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -270,13 +270,13 @@ static struct lib80211_crypto_ops rtllib_crypt_wep = {
};
-int __init rtllib_crypto_wep_init(void)
+static int __init rtllib_crypto_wep_init(void)
{
return lib80211_register_crypto_ops(&rtllib_crypt_wep);
}
-void __exit rtllib_crypto_wep_exit(void)
+static void __exit rtllib_crypto_wep_exit(void)
{
lib80211_unregister_crypto_ops(&rtllib_crypt_wep);
}
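[Editorial note] Marking the __init/__exit handlers static works here because module_init()/module_exit() take the functions' addresses from within the same file, so no external linkage is needed. A minimal sketch of the pattern; demo_init/demo_exit are invented names:

#include <linux/module.h>

static int __init demo_init(void)
{
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");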
diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h
index c59f67b..7537dae 100644
--- a/drivers/staging/rtl8192e/rtllib_debug.h
+++ b/drivers/staging/rtl8192e/rtllib_debug.h
@@ -75,12 +75,14 @@ do { \
if (rt_global_debug_component & component) \
printk(KERN_DEBUG DRV_NAME ":" x "\n" , \
##args);\
-} while (0);
+} while (0)
#define assert(expr) \
+do { \
if (!(expr)) { \
printk(KERN_INFO "Assertion failed! %s,%s,%s,line=%d\n", \
#expr, __FILE__, __func__, __LINE__); \
- }
+ } \
+} while (0);
#endif
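[Editorial note] The do { ... } while (0) wrapper added to assert() is the standard way to make a multi-statement macro behave as a single statement, so it can sit safely in an unbraced if/else. A short illustration of why the wrapper matters; demo_assert is illustrative, not the driver's macro:

#include <linux/printk.h>

#define demo_assert(expr)						\
do {									\
	if (!(expr))							\
		printk(KERN_INFO "Assertion failed: %s\n", #expr);	\
} while (0)

static void demo(int x)
{
	if (x)
		demo_assert(x > 0);	/* expands to one statement before the else */
	else
		printk(KERN_INFO "x is zero\n");
}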
diff --git a/drivers/staging/rtl8192e/rtllib_endianfree.h b/drivers/staging/rtl8192e/rtllib_endianfree.h
index b268605..b189fa5 100644
--- a/drivers/staging/rtl8192e/rtllib_endianfree.h
+++ b/drivers/staging/rtl8192e/rtllib_endianfree.h
@@ -33,9 +33,9 @@
#define ReadEF2Byte(_ptr) EF2Byte(*((u16 *)(_ptr)))
#define ReadEF4Byte(_ptr) EF4Byte(*((u32 *)(_ptr)))
-#define WriteEF1Byte(_ptr, _val) (*((u8 *)(_ptr))) = EF1Byte(_val)
-#define WriteEF2Byte(_ptr, _val) (*((u16 *)(_ptr))) = EF2Byte(_val)
-#define WriteEF4Byte(_ptr, _val) (*((u32 *)(_ptr))) = EF4Byte(_val)
+#define WriteEF1Byte(_ptr, _val) ((*((u8 *)(_ptr))) = EF1Byte(_val))
+#define WriteEF2Byte(_ptr, _val) ((*((u16 *)(_ptr))) = EF2Byte(_val))
+#define WriteEF4Byte(_ptr, _val) ((*((u32 *)(_ptr))) = EF4Byte(_val))
#if BYTE_ORDER == __MACHINE_LITTLE_ENDIAN
#define H2N1BYTE(_val) ((u8)(_val))
#define H2N2BYTE(_val) (((((u16)(_val))&0x00ff)<<8)|\
@@ -84,15 +84,6 @@
(~BIT_OFFSET_LEN_MASK_32(__BitOffset, __BitLen)) \
)
-#define SET_BITS_TO_LE_4BYTE(__pStart, __BitOffset, __BitLen, __Value) \
- *((u32 *)(__pStart)) = \
- EF4Byte( \
- LE_BITS_CLEARED_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
- | \
- ((((u32)__Value) & BIT_LEN_MASK_32(__BitLen)) << (__BitOffset)) \
- );
-
-
#define BIT_LEN_MASK_16(__BitLen) \
(0xFFFF >> (16 - (__BitLen)))
@@ -109,21 +100,6 @@
BIT_LEN_MASK_16(__BitLen) \
)
-#define LE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- LE_P2BYTE_TO_HOST_2BYTE(__pStart) \
- & \
- (~BIT_OFFSET_LEN_MASK_16(__BitOffset, __BitLen)) \
- )
-
-#define SET_BITS_TO_LE_2BYTE(__pStart, __BitOffset, __BitLen, __Value) \
- *((u16 *)(__pStart)) = \
- EF2Byte( \
- LE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
- | ((((u16)__Value) & BIT_LEN_MASK_16(__BitLen)) << \
- (__BitOffset)) \
- );
-
#define BIT_LEN_MASK_8(__BitLen) \
(0xFF >> (8 - (__BitLen)))
@@ -140,20 +116,6 @@
BIT_LEN_MASK_8(__BitLen) \
)
-#define LE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- LE_P1BYTE_TO_HOST_1BYTE(__pStart) \
- & \
- (~BIT_OFFSET_LEN_MASK_8(__BitOffset, __BitLen)) \
- )
-
-#define SET_BITS_TO_LE_1BYTE(__pStart, __BitOffset, __BitLen, __Value) \
- *((u8 *)(__pStart)) = EF1Byte( \
- LE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
- | ((((u8)__Value) & BIT_LEN_MASK_8(__BitLen)) << \
- (__BitOffset)) \
- );
-
#define N_BYTE_ALIGMENT(__Value, __Aligment) \
((__Aligment == 1) ? (__Value) : (((__Value + __Aligment - 1) / \
__Aligment) * __Aligment))
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index 51d46e0..136909e 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -237,7 +237,7 @@ static const struct file_operations fops = {
.release = single_release,
};
-int __init rtllib_init(void)
+static int __init rtllib_init(void)
{
struct proc_dir_entry *e;
@@ -257,7 +257,7 @@ int __init rtllib_init(void)
return 0;
}
-void __exit rtllib_exit(void)
+static void __exit rtllib_exit(void)
{
if (rtllib_proc) {
remove_proc_entry("debug_level", rtllib_proc);
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 1a011b9..6c8a8e1 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -211,7 +211,7 @@ rtllib_rx_frame_mgmt(struct rtllib_device *ieee, struct sk_buff *skb,
* this is not mandatory.... but seems that the probe
* response parser uses it
*/
- struct rtllib_hdr_3addr * hdr = (struct rtllib_hdr_3addr *)skb->data;
+ struct rtllib_hdr_3addr *hdr = (struct rtllib_hdr_3addr *)skb->data;
rx_stats->len = skb->len;
rtllib_rx_mgt(ieee, skb, rx_stats);
@@ -490,7 +490,7 @@ void rtllib_indicate_packets(struct rtllib_device *ieee, struct rtllib_rxb **prx
} else {
u16 len;
/* Leave Ethernet header part of hdr and full payload */
- len = htons(sub_skb->len);
+ len = sub_skb->len;
memcpy(skb_push(sub_skb, 2), &len, 2);
memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
@@ -1224,7 +1224,7 @@ static void rtllib_rx_indicate_pkt_legacy(struct rtllib_device *ieee,
} else {
u16 len;
/* Leave Ethernet header part of hdr and full payload */
- len = htons(sub_skb->len);
+ len = sub_skb->len;
memcpy(skb_push(sub_skb, 2), &len, 2);
memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
@@ -1632,13 +1632,13 @@ static int rtllib_qos_convert_ac_to_parameters(struct rtllib_qos_parameter_info
/* WMM spec P.11: The minimum value for AIFSN shall be 2 */
qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ? 2 : qos_param->aifs[aci];
- qos_param->cw_min[aci] = ac_params->ecw_min_max & 0x0F;
+ qos_param->cw_min[aci] = cpu_to_le16(ac_params->ecw_min_max & 0x0F);
- qos_param->cw_max[aci] = (ac_params->ecw_min_max & 0xF0) >> 4;
+ qos_param->cw_max[aci] = cpu_to_le16((ac_params->ecw_min_max & 0xF0) >> 4);
qos_param->flag[aci] =
(ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
- qos_param->tx_op_limit[aci] = le16_to_cpu(ac_params->tx_op_limit);
+ qos_param->tx_op_limit[aci] = ac_params->tx_op_limit;
}
return rc;
}
@@ -2260,9 +2260,9 @@ static inline int rtllib_network_init(
memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
network->capability = le16_to_cpu(beacon->capability);
network->last_scanned = jiffies;
- network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]);
- network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]);
- network->beacon_interval = le32_to_cpu(beacon->beacon_interval);
+ network->time_stamp[0] = beacon->time_stamp[0];
+ network->time_stamp[1] = beacon->time_stamp[1];
+ network->beacon_interval = le16_to_cpu(beacon->beacon_interval);
/* Where to pull this? beacon->listen_interval;*/
network->listen_interval = 0x0A;
network->rates_len = network->rates_ex_len = 0;
@@ -2528,29 +2528,30 @@ static inline void rtllib_process_probe_response(
"'%s' ( %pM ): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
escape_essid(info_element->data, info_element->len),
beacon->header.addr3,
- (beacon->capability & (1<<0xf)) ? '1' : '0',
- (beacon->capability & (1<<0xe)) ? '1' : '0',
- (beacon->capability & (1<<0xd)) ? '1' : '0',
- (beacon->capability & (1<<0xc)) ? '1' : '0',
- (beacon->capability & (1<<0xb)) ? '1' : '0',
- (beacon->capability & (1<<0xa)) ? '1' : '0',
- (beacon->capability & (1<<0x9)) ? '1' : '0',
- (beacon->capability & (1<<0x8)) ? '1' : '0',
- (beacon->capability & (1<<0x7)) ? '1' : '0',
- (beacon->capability & (1<<0x6)) ? '1' : '0',
- (beacon->capability & (1<<0x5)) ? '1' : '0',
- (beacon->capability & (1<<0x4)) ? '1' : '0',
- (beacon->capability & (1<<0x3)) ? '1' : '0',
- (beacon->capability & (1<<0x2)) ? '1' : '0',
- (beacon->capability & (1<<0x1)) ? '1' : '0',
- (beacon->capability & (1<<0x0)) ? '1' : '0');
+ (le16_to_cpu(beacon->capability) & (1<<0xf)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0xe)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0xd)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0xc)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0xb)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0xa)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x9)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x8)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x7)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x6)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x5)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x4)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x3)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x2)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x1)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x0)) ? '1' : '0');
if (rtllib_network_init(ieee, beacon, network, stats)) {
RTLLIB_DEBUG_SCAN("Dropped '%s' ( %pM) via %s.\n",
escape_essid(info_element->data,
info_element->len),
beacon->header.addr3,
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ WLAN_FC_GET_STYPE(
+ le16_to_cpu(beacon->header.frame_ctl)) ==
RTLLIB_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
goto free_network;
@@ -2560,7 +2561,7 @@ static inline void rtllib_process_probe_response(
if (!rtllib_legal_channel(ieee, network->channel))
goto free_network;
- if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ if (WLAN_FC_GET_STYPE(le16_to_cpu(beacon->header.frame_ctl)) ==
RTLLIB_STYPE_PROBE_RESP) {
if (IsPassiveChannel(ieee, network->channel)) {
printk(KERN_INFO "GetScanInfo(): For Global Domain, "
@@ -2629,7 +2630,8 @@ static inline void rtllib_process_probe_response(
RTLLIB_DEBUG_SCAN("Adding '%s' ( %pM) via %s.\n",
escape_essid(network->ssid,
network->ssid_len), network->bssid,
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ WLAN_FC_GET_STYPE(
+ le16_to_cpu(beacon->header.frame_ctl)) ==
RTLLIB_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
memcpy(target, network, sizeof(*target));
@@ -2640,7 +2642,8 @@ static inline void rtllib_process_probe_response(
RTLLIB_DEBUG_SCAN("Updating '%s' ( %pM) via %s.\n",
escape_essid(target->ssid,
target->ssid_len), target->bssid,
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ WLAN_FC_GET_STYPE(
+ le16_to_cpu(beacon->header.frame_ctl)) ==
RTLLIB_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
@@ -2682,15 +2685,17 @@ void rtllib_rx_mgt(struct rtllib_device *ieee,
{
struct rtllib_hdr_4addr *header = (struct rtllib_hdr_4addr *)skb->data ;
- if (WLAN_FC_GET_STYPE(header->frame_ctl) != RTLLIB_STYPE_PROBE_RESP &&
- WLAN_FC_GET_STYPE(header->frame_ctl) != RTLLIB_STYPE_BEACON)
+ if ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) !=
+ RTLLIB_STYPE_PROBE_RESP) &&
+ (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) !=
+ RTLLIB_STYPE_BEACON))
ieee->last_rx_ps_time = jiffies;
- switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
+ switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) {
case RTLLIB_STYPE_BEACON:
RTLLIB_DEBUG_MGMT("received BEACON (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
RTLLIB_DEBUG_SCAN("Beacon\n");
rtllib_process_probe_response(
ieee, (struct rtllib_probe_response *)header,
@@ -2705,14 +2710,15 @@ void rtllib_rx_mgt(struct rtllib_device *ieee,
case RTLLIB_STYPE_PROBE_RESP:
RTLLIB_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
RTLLIB_DEBUG_SCAN("Probe response\n");
rtllib_process_probe_response(ieee,
(struct rtllib_probe_response *)header, stats);
break;
case RTLLIB_STYPE_PROBE_REQ:
RTLLIB_DEBUG_MGMT("received PROBE RESQUEST (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(
+ le16_to_cpu(header->frame_ctl)));
RTLLIB_DEBUG_SCAN("Probe request\n");
if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
((ieee->iw_mode == IW_MODE_ADHOC ||
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 933bd6d..4bf72bc 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -227,7 +227,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
/* called with 2nd param 0, no mgmt lock required */
rtllib_sta_wakeup(ieee, 0);
- if (header->frame_ctl == RTLLIB_STYPE_BEACON)
+ if (le16_to_cpu(header->frame_ctl) == RTLLIB_STYPE_BEACON)
tcb_desc->queue_index = BEACON_QUEUE;
else
tcb_desc->queue_index = MGNT_QUEUE;
@@ -295,7 +295,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
u16 fc, type, stype;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
- fc = header->frame_ctl;
+ fc = le16_to_cpu(header->frame_ctl);
type = WLAN_FC_GET_TYPE(fc);
stype = WLAN_FC_GET_STYPE(fc);
@@ -807,18 +807,18 @@ inline struct sk_buff *rtllib_authentication_req(struct rtllib_network *beacon,
auth = (struct rtllib_authentication *)
skb_put(skb, sizeof(struct rtllib_authentication));
- auth->header.frame_ctl = RTLLIB_STYPE_AUTH;
+ auth->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_AUTH);
if (challengelen)
- auth->header.frame_ctl |= RTLLIB_FCTL_WEP;
+ auth->header.frame_ctl |= cpu_to_le16(RTLLIB_FCTL_WEP);
- auth->header.duration_id = 0x013a;
+ auth->header.duration_id = cpu_to_le16(0x013a);
memcpy(auth->header.addr1, beacon->bssid, ETH_ALEN);
memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy(auth->header.addr3, beacon->bssid, ETH_ALEN);
if (ieee->auth_mode == 0)
auth->algorithm = WLAN_AUTH_OPEN;
else if (ieee->auth_mode == 1)
- auth->algorithm = WLAN_AUTH_SHARED_KEY;
+ auth->algorithm = cpu_to_le16(WLAN_AUTH_SHARED_KEY);
else if (ieee->auth_mode == 2)
auth->algorithm = WLAN_AUTH_OPEN;
auth->transaction = cpu_to_le16(ieee->associate_seq);
@@ -921,8 +921,8 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 *dest)
if (ieee->short_slot && (ieee->current_network.capability &
WLAN_CAPABILITY_SHORT_SLOT_TIME))
- cpu_to_le16((beacon_buf->capability |=
- WLAN_CAPABILITY_SHORT_SLOT_TIME));
+ beacon_buf->capability |=
+ cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
if (encrypt)
@@ -952,7 +952,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 *dest)
u16 val16;
*(tag++) = MFIE_TYPE_IBSS_SET;
*(tag++) = 2;
- val16 = cpu_to_le16(ieee->current_network.atim_window);
+ val16 = ieee->current_network.atim_window;
memcpy((u8 *)tag, (u8 *)&val16, 2);
tag += 2;
}
@@ -1260,7 +1260,7 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
hdr->header.frame_ctl = RTLLIB_STYPE_ASSOC_REQ;
- hdr->header.duration_id = 37;
+ hdr->header.duration_id = cpu_to_le16(37);
memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy(hdr->header.addr3, beacon->bssid, ETH_ALEN);
@@ -1279,7 +1279,7 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
- hdr->listen_interval = beacon->listen_interval;
+ hdr->listen_interval = cpu_to_le16(beacon->listen_interval);
hdr->info_element[0].id = MFIE_TYPE_SSID;
@@ -1451,7 +1451,7 @@ static void rtllib_associate_abort_cb(unsigned long dev)
rtllib_associate_abort((struct rtllib_device *) dev);
}
-static void rtllib_associate_step1(struct rtllib_device *ieee, u8 * daddr)
+static void rtllib_associate_step1(struct rtllib_device *ieee, u8 *daddr)
{
struct rtllib_network *beacon = &ieee->current_network;
struct sk_buff *skb;
@@ -1785,7 +1785,7 @@ void rtllib_softmac_check_all_nets(struct rtllib_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
+static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
{
struct rtllib_authentication *a;
u8 *t;
@@ -3633,7 +3633,7 @@ out:
}
EXPORT_SYMBOL(rtllib_wpa_supplicant_ioctl);
-void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
+static void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
{
u8 OpMode;
u8 i;
@@ -3658,7 +3658,7 @@ void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
}
-void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib, u8 *asSta,
+static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib, u8 *asSta,
u8 asRsn)
{
u8 i;
@@ -3684,7 +3684,7 @@ void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib, u8 *asSta,
}
-void
+static void
rtllib_MgntDisconnectAP(
struct rtllib_device *rtllib,
u8 asRsn
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 3183627..7796488 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -171,7 +171,7 @@ inline int rtllib_put_snap(u8 *data, u16 h_proto)
snap->oui[1] = oui[1];
snap->oui[2] = oui[2];
- *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
+ *(u16 *)(data + SNAP_SIZE) = h_proto;
return SNAP_SIZE + sizeof(u16);
}
@@ -231,7 +231,7 @@ static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
memset(txb, 0, sizeof(struct rtllib_txb));
txb->nr_frags = nr_frags;
- txb->frag_size = txb_size;
+ txb->frag_size = cpu_to_le16(txb_size);
for (i = 0; i < nr_frags; i++) {
txb->fragments[i] = dev_alloc_skb(txb_size);
@@ -610,7 +610,7 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
}
txb->encrypted = 0;
- txb->payload_size = skb->len;
+ txb->payload_size = cpu_to_le16(skb->len);
memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
skb->len);
@@ -764,7 +764,7 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
goto failed;
}
txb->encrypted = encrypt;
- txb->payload_size = bytes;
+ txb->payload_size = cpu_to_le16(bytes);
if (qos_actived)
txb->queue_index = UP2AC(skb->priority);
@@ -812,10 +812,10 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
}
if ((qos_actived) && (!bIsMulticast)) {
frag_hdr->seq_ctl =
- rtllib_query_seqnum(ieee, skb_frag,
- header.addr1);
+ cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
+ header.addr1));
frag_hdr->seq_ctl =
- cpu_to_le16(frag_hdr->seq_ctl<<4 | i);
+ cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl)<<4 | i);
} else {
frag_hdr->seq_ctl =
cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
@@ -870,7 +870,7 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
}
txb->encrypted = 0;
- txb->payload_size = skb->len;
+ txb->payload_size = cpu_to_le16(skb->len);
memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
skb->len);
}
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index ad3bc56..c9d8c10 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/types.h>
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 82a77b4..37fe330 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -29,7 +29,6 @@
#define _OS_INTFS_C_
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/firmware.h>
#include "osdep_service.h"
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 5b6a96e..1a4b7a6 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -31,7 +31,6 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 7e32431..c71c7e5 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -31,7 +31,6 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 9fec6ed..23d539d 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -42,7 +42,6 @@
#include <linux/wireless.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <net/iw_handler.h>
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 8fa0f9d..3ea99ae 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -1043,9 +1043,6 @@ void r8712_got_addbareq_event_callback(struct _adapter *adapter, u8 *pbuf)
struct sta_priv *pstapriv = &adapter->stapriv;
struct recv_reorder_ctrl *precvreorder_ctrl = NULL;
- netdev_info(adapter->pnetdev, "%s: mac = %pM, seq = %d, tid = %d\n",
- __func__, pAddbareq_pram->MacAddress,
- pAddbareq_pram->StartSeqNum, pAddbareq_pram->tid);
psta = r8712_get_stainfo(pstapriv, pAddbareq_pram->MacAddress);
if (psta) {
precvreorder_ctrl =
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 5349669..aae5125 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -31,7 +31,6 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index dbefa43..bbd5888 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -353,6 +353,10 @@ static void disable_ht_for_spec_devid(const struct usb_device_id *pdid,
}
}
+static const struct device_type wlan_type = {
+ .name = "wlan",
+};
+
/*
* drv_init() - a device potentially for us
*
@@ -388,6 +392,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
padapter->pusb_intf = pusb_intf;
usb_set_intfdata(pusb_intf, pnetdev);
SET_NETDEV_DEV(pnetdev, &pusb_intf->dev);
+ pnetdev->dev.type = &wlan_type;
/* step 2. */
padapter->dvobj_init = &r8712_usb_dvobj_init;
padapter->dvobj_deinit = &r8712_usb_dvobj_deinit;
diff --git a/drivers/staging/rts5139/ms.c b/drivers/staging/rts5139/ms.c
index a27f7e2..9253f6a 100644
--- a/drivers/staging/rts5139/ms.c
+++ b/drivers/staging/rts5139/ms.c
@@ -48,7 +48,7 @@ static inline int ms_check_err_code(struct rts51x_chip *chip, u8 err_code)
{
struct ms_info *ms_card = &(chip->ms_card);
- return (ms_card->err_code == err_code);
+ return ms_card->err_code == err_code;
}
static int ms_parse_err_code(struct rts51x_chip *chip)
diff --git a/drivers/staging/rts5139/rts51x.c b/drivers/staging/rts5139/rts51x.c
index 0421346..a8d2d04 100644
--- a/drivers/staging/rts5139/rts51x.c
+++ b/drivers/staging/rts5139/rts51x.c
@@ -30,7 +30,6 @@
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/utsname.h>
diff --git a/drivers/staging/rts5139/rts51x_card.c b/drivers/staging/rts5139/rts51x_card.c
index 509d83e..03456d9 100644
--- a/drivers/staging/rts5139/rts51x_card.c
+++ b/drivers/staging/rts5139/rts51x_card.c
@@ -373,7 +373,7 @@ void rts51x_release_cards(struct rts51x_chip *chip)
static inline u8 double_depth(u8 depth)
{
- return ((depth > 1) ? (depth - 1) : depth);
+ return (depth > 1) ? (depth - 1) : depth;
}
int rts51x_switch_ssc_clock(struct rts51x_chip *chip, int clk)
@@ -653,8 +653,8 @@ int rts51x_switch_normal_clock(struct rts51x_chip *chip, int clk)
return STATUS_SUCCESS;
}
-int rts51x_card_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 sec_addr,
- u16 sec_cnt)
+int rts51x_card_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip,
+ u32 sec_addr, u16 sec_cnt)
{
int retval;
unsigned int lun = SCSI_LUN(srb);
@@ -770,8 +770,8 @@ void rts51x_eject_card(struct rts51x_chip *chip, unsigned int lun)
XD_INT | MS_INT | SD_INT);
}
-void rts51x_trans_dma_enable(enum dma_data_direction dir, struct rts51x_chip *chip,
- u32 byte_cnt, u8 pack_size)
+void rts51x_trans_dma_enable(enum dma_data_direction dir,
+ struct rts51x_chip *chip, u32 byte_cnt, u8 pack_size)
{
if (pack_size > DMA_1024)
pack_size = DMA_512;
diff --git a/drivers/staging/rts5139/rts51x_card.h b/drivers/staging/rts5139/rts51x_card.h
index e62b25c..df8816e 100644
--- a/drivers/staging/rts5139/rts51x_card.h
+++ b/drivers/staging/rts5139/rts51x_card.h
@@ -743,13 +743,13 @@ void rts51x_init_cards(struct rts51x_chip *chip);
void rts51x_release_cards(struct rts51x_chip *chip);
int rts51x_switch_ssc_clock(struct rts51x_chip *chip, int clk);
int rts51x_switch_normal_clock(struct rts51x_chip *chip, int clk);
-int rts51x_card_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 sec_addr,
- u16 sec_cnt);
+int rts51x_card_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip,
+ u32 sec_addr, u16 sec_cnt);
u8 rts51x_get_lun_card(struct rts51x_chip *chip, unsigned int lun);
int rts51x_select_card(struct rts51x_chip *chip, int card);
void rts51x_eject_card(struct rts51x_chip *chip, unsigned int lun);
-void rts51x_trans_dma_enable(enum dma_data_direction dir, struct rts51x_chip *chip,
- u32 byte_cnt, u8 pack_size);
+void rts51x_trans_dma_enable(enum dma_data_direction dir,
+ struct rts51x_chip *chip, u32 byte_cnt, u8 pack_size);
int rts51x_enable_card_clock(struct rts51x_chip *chip, u8 card);
int rts51x_card_power_on(struct rts51x_chip *chip, u8 card);
int rts51x_toggle_gpio(struct rts51x_chip *chip, u8 gpio);
diff --git a/drivers/staging/rts5139/rts51x_scsi.c b/drivers/staging/rts5139/rts51x_scsi.c
index a474eed..3a99025 100644
--- a/drivers/staging/rts5139/rts51x_scsi.c
+++ b/drivers/staging/rts5139/rts51x_scsi.c
@@ -1985,7 +1985,6 @@ static int show_info(struct seq_file *m, struct Scsi_Host *host)
SPRINTF(" Vendor: Realtek Corp.\n");
SPRINTF(" Product: RTS51xx USB Card Reader\n");
SPRINTF(" Version: %s\n", DRIVER_VERSION);
- SPRINTF(" Build: %s\n", __TIME__);
return 0;
}
diff --git a/drivers/staging/rts5208/Kconfig b/drivers/staging/rts5208/Kconfig
new file mode 100644
index 0000000..055655c
--- /dev/null
+++ b/drivers/staging/rts5208/Kconfig
@@ -0,0 +1,15 @@
+config RTS5208
+ tristate "Realtek PCI-E Card Reader RTS5208/5288 support"
+ depends on PCI && SCSI
+ help
+ Say Y here to include driver code to support the Realtek
+ PCI-E card reader rts5208/rts5288.
+
+ If this driver is compiled as a module, it will be named rts5208.
+
+config RTS5208_DEBUG
+ bool "Realtek PCI-E Card Reader RTS5208/5288 verbose debug"
+ depends on RTS5208
+ help
+ Say Y here in order to have the rts5208 code generate
+ verbose debugging messages.
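[Editorial note] With the entries above, the reader can be built as a module from a kernel configuration fragment such as the following, shown only as an example of using the new symbols:

# Example .config fragment (illustrative)
CONFIG_RTS5208=m
CONFIG_RTS5208_DEBUG=y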
diff --git a/drivers/staging/rts5208/Makefile b/drivers/staging/rts5208/Makefile
new file mode 100644
index 0000000..17b4471
--- /dev/null
+++ b/drivers/staging/rts5208/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_RTS5208) := rts5208.o
+
+ccflags-y := -Idrivers/scsi
+
+rts5208-y := rtsx.o rtsx_chip.o rtsx_transport.o rtsx_scsi.o \
+ rtsx_card.o general.o sd.o xd.o ms.o spi.o
diff --git a/drivers/staging/rts5208/TODO b/drivers/staging/rts5208/TODO
new file mode 100644
index 0000000..57bcf58
--- /dev/null
+++ b/drivers/staging/rts5208/TODO
@@ -0,0 +1,7 @@
+TODO:
+- use kernel coding style
+- checkpatch.pl fixes
+- We will use the stack in drivers/mmc to implement
+ rts5208/5288 in the future
+
+Micky Ching <micky_ching@realsil.com.cn>
\ No newline at end of file
diff --git a/drivers/staging/rts5208/debug.h b/drivers/staging/rts5208/debug.h
new file mode 100644
index 0000000..5ba8a3a
--- /dev/null
+++ b/drivers/staging/rts5208/debug.h
@@ -0,0 +1,43 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_DEBUG_H
+#define __REALTEK_RTSX_DEBUG_H
+
+#include <linux/kernel.h>
+
+#define RTSX_STOR "rts5208: "
+
+#ifdef CONFIG_RTS5208_DEBUG
+#define RTSX_DEBUGP(x...) pr_debug(RTSX_STOR x)
+#define RTSX_DEBUGPN(x...) pr_debug(x)
+#define RTSX_DEBUGPX(x...) printk(x)
+#define RTSX_DEBUG(x) x
+#else
+#define RTSX_DEBUGP(x...)
+#define RTSX_DEBUGPN(x...)
+#define RTSX_DEBUGPX(x...)
+#define RTSX_DEBUG(x)
+#endif
+
+#endif /* __REALTEK_RTSX_DEBUG_H */
diff --git a/drivers/staging/rts5208/general.c b/drivers/staging/rts5208/general.c
new file mode 100644
index 0000000..eada934
--- /dev/null
+++ b/drivers/staging/rts5208/general.c
@@ -0,0 +1,35 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include "general.h"
+
+int bit1cnt_long(u32 data)
+{
+ int i, cnt = 0;
+ for (i = 0; i < 32; i++) {
+ if (data & 0x01)
+ cnt++;
+ data >>= 1;
+ }
+ return cnt;
+}
+
diff --git a/drivers/staging/rts5208/general.h b/drivers/staging/rts5208/general.h
new file mode 100644
index 0000000..90a1f92
--- /dev/null
+++ b/drivers/staging/rts5208/general.h
@@ -0,0 +1,31 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __RTSX_GENERAL_H
+#define __RTSX_GENERAL_H
+
+#include "rtsx.h"
+
+int bit1cnt_long(u32 data);
+
+#endif /* __RTSX_GENERAL_H */
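[Editorial note] bit1cnt_long() declared above counts set bits by shifting through all 32 positions. For comparison only, the kernel already provides a population-count helper; the sketch below is equivalent but is not part of the new driver:

#include <linux/bitops.h>
#include <linux/types.h>

/* Equivalent to bit1cnt_long(): count the 1-bits in a 32-bit word
 * with the kernel's hweight32() helper. Shown for comparison only.
 */
static int demo_bit1cnt(u32 data)
{
	return hweight32(data);
}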
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
new file mode 100644
index 0000000..edf979f
--- /dev/null
+++ b/drivers/staging/rts5208/ms.c
@@ -0,0 +1,4208 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include "rtsx.h"
+#include "rtsx_transport.h"
+#include "rtsx_scsi.h"
+#include "rtsx_card.h"
+#include "ms.h"
+
+static inline void ms_set_err_code(struct rtsx_chip *chip, u8 err_code)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ ms_card->err_code = err_code;
+}
+
+static inline int ms_check_err_code(struct rtsx_chip *chip, u8 err_code)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ return (ms_card->err_code == err_code);
+}
+
+static int ms_parse_err_code(struct rtsx_chip *chip)
+{
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode,
+ u8 tpc, u8 cnt, u8 cfg)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 *ptr;
+
+ RTSX_DEBUGP("ms_transfer_tpc: tpc = 0x%x\n", tpc);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER,
+ 0xFF, MS_TRANSFER_START | trans_mode);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ rtsx_add_cmd(chip, READ_REG_CMD, MS_TRANS_CFG, 0, 0);
+
+ retval = rtsx_send_cmd(chip, MS_CARD, 5000);
+ if (retval < 0) {
+ rtsx_clear_ms_error(chip);
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+
+ if (!(tpc & 0x08)) { /* Read Packet */
+ if (*ptr & MS_CRC16_ERR) {
+ ms_set_err_code(chip, MS_CRC16_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+ } else { /* Write Packet */
+ if (CHK_MSPRO(ms_card) && !(*ptr & 0x80)) {
+ if (*ptr & (MS_INT_ERR | MS_INT_CMDNK)) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+ }
+ }
+
+ if (*ptr & MS_RDY_TIMEOUT) {
+ rtsx_clear_ms_error(chip);
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_transfer_data(struct rtsx_chip *chip, u8 trans_mode,
+ u8 tpc, u16 sec_cnt, u8 cfg, int mode_2k,
+ int use_sg, void *buf, int buf_len)
+{
+ int retval;
+ u8 val, err_code = 0;
+ enum dma_data_direction dir;
+
+ if (!buf || !buf_len)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (trans_mode == MS_TM_AUTO_READ) {
+ dir = DMA_FROM_DEVICE;
+ err_code = MS_FLASH_READ_ERROR;
+ } else if (trans_mode == MS_TM_AUTO_WRITE) {
+ dir = DMA_TO_DEVICE;
+ err_code = MS_FLASH_WRITE_ERROR;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ MS_SECTOR_CNT_H, 0xFF, (u8)(sec_cnt >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_L, 0xFF, (u8)sec_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+
+ if (mode_2k) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ MS_CFG, MS_2K_SECTOR_MODE, MS_2K_SECTOR_MODE);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_CFG, MS_2K_SECTOR_MODE, 0);
+ }
+
+ trans_dma_enable(dir, chip, sec_cnt * 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ MS_TRANSFER, 0xFF, MS_TRANSFER_START | trans_mode);
+ rtsx_add_cmd(chip, CHECK_REG_CMD,
+ MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data(chip, MS_CARD, buf, buf_len,
+ use_sg, dir, chip->mspro_timeout);
+ if (retval < 0) {
+ ms_set_err_code(chip, err_code);
+ if (retval == -ETIMEDOUT)
+ retval = STATUS_TIMEDOUT;
+ else
+ retval = STATUS_FAIL;
+
+ TRACE_RET(chip, retval);
+ }
+
+ RTSX_READ_REG(chip, MS_TRANS_CFG, &val);
+ if (val & (MS_INT_CMDNK | MS_INT_ERR | MS_CRC16_ERR | MS_RDY_TIMEOUT))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
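+/*
+ * Copy cnt bytes into the ping-pong buffer and send them with the given TPC
+ * in write-bytes mode; on failure MS_TRANS_CFG is read back and decoded
+ * into an ms_card error code.
+ */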
+static int ms_write_bytes(struct rtsx_chip *chip,
+ u8 tpc, u8 cnt, u8 cfg, u8 *data, int data_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+
+ if (!data || (data_len < cnt))
+ TRACE_RET(chip, STATUS_ERROR);
+
+ rtsx_init_cmd(chip);
+
+ for (i = 0; i < cnt; i++) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ PPBUF_BASE2 + i, 0xFF, data[i]);
+ }
+ if (cnt % 2)
+ rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF, 0xFF);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ MS_TRANSFER, 0xFF, MS_TRANSFER_START | MS_TM_WRITE_BYTES);
+ rtsx_add_cmd(chip, CHECK_REG_CMD,
+ MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, MS_CARD, 5000);
+ if (retval < 0) {
+ u8 val = 0;
+
+ rtsx_read_register(chip, MS_TRANS_CFG, &val);
+ RTSX_DEBUGP("MS_TRANS_CFG: 0x%02x\n", val);
+
+ rtsx_clear_ms_error(chip);
+
+ if (!(tpc & 0x08)) {
+ if (val & MS_CRC16_ERR) {
+ ms_set_err_code(chip, MS_CRC16_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+ } else {
+ if (CHK_MSPRO(ms_card) && !(val & 0x80)) {
+ if (val & (MS_INT_ERR | MS_INT_CMDNK)) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip,
+ ms_parse_err_code(chip));
+ }
+ }
+ }
+
+ if (val & MS_RDY_TIMEOUT) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ return STATUS_SUCCESS;
+}
+
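+/*
+ * Issue the given TPC in read-bytes mode and copy data_len bytes from the
+ * ping-pong buffer into the caller's buffer; errors are decoded from
+ * MS_TRANS_CFG.
+ */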
+static int ms_read_bytes(struct rtsx_chip *chip,
+ u8 tpc, u8 cnt, u8 cfg, u8 *data, int data_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 *ptr;
+
+ if (!data)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_READ_BYTES);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ for (i = 0; i < data_len - 1; i++)
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0);
+
+ if (data_len % 2)
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + data_len, 0, 0);
+ else
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + data_len - 1,
+ 0, 0);
+
+ retval = rtsx_send_cmd(chip, MS_CARD, 5000);
+ if (retval < 0) {
+ u8 val = 0;
+
+ rtsx_read_register(chip, MS_TRANS_CFG, &val);
+ rtsx_clear_ms_error(chip);
+
+ if (!(tpc & 0x08)) {
+ if (val & MS_CRC16_ERR) {
+ ms_set_err_code(chip, MS_CRC16_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+ } else {
+ if (CHK_MSPRO(ms_card) && !(val & 0x80)) {
+ if (val & (MS_INT_ERR | MS_INT_CMDNK)) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip,
+ ms_parse_err_code(chip));
+ }
+ }
+ }
+
+ if (val & MS_RDY_TIMEOUT) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+
+ for (i = 0; i < data_len; i++)
+ data[i] = ptr[i];
+
+ if ((tpc == PRO_READ_SHORT_DATA) && (data_len == 8)) {
+ RTSX_DEBUGP("Read format progress:\n");
+ RTSX_DUMP(ptr, cnt);
+ }
+
+ return STATUS_SUCCESS;
+}
+
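+/*
+ * Program the register window used by subsequent READ_REG/WRITE_REG TPCs
+ * (SET_RW_REG_ADRS), retrying up to MS_MAX_RETRY_COUNT times.
+ */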
+static int ms_set_rw_reg_addr(struct rtsx_chip *chip,
+ u8 read_start, u8 read_cnt, u8 write_start, u8 write_cnt)
+{
+ int retval, i;
+ u8 data[4];
+
+ data[0] = read_start;
+ data[1] = read_cnt;
+ data[2] = write_start;
+ data[3] = write_cnt;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, SET_RW_REG_ADRS, 4,
+ NO_WAIT_INT, data, 4);
+ if (retval == STATUS_SUCCESS)
+ return STATUS_SUCCESS;
+ rtsx_clear_ms_error(chip);
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int ms_send_cmd(struct rtsx_chip *chip, u8 cmd, u8 cfg)
+{
+ u8 data[2];
+
+ data[0] = cmd;
+ data[1] = 0;
+
+ return ms_write_bytes(chip, PRO_SET_CMD, 1, cfg, data, 1);
+}
+
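+/*
+ * Select the MS bus clock that matches the detected card type
+ * (HG 8-bit, Pro/4-bit or serial) and switch to it.
+ */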
+static int ms_set_init_para(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ if (CHK_HG8BIT(ms_card)) {
+ if (chip->asic_code)
+ ms_card->ms_clock = chip->asic_ms_hg_clk;
+ else
+ ms_card->ms_clock = chip->fpga_ms_hg_clk;
+
+ } else if (CHK_MSPRO(ms_card) || CHK_MS4BIT(ms_card)) {
+ if (chip->asic_code)
+ ms_card->ms_clock = chip->asic_ms_4bit_clk;
+ else
+ ms_card->ms_clock = chip->fpga_ms_4bit_clk;
+
+ } else {
+ if (chip->asic_code)
+ ms_card->ms_clock = chip->asic_ms_1bit_clk;
+ else
+ ms_card->ms_clock = chip->fpga_ms_1bit_clk;
+ }
+
+ retval = switch_clock(chip, ms_card->ms_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = select_card(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_switch_clock(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ retval = select_card(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = switch_clock(chip, ms_card->ms_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_pull_ctl_disable(struct rtsx_chip *chip)
+{
+ if (CHECK_PID(chip, 0x5208)) {
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF,
+ MS_D1_PD | MS_D2_PD | MS_CLK_PD | MS_D6_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF,
+ MS_D3_PD | MS_D0_PD | MS_BS_PD | XD_D4_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF,
+ MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF,
+ XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL6, 0xFF,
+ MS_D5_PD | MS_D4_PD);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF, 0x55);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF, 0x55);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF, 0x4B);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF, 0x69);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_pull_ctl_enable(struct rtsx_chip *chip)
+{
+ int retval;
+
+ rtsx_init_cmd(chip);
+
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
+ MS_D1_PD | MS_D2_PD | MS_CLK_NP | MS_D6_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
+ MS_D3_PD | MS_D0_PD | MS_BS_NP | XD_D4_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
+ MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
+ XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
+ MS_D5_PD | MS_D4_PD);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ CARD_PULL_CTL1, 0xFF, 0x55);
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ CARD_PULL_CTL2, 0xFF, 0x45);
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ CARD_PULL_CTL3, 0xFF, 0x4B);
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ CARD_PULL_CTL4, 0xFF, 0x29);
+ }
+ }
+
+ retval = rtsx_send_cmd(chip, MS_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
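+/*
+ * Power-cycle the MS socket, re-enable its clock and pull-up/pull-down
+ * settings, and program the 1-bit serial defaults before a reset sequence.
+ */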
+static int ms_prepare_reset(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 oc_mask = 0;
+
+ ms_card->ms_type = 0;
+ ms_card->check_ms_flow = 0;
+ ms_card->switch_8bit_fail = 0;
+ ms_card->delay_write.delay_write_flag = 0;
+
+ ms_card->pro_under_formatting = 0;
+
+ retval = ms_power_off_card3v3(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!chip->ft2_fast_mode)
+ wait_timeout(250);
+
+ retval = enable_card_clock(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (chip->asic_code) {
+ retval = ms_pull_ctl_enable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, FPGA_PULL_CTL,
+ FPGA_MS_PULL_CTL_BIT | 0x20, 0);
+ }
+
+ if (!chip->ft2_fast_mode) {
+ retval = card_power_on(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(150);
+
+#ifdef SUPPORT_OCP
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
+ oc_mask = MS_OC_NOW | MS_OC_EVER;
+ else
+ oc_mask = SD_OC_NOW | SD_OC_EVER;
+
+ if (chip->ocp_stat & oc_mask) {
+ RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n",
+ chip->ocp_stat);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ }
+
+ RTSX_WRITE_REG(chip, CARD_OE, MS_OUTPUT_EN, MS_OUTPUT_EN);
+
+ if (chip->asic_code) {
+ RTSX_WRITE_REG(chip, MS_CFG, 0xFF,
+ SAMPLE_TIME_RISING | PUSH_TIME_DEFAULT |
+ NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1);
+ } else {
+ RTSX_WRITE_REG(chip, MS_CFG, 0xFF,
+ SAMPLE_TIME_FALLING | PUSH_TIME_DEFAULT |
+ NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1);
+ }
+ RTSX_WRITE_REG(chip, MS_TRANS_CFG,
+ 0xFF, NO_WAIT_INT | NO_AUTO_READ_INT_REG);
+ RTSX_WRITE_REG(chip, CARD_STOP,
+ MS_STOP | MS_CLR_ERR, MS_STOP | MS_CLR_ERR);
+
+ retval = ms_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
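+/*
+ * Read the status registers of a freshly reset card to identify MS-Pro
+ * media: the type, category and class registers are checked (class also
+ * determines write protection), and the IF-mode register tells whether the
+ * card supports the 8-bit HG interface.
+ */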
+static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 val;
+
+ retval = ms_set_rw_reg_addr(chip, Pro_StatusReg, 6, SystemParm, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, READ_REG,
+ 6, NO_WAIT_INT);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_READ_REG(chip, PPBUF_BASE2 + 2, &val);
+ RTSX_DEBUGP("Type register: 0x%x\n", val);
+ if (val != 0x01) {
+ if (val != 0x02)
+ ms_card->check_ms_flow = 1;
+
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_READ_REG(chip, PPBUF_BASE2 + 4, &val);
+ RTSX_DEBUGP("Category register: 0x%x\n", val);
+ if (val != 0) {
+ ms_card->check_ms_flow = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_READ_REG(chip, PPBUF_BASE2 + 5, &val);
+ RTSX_DEBUGP("Class register: 0x%x\n", val);
+ if (val == 0) {
+ RTSX_READ_REG(chip, PPBUF_BASE2, &val);
+ if (val & WRT_PRTCT)
+ chip->card_wp |= MS_CARD;
+ else
+ chip->card_wp &= ~MS_CARD;
+
+ } else if ((val == 0x01) || (val == 0x02) || (val == 0x03)) {
+ chip->card_wp |= MS_CARD;
+ } else {
+ ms_card->check_ms_flow = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_card->ms_type |= TYPE_MSPRO;
+
+ RTSX_READ_REG(chip, PPBUF_BASE2 + 3, &val);
+ RTSX_DEBUGP("IF Mode register: 0x%x\n", val);
+ if (val == 0) {
+ ms_card->ms_type &= 0x0F;
+ } else if (val == 7) {
+ if (switch_8bit_bus)
+ ms_card->ms_type |= MS_HG;
+ else
+ ms_card->ms_type &= 0x0F;
+
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
+{
+ int retval, i, k;
+ u8 val;
+
+ /* Confirm CPU StartUp */
+ k = 0;
+ do {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_read_bytes(chip, GET_INT, 1,
+ NO_WAIT_INT, &val, 1);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (k > 100)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ k++;
+ wait_timeout(100);
+ } while (!(val & INT_REG_CED));
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_ERR) {
+ if (val & INT_REG_CMDNK)
+ chip->card_wp |= MS_CARD;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ /* -- end confirm CPU startup */
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_switch_parallel_bus(struct rtsx_chip *chip)
+{
+ int retval, i;
+ u8 data[2];
+
+ data[0] = PARALLEL_4BIT_IF;
+ data[1] = 0;
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, WRITE_REG, 1, NO_WAIT_INT,
+ data, 2);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_switch_8bit_bus(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 data[2];
+
+ data[0] = PARALLEL_8BIT_IF;
+ data[1] = 0;
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, WRITE_REG, 1,
+ NO_WAIT_INT, data, 2);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, MS_CFG, 0x98,
+ MS_BUS_WIDTH_8 | SAMPLE_TIME_FALLING);
+ ms_card->ms_type |= MS_8BIT;
+ retval = ms_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT,
+ 1, NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_pro_reset_flow(struct rtsx_chip *chip, int switch_8bit_bus)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+
+ for (i = 0; i < 3; i++) {
+ retval = ms_prepare_reset(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_identify_media_type(chip, switch_8bit_bus);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_confirm_cpu_startup(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_switch_parallel_bus(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* Switch MS-PRO into Parallel mode */
+ RTSX_WRITE_REG(chip, MS_CFG, 0x18, MS_BUS_WIDTH_4);
+ RTSX_WRITE_REG(chip, MS_CFG, PUSH_TIME_ODD, PUSH_TIME_ODD);
+
+ retval = ms_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* For an MS-Pro HG card, try to switch to the 8-bit bus */
+ if (CHK_MSHG(ms_card) && chip->support_ms_8bit && switch_8bit_bus) {
+ retval = ms_switch_8bit_bus(chip);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->switch_8bit_fail = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef XC_POWERCLASS
+static int msxc_change_power(struct rtsx_chip *chip, u8 mode)
+{
+ int retval;
+ u8 buf[6];
+
+ ms_cleanup_work(chip);
+
+ retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ buf[0] = 0;
+ buf[1] = mode;
+ buf[2] = 0;
+ buf[3] = 0;
+ buf[4] = 0;
+ buf[5] = 0;
+
+ retval = ms_write_bytes(chip, PRO_WRITE_REG, 6, NO_WAIT_INT, buf, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, XC_CHG_POWER, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_READ_REG(chip, MS_TRANS_CFG, buf);
+ if (buf[0] & (MS_INT_CMDNK | MS_INT_ERR))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+#endif
+
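+/*
+ * Read the MS-Pro attribute information area (up to 64 * 512 bytes) and
+ * parse the System Information entry (and, under SUPPORT_PCGL_1P18, the
+ * Model Name entry) to obtain capacity, class and write-protect status.
+ */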
+static int ms_read_attribute_info(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 val, *buf, class_code, device_type, sub_class, data[16];
+ u16 total_blk = 0, blk_size = 0;
+#ifdef SUPPORT_MSXC
+ u32 xc_total_blk = 0, xc_blk_size = 0;
+#endif
+ u32 sys_info_addr = 0, sys_info_size;
+#ifdef SUPPORT_PCGL_1P18
+ u32 model_name_addr = 0, model_name_size;
+ int found_sys_info = 0, found_model_name = 0;
+#endif
+
+ retval = ms_set_rw_reg_addr(chip, Pro_IntReg, 2, Pro_SystemParm, 7);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_MS8BIT(ms_card))
+ data[0] = PARALLEL_8BIT_IF;
+ else
+ data[0] = PARALLEL_4BIT_IF;
+
+ data[1] = 0;
+
+ data[2] = 0x40;
+ data[3] = 0;
+ data[4] = 0;
+ data[5] = 0;
+ data[6] = 0;
+ data[7] = 0;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, PRO_WRITE_REG, 7, NO_WAIT_INT,
+ data, 8);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ buf = kmalloc(64 * 512, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_send_cmd(chip, PRO_READ_ATRB, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ continue;
+
+ retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (!(val & MS_INT_BREQ)) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ retval = ms_transfer_data(chip, MS_TM_AUTO_READ,
+ PRO_READ_LONG_DATA, 0x40, WAIT_INT,
+ 0, 0, buf, 64 * 512);
+ if (retval == STATUS_SUCCESS)
+ break;
+ else
+ rtsx_clear_ms_error(chip);
+ }
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ i = 0;
+ do {
+ retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if ((val & MS_INT_CED) || !(val & MS_INT_BREQ))
+ break;
+
+ retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ,
+ PRO_READ_LONG_DATA, 0, WAIT_INT);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ i++;
+ } while (i < 1024);
+
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if ((buf[0] != 0xa5) && (buf[1] != 0xc3)) {
+ /* Signature code is wrong */
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if ((buf[4] < 1) || (buf[4] > 12)) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ for (i = 0; i < buf[4]; i++) {
+ int cur_addr_off = 16 + i * 12;
+
+#ifdef SUPPORT_MSXC
+ if ((buf[cur_addr_off + 8] == 0x10) ||
+ (buf[cur_addr_off + 8] == 0x13))
+#else
+ if (buf[cur_addr_off + 8] == 0x10)
+#endif
+ {
+ sys_info_addr = ((u32)buf[cur_addr_off + 0] << 24) |
+ ((u32)buf[cur_addr_off + 1] << 16) |
+ ((u32)buf[cur_addr_off + 2] << 8) |
+ buf[cur_addr_off + 3];
+ sys_info_size = ((u32)buf[cur_addr_off + 4] << 24) |
+ ((u32)buf[cur_addr_off + 5] << 16) |
+ ((u32)buf[cur_addr_off + 6] << 8) |
+ buf[cur_addr_off + 7];
+ RTSX_DEBUGP("sys_info_addr = 0x%x, sys_info_size = 0x%x\n",
+ sys_info_addr, sys_info_size);
+ if (sys_info_size != 96) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (sys_info_addr < 0x1A0) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if ((sys_info_size + sys_info_addr) > 0x8000) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+#ifdef SUPPORT_MSXC
+ if (buf[cur_addr_off + 8] == 0x13)
+ ms_card->ms_type |= MS_XC;
+#endif
+#ifdef SUPPORT_PCGL_1P18
+ found_sys_info = 1;
+#else
+ break;
+#endif
+ }
+#ifdef SUPPORT_PCGL_1P18
+ if (buf[cur_addr_off + 8] == 0x15) {
+ model_name_addr = ((u32)buf[cur_addr_off + 0] << 24) |
+ ((u32)buf[cur_addr_off + 1] << 16) |
+ ((u32)buf[cur_addr_off + 2] << 8) |
+ buf[cur_addr_off + 3];
+ model_name_size = ((u32)buf[cur_addr_off + 4] << 24) |
+ ((u32)buf[cur_addr_off + 5] << 16) |
+ ((u32)buf[cur_addr_off + 6] << 8) |
+ buf[cur_addr_off + 7];
+ RTSX_DEBUGP("model_name_addr = 0x%x, model_name_size = 0x%x\n",
+ model_name_addr, model_name_size);
+ if (model_name_size != 48) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (model_name_addr < 0x1A0) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if ((model_name_size + model_name_addr) > 0x8000) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ found_model_name = 1;
+ }
+
+ if (found_sys_info && found_model_name)
+ break;
+#endif
+ }
+
+ if (i == buf[4]) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ class_code = buf[sys_info_addr + 0];
+ device_type = buf[sys_info_addr + 56];
+ sub_class = buf[sys_info_addr + 46];
+#ifdef SUPPORT_MSXC
+ if (CHK_MSXC(ms_card)) {
+ xc_total_blk = ((u32)buf[sys_info_addr + 6] << 24) |
+ ((u32)buf[sys_info_addr + 7] << 16) |
+ ((u32)buf[sys_info_addr + 8] << 8) |
+ buf[sys_info_addr + 9];
+ xc_blk_size = ((u32)buf[sys_info_addr + 32] << 24) |
+ ((u32)buf[sys_info_addr + 33] << 16) |
+ ((u32)buf[sys_info_addr + 34] << 8) |
+ buf[sys_info_addr + 35];
+ RTSX_DEBUGP("xc_total_blk = 0x%x, xc_blk_size = 0x%x\n",
+ xc_total_blk, xc_blk_size);
+ } else {
+ total_blk = ((u16)buf[sys_info_addr + 6] << 8) |
+ buf[sys_info_addr + 7];
+ blk_size = ((u16)buf[sys_info_addr + 2] << 8) |
+ buf[sys_info_addr + 3];
+ RTSX_DEBUGP("total_blk = 0x%x, blk_size = 0x%x\n",
+ total_blk, blk_size);
+ }
+#else
+ total_blk = ((u16)buf[sys_info_addr + 6] << 8) | buf[sys_info_addr + 7];
+ blk_size = ((u16)buf[sys_info_addr + 2] << 8) | buf[sys_info_addr + 3];
+ RTSX_DEBUGP("total_blk = 0x%x, blk_size = 0x%x\n", total_blk, blk_size);
+#endif
+
+ RTSX_DEBUGP("class_code = 0x%x, device_type = 0x%x, sub_class = 0x%x\n",
+ class_code, device_type, sub_class);
+
+ memcpy(ms_card->raw_sys_info, buf + sys_info_addr, 96);
+#ifdef SUPPORT_PCGL_1P18
+ memcpy(ms_card->raw_model_name, buf + model_name_addr, 48);
+#endif
+
+ kfree(buf);
+
+#ifdef SUPPORT_MSXC
+ if (CHK_MSXC(ms_card)) {
+ if (class_code != 0x03)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ if (class_code != 0x02)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#else
+ if (class_code != 0x02)
+ TRACE_RET(chip, STATUS_FAIL);
+#endif
+
+ if (device_type != 0x00) {
+ if ((device_type == 0x01) || (device_type == 0x02) ||
+ (device_type == 0x03)) {
+ chip->card_wp |= MS_CARD;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (sub_class & 0xC0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("class_code: 0x%x, device_type: 0x%x, sub_class: 0x%x\n",
+ class_code, device_type, sub_class);
+
+#ifdef SUPPORT_MSXC
+ if (CHK_MSXC(ms_card)) {
+ chip->capacity[chip->card2lun[MS_CARD]] =
+ ms_card->capacity = xc_total_blk * xc_blk_size;
+ } else {
+ chip->capacity[chip->card2lun[MS_CARD]] =
+ ms_card->capacity = total_blk * blk_size;
+ }
+#else
+ ms_card->capacity = total_blk * blk_size;
+ chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity;
+#endif
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef SUPPORT_MAGIC_GATE
+static int mg_set_tpc_para_sub(struct rtsx_chip *chip,
+ int type, u8 mg_entry_num);
+#endif
+
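+/*
+ * Complete MS-Pro reset: run the reset flow (falling back to the 4-bit bus
+ * if the 8-bit switch fails), read the attribute area, optionally adjust
+ * the power class, and record the resulting bus width.
+ */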
+static int reset_ms_pro(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+#ifdef XC_POWERCLASS
+ u8 change_power_class;
+
+ if (chip->ms_power_class_en & 0x02)
+ change_power_class = 2;
+ else if (chip->ms_power_class_en & 0x01)
+ change_power_class = 1;
+ else
+ change_power_class = 0;
+#endif
+
+#ifdef XC_POWERCLASS
+Retry:
+#endif
+ retval = ms_pro_reset_flow(chip, 1);
+ if (retval != STATUS_SUCCESS) {
+ if (ms_card->switch_8bit_fail) {
+ retval = ms_pro_reset_flow(chip, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval = ms_read_attribute_info(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+#ifdef XC_POWERCLASS
+ if (CHK_HG8BIT(ms_card))
+ change_power_class = 0;
+
+ if (change_power_class && CHK_MSXC(ms_card)) {
+ u8 power_class_en = chip->ms_power_class_en;
+
+ RTSX_DEBUGP("power_class_en = 0x%x\n", power_class_en);
+ RTSX_DEBUGP("change_power_class = %d\n", change_power_class);
+
+ if (change_power_class)
+ power_class_en &= (1 << (change_power_class - 1));
+ else
+ power_class_en = 0;
+
+ if (power_class_en) {
+ u8 power_class_mode =
+ (ms_card->raw_sys_info[46] & 0x18) >> 3;
+ RTSX_DEBUGP("power_class_mode = 0x%x",
+ power_class_mode);
+ if (change_power_class > power_class_mode)
+ change_power_class = power_class_mode;
+ if (change_power_class) {
+ retval = msxc_change_power(chip,
+ change_power_class);
+ if (retval != STATUS_SUCCESS) {
+ change_power_class--;
+ goto Retry;
+ }
+ }
+ }
+ }
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+ retval = mg_set_tpc_para_sub(chip, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+#endif
+
+ if (CHK_HG8BIT(ms_card))
+ chip->card_bus_width[chip->card2lun[MS_CARD]] = 8;
+ else
+ chip->card_bus_width[chip->card2lun[MS_CARD]] = 4;
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_read_status_reg(struct rtsx_chip *chip)
+{
+ int retval;
+ u8 val[2];
+
+ retval = ms_set_rw_reg_addr(chip, StatusReg0, 2, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_read_bytes(chip, READ_REG, 2, NO_WAIT_INT, val, 2);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val[1] & (STS_UCDT | STS_UCEX | STS_UCFG)) {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_read_extra_data(struct rtsx_chip *chip,
+ u16 block_addr, u8 page_num, u8 *buf, int buf_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 val, data[10];
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ data[1] = 0;
+ data[2] = (u8)(block_addr >> 8);
+ data[3] = (u8)block_addr;
+ data[4] = 0x40;
+ data[5] = page_num;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT,
+ data, 6);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
+ MS_EXTRA_SIZE, SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval = ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE, NO_WAIT_INT,
+ data, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (buf && buf_len) {
+ if (buf_len > MS_EXTRA_SIZE)
+ buf_len = MS_EXTRA_SIZE;
+ memcpy(buf, data, buf_len);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_write_extra_data(struct rtsx_chip *chip,
+ u16 block_addr, u8 page_num, u8 *buf, int buf_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 val, data[16];
+
+ if (!buf || (buf_len < MS_EXTRA_SIZE))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 6 + MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(block_addr >> 8);
+ data[3] = (u8)block_addr;
+ data[4] = 0x40;
+ data[5] = page_num;
+
+ for (i = 6; i < MS_EXTRA_SIZE + 6; i++)
+ data[i] = buf[i - 6];
+
+ retval = ms_write_bytes(chip, WRITE_REG, (6 + MS_EXTRA_SIZE),
+ NO_WAIT_INT, data, 16);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 val, data[6];
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(block_addr >> 8);
+ data[3] = (u8)block_addr;
+ data[4] = 0x20;
+ data[5] = page_num;
+
+ retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT, data, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS)
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+
+ } else {
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_BREQ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ, READ_PAGE_DATA,
+ 0, NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (ms_check_err_code(chip, MS_FLASH_WRITE_ERROR))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 val, data[8], extra[MS_EXTRA_SIZE];
+
+ retval = ms_read_extra_data(chip, phy_blk, 0, extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 7);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(phy_blk >> 8);
+ data[3] = (u8)phy_blk;
+ data[4] = 0x80;
+ data[5] = 0;
+ data[6] = extra[0] & 0x7F;
+ data[7] = 0xFF;
+
+ retval = ms_write_bytes(chip, WRITE_REG, 7, NO_WAIT_INT, data, 7);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i = 0;
+ u8 val, data[6];
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(phy_blk >> 8);
+ data[3] = (u8)phy_blk;
+ data[4] = 0;
+ data[5] = 0;
+
+ retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT, data, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ERASE_RTY:
+ retval = ms_send_cmd(chip, BLOCK_ERASE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ if (i < 3) {
+ i++;
+ goto ERASE_RTY;
+ }
+
+ ms_set_err_code(chip, MS_CMD_NK);
+ ms_set_bad_block(chip, phy_blk);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static void ms_set_page_status(u16 log_blk, u8 type, u8 *extra, int extra_len)
+{
+ if (!extra || (extra_len < MS_EXTRA_SIZE))
+ return;
+
+ memset(extra, 0xFF, MS_EXTRA_SIZE);
+
+ if (type == setPS_NG) {
+ /* Set page status to NG (1); block status stays OK (1) */
+ extra[0] = 0xB8;
+ } else {
+ /* Set page status to Data Error (0); block status stays OK (1) */
+ extra[0] = 0x98;
+ }
+
+ extra[2] = (u8)(log_blk >> 8);
+ extra[3] = (u8)log_blk;
+}
+
+static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk,
+ u8 start_page, u8 end_page)
+{
+ int retval;
+ u8 extra[MS_EXTRA_SIZE], i;
+
+ memset(extra, 0xff, MS_EXTRA_SIZE);
+
+ extra[0] = 0xf8; /* Block, page OK, data erased */
+ extra[1] = 0xff;
+ extra[2] = (u8)(log_blk >> 8);
+ extra[3] = (u8)log_blk;
+
+ for (i = start_page; i < end_page; i++) {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_write_extra_data(chip, phy_blk, i,
+ extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
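+/*
+ * Copy pages start_page..end_page-1 from old_blk to new_blk, rewriting the
+ * extra data with the logical block number and flagging pages with
+ * uncorrectable read errors in both the source and destination blocks.
+ */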
+static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
+ u16 log_blk, u8 start_page, u8 end_page)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, rty_cnt, uncorrect_flag = 0;
+ u8 extra[MS_EXTRA_SIZE], val, i, j, data[16];
+
+ RTSX_DEBUGP("Copy page from 0x%x to 0x%x, logical block is 0x%x\n",
+ old_blk, new_blk, log_blk);
+ RTSX_DEBUGP("start_page = %d, end_page = %d\n", start_page, end_page);
+
+ retval = ms_read_extra_data(chip, new_blk, 0, extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_READ_REG(chip, PPBUF_BASE2, &val);
+
+ if (val & BUF_FULL) {
+ retval = ms_send_cmd(chip, CLEAR_BUF, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!(val & INT_REG_CED)) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ for (i = start_page; i < end_page; i++) {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_read_extra_data(chip, old_blk, i, extra, MS_EXTRA_SIZE);
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
+ MS_EXTRA_SIZE, SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(old_blk >> 8);
+ data[3] = (u8)old_blk;
+ data[4] = 0x20;
+ data[5] = i;
+
+ retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT,
+ data, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS) {
+ uncorrect_flag = 1;
+ RTSX_DEBUGP("Uncorrectable error\n");
+ } else {
+ uncorrect_flag = 0;
+ }
+
+ retval = ms_transfer_tpc(chip,
+ MS_TM_NORMAL_READ,
+ READ_PAGE_DATA,
+ 0, NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (uncorrect_flag) {
+ ms_set_page_status(log_blk, setPS_NG,
+ extra, MS_EXTRA_SIZE);
+ if (i == 0)
+ extra[0] &= 0xEF;
+
+ ms_write_extra_data(chip, old_blk, i,
+ extra, MS_EXTRA_SIZE);
+ RTSX_DEBUGP("page %d : extra[0] = 0x%x\n", i, extra[0]);
+ MS_SET_BAD_BLOCK_FLG(ms_card);
+
+ ms_set_page_status(log_blk, setPS_Error,
+ extra, MS_EXTRA_SIZE);
+ ms_write_extra_data(chip, new_blk, i,
+ extra, MS_EXTRA_SIZE);
+ continue;
+ }
+
+ for (rty_cnt = 0; rty_cnt < MS_MAX_RETRY_COUNT;
+ rty_cnt++) {
+ retval = ms_transfer_tpc(
+ chip,
+ MS_TM_NORMAL_WRITE,
+ WRITE_PAGE_DATA,
+ 0, NO_WAIT_INT);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (rty_cnt == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_BREQ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
+ MS_EXTRA_SIZE, SystemParm, (6+MS_EXTRA_SIZE));
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(new_blk >> 8);
+ data[3] = (u8)new_blk;
+ data[4] = 0x20;
+ data[5] = i;
+
+ if ((extra[0] & 0x60) != 0x60)
+ data[6] = extra[0];
+ else
+ data[6] = 0xF8;
+
+ data[6 + 1] = 0xFF;
+ data[6 + 2] = (u8)(log_blk >> 8);
+ data[6 + 3] = (u8)log_blk;
+
+ for (j = 4; j <= MS_EXTRA_SIZE; j++)
+ data[6 + j] = 0xFF;
+
+ retval = ms_write_bytes(chip, WRITE_REG, (6 + MS_EXTRA_SIZE),
+ NO_WAIT_INT, data, 16);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (i == 0) {
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
+ MS_EXTRA_SIZE, SystemParm, 7);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(old_blk >> 8);
+ data[3] = (u8)old_blk;
+ data[4] = 0x80;
+ data[5] = 0;
+ data[6] = 0xEF;
+ data[7] = 0xFF;
+
+ retval = ms_write_bytes(chip, WRITE_REG, 7,
+ NO_WAIT_INT, data, 8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1,
+ NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip,
+ MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
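+/*
+ * Reset flow for legacy (non-Pro) Memory Stick: search for the boot block,
+ * parse its header (block size, block count, write protect), copy the
+ * system information and, if supported, switch the card to 4-bit parallel
+ * mode.
+ */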
+static int reset_ms(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u16 i, reg_addr, block_size;
+ u8 val, extra[MS_EXTRA_SIZE], j, *ptr;
+#ifndef SUPPORT_MAGIC_GATE
+ u16 eblock_cnt;
+#endif
+
+ retval = ms_prepare_reset(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_card->ms_type |= TYPE_MS;
+
+ retval = ms_send_cmd(chip, MS_RESET, NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_READ_REG(chip, PPBUF_BASE2, &val);
+ if (val & WRT_PRTCT)
+ chip->card_wp |= MS_CARD;
+ else
+ chip->card_wp &= ~MS_CARD;
+
+ i = 0;
+
+RE_SEARCH:
+ /* Search Boot Block */
+ while (i < (MAX_DEFECTIVE_BLOCK + 2)) {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_extra_data(chip, i, 0, extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS) {
+ i++;
+ continue;
+ }
+
+ if (extra[0] & BLOCK_OK) {
+ if (!(extra[1] & NOT_BOOT_BLOCK)) {
+ ms_card->boot_block = i;
+ break;
+ }
+ }
+ i++;
+ }
+
+ if (i == (MAX_DEFECTIVE_BLOCK + 2)) {
+ RTSX_DEBUGP("No boot block found!");
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ for (j = 0; j < 3; j++) {
+ retval = ms_read_page(chip, ms_card->boot_block, j);
+ if (retval != STATUS_SUCCESS) {
+ if (ms_check_err_code(chip, MS_FLASH_WRITE_ERROR)) {
+ i = ms_card->boot_block + 1;
+ ms_set_err_code(chip, MS_NO_ERROR);
+ goto RE_SEARCH;
+ }
+ }
+ }
+
+ retval = ms_read_page(chip, ms_card->boot_block, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* Read the 96-byte MS system information block into raw_sys_info */
+ rtsx_init_cmd(chip);
+
+ for (i = 0; i < 96; i++)
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 0x1A0 + i, 0, 0);
+
+ retval = rtsx_send_cmd(chip, MS_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ptr = rtsx_get_cmd_data(chip);
+ memcpy(ms_card->raw_sys_info, ptr, 96);
+
+ /* Read header IDs, disabled blocks, block/page size and device info */
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, READ_REG_CMD, HEADER_ID0, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, HEADER_ID1, 0, 0);
+
+ for (reg_addr = DISABLED_BLOCK0; reg_addr <= DISABLED_BLOCK3;
+ reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
+
+ for (reg_addr = BLOCK_SIZE_0; reg_addr <= PAGE_SIZE_1; reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
+
+ rtsx_add_cmd(chip, READ_REG_CMD, MS_Device_Type, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, MS_4bit_Support, 0, 0);
+
+ retval = rtsx_send_cmd(chip, MS_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ptr = rtsx_get_cmd_data(chip);
+
+ RTSX_DEBUGP("Boot block data:\n");
+ RTSX_DUMP(ptr, 16);
+
+ /* Block ID error
+ * HEADER_ID0, HEADER_ID1
+ */
+ if (ptr[0] != 0x00 || ptr[1] != 0x01) {
+ i = ms_card->boot_block + 1;
+ goto RE_SEARCH;
+ }
+
+ /* Page size error
+ * PAGE_SIZE_0, PAGE_SIZE_1
+ */
+ if (ptr[12] != 0x02 || ptr[13] != 0x00) {
+ i = ms_card->boot_block + 1;
+ goto RE_SEARCH;
+ }
+
+ if ((ptr[14] == 1) || (ptr[14] == 3))
+ chip->card_wp |= MS_CARD;
+
+ /* BLOCK_SIZE_0, BLOCK_SIZE_1 */
+ block_size = ((u16)ptr[6] << 8) | ptr[7];
+ if (block_size == 0x0010) {
+ /* Block size 16KB */
+ ms_card->block_shift = 5;
+ ms_card->page_off = 0x1F;
+ } else if (block_size == 0x0008) {
+ /* Block size 8KB */
+ ms_card->block_shift = 4;
+ ms_card->page_off = 0x0F;
+ }
+
+ /* BLOCK_COUNT_0, BLOCK_COUNT_1 */
+ ms_card->total_block = ((u16)ptr[8] << 8) | ptr[9];
+
+#ifdef SUPPORT_MAGIC_GATE
+ j = ptr[10];
+
+ if (ms_card->block_shift == 4) { /* 4MB or 8MB */
+ if (j < 2) { /* Effective block for 4MB: 0x1F0 */
+ ms_card->capacity = 0x1EE0;
+ } else { /* Effective block for 8MB: 0x3E0 */
+ ms_card->capacity = 0x3DE0;
+ }
+ } else { /* 16MB, 32MB, 64MB or 128MB */
+ if (j < 5) { /* Effective block for 16MB: 0x3E0 */
+ ms_card->capacity = 0x7BC0;
+ } else if (j < 0xA) { /* Effective block for 32MB: 0x7C0 */
+ ms_card->capacity = 0xF7C0;
+ } else if (j < 0x11) { /* Effective block for 64MB: 0xF80 */
+ ms_card->capacity = 0x1EF80;
+ } else { /* Effective block for 128MB: 0x1F00 */
+ ms_card->capacity = 0x3DF00;
+ }
+ }
+#else
+ /* EBLOCK_COUNT_0, EBLOCK_COUNT_1 */
+ eblock_cnt = ((u16)ptr[10] << 8) | ptr[11];
+
+ ms_card->capacity = ((u32)eblock_cnt - 2) << ms_card->block_shift;
+#endif
+
+ chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity;
+
+ /* Switch I/F Mode */
+ if (ptr[15]) {
+ retval = ms_set_rw_reg_addr(chip, 0, 0, SystemParm, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, PPBUF_BASE2, 0xFF, 0x88);
+ RTSX_WRITE_REG(chip, PPBUF_BASE2 + 1, 0xFF, 0);
+
+ retval = ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG, 1,
+ NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, MS_CFG, 0x58 | MS_NO_CHECK_INT,
+ MS_BUS_WIDTH_4 | PUSH_TIME_ODD | MS_NO_CHECK_INT);
+
+ ms_card->ms_type |= MS_4BIT;
+ }
+
+ if (CHK_MS4BIT(ms_card))
+ chip->card_bus_width[chip->card2lun[MS_CARD]] = 4;
+ else
+ chip->card_bus_width[chip->card2lun[MS_CARD]] = 1;
+
+ return STATUS_SUCCESS;
+}
+
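+/*
+ * Allocate the per-segment zone table and fill each segment's defect list
+ * from the disabled-block data stored in boot-block page 1.
+ */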
+static int ms_init_l2p_tbl(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int size, i, seg_no, retval;
+ u16 defect_block, reg_addr;
+ u8 val1, val2;
+
+ ms_card->segment_cnt = ms_card->total_block >> 9;
+ RTSX_DEBUGP("ms_card->segment_cnt = %d\n", ms_card->segment_cnt);
+
+ size = ms_card->segment_cnt * sizeof(struct zone_entry);
+ ms_card->segment = vzalloc(size);
+ if (ms_card->segment == NULL)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_read_page(chip, ms_card->boot_block, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, INIT_FAIL);
+
+ reg_addr = PPBUF_BASE2;
+ for (i = 0; i < (((ms_card->total_block >> 9) * 10) + 1); i++) {
+ retval = rtsx_read_register(chip, reg_addr++, &val1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, INIT_FAIL);
+
+ retval = rtsx_read_register(chip, reg_addr++, &val2);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, INIT_FAIL);
+
+ defect_block = ((u16)val1 << 8) | val2;
+ if (defect_block == 0xFFFF)
+ break;
+
+ seg_no = defect_block / 512;
+ ms_card->segment[seg_no].defect_list[ms_card->segment[seg_no].disable_count++] = defect_block;
+ }
+
+ for (i = 0; i < ms_card->segment_cnt; i++) {
+ ms_card->segment[i].build_flag = 0;
+ ms_card->segment[i].l2p_table = NULL;
+ ms_card->segment[i].free_table = NULL;
+ ms_card->segment[i].get_index = 0;
+ ms_card->segment[i].set_index = 0;
+ ms_card->segment[i].unused_blk_cnt = 0;
+
+ RTSX_DEBUGP("defective block count of segment %d is %d\n",
+ i, ms_card->segment[i].disable_count);
+ }
+
+ return STATUS_SUCCESS;
+
+INIT_FAIL:
+ if (ms_card->segment) {
+ vfree(ms_card->segment);
+ ms_card->segment = NULL;
+ }
+
+ return STATUS_FAIL;
+}
+
+static u16 ms_get_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+
+ if (ms_card->segment == NULL)
+ return 0xFFFF;
+
+ segment = &(ms_card->segment[seg_no]);
+
+ if (segment->l2p_table)
+ return segment->l2p_table[log_off];
+
+ return 0xFFFF;
+}
+
+static void ms_set_l2p_tbl(struct rtsx_chip *chip,
+ int seg_no, u16 log_off, u16 phy_blk)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+
+ if (ms_card->segment == NULL)
+ return;
+
+ segment = &(ms_card->segment[seg_no]);
+ if (segment->l2p_table)
+ segment->l2p_table[log_off] = phy_blk;
+}
+
+static void ms_set_unused_block(struct rtsx_chip *chip, u16 phy_blk)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+ int seg_no;
+
+ seg_no = (int)phy_blk >> 9;
+ segment = &(ms_card->segment[seg_no]);
+
+ segment->free_table[segment->set_index++] = phy_blk;
+ if (segment->set_index >= MS_FREE_TABLE_CNT)
+ segment->set_index = 0;
+
+ segment->unused_blk_cnt++;
+}
+
+static u16 ms_get_unused_block(struct rtsx_chip *chip, int seg_no)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+ u16 phy_blk;
+
+ segment = &(ms_card->segment[seg_no]);
+
+ if (segment->unused_blk_cnt <= 0)
+ return 0xFFFF;
+
+ phy_blk = segment->free_table[segment->get_index];
+ segment->free_table[segment->get_index++] = 0xFFFF;
+ if (segment->get_index >= MS_FREE_TABLE_CNT)
+ segment->get_index = 0;
+
+ segment->unused_blk_cnt--;
+
+ return phy_blk;
+}
+
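+/*
+ * First logical block number of each 512-physical-block segment
+ * (segment 0 holds only 494 user blocks, later segments 496).
+ */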
+static const unsigned short ms_start_idx[] = {0, 494, 990, 1486, 1982, 2478,
+ 2974, 3470, 3966, 4462, 4958,
+ 5454, 5950, 6446, 6942, 7438,
+ 7934};
+
+static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk,
+ u16 log_off, u8 us1, u8 us2)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+ int seg_no;
+ u16 tmp_blk;
+
+ seg_no = (int)phy_blk >> 9;
+ segment = &(ms_card->segment[seg_no]);
+ tmp_blk = segment->l2p_table[log_off];
+
+ if (us1 != us2) {
+ if (us1 == 0) {
+ if (!(chip->card_wp & MS_CARD))
+ ms_erase_block(chip, tmp_blk);
+
+ ms_set_unused_block(chip, tmp_blk);
+ segment->l2p_table[log_off] = phy_blk;
+ } else {
+ if (!(chip->card_wp & MS_CARD))
+ ms_erase_block(chip, phy_blk);
+
+ ms_set_unused_block(chip, phy_blk);
+ }
+ } else {
+ if (phy_blk < tmp_blk) {
+ if (!(chip->card_wp & MS_CARD))
+ ms_erase_block(chip, phy_blk);
+
+ ms_set_unused_block(chip, phy_blk);
+ } else {
+ if (!(chip->card_wp & MS_CARD))
+ ms_erase_block(chip, tmp_blk);
+
+ ms_set_unused_block(chip, tmp_blk);
+ segment->l2p_table[log_off] = phy_blk;
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
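+/*
+ * Build the logical-to-physical table for one segment by scanning the extra
+ * data of every physical block: duplicate logical numbers are resolved by
+ * ms_arbitrate_l2p(), spare blocks go to the free table, and unmapped
+ * logical blocks are assigned freshly initialized physical blocks.
+ */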
+static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+ int retval, table_size, disable_cnt, defect_flag, i;
+ u16 start, end, phy_blk, log_blk, tmp_blk;
+ u8 extra[MS_EXTRA_SIZE], us1, us2;
+
+ RTSX_DEBUGP("ms_build_l2p_tbl: %d\n", seg_no);
+
+ if (ms_card->segment == NULL) {
+ retval = ms_init_l2p_tbl(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ if (ms_card->segment[seg_no].build_flag) {
+ RTSX_DEBUGP("l2p table of segment %d has been built\n", seg_no);
+ return STATUS_SUCCESS;
+ }
+
+ if (seg_no == 0)
+ table_size = 494;
+ else
+ table_size = 496;
+
+ segment = &(ms_card->segment[seg_no]);
+
+ if (segment->l2p_table == NULL) {
+ segment->l2p_table = vmalloc(table_size * 2);
+ if (segment->l2p_table == NULL)
+ TRACE_GOTO(chip, BUILD_FAIL);
+ }
+ memset((u8 *)(segment->l2p_table), 0xff, table_size * 2);
+
+ if (segment->free_table == NULL) {
+ segment->free_table = vmalloc(MS_FREE_TABLE_CNT * 2);
+ if (segment->free_table == NULL)
+ TRACE_GOTO(chip, BUILD_FAIL);
+ }
+ memset((u8 *)(segment->free_table), 0xff, MS_FREE_TABLE_CNT * 2);
+
+ start = (u16)seg_no << 9;
+ end = (u16)(seg_no + 1) << 9;
+
+ disable_cnt = segment->disable_count;
+
+ segment->get_index = segment->set_index = 0;
+ segment->unused_blk_cnt = 0;
+
+ for (phy_blk = start; phy_blk < end; phy_blk++) {
+ if (disable_cnt) {
+ defect_flag = 0;
+ for (i = 0; i < segment->disable_count; i++) {
+ if (phy_blk == segment->defect_list[i]) {
+ defect_flag = 1;
+ break;
+ }
+ }
+ if (defect_flag) {
+ disable_cnt--;
+ continue;
+ }
+ }
+
+ retval = ms_read_extra_data(chip, phy_blk, 0,
+ extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS) {
+ RTSX_DEBUGP("read extra data fail\n");
+ ms_set_bad_block(chip, phy_blk);
+ continue;
+ }
+
+ if (seg_no == ms_card->segment_cnt - 1) {
+ if (!(extra[1] & NOT_TRANSLATION_TABLE)) {
+ if (!(chip->card_wp & MS_CARD)) {
+ retval = ms_erase_block(chip, phy_blk);
+ if (retval != STATUS_SUCCESS)
+ continue;
+ extra[2] = 0xff;
+ extra[3] = 0xff;
+ }
+ }
+ }
+
+ if (!(extra[0] & BLOCK_OK))
+ continue;
+ if (!(extra[1] & NOT_BOOT_BLOCK))
+ continue;
+ if ((extra[0] & PAGE_OK) != PAGE_OK)
+ continue;
+
+ log_blk = ((u16)extra[2] << 8) | extra[3];
+
+ if (log_blk == 0xFFFF) {
+ if (!(chip->card_wp & MS_CARD)) {
+ retval = ms_erase_block(chip, phy_blk);
+ if (retval != STATUS_SUCCESS)
+ continue;
+ }
+ ms_set_unused_block(chip, phy_blk);
+ continue;
+ }
+
+ if ((log_blk < ms_start_idx[seg_no]) ||
+ (log_blk >= ms_start_idx[seg_no+1])) {
+ if (!(chip->card_wp & MS_CARD)) {
+ retval = ms_erase_block(chip, phy_blk);
+ if (retval != STATUS_SUCCESS)
+ continue;
+ }
+ ms_set_unused_block(chip, phy_blk);
+ continue;
+ }
+
+ if (segment->l2p_table[log_blk - ms_start_idx[seg_no]] == 0xFFFF) {
+ segment->l2p_table[log_blk - ms_start_idx[seg_no]] = phy_blk;
+ continue;
+ }
+
+ us1 = extra[0] & 0x10;
+ tmp_blk = segment->l2p_table[log_blk - ms_start_idx[seg_no]];
+ retval = ms_read_extra_data(chip, tmp_blk, 0,
+ extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ continue;
+ us2 = extra[0] & 0x10;
+
+ (void)ms_arbitrate_l2p(chip, phy_blk,
+ log_blk-ms_start_idx[seg_no], us1, us2);
+ continue;
+ }
+
+ segment->build_flag = 1;
+
+ RTSX_DEBUGP("unused block count: %d\n", segment->unused_blk_cnt);
+
+ /* Logical Address Confirmation Process */
+ if (seg_no == ms_card->segment_cnt - 1) {
+ if (segment->unused_blk_cnt < 2)
+ chip->card_wp |= MS_CARD;
+ } else {
+ if (segment->unused_blk_cnt < 1)
+ chip->card_wp |= MS_CARD;
+ }
+
+ if (chip->card_wp & MS_CARD)
+ return STATUS_SUCCESS;
+
+ for (log_blk = ms_start_idx[seg_no];
+ log_blk < ms_start_idx[seg_no + 1]; log_blk++) {
+ if (segment->l2p_table[log_blk-ms_start_idx[seg_no]] == 0xFFFF) {
+ phy_blk = ms_get_unused_block(chip, seg_no);
+ if (phy_blk == 0xFFFF) {
+ chip->card_wp |= MS_CARD;
+ return STATUS_SUCCESS;
+ }
+ retval = ms_init_page(chip, phy_blk, log_blk, 0, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, BUILD_FAIL);
+
+ segment->l2p_table[log_blk-ms_start_idx[seg_no]] = phy_blk;
+ if (seg_no == ms_card->segment_cnt - 1) {
+ if (segment->unused_blk_cnt < 2) {
+ chip->card_wp |= MS_CARD;
+ return STATUS_SUCCESS;
+ }
+ } else {
+ if (segment->unused_blk_cnt < 1) {
+ chip->card_wp |= MS_CARD;
+ return STATUS_SUCCESS;
+ }
+ }
+ }
+ }
+
+ /* Make the boot block the first normal block */
+ if (seg_no == 0) {
+ for (log_blk = 0; log_blk < 494; log_blk++) {
+ tmp_blk = segment->l2p_table[log_blk];
+ if (tmp_blk < ms_card->boot_block) {
+ RTSX_DEBUGP("Boot block is not the first normal block.\n");
+
+ if (chip->card_wp & MS_CARD)
+ break;
+
+ phy_blk = ms_get_unused_block(chip, 0);
+ retval = ms_copy_page(chip, tmp_blk, phy_blk,
+ log_blk, 0, ms_card->page_off + 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ segment->l2p_table[log_blk] = phy_blk;
+
+ retval = ms_set_bad_block(chip, tmp_blk);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ return STATUS_SUCCESS;
+
+BUILD_FAIL:
+ segment->build_flag = 0;
+ if (segment->l2p_table) {
+ vfree(segment->l2p_table);
+ segment->l2p_table = NULL;
+ }
+ if (segment->free_table) {
+ vfree(segment->free_table);
+ segment->free_table = NULL;
+ }
+
+ return STATUS_FAIL;
+}
+
+int reset_ms_card(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ memset(ms_card, 0, sizeof(struct ms_info));
+
+ retval = enable_card_clock(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = select_card(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_card->ms_type = 0;
+
+ retval = reset_ms_pro(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (ms_card->check_ms_flow) {
+ retval = reset_ms(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval = ms_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!CHK_MSPRO(ms_card)) {
+ /* Build the L2P table for the last segment to check whether an
+ * on-card translation-table block exists, and erase it if so.
+ */
+ retval = ms_build_l2p_tbl(chip, ms_card->total_block / 512 - 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_DEBUGP("ms_card->ms_type = 0x%x\n", ms_card->ms_type);
+
+ return STATUS_SUCCESS;
+}
+
+static int mspro_set_rw_cmd(struct rtsx_chip *chip,
+ u32 start_sec, u16 sec_cnt, u8 cmd)
+{
+ int retval, i;
+ u8 data[8];
+
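+	/* PRO_EX_SET_CMD argument: command code, 16-bit sector count and
+	 * 32-bit start sector (both big-endian), plus one reserved byte.
+	 */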
+ data[0] = cmd;
+ data[1] = (u8)(sec_cnt >> 8);
+ data[2] = (u8)sec_cnt;
+ data[3] = (u8)(start_sec >> 24);
+ data[4] = (u8)(start_sec >> 16);
+ data[5] = (u8)(start_sec >> 8);
+ data[6] = (u8)start_sec;
+ data[7] = 0;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, PRO_EX_SET_CMD, 7,
+ WAIT_INT, data, 8);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+void mspro_stop_seq_mode(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ if (ms_card->seq_mode) {
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ return;
+
+ ms_card->seq_mode = 0;
+ ms_card->total_sec_cnt = 0;
+ ms_send_cmd(chip, PRO_STOP, WAIT_INT);
+
+ rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+ }
+}
+
+static inline int ms_auto_tune_clock(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ if (chip->asic_code) {
+ if (ms_card->ms_clock > 30)
+ ms_card->ms_clock -= 20;
+ } else {
+ if (ms_card->ms_clock == CLK_80)
+ ms_card->ms_clock = CLK_60;
+ else if (ms_card->ms_clock == CLK_60)
+ ms_card->ms_clock = CLK_40;
+ }
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
+ struct rtsx_chip *chip, u32 start_sector,
+ u16 sector_cnt)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, mode_2k = 0;
+ u16 count;
+ u8 val, trans_mode, rw_tpc, rw_cmd;
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ ms_card->cleanup_counter = 0;
+
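+	/* MS Pro-HG cards can use the 2K (quad) data TPCs when both the start
+	 * sector and the sector count are 4-sector aligned; otherwise fall
+	 * back to the 512-byte long-data TPCs.
+	 */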
+ if (CHK_MSHG(ms_card)) {
+ if ((start_sector % 4) || (sector_cnt % 4)) {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ rw_tpc = PRO_READ_LONG_DATA;
+ rw_cmd = PRO_READ_DATA;
+ } else {
+ rw_tpc = PRO_WRITE_LONG_DATA;
+ rw_cmd = PRO_WRITE_DATA;
+ }
+ } else {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ rw_tpc = PRO_READ_QUAD_DATA;
+ rw_cmd = PRO_READ_2K_DATA;
+ } else {
+ rw_tpc = PRO_WRITE_QUAD_DATA;
+ rw_cmd = PRO_WRITE_2K_DATA;
+ }
+ mode_2k = 1;
+ }
+ } else {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ rw_tpc = PRO_READ_LONG_DATA;
+ rw_cmd = PRO_READ_DATA;
+ } else {
+ rw_tpc = PRO_WRITE_LONG_DATA;
+ rw_cmd = PRO_WRITE_DATA;
+ }
+ }
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ trans_mode = MS_TM_AUTO_READ;
+ else
+ trans_mode = MS_TM_AUTO_WRITE;
+
+ RTSX_READ_REG(chip, MS_TRANS_CFG, &val);
+
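+	/* Drop out of sequential mode if the direction changes, the access is
+	 * not contiguous, the 2K/512 access mode differs, the card deasserted
+	 * BREQ, or the accumulated count would exceed 0xFE00 sectors.
+	 */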
+ if (ms_card->seq_mode) {
+ if ((ms_card->pre_dir != srb->sc_data_direction)
+ || ((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) != start_sector)
+ || (mode_2k && (ms_card->seq_mode & MODE_512_SEQ))
+ || (!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ))
+ || !(val & MS_INT_BREQ)
+ || ((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) {
+ ms_card->seq_mode = 0;
+ ms_card->total_sec_cnt = 0;
+ if (val & MS_INT_BREQ) {
+ retval = ms_send_cmd(chip, PRO_STOP, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+ }
+ }
+ }
+
+ if (!ms_card->seq_mode) {
+ ms_card->total_sec_cnt = 0;
+ if (sector_cnt >= SEQ_START_CRITERIA) {
+ if ((ms_card->capacity - start_sector) > 0xFE00)
+ count = 0xFE00;
+ else
+ count = (u16)(ms_card->capacity - start_sector);
+
+ if (count > sector_cnt) {
+ if (mode_2k)
+ ms_card->seq_mode |= MODE_2K_SEQ;
+ else
+ ms_card->seq_mode |= MODE_512_SEQ;
+ }
+ } else {
+ count = sector_cnt;
+ }
+ retval = mspro_set_rw_cmd(chip, start_sector, count, rw_cmd);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->seq_mode = 0;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval = ms_transfer_data(chip, trans_mode, rw_tpc, sector_cnt,
+ WAIT_INT, mode_2k, scsi_sg_count(srb),
+ scsi_sglist(srb), scsi_bufflen(srb));
+ if (retval != STATUS_SUCCESS) {
+ ms_card->seq_mode = 0;
+ rtsx_read_register(chip, MS_TRANS_CFG, &val);
+ rtsx_clear_ms_error(chip);
+
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ chip->rw_need_retry = 0;
+ RTSX_DEBUGP("No card exist, exit mspro_rw_multi_sector\n");
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & MS_INT_BREQ)
+ ms_send_cmd(chip, PRO_STOP, WAIT_INT);
+
+ if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
+ RTSX_DEBUGP("MSPro CRC error, tune clock!\n");
+ chip->rw_need_retry = 1;
+ ms_auto_tune_clock(chip);
+ }
+
+ TRACE_RET(chip, retval);
+ }
+
+ if (ms_card->seq_mode) {
+ ms_card->pre_sec_addr = start_sector;
+ ms_card->pre_sec_cnt = sector_cnt;
+ ms_card->pre_dir = srb->sc_data_direction;
+ ms_card->total_sec_cnt += sector_cnt;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int mspro_read_format_progress(struct rtsx_chip *chip,
+ const int short_data_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u32 total_progress, cur_progress;
+ u8 cnt, tmp;
+ u8 data[8];
+
+ RTSX_DEBUGP("mspro_read_format_progress, short_data_len = %d\n",
+ short_data_len);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = rtsx_read_register(chip, MS_TRANS_CFG, &tmp);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (!(tmp & MS_INT_BREQ)) {
+		if ((tmp & (MS_INT_CED | MS_INT_BREQ |
+				MS_INT_CMDNK | MS_INT_ERR)) == MS_INT_CED) {
+ ms_card->format_status = FORMAT_SUCCESS;
+ return STATUS_SUCCESS;
+ }
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (short_data_len >= 256)
+ cnt = 0;
+ else
+ cnt = (u8)short_data_len;
+
+ retval = rtsx_write_register(chip, MS_CFG, MS_NO_CHECK_INT,
+ MS_NO_CHECK_INT);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, cnt, WAIT_INT,
+ data, 8);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ total_progress = (data[0] << 24) | (data[1] << 16) |
+ (data[2] << 8) | data[3];
+ cur_progress = (data[4] << 24) | (data[5] << 16) |
+ (data[6] << 8) | data[7];
+
+ RTSX_DEBUGP("total_progress = %d, cur_progress = %d\n",
+ total_progress, cur_progress);
+
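+	/* Scale the progress to a 0..65535 value; 64-bit math keeps the
+	 * multiplication from overflowing.
+	 */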
+ if (total_progress == 0) {
+ ms_card->progress = 0;
+ } else {
+ u64 ulltmp = (u64)cur_progress * (u64)65535;
+ do_div(ulltmp, total_progress);
+ ms_card->progress = (u16)ulltmp;
+ }
+ RTSX_DEBUGP("progress = %d\n", ms_card->progress);
+
+ for (i = 0; i < 5000; i++) {
+ retval = rtsx_read_register(chip, MS_TRANS_CFG, &tmp);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (tmp & (MS_INT_CED | MS_INT_CMDNK |
+ MS_INT_BREQ | MS_INT_ERR))
+ break;
+
+ wait_timeout(1);
+ }
+
+ retval = rtsx_write_register(chip, MS_CFG, MS_NO_CHECK_INT, 0);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (i == 5000) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (tmp & (MS_INT_CMDNK | MS_INT_ERR)) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (tmp & MS_INT_CED) {
+ ms_card->format_status = FORMAT_SUCCESS;
+ ms_card->pro_under_formatting = 0;
+ } else if (tmp & MS_INT_BREQ) {
+ ms_card->format_status = FORMAT_IN_PROGRESS;
+ } else {
+ ms_card->format_status = FORMAT_FAIL;
+ ms_card->pro_under_formatting = 0;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+void mspro_polling_format_status(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int i;
+
+ if (ms_card->pro_under_formatting &&
+ (rtsx_get_stat(chip) != RTSX_STAT_SS)) {
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ for (i = 0; i < 65535; i++) {
+ mspro_read_format_progress(chip, MS_SHORT_DATA_LEN);
+ if (ms_card->format_status != FORMAT_IN_PROGRESS)
+ break;
+ }
+ }
+}
+
+int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ int short_data_len, int quick_format)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 buf[8], tmp;
+ u16 para;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_set_rw_reg_addr(chip, 0x00, 0x00, Pro_TPCParm, 0x01);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ memset(buf, 0, 2);
+ switch (short_data_len) {
+ case 32:
+ buf[0] = 0;
+ break;
+ case 64:
+ buf[0] = 1;
+ break;
+ case 128:
+ buf[0] = 2;
+ break;
+ case 256:
+ default:
+ buf[0] = 3;
+ break;
+ }
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, PRO_WRITE_REG, 1,
+ NO_WAIT_INT, buf, 2);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (quick_format)
+ para = 0x0000;
+ else
+ para = 0x0001;
+
+ retval = mspro_set_rw_cmd(chip, 0, para, PRO_FORMAT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_READ_REG(chip, MS_TRANS_CFG, &tmp);
+
+ if (tmp & (MS_INT_CMDNK | MS_INT_ERR))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((tmp & (MS_INT_BREQ | MS_INT_CED)) == MS_INT_BREQ) {
+ ms_card->pro_under_formatting = 1;
+ ms_card->progress = 0;
+ ms_card->format_status = FORMAT_IN_PROGRESS;
+ return STATUS_SUCCESS;
+ }
+
+ if (tmp & MS_INT_CED) {
+ ms_card->pro_under_formatting = 0;
+ ms_card->progress = 0;
+ ms_card->format_status = FORMAT_SUCCESS;
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_NO_SENSE);
+ return STATUS_SUCCESS;
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
+ u16 log_blk, u8 start_page, u8 end_page,
+ u8 *buf, unsigned int *index,
+ unsigned int *offset)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 extra[MS_EXTRA_SIZE], page_addr, val, trans_cfg, data[6];
+ u8 *ptr;
+
+ retval = ms_read_extra_data(chip, phy_blk, start_page,
+ extra, MS_EXTRA_SIZE);
+ if (retval == STATUS_SUCCESS) {
+ if ((extra[1] & 0x30) != 0x30) {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
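+	/* WRITE_REG payload: system parameter (interface mode), physical
+	 * block address and start page for the following BLOCK_READ command.
+	 */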
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(phy_blk >> 8);
+ data[3] = (u8)phy_blk;
+ data[4] = 0;
+ data[5] = start_page;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT,
+ data, 6);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ptr = buf;
+
+ for (page_addr = start_page; page_addr < end_page; page_addr++) {
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (val & INT_REG_ERR) {
+ if (val & INT_REG_BREQ) {
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (!(chip->card_wp & MS_CARD)) {
+ reset_ms(chip);
+ ms_set_page_status(log_blk, setPS_NG, extra, MS_EXTRA_SIZE);
+ ms_write_extra_data(chip, phy_blk,
+ page_addr, extra, MS_EXTRA_SIZE);
+ }
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_BREQ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (page_addr == (end_page - 1)) {
+ if (!(val & INT_REG_CED)) {
+ retval = ms_send_cmd(chip, BLOCK_END, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
+ &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!(val & INT_REG_CED)) {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ trans_cfg = NO_WAIT_INT;
+ } else {
+ trans_cfg = WAIT_INT;
+ }
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, READ_PAGE_DATA);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG,
+ 0xFF, trans_cfg);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, RING_BUFFER);
+
+ trans_dma_enable(DMA_FROM_DEVICE, chip, 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_NORMAL_READ);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr,
+ 512, scsi_sg_count(chip->srb),
+ index, offset, DMA_FROM_DEVICE,
+ chip->ms_timeout);
+ if (retval < 0) {
+ if (retval == -ETIMEDOUT) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+ }
+
+ retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
+ if (retval != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+ }
+ if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
+ ms_set_err_code(chip, MS_CRC16_ERROR);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (scsi_sg_count(chip->srb) == 0)
+ ptr += 512;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
+ u16 new_blk, u16 log_blk, u8 start_page,
+ u8 end_page, u8 *buf, unsigned int *index,
+ unsigned int *offset)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 page_addr, val, data[16];
+ u8 *ptr;
+
+ if (!start_page) {
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 7);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(old_blk >> 8);
+ data[3] = (u8)old_blk;
+ data[4] = 0x80;
+ data[5] = 0;
+ data[6] = 0xEF;
+ data[7] = 0xFF;
+
+ retval = ms_write_bytes(chip, WRITE_REG, 7, NO_WAIT_INT,
+ data, 8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 1,
+ NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, (6 + MS_EXTRA_SIZE));
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
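+	/* WRITE_REG payload: system parameter, destination block address,
+	 * command parameter and start page, followed by the extra-data area
+	 * (overwrite flag, management flag and logical block address).
+	 */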
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(new_blk >> 8);
+ data[3] = (u8)new_blk;
+ if ((end_page - start_page) == 1)
+ data[4] = 0x20;
+ else
+ data[4] = 0;
+
+ data[5] = start_page;
+ data[6] = 0xF8;
+ data[7] = 0xFF;
+ data[8] = (u8)(log_blk >> 8);
+ data[9] = (u8)log_blk;
+
+ for (i = 0x0A; i < 0x10; i++)
+ data[i] = 0xFF;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, WRITE_REG, 6 + MS_EXTRA_SIZE,
+ NO_WAIT_INT, data, 16);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ptr = buf;
+ for (page_addr = start_page; page_addr < end_page; page_addr++) {
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_BREQ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ udelay(30);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC,
+ 0xFF, WRITE_PAGE_DATA);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG,
+ 0xFF, WAIT_INT);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, RING_BUFFER);
+
+ trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr,
+ 512, scsi_sg_count(chip->srb),
+ index, offset, DMA_TO_DEVICE,
+ chip->ms_timeout);
+ if (retval < 0) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ rtsx_clear_ms_error(chip);
+
+ if (retval == -ETIMEDOUT)
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((end_page - start_page) == 1) {
+ if (!(val & INT_REG_CED)) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ if (page_addr == (end_page - 1)) {
+ if (!(val & INT_REG_CED)) {
+ retval = ms_send_cmd(chip, BLOCK_END,
+ WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_bytes(chip, GET_INT, 1,
+ NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if ((page_addr == (end_page - 1)) ||
+ (page_addr == ms_card->page_off)) {
+ if (!(val & INT_REG_CED)) {
+ ms_set_err_code(chip,
+ MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ if (scsi_sg_count(chip->srb) == 0)
+ ptr += 512;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
+ u16 log_blk, u8 page_off)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, seg_no;
+
+ retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
+ page_off, ms_card->page_off + 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
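+	/* Each segment covers 512 physical blocks. */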
+ seg_no = old_blk >> 9;
+
+ if (MS_TST_BAD_BLOCK_FLG(ms_card)) {
+ MS_CLR_BAD_BLOCK_FLG(ms_card);
+ ms_set_bad_block(chip, old_blk);
+ } else {
+ retval = ms_erase_block(chip, old_blk);
+ if (retval == STATUS_SUCCESS)
+ ms_set_unused_block(chip, old_blk);
+ }
+
+ ms_set_l2p_tbl(chip, seg_no, log_blk - ms_start_idx[seg_no], new_blk);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_prepare_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
+ u16 log_blk, u8 start_page)
+{
+ int retval;
+
+ if (start_page) {
+ retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
+ 0, start_page);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef MS_DELAY_WRITE
+int ms_delay_write(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_delay_write_tag *delay_write = &(ms_card->delay_write);
+ int retval;
+
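+	/* Flush a pending delayed write: copy the remaining pages to the new
+	 * block and retire or recycle the old one before any other access.
+	 */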
+ if (delay_write->delay_write_flag) {
+ retval = ms_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ delay_write->delay_write_flag = 0;
+ retval = ms_finish_write(chip,
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ delay_write->logblock,
+ delay_write->pageoff);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+static inline void ms_rw_fail(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ else
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+}
+
+static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 start_sector, u16 sector_cnt)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval, seg_no;
+ unsigned int index = 0, offset = 0;
+ u16 old_blk = 0, new_blk = 0, log_blk, total_sec_cnt = sector_cnt;
+ u8 start_page, end_page = 0, page_cnt;
+ u8 *ptr;
+#ifdef MS_DELAY_WRITE
+ struct ms_delay_write_tag *delay_write = &(ms_card->delay_write);
+#endif
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ ms_card->cleanup_counter = 0;
+
+ ptr = (u8 *)scsi_sglist(srb);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS) {
+ ms_rw_fail(srb, chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ log_blk = (u16)(start_sector >> ms_card->block_shift);
+ start_page = (u8)(start_sector & ms_card->page_off);
+
+ for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1; seg_no++) {
+ if (log_blk < ms_start_idx[seg_no+1])
+ break;
+ }
+
+ if (ms_card->segment[seg_no].build_flag == 0) {
+ retval = ms_build_l2p_tbl(chip, seg_no);
+ if (retval != STATUS_SUCCESS) {
+ chip->card_fail |= MS_CARD;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+#ifdef MS_DELAY_WRITE
+ if (delay_write->delay_write_flag &&
+ (delay_write->logblock == log_blk) &&
+ (start_page > delay_write->pageoff)) {
+ delay_write->delay_write_flag = 0;
+ retval = ms_copy_page(chip,
+ delay_write->old_phyblock,
+ delay_write->new_phyblock, log_blk,
+ delay_write->pageoff, start_page);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ old_blk = delay_write->old_phyblock;
+ new_blk = delay_write->new_phyblock;
+ } else if (delay_write->delay_write_flag &&
+ (delay_write->logblock == log_blk) &&
+ (start_page == delay_write->pageoff)) {
+ delay_write->delay_write_flag = 0;
+ old_blk = delay_write->old_phyblock;
+ new_blk = delay_write->new_phyblock;
+ } else {
+ retval = ms_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ old_blk = ms_get_l2p_tbl(chip, seg_no,
+ log_blk - ms_start_idx[seg_no]);
+ new_blk = ms_get_unused_block(chip, seg_no);
+ if ((old_blk == 0xFFFF) || (new_blk == 0xFFFF)) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_prepare_write(chip, old_blk, new_blk,
+ log_blk, start_page);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#ifdef MS_DELAY_WRITE
+ }
+#endif
+ } else {
+#ifdef MS_DELAY_WRITE
+ retval = ms_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ old_blk = ms_get_l2p_tbl(chip, seg_no,
+ log_blk - ms_start_idx[seg_no]);
+ if (old_blk == 0xFFFF) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ RTSX_DEBUGP("seg_no = %d, old_blk = 0x%x, new_blk = 0x%x\n",
+ seg_no, old_blk, new_blk);
+
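+	/* Transfer block by block: each pass handles at most the pages left
+	 * in the current logical block, then moves on to the next one.
+	 */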
+ while (total_sec_cnt) {
+ if ((start_page + total_sec_cnt) > (ms_card->page_off + 1))
+ end_page = ms_card->page_off + 1;
+ else
+ end_page = start_page + (u8)total_sec_cnt;
+
+ page_cnt = end_page - start_page;
+
+ RTSX_DEBUGP("start_page = %d, end_page = %d, page_cnt = %d\n",
+ start_page, end_page, page_cnt);
+
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ retval = ms_read_multiple_pages(chip,
+ old_blk, log_blk, start_page, end_page,
+ ptr, &index, &offset);
+ } else {
+ retval = ms_write_multiple_pages(chip, old_blk,
+ new_blk, log_blk, start_page, end_page,
+ ptr, &index, &offset);
+ }
+
+ if (retval != STATUS_SUCCESS) {
+ toggle_gpio(chip, 1);
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ ms_rw_fail(srb, chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ if (end_page == (ms_card->page_off + 1)) {
+ retval = ms_erase_block(chip, old_blk);
+ if (retval == STATUS_SUCCESS)
+ ms_set_unused_block(chip, old_blk);
+
+ ms_set_l2p_tbl(chip, seg_no,
+ log_blk - ms_start_idx[seg_no],
+ new_blk);
+ }
+ }
+
+ total_sec_cnt -= page_cnt;
+ if (scsi_sg_count(srb) == 0)
+ ptr += page_cnt * 512;
+
+ if (total_sec_cnt == 0)
+ break;
+
+ log_blk++;
+
+ for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1;
+ seg_no++) {
+ if (log_blk < ms_start_idx[seg_no+1])
+ break;
+ }
+
+ if (ms_card->segment[seg_no].build_flag == 0) {
+ retval = ms_build_l2p_tbl(chip, seg_no);
+ if (retval != STATUS_SUCCESS) {
+ chip->card_fail |= MS_CARD;
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ old_blk = ms_get_l2p_tbl(chip, seg_no,
+ log_blk - ms_start_idx[seg_no]);
+ if (old_blk == 0xFFFF) {
+ ms_rw_fail(srb, chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ new_blk = ms_get_unused_block(chip, seg_no);
+ if (new_blk == 0xFFFF) {
+ ms_rw_fail(srb, chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ RTSX_DEBUGP("seg_no = %d, old_blk = 0x%x, new_blk = 0x%x\n",
+ seg_no, old_blk, new_blk);
+
+ start_page = 0;
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ if (end_page < (ms_card->page_off + 1)) {
+#ifdef MS_DELAY_WRITE
+ delay_write->delay_write_flag = 1;
+ delay_write->old_phyblock = old_blk;
+ delay_write->new_phyblock = new_blk;
+ delay_write->logblock = log_blk;
+ delay_write->pageoff = end_page;
+#else
+ retval = ms_finish_write(chip, old_blk, new_blk,
+ log_blk, end_page);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_rw_fail(srb, chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ }
+ }
+
+ scsi_set_resid(srb, 0);
+
+ return STATUS_SUCCESS;
+}
+
+int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 start_sector, u16 sector_cnt)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ if (CHK_MSPRO(ms_card))
+ retval = mspro_rw_multi_sector(srb, chip, start_sector,
+ sector_cnt);
+ else
+ retval = ms_rw_multi_sector(srb, chip, start_sector,
+ sector_cnt);
+
+ return retval;
+}
+
+void ms_free_l2p_tbl(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int i = 0;
+
+ if (ms_card->segment != NULL) {
+ for (i = 0; i < ms_card->segment_cnt; i++) {
+ if (ms_card->segment[i].l2p_table != NULL) {
+ vfree(ms_card->segment[i].l2p_table);
+ ms_card->segment[i].l2p_table = NULL;
+ }
+ if (ms_card->segment[i].free_table != NULL) {
+ vfree(ms_card->segment[i].free_table);
+ ms_card->segment[i].free_table = NULL;
+ }
+ }
+ vfree(ms_card->segment);
+ ms_card->segment = NULL;
+ }
+}
+
+#ifdef SUPPORT_MAGIC_GATE
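+/* MagicGate support: leaf-ID, key-exchange and ICV commands used by the
+ * Memory Stick copy-protection scheme.
+ */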
+
+#ifdef READ_BYTES_WAIT_INT
+static int ms_poll_int(struct rtsx_chip *chip)
+{
+ int retval;
+ u8 val;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANS_CFG, MS_INT_CED, MS_INT_CED);
+
+ retval = rtsx_send_cmd(chip, MS_CARD, 5000);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ val = *rtsx_get_cmd_data(chip);
+ if (val & MS_INT_ERR)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+#ifdef MS_SAMPLE_INT_ERR
+static int check_ms_err(struct rtsx_chip *chip)
+{
+ int retval;
+ u8 val;
+
+ retval = rtsx_read_register(chip, MS_TRANSFER, &val);
+ if (retval != STATUS_SUCCESS)
+ return 1;
+ if (val & MS_TRANSFER_ERR)
+ return 1;
+
+ retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
+ if (retval != STATUS_SUCCESS)
+ return 1;
+
+ if (val & (MS_INT_ERR | MS_INT_CMDNK))
+ return 1;
+
+ return 0;
+}
+#else
+static int check_ms_err(struct rtsx_chip *chip)
+{
+ int retval;
+ u8 val;
+
+ retval = rtsx_read_register(chip, MS_TRANSFER, &val);
+ if (retval != STATUS_SUCCESS)
+ return 1;
+ if (val & MS_TRANSFER_ERR)
+ return 1;
+
+ return 0;
+}
+#endif
+
+static int mg_send_ex_cmd(struct rtsx_chip *chip, u8 cmd, u8 entry_num)
+{
+ int retval, i;
+ u8 data[8];
+
+ data[0] = cmd;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+ data[4] = 0;
+ data[5] = 0;
+ data[6] = entry_num;
+ data[7] = 0;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, PRO_EX_SET_CMD, 7, WAIT_INT,
+ data, 8);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (check_ms_err(chip)) {
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type,
+ u8 mg_entry_num)
+{
+ int retval;
+ u8 buf[6];
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ if (type == 0)
+ retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_TPCParm, 1);
+ else
+ retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ buf[0] = 0;
+ buf[1] = 0;
+ if (type == 1) {
+ buf[2] = 0;
+ buf[3] = 0;
+ buf[4] = 0;
+ buf[5] = mg_entry_num;
+ }
+ retval = ms_write_bytes(chip, PRO_WRITE_REG, (type == 0) ? 1 : 6,
+ NO_WAIT_INT, buf, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ int i;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 buf1[32], buf2[12];
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ if (scsi_bufflen(srb) < 12) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = mg_send_ex_cmd(chip, MG_SET_LID, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ memset(buf1, 0, 32);
+ rtsx_stor_get_xfer_buf(buf2, min_t(int, 12, scsi_bufflen(srb)), srb);
+ for (i = 0; i < 8; i++)
+ buf1[8+i] = buf2[4+i];
+
+ retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT,
+ buf1, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (check_ms_err(chip)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval = STATUS_FAIL;
+ int bufflen;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 *buf = NULL;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ buf = kmalloc(1540, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ buf[0] = 0x04;
+ buf[1] = 0x1A;
+ buf[2] = 0x00;
+ buf[3] = 0x00;
+
+ retval = mg_send_ex_cmd(chip, MG_GET_LEKB, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_GOTO(chip, GetEKBFinish);
+ }
+
+ retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
+ 3, WAIT_INT, 0, 0, buf + 4, 1536);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ rtsx_clear_ms_error(chip);
+ TRACE_GOTO(chip, GetEKBFinish);
+ }
+ if (check_ms_err(chip)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ rtsx_clear_ms_error(chip);
+		retval = STATUS_FAIL;
+		TRACE_GOTO(chip, GetEKBFinish);
+ }
+
+ bufflen = min_t(int, 1052, scsi_bufflen(srb));
+ rtsx_stor_set_xfer_buf(buf, bufflen, srb);
+
+GetEKBFinish:
+ kfree(buf);
+ return retval;
+}
+
+int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int bufflen;
+ int i;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 buf[32];
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = mg_send_ex_cmd(chip, MG_GET_ID, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT,
+ buf, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (check_ms_err(chip)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ memcpy(ms_card->magic_gate_id, buf, 16);
+
+#ifdef READ_BYTES_WAIT_INT
+ retval = ms_poll_int(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+
+ retval = mg_send_ex_cmd(chip, MG_SET_RD, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ bufflen = min_t(int, 12, scsi_bufflen(srb));
+ rtsx_stor_get_xfer_buf(buf, bufflen, srb);
+
+ for (i = 0; i < 8; i++)
+ buf[i] = buf[4+i];
+
+ for (i = 0; i < 24; i++)
+ buf[8+i] = 0;
+
+ retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA,
+ 32, WAIT_INT, buf, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (check_ms_err(chip)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_card->mg_auth = 0;
+
+ return STATUS_SUCCESS;
+}
+
+int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int bufflen;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 buf1[32], buf2[36];
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = mg_send_ex_cmd(chip, MG_MAKE_RMS, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT,
+ buf1, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (check_ms_err(chip)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ buf2[0] = 0x00;
+ buf2[1] = 0x22;
+ buf2[2] = 0x00;
+ buf2[3] = 0x00;
+
+ memcpy(buf2 + 4, ms_card->magic_gate_id, 16);
+ memcpy(buf2 + 20, buf1, 16);
+
+ bufflen = min_t(int, 36, scsi_bufflen(srb));
+ rtsx_stor_set_xfer_buf(buf2, bufflen, srb);
+
+#ifdef READ_BYTES_WAIT_INT
+ retval = ms_poll_int(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+
+ return STATUS_SUCCESS;
+}
+
+int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int i;
+ int bufflen;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 buf[32];
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = mg_send_ex_cmd(chip, MG_MAKE_KSE, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ bufflen = min_t(int, 12, scsi_bufflen(srb));
+ rtsx_stor_get_xfer_buf(buf, bufflen, srb);
+
+ for (i = 0; i < 8; i++)
+ buf[i] = buf[4+i];
+
+ for (i = 0; i < 24; i++)
+ buf[8+i] = 0;
+
+ retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT,
+ buf, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (check_ms_err(chip)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_card->mg_auth = 1;
+
+ return STATUS_SUCCESS;
+}
+
+int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int bufflen;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 *buf = NULL;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ buf = kmalloc(1028, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ buf[0] = 0x04;
+ buf[1] = 0x02;
+ buf[2] = 0x00;
+ buf[3] = 0x00;
+
+ retval = mg_send_ex_cmd(chip, MG_GET_IBD, ms_card->mg_entry_num);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_GOTO(chip, GetICVFinish);
+ }
+
+ retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
+ 2, WAIT_INT, 0, 0, buf + 4, 1024);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ rtsx_clear_ms_error(chip);
+ TRACE_GOTO(chip, GetICVFinish);
+ }
+ if (check_ms_err(chip)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ rtsx_clear_ms_error(chip);
+		retval = STATUS_FAIL;
+		TRACE_GOTO(chip, GetICVFinish);
+ }
+
+ bufflen = min_t(int, 1028, scsi_bufflen(srb));
+ rtsx_stor_set_xfer_buf(buf, bufflen, srb);
+
+GetICVFinish:
+ kfree(buf);
+ return retval;
+}
+
+int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int bufflen;
+#ifdef MG_SET_ICV_SLOW
+ int i;
+#endif
+ unsigned int lun = SCSI_LUN(srb);
+ u8 *buf = NULL;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ buf = kmalloc(1028, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ bufflen = min_t(int, 1028, scsi_bufflen(srb));
+ rtsx_stor_get_xfer_buf(buf, bufflen, srb);
+
+ retval = mg_send_ex_cmd(chip, MG_SET_IBD, ms_card->mg_entry_num);
+ if (retval != STATUS_SUCCESS) {
+ if (ms_card->mg_auth == 0) {
+ if ((buf[5] & 0xC0) != 0)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_WRITE_ERR);
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
+ }
+ TRACE_GOTO(chip, SetICVFinish);
+ }
+
+#ifdef MG_SET_ICV_SLOW
+ for (i = 0; i < 2; i++) {
+ udelay(50);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC,
+ 0xFF, PRO_WRITE_LONG_DATA);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, WAIT_INT);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, RING_BUFFER);
+
+ trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data(chip, MS_CARD, buf + 4 + i*512,
+ 512, 0, DMA_TO_DEVICE, 3000);
+ if ((retval < 0) || check_ms_err(chip)) {
+ rtsx_clear_ms_error(chip);
+ if (ms_card->mg_auth == 0) {
+ if ((buf[5] & 0xC0) != 0)
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_WRITE_ERR);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_WRITE_ERR);
+ }
+ retval = STATUS_FAIL;
+ TRACE_GOTO(chip, SetICVFinish);
+ }
+ }
+#else
+ retval = ms_transfer_data(chip, MS_TM_AUTO_WRITE, PRO_WRITE_LONG_DATA,
+ 2, WAIT_INT, 0, 0, buf + 4, 1024);
+ if ((retval != STATUS_SUCCESS) || check_ms_err(chip)) {
+ rtsx_clear_ms_error(chip);
+ if (ms_card->mg_auth == 0) {
+ if ((buf[5] & 0xC0) != 0)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_WRITE_ERR);
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
+ }
+		retval = STATUS_FAIL;
+		TRACE_GOTO(chip, SetICVFinish);
+ }
+#endif
+
+SetICVFinish:
+ kfree(buf);
+ return retval;
+}
+
+#endif /* SUPPORT_MAGIC_GATE */
+
+void ms_cleanup_work(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ if (CHK_MSPRO(ms_card)) {
+ if (ms_card->seq_mode) {
+ RTSX_DEBUGP("MS Pro: stop transmission\n");
+ mspro_stop_seq_mode(chip);
+ ms_card->cleanup_counter = 0;
+ }
+ if (CHK_MSHG(ms_card)) {
+ rtsx_write_register(chip, MS_CFG,
+ MS_2K_SECTOR_MODE, 0x00);
+ }
+ }
+#ifdef MS_DELAY_WRITE
+	else if (ms_card->delay_write.delay_write_flag) {
+ RTSX_DEBUGP("MS: delay write\n");
+ ms_delay_write(chip);
+ ms_card->cleanup_counter = 0;
+ }
+#endif
+}
+
+int ms_power_off_card3v3(struct rtsx_chip *chip)
+{
+ int retval;
+
+ retval = disable_card_clock(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (chip->asic_code) {
+ retval = ms_pull_ctl_disable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, FPGA_PULL_CTL,
+ FPGA_MS_PULL_CTL_BIT | 0x20, FPGA_MS_PULL_CTL_BIT);
+ }
+ RTSX_WRITE_REG(chip, CARD_OE, MS_OUTPUT_EN, 0);
+ if (!chip->ft2_fast_mode) {
+ retval = card_power_off(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int release_ms_card(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ RTSX_DEBUGP("release_ms_card\n");
+
+#ifdef MS_DELAY_WRITE
+ ms_card->delay_write.delay_write_flag = 0;
+#endif
+ ms_card->pro_under_formatting = 0;
+
+ chip->card_ready &= ~MS_CARD;
+ chip->card_fail &= ~MS_CARD;
+ chip->card_wp &= ~MS_CARD;
+
+ ms_free_l2p_tbl(chip);
+
+ memset(ms_card->raw_sys_info, 0, 96);
+#ifdef SUPPORT_PCGL_1P18
+ memset(ms_card->raw_model_name, 0, 48);
+#endif
+
+ retval = ms_power_off_card3v3(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
diff --git a/drivers/staging/rts5208/ms.h b/drivers/staging/rts5208/ms.h
new file mode 100644
index 0000000..26c5b03
--- /dev/null
+++ b/drivers/staging/rts5208/ms.h
@@ -0,0 +1,227 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_MS_H
+#define __REALTEK_RTSX_MS_H
+
+#define MS_DELAY_WRITE
+
+#define MS_MAX_RETRY_COUNT 3
+
+#define MS_EXTRA_SIZE 0x9
+
+#define WRT_PRTCT 0x01
+
+/* Error Code */
+#define MS_NO_ERROR 0x00
+#define MS_CRC16_ERROR 0x80
+#define MS_TO_ERROR 0x40
+#define MS_NO_CARD 0x20
+#define MS_NO_MEMORY 0x10
+#define MS_CMD_NK 0x08
+#define MS_FLASH_READ_ERROR 0x04
+#define MS_FLASH_WRITE_ERROR 0x02
+#define MS_BREQ_ERROR 0x01
+#define MS_NOT_FOUND 0x03
+
+/* Transfer Protocol Command */
+#define READ_PAGE_DATA 0x02
+#define READ_REG 0x04
+#define GET_INT 0x07
+#define WRITE_PAGE_DATA 0x0D
+#define WRITE_REG 0x0B
+#define SET_RW_REG_ADRS 0x08
+#define SET_CMD 0x0E
+
+#define PRO_READ_LONG_DATA 0x02
+#define PRO_READ_SHORT_DATA 0x03
+#define PRO_READ_REG 0x04
+#define PRO_READ_QUAD_DATA 0x05
+#define PRO_GET_INT 0x07
+#define PRO_WRITE_LONG_DATA 0x0D
+#define PRO_WRITE_SHORT_DATA 0x0C
+#define PRO_WRITE_QUAD_DATA 0x0A
+#define PRO_WRITE_REG 0x0B
+#define PRO_SET_RW_REG_ADRS 0x08
+#define PRO_SET_CMD 0x0E
+#define PRO_EX_SET_CMD 0x09
+
+#ifdef SUPPORT_MAGIC_GATE
+
+#define MG_GET_ID 0x40
+#define MG_SET_LID 0x41
+#define MG_GET_LEKB 0x42
+#define MG_SET_RD 0x43
+#define MG_MAKE_RMS 0x44
+#define MG_MAKE_KSE 0x45
+#define MG_SET_IBD 0x46
+#define MG_GET_IBD 0x47
+
+#endif
+
+#ifdef XC_POWERCLASS
+#define XC_CHG_POWER 0x16
+#endif
+
+#define BLOCK_READ 0xAA
+#define BLOCK_WRITE 0x55
+#define BLOCK_END 0x33
+#define BLOCK_ERASE 0x99
+#define FLASH_STOP 0xCC
+
+#define SLEEP 0x5A
+#define CLEAR_BUF 0xC3
+#define MS_RESET 0x3C
+
+#define PRO_READ_DATA 0x20
+#define PRO_WRITE_DATA 0x21
+#define PRO_READ_ATRB 0x24
+#define PRO_STOP 0x25
+#define PRO_ERASE 0x26
+#define PRO_READ_2K_DATA 0x27
+#define PRO_WRITE_2K_DATA 0x28
+
+#define PRO_FORMAT 0x10
+#define PRO_SLEEP 0x11
+
+#define IntReg 0x01
+#define StatusReg0 0x02
+#define StatusReg1 0x03
+
+#define SystemParm 0x10
+#define BlockAdrs 0x11
+#define CMDParm 0x14
+#define PageAdrs 0x15
+
+#define OverwriteFlag 0x16
+#define ManagemenFlag 0x17
+#define LogicalAdrs 0x18
+#define ReserveArea 0x1A
+
+#define Pro_IntReg 0x01
+#define Pro_StatusReg 0x02
+#define Pro_TypeReg 0x04
+#define Pro_IFModeReg 0x05
+#define Pro_CatagoryReg 0x06
+#define Pro_ClassReg 0x07
+
+#define Pro_SystemParm 0x10
+#define Pro_DataCount1 0x11
+#define Pro_DataCount0 0x12
+#define Pro_DataAddr3 0x13
+#define Pro_DataAddr2 0x14
+#define Pro_DataAddr1 0x15
+#define Pro_DataAddr0 0x16
+
+#define Pro_TPCParm 0x17
+#define Pro_CMDParm 0x18
+
+#define INT_REG_CED 0x80
+#define INT_REG_ERR 0x40
+#define INT_REG_BREQ 0x20
+#define INT_REG_CMDNK 0x01
+
+#define BLOCK_BOOT 0xC0
+#define BLOCK_OK 0x80
+#define PAGE_OK 0x60
+#define DATA_COMPL 0x10
+
+#define NOT_BOOT_BLOCK 0x4
+#define NOT_TRANSLATION_TABLE 0x8
+
+#define HEADER_ID0 PPBUF_BASE2
+#define HEADER_ID1 (PPBUF_BASE2 + 1)
+#define DISABLED_BLOCK0 (PPBUF_BASE2 + 0x170 + 4)
+#define DISABLED_BLOCK1 (PPBUF_BASE2 + 0x170 + 5)
+#define DISABLED_BLOCK2 (PPBUF_BASE2 + 0x170 + 6)
+#define DISABLED_BLOCK3 (PPBUF_BASE2 + 0x170 + 7)
+#define BLOCK_SIZE_0 (PPBUF_BASE2 + 0x1a0 + 2)
+#define BLOCK_SIZE_1 (PPBUF_BASE2 + 0x1a0 + 3)
+#define BLOCK_COUNT_0 (PPBUF_BASE2 + 0x1a0 + 4)
+#define BLOCK_COUNT_1 (PPBUF_BASE2 + 0x1a0 + 5)
+#define EBLOCK_COUNT_0 (PPBUF_BASE2 + 0x1a0 + 6)
+#define EBLOCK_COUNT_1 (PPBUF_BASE2 + 0x1a0 + 7)
+#define PAGE_SIZE_0 (PPBUF_BASE2 + 0x1a0 + 8)
+#define PAGE_SIZE_1 (PPBUF_BASE2 + 0x1a0 + 9)
+
+#define MS_Device_Type (PPBUF_BASE2 + 0x1D8)
+
+#define MS_4bit_Support (PPBUF_BASE2 + 0x1D3)
+
+#define setPS_NG 1
+#define setPS_Error 0
+
+#define PARALLEL_8BIT_IF 0x40
+#define PARALLEL_4BIT_IF 0x00
+#define SERIAL_IF 0x80
+
+#define BUF_FULL 0x10
+#define BUF_EMPTY 0x20
+
+#define MEDIA_BUSY 0x80
+#define FLASH_BUSY 0x40
+#define DATA_ERROR 0x20
+#define STS_UCDT 0x10
+#define EXTRA_ERROR 0x08
+#define STS_UCEX 0x04
+#define FLAG_ERROR 0x02
+#define STS_UCFG 0x01
+
+#define MS_SHORT_DATA_LEN 32
+
+#define FORMAT_SUCCESS 0
+#define FORMAT_FAIL 1
+#define FORMAT_IN_PROGRESS 2
+
+#define MS_SET_BAD_BLOCK_FLG(ms_card) ((ms_card)->multi_flag |= 0x80)
+#define MS_CLR_BAD_BLOCK_FLG(ms_card) ((ms_card)->multi_flag &= 0x7F)
+#define MS_TST_BAD_BLOCK_FLG(ms_card) ((ms_card)->multi_flag & 0x80)
+
+void mspro_polling_format_status(struct rtsx_chip *chip);
+
+void mspro_stop_seq_mode(struct rtsx_chip *chip);
+int reset_ms_card(struct rtsx_chip *chip);
+int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 start_sector, u16 sector_cnt);
+int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ int short_data_len, int quick_format);
+void ms_free_l2p_tbl(struct rtsx_chip *chip);
+void ms_cleanup_work(struct rtsx_chip *chip);
+int ms_power_off_card3v3(struct rtsx_chip *chip);
+int release_ms_card(struct rtsx_chip *chip);
+#ifdef MS_DELAY_WRITE
+int ms_delay_write(struct rtsx_chip *chip);
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+#endif
+
+#endif /* __REALTEK_RTSX_MS_H */
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
new file mode 100644
index 0000000..8586ac5
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx.c
@@ -0,0 +1,1071 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+
+#include "rtsx.h"
+#include "rtsx_chip.h"
+#include "rtsx_transport.h"
+#include "rtsx_scsi.h"
+#include "rtsx_card.h"
+#include "general.h"
+
+#include "ms.h"
+#include "sd.h"
+#include "xd.h"
+
+MODULE_DESCRIPTION("Realtek PCI-Express card reader rts5208/rts5288 driver");
+MODULE_LICENSE("GPL");
+
+static unsigned int delay_use = 1;
+module_param(delay_use, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");
+
+static int ss_en;
+module_param(ss_en, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ss_en, "enable selective suspend");
+
+static int ss_interval = 50;
+module_param(ss_interval, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ss_interval, "Interval to enter ss state in seconds");
+
+static int auto_delink_en;
+module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
+
+static unsigned char aspm_l0s_l1_en;
+module_param(aspm_l0s_l1_en, byte, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(aspm_l0s_l1_en, "enable device aspm");
+
+static int msi_en;
+module_param(msi_en, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(msi_en, "enable msi");
+
+static irqreturn_t rtsx_interrupt(int irq, void *dev_id);
+
+/***********************************************************************
+ * Host functions
+ ***********************************************************************/
+
+static const char *host_info(struct Scsi_Host *host)
+{
+ return "SCSI emulation for PCI-Express Mass Storage devices";
+}
+
+static int slave_alloc(struct scsi_device *sdev)
+{
+ /*
+ * Set the INQUIRY transfer length to 36. We don't use any of
+ * the extra data and many devices choke if asked for more or
+ * less than 36 bytes.
+ */
+ sdev->inquiry_len = 36;
+ return 0;
+}
+
+static int slave_configure(struct scsi_device *sdev)
+{
+	/* Scatter-gather buffers (all but the last) must have a length
+	 * divisible by 512, the transfer granularity of this card reader.
+	 * Otherwise a data packet would end up being short, causing a
+	 * premature end to the data transfer. So use 512 as the scsi
+	 * device queue's DMA alignment mask. Guaranteeing proper alignment
+	 * of the first buffer will have the desired effect because, except
+	 * at the beginning and the end, scatter-gather buffers follow page
+	 * boundaries. */
+ blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
+
+ /* Set the SCSI level to at least 2. We'll leave it at 3 if that's
+ * what is originally reported. We need this to avoid confusing
+ * the SCSI layer with devices that report 0 or 1, but need 10-byte
+ * commands (ala ATAPI devices behind certain bridges, or devices
+ * which simply have broken INQUIRY data).
+ *
+ * NOTE: This means /dev/sg programs (ala cdrecord) will get the
+ * actual information. This seems to be the preference for
+ * programs like that.
+ *
+ * NOTE: This also means that /proc/scsi/scsi and sysfs may report
+ * the actual value or the modified one, depending on where the
+ * data comes from.
+ */
+ if (sdev->scsi_level < SCSI_2)
+ sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2;
+
+ return 0;
+}
+
+
+/***********************************************************************
+ * /proc/scsi/ functions
+ ***********************************************************************/
+
+/* we use this macro to help us write into the buffer */
+#undef SPRINTF
+#define SPRINTF(args...) \
+ do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0)
+
+/* queue a command */
+/* This is always called with scsi_lock(host) held */
+static int queuecommand_lck(struct scsi_cmnd *srb,
+ void (*done)(struct scsi_cmnd *))
+{
+ struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
+ struct rtsx_chip *chip = dev->chip;
+
+ /* check for state-transition errors */
+ if (chip->srb != NULL) {
+ dev_err(&dev->pci->dev, "Error in %s: chip->srb = %p\n",
+ __func__, chip->srb);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ /* fail the command if we are disconnecting */
+ if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
+ dev_info(&dev->pci->dev, "Fail command during disconnect\n");
+ srb->result = DID_NO_CONNECT << 16;
+ done(srb);
+ return 0;
+ }
+
+ /* enqueue the command and wake up the control thread */
+ srb->scsi_done = done;
+ chip->srb = srb;
+ complete(&dev->cmnd_ready);
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(queuecommand)
+
+/***********************************************************************
+ * Error handling functions
+ ***********************************************************************/
+
+/* Command timeout and abort */
+static int command_abort(struct scsi_cmnd *srb)
+{
+ struct Scsi_Host *host = srb->device->host;
+ struct rtsx_dev *dev = host_to_rtsx(host);
+ struct rtsx_chip *chip = dev->chip;
+
+ dev_info(&dev->pci->dev, "%s called\n", __func__);
+
+ scsi_lock(host);
+
+ /* Is this command still active? */
+ if (chip->srb != srb) {
+ scsi_unlock(host);
+ dev_info(&dev->pci->dev, "-- nothing to abort\n");
+ return FAILED;
+ }
+
+ rtsx_set_stat(chip, RTSX_STAT_ABORT);
+
+ scsi_unlock(host);
+
+ /* Wait for the aborted command to finish */
+ wait_for_completion(&dev->notify);
+
+ return SUCCESS;
+}
+
+/* This invokes the transport reset mechanism to reset the state of the
+ * device */
+static int device_reset(struct scsi_cmnd *srb)
+{
+ int result = 0;
+ struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
+
+ dev_info(&dev->pci->dev, "%s called\n", __func__);
+
+ return result < 0 ? FAILED : SUCCESS;
+}
+
+/* Simulate a SCSI bus reset by resetting the device. */
+static int bus_reset(struct scsi_cmnd *srb)
+{
+ int result = 0;
+ struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
+
+ dev_info(&dev->pci->dev, "%s called\n", __func__);
+
+ return result < 0 ? FAILED : SUCCESS;
+}
+
+/*
+ * this defines our host template, with which we'll allocate hosts
+ */
+
+static struct scsi_host_template rtsx_host_template = {
+ /* basic userland interface stuff */
+ .name = CR_DRIVER_NAME,
+ .proc_name = CR_DRIVER_NAME,
+ .info = host_info,
+
+ /* command interface -- queued only */
+ .queuecommand = queuecommand,
+
+ /* error and abort handlers */
+ .eh_abort_handler = command_abort,
+ .eh_device_reset_handler = device_reset,
+ .eh_bus_reset_handler = bus_reset,
+
+ /* queue commands only, only one command per LUN */
+ .can_queue = 1,
+ .cmd_per_lun = 1,
+
+ /* unknown initiator id */
+ .this_id = -1,
+
+ .slave_alloc = slave_alloc,
+ .slave_configure = slave_configure,
+
+ /* lots of sg segments can be handled */
+ .sg_tablesize = SG_ALL,
+
+ /* limit the total size of a transfer to 120 KB */
+ .max_sectors = 240,
+
+ /* merge commands... this seems to help performance, but
+ * someone should periodically test which setting performs better.
+ */
+ .use_clustering = 1,
+
+ /* emulated HBA */
+ .emulated = 1,
+
+ /* we do our own delay after a device or bus reset */
+ .skip_settle_delay = 1,
+
+ /* module management */
+ .module = THIS_MODULE
+};
+
+
+static int rtsx_acquire_irq(struct rtsx_dev *dev)
+{
+ struct rtsx_chip *chip = dev->chip;
+
+ dev_info(&dev->pci->dev, "%s: chip->msi_en = %d, pci->irq = %d\n",
+ __func__, chip->msi_en, dev->pci->irq);
+
+ if (request_irq(dev->pci->irq, rtsx_interrupt,
+ chip->msi_en ? 0 : IRQF_SHARED,
+ CR_DRIVER_NAME, dev)) {
+ dev_err(&dev->pci->dev,
+ "rtsx: unable to grab IRQ %d, disabling device\n",
+ dev->pci->irq);
+ return -1;
+ }
+
+ dev->irq = dev->pci->irq;
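+ /* keep legacy INTx enabled only when MSI is not in use */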
+ pci_intx(dev->pci, !chip->msi_en);
+
+ return 0;
+}
+
+
+int rtsx_read_pci_cfg_byte(u8 bus, u8 dev, u8 func, u8 offset, u8 *val)
+{
+ struct pci_dev *pdev;
+ u8 data;
+ u8 devfn = (dev << 3) | func;
+
+ pdev = pci_get_bus_and_slot(bus, devfn);
+ if (!pdev)
+ return -1;
+
+ pci_read_config_byte(pdev, offset, &data);
+ if (val)
+ *val = data;
+
+ /* drop the reference taken by pci_get_bus_and_slot() */
+ pci_dev_put(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * power management
+ */
+static int rtsx_suspend(struct pci_dev *pci, pm_message_t state)
+{
+ struct rtsx_dev *dev = (struct rtsx_dev *)pci_get_drvdata(pci);
+ struct rtsx_chip *chip;
+
+ if (!dev)
+ return 0;
+
+ /* lock the device pointers */
+ mutex_lock(&(dev->dev_mutex));
+
+ chip = dev->chip;
+
+ rtsx_do_before_power_down(chip, PM_S3);
+
+ if (dev->irq >= 0) {
+ synchronize_irq(dev->irq);
+ free_irq(dev->irq, (void *)dev);
+ dev->irq = -1;
+ }
+
+ if (chip->msi_en)
+ pci_disable_msi(pci);
+
+ pci_save_state(pci);
+ pci_enable_wake(pci, pci_choose_state(pci, state), 1);
+ pci_disable_device(pci);
+ pci_set_power_state(pci, pci_choose_state(pci, state));
+
+ /* unlock the device pointers */
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+
+static int rtsx_resume(struct pci_dev *pci)
+{
+ struct rtsx_dev *dev = (struct rtsx_dev *)pci_get_drvdata(pci);
+ struct rtsx_chip *chip;
+
+ if (!dev)
+ return 0;
+
+ chip = dev->chip;
+
+ /* lock the device pointers */
+ mutex_lock(&(dev->dev_mutex));
+
+ pci_set_power_state(pci, PCI_D0);
+ pci_restore_state(pci);
+ if (pci_enable_device(pci) < 0) {
+ dev_err(&dev->pci->dev,
+ "%s: pci_enable_device failed, disabling device\n",
+ CR_DRIVER_NAME);
+ /* unlock the device pointers */
+ mutex_unlock(&dev->dev_mutex);
+ return -EIO;
+ }
+ pci_set_master(pci);
+
+ if (chip->msi_en) {
+ if (pci_enable_msi(pci) < 0)
+ chip->msi_en = 0;
+ }
+
+ if (rtsx_acquire_irq(dev) < 0) {
+ /* unlock the device pointers */
+ mutex_unlock(&dev->dev_mutex);
+ return -EIO;
+ }
+
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 0x00);
+ rtsx_init_chip(chip);
+
+ /* unlock the device pointers */
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static void rtsx_shutdown(struct pci_dev *pci)
+{
+ struct rtsx_dev *dev = (struct rtsx_dev *)pci_get_drvdata(pci);
+ struct rtsx_chip *chip;
+
+ if (!dev)
+ return;
+
+ chip = dev->chip;
+
+ rtsx_do_before_power_down(chip, PM_S1);
+
+ if (dev->irq >= 0) {
+ synchronize_irq(dev->irq);
+ free_irq(dev->irq, (void *)dev);
+ dev->irq = -1;
+ }
+
+ if (chip->msi_en)
+ pci_disable_msi(pci);
+
+ pci_disable_device(pci);
+}
+
+static int rtsx_control_thread(void *__dev)
+{
+ struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
+ struct rtsx_chip *chip = dev->chip;
+ struct Scsi_Host *host = rtsx_to_host(dev);
+
+ for (;;) {
+ if (wait_for_completion_interruptible(&dev->cmnd_ready))
+ break;
+
+ /* lock the device pointers */
+ mutex_lock(&(dev->dev_mutex));
+
+ /* if the device has disconnected, we are free to exit */
+ if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
+ dev_info(&dev->pci->dev, "-- rtsx-control exiting\n");
+ mutex_unlock(&dev->dev_mutex);
+ break;
+ }
+
+ /* lock access to the state */
+ scsi_lock(host);
+
+ /* has the command been aborted? */
+ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
+ chip->srb->result = DID_ABORT << 16;
+ goto SkipForAbort;
+ }
+
+ scsi_unlock(host);
+
+ /* reject the command if the direction indicator
+ * is UNKNOWN
+ */
+ if (chip->srb->sc_data_direction == DMA_BIDIRECTIONAL) {
+ dev_err(&dev->pci->dev, "UNKNOWN data direction\n");
+ chip->srb->result = DID_ERROR << 16;
+ }
+
+ /* reject if target != 0 or if LUN is higher than
+ * the maximum known LUN
+ */
+ else if (chip->srb->device->id) {
+ dev_err(&dev->pci->dev, "Bad target number (%d:%d)\n",
+ chip->srb->device->id,
+ chip->srb->device->lun);
+ chip->srb->result = DID_BAD_TARGET << 16;
+ }
+
+ else if (chip->srb->device->lun > chip->max_lun) {
+ dev_err(&dev->pci->dev, "Bad LUN (%d:%d)\n",
+ chip->srb->device->id,
+ chip->srb->device->lun);
+ chip->srb->result = DID_BAD_TARGET << 16;
+ }
+
+ /* we've got a command, let's do it! */
+ else {
+ RTSX_DEBUG(scsi_show_command(chip->srb));
+ rtsx_invoke_transport(chip->srb, chip);
+ }
+
+ /* lock access to the state */
+ scsi_lock(host);
+
+ /* did the command already complete because of a disconnect? */
+ if (!chip->srb)
+ ; /* nothing to do */
+
+ /* indicate that the command is done */
+ else if (chip->srb->result != DID_ABORT << 16) {
+ chip->srb->scsi_done(chip->srb);
+ } else {
+SkipForAbort:
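+ /* The host lock is held here on both paths: either taken just
+ * above or still held from the abort check that jumped here; it
+ * is dropped at the scsi_unlock() below.
+ */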
+ dev_err(&dev->pci->dev, "scsi command aborted\n");
+ }
+
+ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
+ complete(&(dev->notify));
+
+ rtsx_set_stat(chip, RTSX_STAT_IDLE);
+ }
+
+ /* finished working on this command */
+ chip->srb = NULL;
+ scsi_unlock(host);
+
+ /* unlock the device pointers */
+ mutex_unlock(&dev->dev_mutex);
+ } /* for (;;) */
+
+ /* notify the exit routine that we're actually exiting now
+ *
+ * complete()/wait_for_completion() is similar to up()/down(),
+ * except that complete() is safe in the case where the structure
+ * is getting deleted in a parallel mode of execution (i.e. just
+ * after the down()) -- that's necessary for the thread-shutdown
+ * case.
+ *
+ * complete_and_exit() goes even further than this -- it is safe in
+ * the case that the thread of the caller is going away (not just
+ * the structure) -- this is necessary for the module-remove case.
+ * This is important in preemptible kernels, which transfer the flow
+ * of execution immediately upon a complete().
+ */
+ complete_and_exit(&dev->control_exit, 0);
+}
+
+
+static int rtsx_polling_thread(void *__dev)
+{
+ struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
+ struct rtsx_chip *chip = dev->chip;
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ sd_card->cleanup_counter = 0;
+ xd_card->cleanup_counter = 0;
+ ms_card->cleanup_counter = 0;
+
+ /* Give the delayed SCSI scan time to finish before polling starts */
+ wait_timeout((delay_use + 5) * 1000);
+
+ for (;;) {
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(POLLING_INTERVAL);
+
+ /* lock the device pointers */
+ mutex_lock(&(dev->dev_mutex));
+
+ /* if the device has disconnected, we are free to exit */
+ if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
+ dev_info(&dev->pci->dev, "-- rtsx-polling exiting\n");
+ mutex_unlock(&dev->dev_mutex);
+ break;
+ }
+
+ mutex_unlock(&dev->dev_mutex);
+
+ mspro_polling_format_status(chip);
+
+ /* lock the device pointers */
+ mutex_lock(&(dev->dev_mutex));
+
+ rtsx_polling_func(chip);
+
+ /* unlock the device pointers */
+ mutex_unlock(&dev->dev_mutex);
+ }
+
+ complete_and_exit(&dev->polling_exit, 0);
+}
+
+/*
+ * interrupt handler
+ */
+static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
+{
+ struct rtsx_dev *dev = dev_id;
+ struct rtsx_chip *chip;
+ int retval;
+ u32 status;
+
+ if (dev)
+ chip = dev->chip;
+ else
+ return IRQ_NONE;
+
+ if (!chip)
+ return IRQ_NONE;
+
+ spin_lock(&dev->reg_lock);
+
+ retval = rtsx_pre_handle_interrupt(chip);
+ if (retval == STATUS_FAIL) {
+ spin_unlock(&dev->reg_lock);
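+ /* An all-ones interrupt register usually means the device has
+ * gone away (reads from a removed PCI device return 0xFFFFFFFF),
+ * so claim the interrupt and do nothing further.
+ */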
+ if (chip->int_reg == 0xFFFFFFFF)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+ }
+
+ status = chip->int_reg;
+
+ if (dev->check_card_cd) {
+ if (!(dev->check_card_cd & status)) {
+ /* card does not exist, return TRANS_RESULT_FAIL */
+ dev->trans_result = TRANS_RESULT_FAIL;
+ if (dev->done)
+ complete(dev->done);
+ goto Exit;
+ }
+ }
+
+ if (status & (NEED_COMPLETE_INT | DELINK_INT)) {
+ if (status & (TRANS_FAIL_INT | DELINK_INT)) {
+ if (status & DELINK_INT)
+ RTSX_SET_DELINK(chip);
+ dev->trans_result = TRANS_RESULT_FAIL;
+ if (dev->done)
+ complete(dev->done);
+ } else if (status & TRANS_OK_INT) {
+ dev->trans_result = TRANS_RESULT_OK;
+ if (dev->done)
+ complete(dev->done);
+ } else if (status & DATA_DONE_INT) {
+ dev->trans_result = TRANS_NOT_READY;
+ if (dev->done && (dev->trans_state == STATE_TRANS_SG))
+ complete(dev->done);
+ }
+ }
+
+Exit:
+ spin_unlock(&dev->reg_lock);
+ return IRQ_HANDLED;
+}
+
+
+/* Release all our dynamic resources */
+static void rtsx_release_resources(struct rtsx_dev *dev)
+{
+ dev_info(&dev->pci->dev, "-- %s\n", __func__);
+
+ /* Tell the control thread to exit. The SCSI host must
+ * already have been removed so it won't try to queue
+ * any more commands.
+ */
+ dev_info(&dev->pci->dev, "-- sending exit command to thread\n");
+ complete(&dev->cmnd_ready);
+ if (dev->ctl_thread)
+ wait_for_completion(&dev->control_exit);
+ if (dev->polling_thread)
+ wait_for_completion(&dev->polling_exit);
+
+ wait_timeout(200);
+
+ if (dev->rtsx_resv_buf) {
+ dma_free_coherent(&(dev->pci->dev), RTSX_RESV_BUF_LEN,
+ dev->rtsx_resv_buf, dev->rtsx_resv_buf_addr);
+ dev->chip->host_cmds_ptr = NULL;
+ dev->chip->host_sg_tbl_ptr = NULL;
+ }
+
+ if (dev->irq > 0)
+ free_irq(dev->irq, (void *)dev);
+ if (dev->chip->msi_en)
+ pci_disable_msi(dev->pci);
+ if (dev->remap_addr)
+ iounmap(dev->remap_addr);
+
+ pci_disable_device(dev->pci);
+ pci_release_regions(dev->pci);
+
+ rtsx_release_chip(dev->chip);
+ kfree(dev->chip);
+}
+
+/* First stage of disconnect processing: stop all commands and remove
+ * the host */
+static void quiesce_and_remove_host(struct rtsx_dev *dev)
+{
+ struct Scsi_Host *host = rtsx_to_host(dev);
+ struct rtsx_chip *chip = dev->chip;
+
+ /* Prevent new transfers, stop the current command, and
+ * interrupt a SCSI-scan or device-reset delay */
+ mutex_lock(&dev->dev_mutex);
+ scsi_lock(host);
+ rtsx_set_stat(chip, RTSX_STAT_DISCONNECT);
+ scsi_unlock(host);
+ mutex_unlock(&dev->dev_mutex);
+ wake_up(&dev->delay_wait);
+ wait_for_completion(&dev->scanning_done);
+
+ /* Wait some time to let the other threads exit */
+ wait_timeout(100);
+
+ /* queuecommand won't accept any new commands and the control
+ * thread won't execute a previously-queued command. If there
+ * is such a command pending, complete it with an error. */
+ mutex_lock(&dev->dev_mutex);
+ if (chip->srb) {
+ chip->srb->result = DID_NO_CONNECT << 16;
+ scsi_lock(host);
+ chip->srb->scsi_done(chip->srb);
+ chip->srb = NULL;
+ scsi_unlock(host);
+ }
+ mutex_unlock(&dev->dev_mutex);
+
+ /* Now we own no commands so it's safe to remove the SCSI host */
+ scsi_remove_host(host);
+}
+
+/* Second stage of disconnect processing: deallocate all resources */
+static void release_everything(struct rtsx_dev *dev)
+{
+ rtsx_release_resources(dev);
+
+ /* Drop our reference to the host; the SCSI core will free it
+ * when the refcount becomes 0. */
+ scsi_host_put(rtsx_to_host(dev));
+}
+
+/* Thread to carry out delayed SCSI-device scanning */
+static int rtsx_scan_thread(void *__dev)
+{
+ struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
+ struct rtsx_chip *chip = dev->chip;
+
+ /* Wait for the timeout to expire or for a disconnect */
+ if (delay_use > 0) {
+ dev_info(&dev->pci->dev,
+ "%s: waiting for device to settle before scanning\n",
+ CR_DRIVER_NAME);
+ wait_event_interruptible_timeout(dev->delay_wait,
+ rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT),
+ delay_use * HZ);
+ }
+
+ /* If the device is still connected, perform the scanning */
+ if (!rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
+ scsi_scan_host(rtsx_to_host(dev));
+ dev_info(&dev->pci->dev, "%s: device scan complete\n",
+ CR_DRIVER_NAME);
+
+ /* Should we unbind if no devices were detected? */
+ }
+
+ complete_and_exit(&dev->scanning_done, 0);
+}
+
+static void rtsx_init_options(struct rtsx_chip *chip)
+{
+ chip->vendor_id = chip->rtsx->pci->vendor;
+ chip->product_id = chip->rtsx->pci->device;
+ chip->adma_mode = 1;
+ chip->lun_mc = 0;
+ chip->driver_first_load = 1;
+#ifdef HW_AUTO_SWITCH_SD_BUS
+ chip->sdio_in_charge = 0;
+#endif
+
+ chip->mspro_formatter_enable = 1;
+ chip->ignore_sd = 0;
+ chip->use_hw_setting = 0;
+ chip->lun_mode = DEFAULT_SINGLE;
+ chip->auto_delink_en = auto_delink_en;
+ chip->ss_en = ss_en;
+ chip->ss_idle_period = ss_interval * 1000;
+ chip->remote_wakeup_en = 0;
+ chip->aspm_l0s_l1_en = aspm_l0s_l1_en;
+ chip->dynamic_aspm = 1;
+ chip->fpga_sd_sdr104_clk = CLK_200;
+ chip->fpga_sd_ddr50_clk = CLK_100;
+ chip->fpga_sd_sdr50_clk = CLK_100;
+ chip->fpga_sd_hs_clk = CLK_100;
+ chip->fpga_mmc_52m_clk = CLK_80;
+ chip->fpga_ms_hg_clk = CLK_80;
+ chip->fpga_ms_4bit_clk = CLK_80;
+ chip->fpga_ms_1bit_clk = CLK_40;
+ chip->asic_sd_sdr104_clk = 203;
+ chip->asic_sd_sdr50_clk = 98;
+ chip->asic_sd_ddr50_clk = 98;
+ chip->asic_sd_hs_clk = 98;
+ chip->asic_mmc_52m_clk = 98;
+ chip->asic_ms_hg_clk = 117;
+ chip->asic_ms_4bit_clk = 78;
+ chip->asic_ms_1bit_clk = 39;
+ chip->ssc_depth_sd_sdr104 = SSC_DEPTH_2M;
+ chip->ssc_depth_sd_sdr50 = SSC_DEPTH_2M;
+ chip->ssc_depth_sd_ddr50 = SSC_DEPTH_1M;
+ chip->ssc_depth_sd_hs = SSC_DEPTH_1M;
+ chip->ssc_depth_mmc_52m = SSC_DEPTH_1M;
+ chip->ssc_depth_ms_hg = SSC_DEPTH_1M;
+ chip->ssc_depth_ms_4bit = SSC_DEPTH_512K;
+ chip->ssc_depth_low_speed = SSC_DEPTH_512K;
+ chip->ssc_en = 1;
+ chip->sd_speed_prior = 0x01040203;
+ chip->sd_current_prior = 0x00010203;
+ chip->sd_ctl = SD_PUSH_POINT_AUTO |
+ SD_SAMPLE_POINT_AUTO |
+ SUPPORT_MMC_DDR_MODE;
+ chip->sd_ddr_tx_phase = 0;
+ chip->mmc_ddr_tx_phase = 1;
+ chip->sd_default_tx_phase = 15;
+ chip->sd_default_rx_phase = 15;
+ chip->pmos_pwr_on_interval = 200;
+ chip->sd_voltage_switch_delay = 1000;
+ chip->ms_power_class_en = 3;
+
+ chip->sd_400mA_ocp_thd = 1;
+ chip->sd_800mA_ocp_thd = 5;
+ chip->ms_ocp_thd = 2;
+
+ chip->card_drive_sel = 0x55;
+ chip->sd30_drive_sel_1v8 = 0x03;
+ chip->sd30_drive_sel_3v3 = 0x01;
+
+ chip->do_delink_before_power_down = 1;
+ chip->auto_power_down = 1;
+ chip->polling_config = 0;
+
+ chip->force_clkreq_0 = 1;
+ chip->ft2_fast_mode = 0;
+
+ chip->sdio_retry_cnt = 1;
+
+ chip->xd_timeout = 2000;
+ chip->sd_timeout = 10000;
+ chip->ms_timeout = 2000;
+ chip->mspro_timeout = 15000;
+
+ chip->power_down_in_ss = 1;
+
+ chip->sdr104_en = 1;
+ chip->sdr50_en = 1;
+ chip->ddr50_en = 1;
+
+ chip->delink_stage1_step = 100;
+ chip->delink_stage2_step = 40;
+ chip->delink_stage3_step = 20;
+
+ chip->auto_delink_in_L1 = 1;
+ chip->blink_led = 1;
+ chip->msi_en = msi_en;
+ chip->hp_watch_bios_hotplug = 0;
+ chip->max_payload = 0;
+ chip->phy_voltage = 0;
+
+ chip->support_ms_8bit = 1;
+ chip->s3_pwr_off_delay = 1000;
+}
+
+static int rtsx_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ struct Scsi_Host *host;
+ struct rtsx_dev *dev;
+ int err = 0;
+ struct task_struct *th;
+
+ RTSX_DEBUGP("Realtek PCI-E card reader detected\n");
+
+ err = pci_enable_device(pci);
+ if (err < 0) {
+ dev_err(&pci->dev, "PCI enable device failed!\n");
+ return err;
+ }
+
+ err = pci_request_regions(pci, CR_DRIVER_NAME);
+ if (err < 0) {
+ dev_err(&pci->dev, "PCI request regions for %s failed!\n",
+ CR_DRIVER_NAME);
+ pci_disable_device(pci);
+ return err;
+ }
+
+ /*
+ * Ask the SCSI layer to allocate a host structure, with extra
+ * space at the end for our private rtsx_dev structure.
+ */
+ host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
+ if (!host) {
+ dev_err(&pci->dev, "Unable to allocate the scsi host\n");
+ pci_release_regions(pci);
+ pci_disable_device(pci);
+ return -ENOMEM;
+ }
+
+ dev = host_to_rtsx(host);
+ memset(dev, 0, sizeof(struct rtsx_dev));
+
+ dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL);
+ if (dev->chip == NULL) {
+ err = -ENOMEM;
+ goto errout;
+ }
+
+ spin_lock_init(&dev->reg_lock);
+ mutex_init(&(dev->dev_mutex));
+ init_completion(&dev->cmnd_ready);
+ init_completion(&dev->control_exit);
+ init_completion(&dev->polling_exit);
+ init_completion(&(dev->notify));
+ init_completion(&dev->scanning_done);
+ init_waitqueue_head(&dev->delay_wait);
+
+ dev->pci = pci;
+ dev->irq = -1;
+
+ dev_info(&pci->dev, "Resource length: 0x%x\n",
+ (unsigned int)pci_resource_len(pci, 0));
+ dev->addr = pci_resource_start(pci, 0);
+ dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0));
+ if (dev->remap_addr == NULL) {
+ dev_err(&pci->dev, "ioremap error\n");
+ err = -ENXIO;
+ goto errout;
+ }
+
+ /*
+ * Using "unsigned long" cast here to eliminate gcc warning in
+ * 64-bit system
+ */
+ dev_info(&pci->dev, "Original address: 0x%lx, remapped address: 0x%lx\n",
+ (unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));
+
+ dev->rtsx_resv_buf = dma_alloc_coherent(&(pci->dev), RTSX_RESV_BUF_LEN,
+ &(dev->rtsx_resv_buf_addr), GFP_KERNEL);
+ if (dev->rtsx_resv_buf == NULL) {
+ dev_err(&pci->dev, "alloc dma buffer fail\n");
+ err = -ENXIO;
+ goto errout;
+ }
+ dev->chip->host_cmds_ptr = dev->rtsx_resv_buf;
+ dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr;
+ dev->chip->host_sg_tbl_ptr = dev->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
+ dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr +
+ HOST_CMDS_BUF_LEN;
+
+ dev->chip->rtsx = dev;
+
+ rtsx_init_options(dev->chip);
+
+ dev_info(&pci->dev, "pci->irq = %d\n", pci->irq);
+
+ if (dev->chip->msi_en) {
+ if (pci_enable_msi(pci) < 0)
+ dev->chip->msi_en = 0;
+ }
+
+ if (rtsx_acquire_irq(dev) < 0) {
+ err = -EBUSY;
+ goto errout;
+ }
+
+ pci_set_master(pci);
+ synchronize_irq(dev->irq);
+
+ rtsx_init_chip(dev->chip);
+
+ /* set the supported max_lun and max_id for the scsi host
+ * NOTE: the minimum value of max_id is 1 */
+ host->max_id = 1;
+ host->max_lun = dev->chip->max_lun;
+
+ /* Start up our control thread */
+ th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
+ if (IS_ERR(th)) {
+ dev_err(&pci->dev, "Unable to start control thread\n");
+ err = PTR_ERR(th);
+ goto errout;
+ }
+ dev->ctl_thread = th;
+
+ err = scsi_add_host(host, &pci->dev);
+ if (err) {
+ dev_err(&pci->dev, "Unable to add the scsi host\n");
+ goto errout;
+ }
+
+ /* Start up the thread for delayed SCSI-device scanning */
+ th = kthread_run(rtsx_scan_thread, dev, "rtsx-scan");
+ if (IS_ERR(th)) {
+ dev_err(&pci->dev, "Unable to start the device-scanning thread\n");
+ complete(&dev->scanning_done);
+ quiesce_and_remove_host(dev);
+ err = PTR_ERR(th);
+ goto errout;
+ }
+
+ /* Start up the device-polling thread */
+ th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling");
+ if (IS_ERR(th)) {
+ dev_err(&pci->dev, "Unable to start the device-polling thread\n");
+ quiesce_and_remove_host(dev);
+ err = PTR_ERR(th);
+ goto errout;
+ }
+ dev->polling_thread = th;
+
+ pci_set_drvdata(pci, dev);
+
+ return 0;
+
+ /* We come here if there are any problems */
+errout:
+ dev_err(&pci->dev, "rtsx_probe() failed\n");
+ release_everything(dev);
+
+ return err;
+}
+
+
+static void rtsx_remove(struct pci_dev *pci)
+{
+ struct rtsx_dev *dev = (struct rtsx_dev *)pci_get_drvdata(pci);
+
+ dev_info(&pci->dev, "rtsx_remove() called\n");
+
+ quiesce_and_remove_host(dev);
+ release_everything(dev);
+
+ pci_set_drvdata(pci, NULL);
+}
+
+/* PCI IDs */
+static DEFINE_PCI_DEVICE_TABLE(rtsx_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5208), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5288), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, rtsx_ids);
+
+/* pci_driver definition */
+static struct pci_driver driver = {
+ .name = CR_DRIVER_NAME,
+ .id_table = rtsx_ids,
+ .probe = rtsx_probe,
+ .remove = rtsx_remove,
+#ifdef CONFIG_PM
+ .suspend = rtsx_suspend,
+ .resume = rtsx_resume,
+#endif
+ .shutdown = rtsx_shutdown,
+};
+
+static int __init rtsx_init(void)
+{
+ pr_info("Initializing Realtek PCIE storage driver...\n");
+
+ return pci_register_driver(&driver);
+}
+
+static void __exit rtsx_exit(void)
+{
+ pr_info("rtsx_exit() called\n");
+
+ pci_unregister_driver(&driver);
+
+ pr_info("%s module exit\n", CR_DRIVER_NAME);
+}
+
+module_init(rtsx_init)
+module_exit(rtsx_exit)
diff --git a/drivers/staging/rts5208/rtsx.h b/drivers/staging/rts5208/rtsx.h
new file mode 100644
index 0000000..37eab56
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx.h
@@ -0,0 +1,185 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_H
+#define __REALTEK_RTSX_H
+
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <linux/cdrom.h>
+#include <linux/workqueue.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_devinfo.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+
+#include "debug.h"
+#include "trace.h"
+#include "general.h"
+
+#define CR_DRIVER_NAME "rts5208"
+
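+/*
+ * The card reader is always looked up in PCI domain 0 here, so the legacy
+ * bus/slot helper is simply mapped onto pci_get_domain_bus_and_slot().
+ */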
+#define pci_get_bus_and_slot(bus, devfn) \
+ pci_get_domain_bus_and_slot(0, (bus), (devfn))
+
+/*
+ * macros for easy use
+ */
+#define rtsx_writel(chip, reg, value) \
+ iowrite32(value, (chip)->rtsx->remap_addr + reg)
+#define rtsx_readl(chip, reg) \
+ ioread32((chip)->rtsx->remap_addr + reg)
+#define rtsx_writew(chip, reg, value) \
+ iowrite16(value, (chip)->rtsx->remap_addr + reg)
+#define rtsx_readw(chip, reg) \
+ ioread16((chip)->rtsx->remap_addr + reg)
+#define rtsx_writeb(chip, reg, value) \
+ iowrite8(value, (chip)->rtsx->remap_addr + reg)
+#define rtsx_readb(chip, reg) \
+ ioread8((chip)->rtsx->remap_addr + reg)
+
+#define rtsx_read_config_byte(chip, where, val) \
+ pci_read_config_byte((chip)->rtsx->pci, where, val)
+
+#define rtsx_write_config_byte(chip, where, val) \
+ pci_write_config_byte((chip)->rtsx->pci, where, val)
+
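+/*
+ * wait_timeout(msecs) sleeps (interruptibly) for roughly msecs milliseconds;
+ * schedule_timeout() works in jiffies, hence the HZ / 1000 conversion.
+ */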
+#define wait_timeout_x(task_state, msecs) \
+do { \
+ set_current_state((task_state)); \
+ schedule_timeout((msecs) * HZ / 1000); \
+} while (0)
+#define wait_timeout(msecs) wait_timeout_x(TASK_INTERRUPTIBLE, (msecs))
+
+
+#define STATE_TRANS_NONE 0
+#define STATE_TRANS_CMD 1
+#define STATE_TRANS_BUF 2
+#define STATE_TRANS_SG 3
+
+#define TRANS_NOT_READY 0
+#define TRANS_RESULT_OK 1
+#define TRANS_RESULT_FAIL 2
+
+#define SCSI_LUN(srb) ((srb)->device->lun)
+
+typedef unsigned long DELAY_PARA_T;
+
+struct rtsx_chip;
+
+struct rtsx_dev {
+ struct pci_dev *pci;
+
+ /* pci resources */
+ unsigned long addr;
+ void __iomem *remap_addr;
+ int irq;
+
+ /* locks */
+ spinlock_t reg_lock;
+
+ struct task_struct *ctl_thread; /* the control thread */
+ struct task_struct *polling_thread; /* the polling thread */
+
+ /* mutual exclusion and synchronization structures */
+ struct completion cmnd_ready; /* to sleep thread on */
+ struct completion control_exit; /* control thread exit */
+ struct completion polling_exit; /* polling thread exit */
+ struct completion notify; /* thread begin/end */
+ struct completion scanning_done; /* wait for scan thread */
+
+ wait_queue_head_t delay_wait; /* wait during scan, reset */
+ struct mutex dev_mutex;
+
+ /* host reserved buffer */
+ void *rtsx_resv_buf;
+ dma_addr_t rtsx_resv_buf_addr;
+
+ char trans_result;
+ char trans_state;
+
+ struct completion *done;
+ /* Whether the interrupt handler should check card detect (CD) status */
+ u32 check_card_cd;
+
+ struct rtsx_chip *chip;
+};
+
+typedef struct rtsx_dev rtsx_dev_t;
+
+/* Convert between rtsx_dev and the corresponding Scsi_Host */
+static inline struct Scsi_Host *rtsx_to_host(struct rtsx_dev *dev)
+{
+ return container_of((void *) dev, struct Scsi_Host, hostdata);
+}
+static inline struct rtsx_dev *host_to_rtsx(struct Scsi_Host *host)
+{
+ return (struct rtsx_dev *) host->hostdata;
+}
+
+static inline void get_current_time(u8 *timeval_buf, int buf_len)
+{
+ struct timeval tv;
+
+ if (!timeval_buf || (buf_len < 8))
+ return;
+
+ do_gettimeofday(&tv);
+
+ timeval_buf[0] = (u8)(tv.tv_sec >> 24);
+ timeval_buf[1] = (u8)(tv.tv_sec >> 16);
+ timeval_buf[2] = (u8)(tv.tv_sec >> 8);
+ timeval_buf[3] = (u8)(tv.tv_sec);
+ timeval_buf[4] = (u8)(tv.tv_usec >> 24);
+ timeval_buf[5] = (u8)(tv.tv_usec >> 16);
+ timeval_buf[6] = (u8)(tv.tv_usec >> 8);
+ timeval_buf[7] = (u8)(tv.tv_usec);
+}
+
+/* The scsi_lock() and scsi_unlock() macros protect the sm_state and the
+ * single queue element srb for write access */
+#define scsi_unlock(host) spin_unlock_irq(host->host_lock)
+#define scsi_lock(host) spin_lock_irq(host->host_lock)
+
+#define lock_state(chip) spin_lock_irq(&((chip)->rtsx->reg_lock))
+#define unlock_state(chip) spin_unlock_irq(&((chip)->rtsx->reg_lock))
+
+/* struct scsi_cmnd transfer buffer access utilities */
+enum xfer_buf_dir {TO_XFER_BUF, FROM_XFER_BUF};
+
+int rtsx_read_pci_cfg_byte(u8 bus, u8 dev, u8 func, u8 offset, u8 *val);
+
+#endif /* __REALTEK_RTSX_H */
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
new file mode 100644
index 0000000..3055eb1
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -0,0 +1,1126 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+
+#include "rtsx.h"
+#include "rtsx_transport.h"
+#include "rtsx_scsi.h"
+#include "rtsx_card.h"
+
+#include "rtsx_sys.h"
+#include "general.h"
+
+#include "sd.h"
+#include "xd.h"
+#include "ms.h"
+
+void do_remaining_work(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+#ifdef XD_DELAY_WRITE
+ struct xd_info *xd_card = &(chip->xd_card);
+#endif
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ if (chip->card_ready & SD_CARD) {
+ if (sd_card->seq_mode) {
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ sd_card->cleanup_counter++;
+ } else {
+ sd_card->cleanup_counter = 0;
+ }
+ }
+
+#ifdef XD_DELAY_WRITE
+ if (chip->card_ready & XD_CARD) {
+ if (xd_card->delay_write.delay_write_flag) {
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ xd_card->cleanup_counter++;
+ } else {
+ xd_card->cleanup_counter = 0;
+ }
+ }
+#endif
+
+ if (chip->card_ready & MS_CARD) {
+ if (CHK_MSPRO(ms_card)) {
+ if (ms_card->seq_mode) {
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ ms_card->cleanup_counter++;
+ } else {
+ ms_card->cleanup_counter = 0;
+ }
+ } else {
+#ifdef MS_DELAY_WRITE
+ if (ms_card->delay_write.delay_write_flag) {
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ ms_card->cleanup_counter++;
+ } else {
+ ms_card->cleanup_counter = 0;
+ }
+#endif
+ }
+ }
+
+ if (sd_card->cleanup_counter > POLLING_WAIT_CNT)
+ sd_cleanup_work(chip);
+
+ if (xd_card->cleanup_counter > POLLING_WAIT_CNT)
+ xd_cleanup_work(chip);
+
+ if (ms_card->cleanup_counter > POLLING_WAIT_CNT)
+ ms_cleanup_work(chip);
+}
+
+void try_to_switch_sdio_ctrl(struct rtsx_chip *chip)
+{
+ u8 reg1 = 0, reg2 = 0;
+
+ rtsx_read_register(chip, 0xFF34, &reg1);
+ rtsx_read_register(chip, 0xFF38, &reg2);
+ RTSX_DEBUGP("reg 0xFF34: 0x%x, reg 0xFF38: 0x%x\n", reg1, reg2);
+ if ((reg1 & 0xC0) && (reg2 & 0xC0)) {
+ chip->sd_int = 1;
+ rtsx_write_register(chip, SDIO_CTRL, 0xFF, SDIO_BUS_CTRL | SDIO_CD_CTRL);
+ rtsx_write_register(chip, PWR_GATE_CTRL, LDO3318_PWR_MASK, LDO_ON);
+ }
+}
+
+#ifdef SUPPORT_SDIO_ASPM
+void dynamic_configure_sdio_aspm(struct rtsx_chip *chip)
+{
+ u8 buf[12], reg;
+ int i;
+
+ for (i = 0; i < 12; i++)
+ rtsx_read_register(chip, 0xFF08 + i, &buf[i]);
+ rtsx_read_register(chip, 0xFF25, &reg);
+ if ((memcmp(buf, chip->sdio_raw_data, 12) != 0) || (reg & 0x03)) {
+ chip->sdio_counter = 0;
+ chip->sdio_idle = 0;
+ } else {
+ if (!chip->sdio_idle) {
+ chip->sdio_counter++;
+ if (chip->sdio_counter >= SDIO_IDLE_COUNT) {
+ chip->sdio_counter = 0;
+ chip->sdio_idle = 1;
+ }
+ }
+ }
+ memcpy(chip->sdio_raw_data, buf, 12);
+
+ if (chip->sdio_idle) {
+ if (!chip->sdio_aspm) {
+ RTSX_DEBUGP("SDIO enter ASPM!\n");
+ rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFC,
+ 0x30 | (chip->aspm_level[1] << 2));
+ chip->sdio_aspm = 1;
+ }
+ } else {
+ if (chip->sdio_aspm) {
+ RTSX_DEBUGP("SDIO exit ASPM!\n");
+ rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFC, 0x30);
+ chip->sdio_aspm = 0;
+ }
+ }
+}
+#endif
+
+void do_reset_sd_card(struct rtsx_chip *chip)
+{
+ int retval;
+
+ RTSX_DEBUGP("%s: %d, card2lun = 0x%x\n", __func__,
+ chip->sd_reset_counter, chip->card2lun[SD_CARD]);
+
+ if (chip->card2lun[SD_CARD] >= MAX_ALLOWED_LUN_CNT) {
+ clear_bit(SD_NR, &(chip->need_reset));
+ chip->sd_reset_counter = 0;
+ chip->sd_show_cnt = 0;
+ return;
+ }
+
+ chip->rw_fail_cnt[chip->card2lun[SD_CARD]] = 0;
+
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ rtsx_write_register(chip, SDIO_CTRL, 0xFF, 0);
+
+ retval = reset_sd_card(chip);
+ if (chip->need_release & SD_CARD)
+ return;
+ if (retval == STATUS_SUCCESS) {
+ clear_bit(SD_NR, &(chip->need_reset));
+ chip->sd_reset_counter = 0;
+ chip->sd_show_cnt = 0;
+ chip->card_ready |= SD_CARD;
+ chip->card_fail &= ~SD_CARD;
+ chip->rw_card[chip->card2lun[SD_CARD]] = sd_rw;
+ } else {
+ if (chip->sd_io || (chip->sd_reset_counter >= MAX_RESET_CNT)) {
+ clear_bit(SD_NR, &(chip->need_reset));
+ chip->sd_reset_counter = 0;
+ chip->sd_show_cnt = 0;
+ } else {
+ chip->sd_reset_counter++;
+ }
+ chip->card_ready &= ~SD_CARD;
+ chip->card_fail |= SD_CARD;
+ chip->capacity[chip->card2lun[SD_CARD]] = 0;
+ chip->rw_card[chip->card2lun[SD_CARD]] = NULL;
+
+ rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN, 0);
+ if (!chip->ft2_fast_mode)
+ card_power_off(chip, SD_CARD);
+ if (chip->sd_io) {
+ chip->sd_int = 0;
+ try_to_switch_sdio_ctrl(chip);
+ } else {
+ disable_card_clock(chip, SD_CARD);
+ }
+ }
+}
+
+void do_reset_xd_card(struct rtsx_chip *chip)
+{
+ int retval;
+
+ RTSX_DEBUGP("%s: %d, card2lun = 0x%x\n", __func__,
+ chip->xd_reset_counter, chip->card2lun[XD_CARD]);
+
+ if (chip->card2lun[XD_CARD] >= MAX_ALLOWED_LUN_CNT) {
+ clear_bit(XD_NR, &(chip->need_reset));
+ chip->xd_reset_counter = 0;
+ chip->xd_show_cnt = 0;
+ return;
+ }
+
+ chip->rw_fail_cnt[chip->card2lun[XD_CARD]] = 0;
+
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ rtsx_write_register(chip, SDIO_CTRL, 0xFF, 0);
+
+ retval = reset_xd_card(chip);
+ if (chip->need_release & XD_CARD)
+ return;
+ if (retval == STATUS_SUCCESS) {
+ clear_bit(XD_NR, &(chip->need_reset));
+ chip->xd_reset_counter = 0;
+ chip->card_ready |= XD_CARD;
+ chip->card_fail &= ~XD_CARD;
+ chip->rw_card[chip->card2lun[XD_CARD]] = xd_rw;
+ } else {
+ if (chip->xd_reset_counter >= MAX_RESET_CNT) {
+ clear_bit(XD_NR, &(chip->need_reset));
+ chip->xd_reset_counter = 0;
+ chip->xd_show_cnt = 0;
+ } else {
+ chip->xd_reset_counter++;
+ }
+ chip->card_ready &= ~XD_CARD;
+ chip->card_fail |= XD_CARD;
+ chip->capacity[chip->card2lun[XD_CARD]] = 0;
+ chip->rw_card[chip->card2lun[XD_CARD]] = NULL;
+
+ rtsx_write_register(chip, CARD_OE, XD_OUTPUT_EN, 0);
+ if (!chip->ft2_fast_mode)
+ card_power_off(chip, XD_CARD);
+ disable_card_clock(chip, XD_CARD);
+ }
+}
+
+void do_reset_ms_card(struct rtsx_chip *chip)
+{
+ int retval;
+
+ RTSX_DEBUGP("%s: %d, card2lun = 0x%x\n", __func__,
+ chip->ms_reset_counter, chip->card2lun[MS_CARD]);
+
+ if (chip->card2lun[MS_CARD] >= MAX_ALLOWED_LUN_CNT) {
+ clear_bit(MS_NR, &(chip->need_reset));
+ chip->ms_reset_counter = 0;
+ chip->ms_show_cnt = 0;
+ return;
+ }
+
+ chip->rw_fail_cnt[chip->card2lun[MS_CARD]] = 0;
+
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ rtsx_write_register(chip, SDIO_CTRL, 0xFF, 0);
+
+ retval = reset_ms_card(chip);
+ if (chip->need_release & MS_CARD)
+ return;
+ if (retval == STATUS_SUCCESS) {
+ clear_bit(MS_NR, &(chip->need_reset));
+ chip->ms_reset_counter = 0;
+ chip->card_ready |= MS_CARD;
+ chip->card_fail &= ~MS_CARD;
+ chip->rw_card[chip->card2lun[MS_CARD]] = ms_rw;
+ } else {
+ if (chip->ms_reset_counter >= MAX_RESET_CNT) {
+ clear_bit(MS_NR, &(chip->need_reset));
+ chip->ms_reset_counter = 0;
+ chip->ms_show_cnt = 0;
+ } else {
+ chip->ms_reset_counter++;
+ }
+ chip->card_ready &= ~MS_CARD;
+ chip->card_fail |= MS_CARD;
+ chip->capacity[chip->card2lun[MS_CARD]] = 0;
+ chip->rw_card[chip->card2lun[MS_CARD]] = NULL;
+
+ rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN, 0);
+ if (!chip->ft2_fast_mode)
+ card_power_off(chip, MS_CARD);
+ disable_card_clock(chip, MS_CARD);
+ }
+}
+
+static void release_sdio(struct rtsx_chip *chip)
+{
+ if (chip->sd_io) {
+ rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ if (chip->chip_insert_with_sdio) {
+ chip->chip_insert_with_sdio = 0;
+
+ if (CHECK_PID(chip, 0x5288))
+ rtsx_write_register(chip, 0xFE5A, 0x08, 0x00);
+ else
+ rtsx_write_register(chip, 0xFE70, 0x80, 0x00);
+ }
+
+ rtsx_write_register(chip, SDIO_CTRL, SDIO_CD_CTRL, 0);
+ chip->sd_io = 0;
+ }
+}
+
+void rtsx_power_off_card(struct rtsx_chip *chip)
+{
+ if ((chip->card_ready & SD_CARD) || chip->sd_io) {
+ sd_cleanup_work(chip);
+ sd_power_off_card3v3(chip);
+ }
+
+ if (chip->card_ready & XD_CARD) {
+ xd_cleanup_work(chip);
+ xd_power_off_card3v3(chip);
+ }
+
+ if (chip->card_ready & MS_CARD) {
+ ms_cleanup_work(chip);
+ ms_power_off_card3v3(chip);
+ }
+}
+
+void rtsx_release_cards(struct rtsx_chip *chip)
+{
+ chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
+
+ if ((chip->card_ready & SD_CARD) || chip->sd_io) {
+ if (chip->int_reg & SD_EXIST)
+ sd_cleanup_work(chip);
+ release_sd_card(chip);
+ }
+
+ if (chip->card_ready & XD_CARD) {
+ if (chip->int_reg & XD_EXIST)
+ xd_cleanup_work(chip);
+ release_xd_card(chip);
+ }
+
+ if (chip->card_ready & MS_CARD) {
+ if (chip->int_reg & MS_EXIST)
+ ms_cleanup_work(chip);
+ release_ms_card(chip);
+ }
+}
+
+void rtsx_reset_cards(struct rtsx_chip *chip)
+{
+ if (!chip->need_reset)
+ return;
+
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ rtsx_force_power_on(chip, SSC_PDCTL | OC_PDCTL);
+
+ rtsx_disable_aspm(chip);
+
+ if ((chip->need_reset & SD_CARD) && chip->chip_insert_with_sdio)
+ clear_bit(SD_NR, &(chip->need_reset));
+
+ if (chip->need_reset & XD_CARD) {
+ chip->card_exist |= XD_CARD;
+
+ if (chip->xd_show_cnt >= MAX_SHOW_CNT)
+ do_reset_xd_card(chip);
+ else
+ chip->xd_show_cnt++;
+ }
+ if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) {
+ if (chip->card_exist & XD_CARD) {
+ clear_bit(SD_NR, &(chip->need_reset));
+ clear_bit(MS_NR, &(chip->need_reset));
+ }
+ }
+ if (chip->need_reset & SD_CARD) {
+ chip->card_exist |= SD_CARD;
+
+ if (chip->sd_show_cnt >= MAX_SHOW_CNT) {
+ rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+ do_reset_sd_card(chip);
+ } else {
+ chip->sd_show_cnt++;
+ }
+ }
+ if (chip->need_reset & MS_CARD) {
+ chip->card_exist |= MS_CARD;
+
+ if (chip->ms_show_cnt >= MAX_SHOW_CNT)
+ do_reset_ms_card(chip);
+ else
+ chip->ms_show_cnt++;
+ }
+}
+
+void rtsx_reinit_cards(struct rtsx_chip *chip, int reset_chip)
+{
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ rtsx_force_power_on(chip, SSC_PDCTL | OC_PDCTL);
+
+ if (reset_chip)
+ rtsx_reset_chip(chip);
+
+ chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
+
+ if ((chip->int_reg & SD_EXIST) && (chip->need_reinit & SD_CARD)) {
+ release_sdio(chip);
+ release_sd_card(chip);
+
+ wait_timeout(100);
+
+ chip->card_exist |= SD_CARD;
+ do_reset_sd_card(chip);
+ }
+
+ if ((chip->int_reg & XD_EXIST) && (chip->need_reinit & XD_CARD)) {
+ release_xd_card(chip);
+
+ wait_timeout(100);
+
+ chip->card_exist |= XD_CARD;
+ do_reset_xd_card(chip);
+ }
+
+ if ((chip->int_reg & MS_EXIST) && (chip->need_reinit & MS_CARD)) {
+ release_ms_card(chip);
+
+ wait_timeout(100);
+
+ chip->card_exist |= MS_CARD;
+ do_reset_ms_card(chip);
+ }
+
+ chip->need_reinit = 0;
+}
+
+#ifdef DISABLE_CARD_INT
+void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset, unsigned long *need_release)
+{
+ u8 release_map = 0, reset_map = 0;
+
+ chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
+
+ if (chip->card_exist) {
+ if (chip->card_exist & XD_CARD) {
+ if (!(chip->int_reg & XD_EXIST))
+ release_map |= XD_CARD;
+ } else if (chip->card_exist & SD_CARD) {
+ if (!(chip->int_reg & SD_EXIST))
+ release_map |= SD_CARD;
+ } else if (chip->card_exist & MS_CARD) {
+ if (!(chip->int_reg & MS_EXIST))
+ release_map |= MS_CARD;
+ }
+ } else {
+ if (chip->int_reg & XD_EXIST)
+ reset_map |= XD_CARD;
+ else if (chip->int_reg & SD_EXIST)
+ reset_map |= SD_CARD;
+ else if (chip->int_reg & MS_EXIST)
+ reset_map |= MS_CARD;
+ }
+
+ if (reset_map) {
+ int xd_cnt = 0, sd_cnt = 0, ms_cnt = 0;
+ int i;
+
+ for (i = 0; i < (DEBOUNCE_CNT); i++) {
+ chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
+
+ if (chip->int_reg & XD_EXIST)
+ xd_cnt++;
+ else
+ xd_cnt = 0;
+
+ if (chip->int_reg & SD_EXIST)
+ sd_cnt++;
+ else
+ sd_cnt = 0;
+
+ if (chip->int_reg & MS_EXIST)
+ ms_cnt++;
+ else
+ ms_cnt = 0;
+
+ wait_timeout(30);
+ }
+
+ reset_map = 0;
+ if (!(chip->card_exist & XD_CARD) && (xd_cnt > (DEBOUNCE_CNT-1)))
+ reset_map |= XD_CARD;
+ if (!(chip->card_exist & SD_CARD) && (sd_cnt > (DEBOUNCE_CNT-1)))
+ reset_map |= SD_CARD;
+ if (!(chip->card_exist & MS_CARD) && (ms_cnt > (DEBOUNCE_CNT-1)))
+ reset_map |= MS_CARD;
+ }
+
+ if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN))
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0xC0, 0x00);
+
+ if (need_reset)
+ *need_reset = reset_map;
+ if (need_release)
+ *need_release = release_map;
+}
+#endif
+
+void rtsx_init_cards(struct rtsx_chip *chip)
+{
+ if (RTSX_TST_DELINK(chip) && (rtsx_get_stat(chip) != RTSX_STAT_SS)) {
+ RTSX_DEBUGP("Reset chip in polling thread!\n");
+ rtsx_reset_chip(chip);
+ RTSX_CLR_DELINK(chip);
+ }
+
+#ifdef DISABLE_CARD_INT
+ card_cd_debounce(chip, &(chip->need_reset), &(chip->need_release));
+#endif
+
+ if (chip->need_release) {
+ if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) {
+ if (chip->int_reg & XD_EXIST) {
+ clear_bit(SD_NR, &(chip->need_release));
+ clear_bit(MS_NR, &(chip->need_release));
+ }
+ }
+
+ if (!(chip->card_exist & SD_CARD) && !chip->sd_io)
+ clear_bit(SD_NR, &(chip->need_release));
+ if (!(chip->card_exist & XD_CARD))
+ clear_bit(XD_NR, &(chip->need_release));
+ if (!(chip->card_exist & MS_CARD))
+ clear_bit(MS_NR, &(chip->need_release));
+
+ RTSX_DEBUGP("chip->need_release = 0x%x\n", (unsigned int)(chip->need_release));
+
+#ifdef SUPPORT_OCP
+ if (chip->need_release) {
+ if (chip->ocp_stat & (CARD_OC_NOW | CARD_OC_EVER))
+ rtsx_write_register(chip, OCPCLR,
+ CARD_OC_INT_CLR | CARD_OC_CLR,
+ CARD_OC_INT_CLR | CARD_OC_CLR);
+ chip->ocp_stat = 0;
+ }
+#endif
+ if (chip->need_release) {
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ rtsx_force_power_on(chip, SSC_PDCTL | OC_PDCTL);
+ }
+
+ if (chip->need_release & SD_CARD) {
+ clear_bit(SD_NR, &(chip->need_release));
+ chip->card_exist &= ~SD_CARD;
+ chip->card_ejected &= ~SD_CARD;
+ chip->card_fail &= ~SD_CARD;
+ CLR_BIT(chip->lun_mc, chip->card2lun[SD_CARD]);
+ chip->rw_fail_cnt[chip->card2lun[SD_CARD]] = 0;
+ rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+
+ release_sdio(chip);
+ release_sd_card(chip);
+ }
+
+ if (chip->need_release & XD_CARD) {
+ clear_bit(XD_NR, &(chip->need_release));
+ chip->card_exist &= ~XD_CARD;
+ chip->card_ejected &= ~XD_CARD;
+ chip->card_fail &= ~XD_CARD;
+ CLR_BIT(chip->lun_mc, chip->card2lun[XD_CARD]);
+ chip->rw_fail_cnt[chip->card2lun[XD_CARD]] = 0;
+
+ release_xd_card(chip);
+
+ if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN))
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0xC0, 0xC0);
+ }
+
+ if (chip->need_release & MS_CARD) {
+ clear_bit(MS_NR, &(chip->need_release));
+ chip->card_exist &= ~MS_CARD;
+ chip->card_ejected &= ~MS_CARD;
+ chip->card_fail &= ~MS_CARD;
+ CLR_BIT(chip->lun_mc, chip->card2lun[MS_CARD]);
+ chip->rw_fail_cnt[chip->card2lun[MS_CARD]] = 0;
+
+ release_ms_card(chip);
+ }
+
+ RTSX_DEBUGP("chip->card_exist = 0x%x\n", chip->card_exist);
+
+ if (!chip->card_exist)
+ turn_off_led(chip, LED_GPIO);
+ }
+
+ if (chip->need_reset) {
+ RTSX_DEBUGP("chip->need_reset = 0x%x\n", (unsigned int)(chip->need_reset));
+
+ rtsx_reset_cards(chip);
+ }
+
+ if (chip->need_reinit) {
+ RTSX_DEBUGP("chip->need_reinit = 0x%x\n", (unsigned int)(chip->need_reinit));
+
+ rtsx_reinit_cards(chip, 0);
+ }
+}
+
+static inline u8 double_depth(u8 depth)
+{
+ return ((depth > 1) ? (depth - 1) : depth);
+}
+
+int switch_ssc_clock(struct rtsx_chip *chip, int clk)
+{
+ int retval;
+ u8 N = (u8)(clk - 2), min_N, max_N;
+ u8 mcu_cnt, div, max_div, ssc_depth, ssc_depth_mask;
+ int sd_vpclk_phase_reset = 0;
+
+ if (chip->cur_clk == clk)
+ return STATUS_SUCCESS;
+
+ min_N = 60;
+ max_N = 120;
+ max_div = CLK_DIV_4;
+
+ RTSX_DEBUGP("Switch SSC clock to %dMHz (cur_clk = %d)\n", clk, chip->cur_clk);
+
+ if ((clk <= 2) || (N > max_N))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ mcu_cnt = (u8)(125/clk + 3);
+ if (mcu_cnt > 7)
+ mcu_cnt = 7;
+
+ div = CLK_DIV_1;
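+ /* Scale N up into [min_N, max_N] by doubling it while bumping the
+ * divider; the effective card clock, (N + 2) / 2^(div - 1) MHz,
+ * stays the same.
+ */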
+ while ((N < min_N) && (div < max_div)) {
+ N = (N + 2) * 2 - 2;
+ div++;
+ }
+ RTSX_DEBUGP("N = %d, div = %d\n", N, div);
+
+ if (chip->ssc_en) {
+ ssc_depth = 0x01;
+ N -= 2;
+ } else {
+ ssc_depth = 0;
+ }
+
+ ssc_depth_mask = 0x03;
+
+ RTSX_DEBUGP("ssc_depth = %d\n", ssc_depth);
+
+ rtsx_init_cmd(chip);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CLK_DIV, 0xFF, (div << 4) | mcu_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL2, ssc_depth_mask, ssc_depth);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, N);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+ if (sd_vpclk_phase_reset) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
+ }
+
+ retval = rtsx_send_cmd(chip, 0, WAIT_TIME);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ udelay(10);
+ RTSX_WRITE_REG(chip, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ chip->cur_clk = clk;
+
+ return STATUS_SUCCESS;
+}
+
+int switch_normal_clock(struct rtsx_chip *chip, int clk)
+{
+ u8 sel, div, mcu_cnt;
+ int sd_vpclk_phase_reset = 0;
+
+ if (chip->cur_clk == clk)
+ return STATUS_SUCCESS;
+
+ switch (clk) {
+ case CLK_20:
+ RTSX_DEBUGP("Switch clock to 20MHz\n");
+ sel = SSC_80;
+ div = CLK_DIV_4;
+ mcu_cnt = 7;
+ break;
+
+ case CLK_30:
+ RTSX_DEBUGP("Switch clock to 30MHz\n");
+ sel = SSC_120;
+ div = CLK_DIV_4;
+ mcu_cnt = 7;
+ break;
+
+ case CLK_40:
+ RTSX_DEBUGP("Switch clock to 40MHz\n");
+ sel = SSC_80;
+ div = CLK_DIV_2;
+ mcu_cnt = 7;
+ break;
+
+ case CLK_50:
+ RTSX_DEBUGP("Switch clock to 50MHz\n");
+ sel = SSC_100;
+ div = CLK_DIV_2;
+ mcu_cnt = 6;
+ break;
+
+ case CLK_60:
+ RTSX_DEBUGP("Switch clock to 60MHz\n");
+ sel = SSC_120;
+ div = CLK_DIV_2;
+ mcu_cnt = 6;
+ break;
+
+ case CLK_80:
+ RTSX_DEBUGP("Switch clock to 80MHz\n");
+ sel = SSC_80;
+ div = CLK_DIV_1;
+ mcu_cnt = 5;
+ break;
+
+ case CLK_100:
+ RTSX_DEBUGP("Switch clock to 100MHz\n");
+ sel = SSC_100;
+ div = CLK_DIV_1;
+ mcu_cnt = 5;
+ break;
+
+ case CLK_120:
+ RTSX_DEBUGP("Switch clock to 120MHz\n");
+ sel = SSC_120;
+ div = CLK_DIV_1;
+ mcu_cnt = 5;
+ break;
+
+ case CLK_150:
+ RTSX_DEBUGP("Switch clock to 150MHz\n");
+ sel = SSC_150;
+ div = CLK_DIV_1;
+ mcu_cnt = 4;
+ break;
+
+ case CLK_200:
+ RTSX_DEBUGP("Switch clock to 200MHz\n");
+ sel = SSC_200;
+ div = CLK_DIV_1;
+ mcu_cnt = 4;
+ break;
+
+ default:
+ RTSX_DEBUGP("Try to switch to an illegal clock (%d)\n", clk);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_WRITE_REG(chip, CLK_CTL, 0xFF, CLK_LOW_FREQ);
+ if (sd_vpclk_phase_reset) {
+ RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ RTSX_WRITE_REG(chip, SD_VPCLK1_CTL, PHASE_NOT_RESET, 0);
+ }
+ RTSX_WRITE_REG(chip, CLK_DIV, 0xFF, (div << 4) | mcu_cnt);
+ RTSX_WRITE_REG(chip, CLK_SEL, 0xFF, sel);
+
+ if (sd_vpclk_phase_reset) {
+ udelay(200);
+ RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
+ RTSX_WRITE_REG(chip, SD_VPCLK1_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
+ udelay(200);
+ }
+ RTSX_WRITE_REG(chip, CLK_CTL, 0xFF, 0);
+
+ chip->cur_clk = clk;
+
+ return STATUS_SUCCESS;
+}
+
+void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size)
+{
+ if (pack_size > DMA_1024)
+ pack_size = DMA_512;
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, IRQSTAT0, DMA_DONE_INT, DMA_DONE_INT);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMATC3, 0xFF, (u8)(byte_cnt >> 24));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMATC2, 0xFF, (u8)(byte_cnt >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMATC1, 0xFF, (u8)(byte_cnt >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMATC0, 0xFF, (u8)byte_cnt);
+
+ if (dir == DMA_FROM_DEVICE) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL, 0x03 | DMA_PACK_SIZE_MASK,
+ DMA_DIR_FROM_CARD | DMA_EN | pack_size);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL, 0x03 | DMA_PACK_SIZE_MASK,
+ DMA_DIR_TO_CARD | DMA_EN | pack_size);
+ }
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+}
+
+int enable_card_clock(struct rtsx_chip *chip, u8 card)
+{
+ u8 clk_en = 0;
+
+ if (card & XD_CARD)
+ clk_en |= XD_CLK_EN;
+ if (card & SD_CARD)
+ clk_en |= SD_CLK_EN;
+ if (card & MS_CARD)
+ clk_en |= MS_CLK_EN;
+
+ RTSX_WRITE_REG(chip, CARD_CLK_EN, clk_en, clk_en);
+
+ return STATUS_SUCCESS;
+}
+
+int disable_card_clock(struct rtsx_chip *chip, u8 card)
+{
+ u8 clk_en = 0;
+
+ if (card & XD_CARD)
+ clk_en |= XD_CLK_EN;
+ if (card & SD_CARD)
+ clk_en |= SD_CLK_EN;
+ if (card & MS_CARD)
+ clk_en |= MS_CLK_EN;
+
+ RTSX_WRITE_REG(chip, CARD_CLK_EN, clk_en, 0);
+
+ return STATUS_SUCCESS;
+}
+
+int card_power_on(struct rtsx_chip *chip, u8 card)
+{
+ int retval;
+ u8 mask, val1, val2;
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN) && (card == MS_CARD)) {
+ mask = MS_POWER_MASK;
+ val1 = MS_PARTIAL_POWER_ON;
+ val2 = MS_POWER_ON;
+ } else {
+ mask = SD_POWER_MASK;
+ val1 = SD_PARTIAL_POWER_ON;
+ val2 = SD_POWER_ON;
+ }
+
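+ /* Power up in two steps: partial power first, wait
+ * pmos_pwr_on_interval microseconds for the supply to ramp,
+ * then switch to full power.
+ */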
+ rtsx_init_cmd(chip);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, mask, val1);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ udelay(chip->pmos_pwr_on_interval);
+
+ rtsx_init_cmd(chip);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, mask, val2);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int card_power_off(struct rtsx_chip *chip, u8 card)
+{
+ u8 mask, val;
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN) && (card == MS_CARD)) {
+ mask = MS_POWER_MASK;
+ val = MS_POWER_OFF;
+ } else {
+ mask = SD_POWER_MASK;
+ val = SD_POWER_OFF;
+ }
+
+ RTSX_WRITE_REG(chip, CARD_PWR_CTL, mask, val);
+
+ return STATUS_SUCCESS;
+}
+
+int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 sec_addr, u16 sec_cnt)
+{
+ int retval;
+ unsigned int lun = SCSI_LUN(srb);
+ int i;
+
+ if (chip->rw_card[lun] == NULL)
+ TRACE_RET(chip, STATUS_FAIL);
+
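+ /* Retry a failed transfer up to three times, giving up early if the
+ * chip has vanished, the card was removed, or no retry is needed.
+ */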
+ for (i = 0; i < 3; i++) {
+ chip->rw_need_retry = 0;
+
+ retval = chip->rw_card[lun](srb, chip, sec_addr, sec_cnt);
+ if (retval != STATUS_SUCCESS) {
+ if (rtsx_check_chip_exist(chip) != STATUS_SUCCESS) {
+ rtsx_release_chip(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (detect_card_cd(chip, chip->cur_card) != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!chip->rw_need_retry) {
+ RTSX_DEBUGP("RW fail, but no need to retry\n");
+ break;
+ }
+ } else {
+ chip->rw_need_retry = 0;
+ break;
+ }
+
+ RTSX_DEBUGP("Retry RW, (i = %d)\n", i);
+ }
+
+ return retval;
+}
+
+int card_share_mode(struct rtsx_chip *chip, int card)
+{
+ u8 mask, value;
+
+ if (CHECK_PID(chip, 0x5208)) {
+ mask = CARD_SHARE_MASK;
+ if (card == SD_CARD)
+ value = CARD_SHARE_48_SD;
+ else if (card == MS_CARD)
+ value = CARD_SHARE_48_MS;
+ else if (card == XD_CARD)
+ value = CARD_SHARE_48_XD;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+
+ } else if (CHECK_PID(chip, 0x5288)) {
+ mask = 0x03;
+ if (card == SD_CARD)
+ value = CARD_SHARE_BAROSSA_SD;
+ else if (card == MS_CARD)
+ value = CARD_SHARE_BAROSSA_MS;
+ else if (card == XD_CARD)
+ value = CARD_SHARE_BAROSSA_XD;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_WRITE_REG(chip, CARD_SHARE_MODE, mask, value);
+
+ return STATUS_SUCCESS;
+}
+
+
+int select_card(struct rtsx_chip *chip, int card)
+{
+ int retval;
+
+ if (chip->cur_card != card) {
+ u8 mod;
+
+ if (card == SD_CARD)
+ mod = SD_MOD_SEL;
+ else if (card == MS_CARD)
+ mod = MS_MOD_SEL;
+ else if (card == XD_CARD)
+ mod = XD_MOD_SEL;
+ else if (card == SPI_CARD)
+ mod = SPI_MOD_SEL;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_SELECT, 0x07, mod);
+ chip->cur_card = card;
+
+ retval = card_share_mode(chip, card);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+void toggle_gpio(struct rtsx_chip *chip, u8 gpio)
+{
+ u8 temp_reg;
+
+ rtsx_read_register(chip, CARD_GPIO, &temp_reg);
+ temp_reg ^= (0x01 << gpio);
+ rtsx_write_register(chip, CARD_GPIO, 0xFF, temp_reg);
+}
+
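+/* The LED GPIO is active-high on the 5288 and active-low on other chips */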
+void turn_on_led(struct rtsx_chip *chip, u8 gpio)
+{
+ if (CHECK_PID(chip, 0x5288))
+ rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), (u8)(1 << gpio));
+ else
+ rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
+}
+
+void turn_off_led(struct rtsx_chip *chip, u8 gpio)
+{
+ if (CHECK_PID(chip, 0x5288))
+ rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
+ else
+ rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), (u8)(1 << gpio));
+}
+
+int detect_card_cd(struct rtsx_chip *chip, int card)
+{
+ u32 card_cd, status;
+
+ if (card == SD_CARD) {
+ card_cd = SD_EXIST;
+ } else if (card == MS_CARD) {
+ card_cd = MS_EXIST;
+ } else if (card == XD_CARD) {
+ card_cd = XD_EXIST;
+ } else {
+ RTSX_DEBUGP("Wrong card type: 0x%x\n", card);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ status = rtsx_readl(chip, RTSX_BIPR);
+ if (!(status & card_cd))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int check_card_exist(struct rtsx_chip *chip, unsigned int lun)
+{
+ if (chip->card_exist & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+int check_card_ready(struct rtsx_chip *chip, unsigned int lun)
+{
+ if (chip->card_ready & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+int check_card_wp(struct rtsx_chip *chip, unsigned int lun)
+{
+ if (chip->card_wp & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+int check_card_fail(struct rtsx_chip *chip, unsigned int lun)
+{
+ if (chip->card_fail & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+int check_card_ejected(struct rtsx_chip *chip, unsigned int lun)
+{
+ if (chip->card_ejected & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun)
+{
+ if ((chip->card_ready & chip->lun2card[lun]) == XD_CARD)
+ return (u8)XD_CARD;
+ else if ((chip->card_ready & chip->lun2card[lun]) == SD_CARD)
+ return (u8)SD_CARD;
+ else if ((chip->card_ready & chip->lun2card[lun]) == MS_CARD)
+ return (u8)MS_CARD;
+
+ return 0;
+}
+
+void eject_card(struct rtsx_chip *chip, unsigned int lun)
+{
+ do_remaining_work(chip);
+
+ if ((chip->card_ready & chip->lun2card[lun]) == SD_CARD) {
+ release_sd_card(chip);
+ chip->card_ejected |= SD_CARD;
+ chip->card_ready &= ~SD_CARD;
+ chip->capacity[lun] = 0;
+ } else if ((chip->card_ready & chip->lun2card[lun]) == XD_CARD) {
+ release_xd_card(chip);
+ chip->card_ejected |= XD_CARD;
+ chip->card_ready &= ~XD_CARD;
+ chip->capacity[lun] = 0;
+ } else if ((chip->card_ready & chip->lun2card[lun]) == MS_CARD) {
+ release_ms_card(chip);
+ chip->card_ejected |= MS_CARD;
+ chip->card_ready &= ~MS_CARD;
+ chip->capacity[lun] = 0;
+ }
+}
diff --git a/drivers/staging/rts5208/rtsx_card.h b/drivers/staging/rts5208/rtsx_card.h
new file mode 100644
index 0000000..4528b61
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_card.h
@@ -0,0 +1,1098 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_CARD_H
+#define __REALTEK_RTSX_CARD_H
+
+#include "debug.h"
+#include "rtsx.h"
+#include "rtsx_chip.h"
+#include "rtsx_transport.h"
+#include "sd.h"
+
+#define SSC_POWER_DOWN 0x01
+#define SD_OC_POWER_DOWN 0x02
+#define MS_OC_POWER_DOWN 0x04
+#define ALL_POWER_DOWN 0x07
+#define OC_POWER_DOWN 0x06
+
+#define PMOS_STRG_MASK 0x10
+#define PMOS_STRG_800mA 0x10
+#define PMOS_STRG_400mA 0x00
+
+#define POWER_OFF 0x03
+#define PARTIAL_POWER_ON 0x01
+#define POWER_ON 0x00
+
+#define MS_POWER_OFF 0x0C
+#define MS_PARTIAL_POWER_ON 0x04
+#define MS_POWER_ON 0x00
+#define MS_POWER_MASK 0x0C
+
+#define SD_POWER_OFF 0x03
+#define SD_PARTIAL_POWER_ON 0x01
+#define SD_POWER_ON 0x00
+#define SD_POWER_MASK 0x03
+
+#define XD_OUTPUT_EN 0x02
+#define SD_OUTPUT_EN 0x04
+#define MS_OUTPUT_EN 0x08
+#define SPI_OUTPUT_EN 0x10
+
+#define CLK_LOW_FREQ 0x01
+
+#define CLK_DIV_1 0x01
+#define CLK_DIV_2 0x02
+#define CLK_DIV_4 0x03
+#define CLK_DIV_8 0x04
+
+#define SSC_80 0
+#define SSC_100 1
+#define SSC_120 2
+#define SSC_150 3
+#define SSC_200 4
+
+#define XD_CLK_EN 0x02
+#define SD_CLK_EN 0x04
+#define MS_CLK_EN 0x08
+#define SPI_CLK_EN 0x10
+
+#define XD_MOD_SEL 1
+#define SD_MOD_SEL 2
+#define MS_MOD_SEL 3
+#define SPI_MOD_SEL 4
+
+#define CHANGE_CLK 0x01
+
+#define SD_CRC7_ERR 0x80
+#define SD_CRC16_ERR 0x40
+#define SD_CRC_WRITE_ERR 0x20
+#define SD_CRC_WRITE_ERR_MASK 0x1C
+#define GET_CRC_TIME_OUT 0x02
+#define SD_TUNING_COMPARE_ERR 0x01
+
+#define SD_RSP_80CLK_TIMEOUT 0x01
+
+#define SD_CLK_TOGGLE_EN 0x80
+#define SD_CLK_FORCE_STOP 0x40
+#define SD_DAT3_STATUS 0x10
+#define SD_DAT2_STATUS 0x08
+#define SD_DAT1_STATUS 0x04
+#define SD_DAT0_STATUS 0x02
+#define SD_CMD_STATUS 0x01
+
+#define SD_IO_USING_1V8 0x80
+#define SD_IO_USING_3V3 0x7F
+#define TYPE_A_DRIVING 0x00
+#define TYPE_B_DRIVING 0x01
+#define TYPE_C_DRIVING 0x02
+#define TYPE_D_DRIVING 0x03
+
+#define DDR_FIX_RX_DAT 0x00
+#define DDR_VAR_RX_DAT 0x80
+#define DDR_FIX_RX_DAT_EDGE 0x00
+#define DDR_FIX_RX_DAT_14_DELAY 0x40
+#define DDR_FIX_RX_CMD 0x00
+#define DDR_VAR_RX_CMD 0x20
+#define DDR_FIX_RX_CMD_POS_EDGE 0x00
+#define DDR_FIX_RX_CMD_14_DELAY 0x10
+#define SD20_RX_POS_EDGE 0x00
+#define SD20_RX_14_DELAY 0x08
+#define SD20_RX_SEL_MASK 0x08
+
+#define DDR_FIX_TX_CMD_DAT 0x00
+#define DDR_VAR_TX_CMD_DAT 0x80
+#define DDR_FIX_TX_DAT_14_TSU 0x00
+#define DDR_FIX_TX_DAT_12_TSU 0x40
+#define DDR_FIX_TX_CMD_NEG_EDGE 0x00
+#define DDR_FIX_TX_CMD_14_AHEAD 0x20
+#define SD20_TX_NEG_EDGE 0x00
+#define SD20_TX_14_AHEAD 0x10
+#define SD20_TX_SEL_MASK 0x10
+#define DDR_VAR_SDCLK_POL_SWAP 0x01
+
+#define SD_TRANSFER_START 0x80
+#define SD_TRANSFER_END 0x40
+#define SD_STAT_IDLE 0x20
+#define SD_TRANSFER_ERR 0x10
+#define SD_TM_NORMAL_WRITE 0x00
+#define SD_TM_AUTO_WRITE_3 0x01
+#define SD_TM_AUTO_WRITE_4 0x02
+#define SD_TM_AUTO_READ_3 0x05
+#define SD_TM_AUTO_READ_4 0x06
+#define SD_TM_CMD_RSP 0x08
+#define SD_TM_AUTO_WRITE_1 0x09
+#define SD_TM_AUTO_WRITE_2 0x0A
+#define SD_TM_NORMAL_READ 0x0C
+#define SD_TM_AUTO_READ_1 0x0D
+#define SD_TM_AUTO_READ_2 0x0E
+#define SD_TM_AUTO_TUNING 0x0F
+
+#define PHASE_CHANGE 0x80
+#define PHASE_NOT_RESET 0x40
+
+#define DCMPS_CHANGE 0x80
+#define DCMPS_CHANGE_DONE 0x40
+#define DCMPS_ERROR 0x20
+#define DCMPS_CURRENT_PHASE 0x1F
+
+#define SD_CLK_DIVIDE_0 0x00
+#define SD_CLK_DIVIDE_256 0xC0
+#define SD_CLK_DIVIDE_128 0x80
+#define SD_BUS_WIDTH_1 0x00
+#define SD_BUS_WIDTH_4 0x01
+#define SD_BUS_WIDTH_8 0x02
+#define SD_ASYNC_FIFO_NOT_RST 0x10
+#define SD_20_MODE 0x00
+#define SD_DDR_MODE 0x04
+#define SD_30_MODE 0x08
+
+#define SD_CLK_DIVIDE_MASK 0xC0
+
+#define SD_CMD_IDLE 0x80
+
+#define SD_DATA_IDLE 0x80
+
+#define DCM_RESET 0x08
+#define DCM_LOCKED 0x04
+#define DCM_208M 0x00
+#define DCM_TX 0x01
+#define DCM_RX 0x02
+
+#define DRP_START 0x80
+#define DRP_DONE 0x40
+
+#define DRP_WRITE 0x80
+#define DRP_READ 0x00
+#define DCM_WRITE_ADDRESS_50 0x50
+#define DCM_WRITE_ADDRESS_51 0x51
+#define DCM_READ_ADDRESS_00 0x00
+#define DCM_READ_ADDRESS_51 0x51
+
+#define SD_CALCULATE_CRC7 0x00
+#define SD_NO_CALCULATE_CRC7 0x80
+#define SD_CHECK_CRC16 0x00
+#define SD_NO_CHECK_CRC16 0x40
+#define SD_NO_CHECK_WAIT_CRC_TO 0x20
+#define SD_WAIT_BUSY_END 0x08
+#define SD_NO_WAIT_BUSY_END 0x00
+#define SD_CHECK_CRC7 0x00
+#define SD_NO_CHECK_CRC7 0x04
+#define SD_RSP_LEN_0 0x00
+#define SD_RSP_LEN_6 0x01
+#define SD_RSP_LEN_17 0x02
+#define SD_RSP_TYPE_R0 0x04
+#define SD_RSP_TYPE_R1 0x01
+#define SD_RSP_TYPE_R1b 0x09
+#define SD_RSP_TYPE_R2 0x02
+#define SD_RSP_TYPE_R3 0x05
+#define SD_RSP_TYPE_R4 0x05
+#define SD_RSP_TYPE_R5 0x01
+#define SD_RSP_TYPE_R6 0x01
+#define SD_RSP_TYPE_R7 0x01
+
+#define SD_RSP_80CLK_TIMEOUT_EN 0x01
+
+#define SAMPLE_TIME_RISING 0x00
+#define SAMPLE_TIME_FALLING 0x80
+#define PUSH_TIME_DEFAULT 0x00
+#define PUSH_TIME_ODD 0x40
+#define NO_EXTEND_TOGGLE 0x00
+#define EXTEND_TOGGLE_CHK 0x20
+#define MS_BUS_WIDTH_1 0x00
+#define MS_BUS_WIDTH_4 0x10
+#define MS_BUS_WIDTH_8 0x18
+#define MS_2K_SECTOR_MODE 0x04
+#define MS_512_SECTOR_MODE 0x00
+#define MS_TOGGLE_TIMEOUT_EN 0x00
+#define MS_TOGGLE_TIMEOUT_DISEN 0x01
+#define MS_NO_CHECK_INT 0x02
+
+#define WAIT_INT 0x80
+#define NO_WAIT_INT 0x00
+#define NO_AUTO_READ_INT_REG 0x00
+#define AUTO_READ_INT_REG 0x40
+#define MS_CRC16_ERR 0x20
+#define MS_RDY_TIMEOUT 0x10
+#define MS_INT_CMDNK 0x08
+#define MS_INT_BREQ 0x04
+#define MS_INT_ERR 0x02
+#define MS_INT_CED 0x01
+
+#define MS_TRANSFER_START 0x80
+#define MS_TRANSFER_END 0x40
+#define MS_TRANSFER_ERR 0x20
+#define MS_BS_STATE 0x10
+#define MS_TM_READ_BYTES 0x00
+#define MS_TM_NORMAL_READ 0x01
+#define MS_TM_WRITE_BYTES 0x04
+#define MS_TM_NORMAL_WRITE 0x05
+#define MS_TM_AUTO_READ 0x08
+#define MS_TM_AUTO_WRITE 0x0C
+
+#define CARD_SHARE_MASK 0x0F
+#define CARD_SHARE_MULTI_LUN 0x00
+#define CARD_SHARE_NORMAL 0x00
+#define CARD_SHARE_48_XD 0x02
+#define CARD_SHARE_48_SD 0x04
+#define CARD_SHARE_48_MS 0x08
+#define CARD_SHARE_BAROSSA_XD 0x00
+#define CARD_SHARE_BAROSSA_SD 0x01
+#define CARD_SHARE_BAROSSA_MS 0x02
+
+#define MS_DRIVE_8 0x00
+#define MS_DRIVE_4 0x40
+#define MS_DRIVE_12 0x80
+#define SD_DRIVE_8 0x00
+#define SD_DRIVE_4 0x10
+#define SD_DRIVE_12 0x20
+#define XD_DRIVE_8 0x00
+#define XD_DRIVE_4 0x04
+#define XD_DRIVE_12 0x08
+
+#define SPI_STOP 0x01
+#define XD_STOP 0x02
+#define SD_STOP 0x04
+#define MS_STOP 0x08
+#define SPI_CLR_ERR 0x10
+#define XD_CLR_ERR 0x20
+#define SD_CLR_ERR 0x40
+#define MS_CLR_ERR 0x80
+
+#define CRC_FIX_CLK (0x00 << 0)
+#define CRC_VAR_CLK0 (0x01 << 0)
+#define CRC_VAR_CLK1 (0x02 << 0)
+#define SD30_FIX_CLK (0x00 << 2)
+#define SD30_VAR_CLK0 (0x01 << 2)
+#define SD30_VAR_CLK1 (0x02 << 2)
+#define SAMPLE_FIX_CLK (0x00 << 4)
+#define SAMPLE_VAR_CLK0 (0x01 << 4)
+#define SAMPLE_VAR_CLK1 (0x02 << 4)
+
+#define SDIO_VER_20 0x80
+#define SDIO_VER_10 0x00
+#define SDIO_VER_CHG 0x40
+#define SDIO_BUS_AUTO_SWITCH 0x10
+
+#define PINGPONG_BUFFER 0x01
+#define RING_BUFFER 0x00
+
+#define RB_FLUSH 0x80
+
+#define DMA_DONE_INT_EN 0x80
+#define SUSPEND_INT_EN 0x40
+#define LINK_RDY_INT_EN 0x20
+#define LINK_DOWN_INT_EN 0x10
+
+#define DMA_DONE_INT 0x80
+#define SUSPEND_INT 0x40
+#define LINK_RDY_INT 0x20
+#define LINK_DOWN_INT 0x10
+
+#define MRD_ERR_INT_EN 0x40
+#define MWR_ERR_INT_EN 0x20
+#define SCSI_CMD_INT_EN 0x10
+#define TLP_RCV_INT_EN 0x08
+#define TLP_TRSMT_INT_EN 0x04
+#define MRD_COMPLETE_INT_EN 0x02
+#define MWR_COMPLETE_INT_EN 0x01
+
+#define MRD_ERR_INT 0x40
+#define MWR_ERR_INT 0x20
+#define SCSI_CMD_INT 0x10
+#define TLP_RX_INT 0x08
+#define TLP_TX_INT 0x04
+#define MRD_COMPLETE_INT 0x02
+#define MWR_COMPLETE_INT 0x01
+
+#define MSG_RX_INT_EN 0x08
+#define MRD_RX_INT_EN 0x04
+#define MWR_RX_INT_EN 0x02
+#define CPLD_RX_INT_EN 0x01
+
+#define MSG_RX_INT 0x08
+#define MRD_RX_INT 0x04
+#define MWR_RX_INT 0x02
+#define CPLD_RX_INT 0x01
+
+#define MSG_TX_INT_EN 0x08
+#define MRD_TX_INT_EN 0x04
+#define MWR_TX_INT_EN 0x02
+#define CPLD_TX_INT_EN 0x01
+
+#define MSG_TX_INT 0x08
+#define MRD_TX_INT 0x04
+#define MWR_TX_INT 0x02
+#define CPLD_TX_INT 0x01
+
+#define DMA_RST 0x80
+#define DMA_BUSY 0x04
+#define DMA_DIR_TO_CARD 0x00
+#define DMA_DIR_FROM_CARD 0x02
+#define DMA_EN 0x01
+#define DMA_128 (0 << 4)
+#define DMA_256 (1 << 4)
+#define DMA_512 (2 << 4)
+#define DMA_1024 (3 << 4)
+#define DMA_PACK_SIZE_MASK 0x30
+
+#define XD_PWR_OFF_DELAY0 0x00
+#define XD_PWR_OFF_DELAY1 0x02
+#define XD_PWR_OFF_DELAY2 0x04
+#define XD_PWR_OFF_DELAY3 0x06
+#define XD_AUTO_PWR_OFF_EN 0xF7
+#define XD_NO_AUTO_PWR_OFF 0x08
+
+#define XD_TIME_RWN_1 0x00
+#define XD_TIME_RWN_STEP 0x20
+#define XD_TIME_RW_1 0x00
+#define XD_TIME_RW_STEP 0x04
+#define XD_TIME_SETUP_1 0x00
+#define XD_TIME_SETUP_STEP 0x01
+
+#define XD_ECC2_UNCORRECTABLE 0x80
+#define XD_ECC2_ERROR 0x40
+#define XD_ECC1_UNCORRECTABLE 0x20
+#define XD_ECC1_ERROR 0x10
+#define XD_RDY 0x04
+#define XD_CE_EN 0xFD
+#define XD_CE_DISEN 0x02
+#define XD_WP_EN 0xFE
+#define XD_WP_DISEN 0x01
+
+#define XD_TRANSFER_START 0x80
+#define XD_TRANSFER_END 0x40
+#define XD_PPB_EMPTY 0x20
+#define XD_RESET 0x00
+#define XD_ERASE 0x01
+#define XD_READ_STATUS 0x02
+#define XD_READ_ID 0x03
+#define XD_READ_REDUNDANT 0x04
+#define XD_READ_PAGES 0x05
+#define XD_SET_CMD 0x06
+#define XD_NORMAL_READ 0x07
+#define XD_WRITE_PAGES 0x08
+#define XD_NORMAL_WRITE 0x09
+#define XD_WRITE_REDUNDANT 0x0A
+#define XD_SET_ADDR 0x0B
+
+#define XD_PPB_TO_SIE 0x80
+#define XD_TO_PPB_ONLY 0x00
+#define XD_BA_TRANSFORM 0x40
+#define XD_BA_NO_TRANSFORM 0x00
+#define XD_NO_CALC_ECC 0x20
+#define XD_CALC_ECC 0x00
+#define XD_IGNORE_ECC 0x10
+#define XD_CHECK_ECC 0x00
+#define XD_DIRECT_TO_RB 0x08
+#define XD_ADDR_LENGTH_0 0x00
+#define XD_ADDR_LENGTH_1 0x01
+#define XD_ADDR_LENGTH_2 0x02
+#define XD_ADDR_LENGTH_3 0x03
+#define XD_ADDR_LENGTH_4 0x04
+
+#define XD_GPG 0xFF
+#define XD_BPG 0x00
+
+#define XD_GBLK 0xFF
+#define XD_LATER_BBLK 0xF0
+
+#define XD_ECC2_ALL1 0x80
+#define XD_ECC1_ALL1 0x40
+#define XD_BA2_ALL0 0x20
+#define XD_BA1_ALL0 0x10
+#define XD_BA1_BA2_EQL 0x04
+#define XD_BA2_VALID 0x02
+#define XD_BA1_VALID 0x01
+
+#define XD_PGSTS_ZEROBIT_OVER4 0x00
+#define XD_PGSTS_NOT_FF 0x02
+#define XD_AUTO_CHK_DATA_STATUS 0x01
+
+#define RSTB_MODE_DETECT 0x80
+#define MODE_OUT_VLD 0x40
+#define MODE_OUT_0_NONE 0x00
+#define MODE_OUT_10_NONE 0x04
+#define MODE_OUT_10_47 0x05
+#define MODE_OUT_10_180 0x06
+#define MODE_OUT_10_680 0x07
+#define MODE_OUT_16_NONE 0x08
+#define MODE_OUT_16_47 0x09
+#define MODE_OUT_16_180 0x0A
+#define MODE_OUT_16_680 0x0B
+#define MODE_OUT_NONE_NONE 0x0C
+#define MODE_OUT_NONE_47 0x0D
+#define MODE_OUT_NONE_180 0x0E
+#define MODE_OUT_NONE_680 0x0F
+
+#define CARD_OC_INT_EN 0x20
+#define CARD_DETECT_EN 0x08
+
+#define MS_DETECT_EN 0x80
+#define MS_OCP_INT_EN 0x40
+#define MS_OCP_INT_CLR 0x20
+#define MS_OC_CLR 0x10
+#define SD_DETECT_EN 0x08
+#define SD_OCP_INT_EN 0x04
+#define SD_OCP_INT_CLR 0x02
+#define SD_OC_CLR 0x01
+
+#define CARD_OCP_DETECT 0x80
+#define CARD_OC_NOW 0x08
+#define CARD_OC_EVER 0x04
+
+#define MS_OCP_DETECT 0x80
+#define MS_OC_NOW 0x40
+#define MS_OC_EVER 0x20
+#define SD_OCP_DETECT 0x08
+#define SD_OC_NOW 0x04
+#define SD_OC_EVER 0x02
+
+#define CARD_OC_INT_CLR 0x08
+#define CARD_OC_CLR 0x02
+
+#define SD_OCP_GLITCH_MASK 0x07
+#define SD_OCP_GLITCH_6_4 0x00
+#define SD_OCP_GLITCH_64 0x01
+#define SD_OCP_GLITCH_640 0x02
+#define SD_OCP_GLITCH_1000 0x03
+#define SD_OCP_GLITCH_2000 0x04
+#define SD_OCP_GLITCH_4000 0x05
+#define SD_OCP_GLITCH_8000 0x06
+#define SD_OCP_GLITCH_10000 0x07
+
+#define MS_OCP_GLITCH_MASK 0x70
+#define MS_OCP_GLITCH_6_4 (0x00 << 4)
+#define MS_OCP_GLITCH_64 (0x01 << 4)
+#define MS_OCP_GLITCH_640 (0x02 << 4)
+#define MS_OCP_GLITCH_1000 (0x03 << 4)
+#define MS_OCP_GLITCH_2000 (0x04 << 4)
+#define MS_OCP_GLITCH_4000 (0x05 << 4)
+#define MS_OCP_GLITCH_8000 (0x06 << 4)
+#define MS_OCP_GLITCH_10000 (0x07 << 4)
+
+#define OCP_TIME_60 0x00
+#define OCP_TIME_100 (0x01 << 3)
+#define OCP_TIME_200 (0x02 << 3)
+#define OCP_TIME_400 (0x03 << 3)
+#define OCP_TIME_600 (0x04 << 3)
+#define OCP_TIME_800 (0x05 << 3)
+#define OCP_TIME_1100 (0x06 << 3)
+#define OCP_TIME_MASK 0x38
+
+#define MS_OCP_TIME_60 0x00
+#define MS_OCP_TIME_100 (0x01 << 4)
+#define MS_OCP_TIME_200 (0x02 << 4)
+#define MS_OCP_TIME_400 (0x03 << 4)
+#define MS_OCP_TIME_600 (0x04 << 4)
+#define MS_OCP_TIME_800 (0x05 << 4)
+#define MS_OCP_TIME_1100 (0x06 << 4)
+#define MS_OCP_TIME_MASK 0x70
+
+#define SD_OCP_TIME_60 0x00
+#define SD_OCP_TIME_100 0x01
+#define SD_OCP_TIME_200 0x02
+#define SD_OCP_TIME_400 0x03
+#define SD_OCP_TIME_600 0x04
+#define SD_OCP_TIME_800 0x05
+#define SD_OCP_TIME_1100 0x06
+#define SD_OCP_TIME_MASK 0x07
+
+#define OCP_THD_315_417 0x00
+#define OCP_THD_283_783 (0x01 << 6)
+#define OCP_THD_244_946 (0x02 << 6)
+#define OCP_THD_191_1080 (0x03 << 6)
+#define OCP_THD_MASK 0xC0
+
+#define MS_OCP_THD_450 0x00
+#define MS_OCP_THD_550 (0x01 << 4)
+#define MS_OCP_THD_650 (0x02 << 4)
+#define MS_OCP_THD_750 (0x03 << 4)
+#define MS_OCP_THD_850 (0x04 << 4)
+#define MS_OCP_THD_950 (0x05 << 4)
+#define MS_OCP_THD_1050 (0x06 << 4)
+#define MS_OCP_THD_1150 (0x07 << 4)
+#define MS_OCP_THD_MASK 0x70
+
+#define SD_OCP_THD_450 0x00
+#define SD_OCP_THD_550 0x01
+#define SD_OCP_THD_650 0x02
+#define SD_OCP_THD_750 0x03
+#define SD_OCP_THD_850 0x04
+#define SD_OCP_THD_950 0x05
+#define SD_OCP_THD_1050 0x06
+#define SD_OCP_THD_1150 0x07
+#define SD_OCP_THD_MASK 0x07
+
+#define FPGA_MS_PULL_CTL_EN 0xEF
+#define FPGA_SD_PULL_CTL_EN 0xF7
+#define FPGA_XD_PULL_CTL_EN1 0xFE
+#define FPGA_XD_PULL_CTL_EN2 0xFD
+#define FPGA_XD_PULL_CTL_EN3 0xFB
+
+#define FPGA_MS_PULL_CTL_BIT 0x10
+#define FPGA_SD_PULL_CTL_BIT 0x08
+
+#define BLINK_EN 0x08
+#define LED_GPIO0 (0 << 4)
+#define LED_GPIO1 (1 << 4)
+#define LED_GPIO2 (2 << 4)
+
+#define SDIO_BUS_CTRL 0x01
+#define SDIO_CD_CTRL 0x02
+
+#define SSC_RSTB 0x80
+#define SSC_8X_EN 0x40
+#define SSC_FIX_FRAC 0x20
+#define SSC_SEL_1M 0x00
+#define SSC_SEL_2M 0x08
+#define SSC_SEL_4M 0x10
+#define SSC_SEL_8M 0x18
+
+#define SSC_DEPTH_MASK 0x07
+#define SSC_DEPTH_DISALBE 0x00
+#define SSC_DEPTH_4M 0x01
+#define SSC_DEPTH_2M 0x02
+#define SSC_DEPTH_1M 0x03
+#define SSC_DEPTH_512K 0x04
+#define SSC_DEPTH_256K 0x05
+#define SSC_DEPTH_128K 0x06
+#define SSC_DEPTH_64K 0x07
+
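+/* Per-pin pull-up/pull-down/no-pull settings for CARD_PULL_CTL1-6 */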
+#define XD_D3_NP 0x00
+#define XD_D3_PD (0x01 << 6)
+#define XD_D3_PU (0x02 << 6)
+#define XD_D2_NP 0x00
+#define XD_D2_PD (0x01 << 4)
+#define XD_D2_PU (0x02 << 4)
+#define XD_D1_NP 0x00
+#define XD_D1_PD (0x01 << 2)
+#define XD_D1_PU (0x02 << 2)
+#define XD_D0_NP 0x00
+#define XD_D0_PD 0x01
+#define XD_D0_PU 0x02
+
+#define SD_D7_NP 0x00
+#define SD_D7_PD (0x01 << 4)
+#define SD_DAT7_PU (0x02 << 4)
+#define SD_CLK_NP 0x00
+#define SD_CLK_PD (0x01 << 2)
+#define SD_CLK_PU (0x02 << 2)
+#define SD_D5_NP 0x00
+#define SD_D5_PD 0x01
+#define SD_D5_PU 0x02
+
+#define MS_D1_NP 0x00
+#define MS_D1_PD (0x01 << 6)
+#define MS_D1_PU (0x02 << 6)
+#define MS_D2_NP 0x00
+#define MS_D2_PD (0x01 << 4)
+#define MS_D2_PU (0x02 << 4)
+#define MS_CLK_NP 0x00
+#define MS_CLK_PD (0x01 << 2)
+#define MS_CLK_PU (0x02 << 2)
+#define MS_D6_NP 0x00
+#define MS_D6_PD 0x01
+#define MS_D6_PU 0x02
+
+#define XD_D7_NP 0x00
+#define XD_D7_PD (0x01 << 6)
+#define XD_D7_PU (0x02 << 6)
+#define XD_D6_NP 0x00
+#define XD_D6_PD (0x01 << 4)
+#define XD_D6_PU (0x02 << 4)
+#define XD_D5_NP 0x00
+#define XD_D5_PD (0x01 << 2)
+#define XD_D5_PU (0x02 << 2)
+#define XD_D4_NP 0x00
+#define XD_D4_PD 0x01
+#define XD_D4_PU 0x02
+
+#define SD_D6_NP 0x00
+#define SD_D6_PD (0x01 << 6)
+#define SD_D6_PU (0x02 << 6)
+#define SD_D0_NP 0x00
+#define SD_D0_PD (0x01 << 4)
+#define SD_D0_PU (0x02 << 4)
+#define SD_D1_NP 0x00
+#define SD_D1_PD 0x01
+#define SD_D1_PU 0x02
+
+#define MS_D3_NP 0x00
+#define MS_D3_PD (0x01 << 6)
+#define MS_D3_PU (0x02 << 6)
+#define MS_D0_NP 0x00
+#define MS_D0_PD (0x01 << 4)
+#define MS_D0_PU (0x02 << 4)
+#define MS_BS_NP 0x00
+#define MS_BS_PD (0x01 << 2)
+#define MS_BS_PU (0x02 << 2)
+
+#define XD_WP_NP 0x00
+#define XD_WP_PD (0x01 << 6)
+#define XD_WP_PU (0x02 << 6)
+#define XD_CE_NP 0x00
+#define XD_CE_PD (0x01 << 3)
+#define XD_CE_PU (0x02 << 3)
+#define XD_CLE_NP 0x00
+#define XD_CLE_PD (0x01 << 1)
+#define XD_CLE_PU (0x02 << 1)
+#define XD_CD_PD 0x00
+#define XD_CD_PU 0x01
+
+#define SD_D4_NP 0x00
+#define SD_D4_PD (0x01 << 6)
+#define SD_D4_PU (0x02 << 6)
+
+#define MS_D7_NP 0x00
+#define MS_D7_PD (0x01 << 6)
+#define MS_D7_PU (0x02 << 6)
+
+#define XD_RDY_NP 0x00
+#define XD_RDY_PD (0x01 << 6)
+#define XD_RDY_PU (0x02 << 6)
+#define XD_WE_NP 0x00
+#define XD_WE_PD (0x01 << 4)
+#define XD_WE_PU (0x02 << 4)
+#define XD_RE_NP 0x00
+#define XD_RE_PD (0x01 << 2)
+#define XD_RE_PU (0x02 << 2)
+#define XD_ALE_NP 0x00
+#define XD_ALE_PD 0x01
+#define XD_ALE_PU 0x02
+
+#define SD_D3_NP 0x00
+#define SD_D3_PD (0x01 << 4)
+#define SD_D3_PU (0x02 << 4)
+#define SD_D2_NP 0x00
+#define SD_D2_PD (0x01 << 2)
+#define SD_D2_PU (0x02 << 2)
+
+#define MS_INS_PD 0x00
+#define MS_INS_PU (0x01 << 7)
+#define SD_WP_NP 0x00
+#define SD_WP_PD (0x01 << 5)
+#define SD_WP_PU (0x02 << 5)
+#define SD_CD_PD 0x00
+#define SD_CD_PU (0x01 << 4)
+#define SD_CMD_NP 0x00
+#define SD_CMD_PD (0x01 << 2)
+#define SD_CMD_PU (0x02 << 2)
+
+#define MS_D5_NP 0x00
+#define MS_D5_PD (0x01 << 2)
+#define MS_D5_PU (0x02 << 2)
+#define MS_D4_NP 0x00
+#define MS_D4_PD 0x01
+#define MS_D4_PU 0x02
+
+#define FORCE_PM_CLOCK 0x10
+#define EN_CLOCK_PM 0x01
+
+#define HOST_ENTER_S3 0x02
+#define HOST_ENTER_S1 0x01
+
+#define AUX_PWR_DETECTED 0x01
+
+#define PHY_DEBUG_MODE 0x01
+
+#define SPI_COMMAND_BIT_8 0xE0
+#define SPI_ADDRESS_BIT_24 0x17
+#define SPI_ADDRESS_BIT_32 0x1F
+
+#define SPI_TRANSFER0_START 0x80
+#define SPI_TRANSFER0_END 0x40
+#define SPI_C_MODE0 0x00
+#define SPI_CA_MODE0 0x01
+#define SPI_CDO_MODE0 0x02
+#define SPI_CDI_MODE0 0x03
+#define SPI_CADO_MODE0 0x04
+#define SPI_CADI_MODE0 0x05
+#define SPI_POLLING_MODE0 0x06
+
+#define SPI_TRANSFER1_START 0x80
+#define SPI_TRANSFER1_END 0x40
+#define SPI_DO_MODE1 0x00
+#define SPI_DI_MODE1 0x01
+
+#define CS_POLARITY_HIGH 0x40
+#define CS_POLARITY_LOW 0x00
+#define DTO_MSB_FIRST 0x00
+#define DTO_LSB_FIRST 0x20
+#define SPI_MASTER 0x00
+#define SPI_SLAVE 0x10
+#define SPI_MODE0 0x00
+#define SPI_MODE1 0x04
+#define SPI_MODE2 0x08
+#define SPI_MODE3 0x0C
+#define SPI_MANUAL 0x00
+#define SPI_HALF_AUTO 0x01
+#define SPI_AUTO 0x02
+#define SPI_EEPROM_AUTO 0x03
+
+#define EDO_TIMING_MASK 0x03
+#define SAMPLE_RISING 0x00
+#define SAMPLE_DELAY_HALF 0x01
+#define SAMPLE_DELAY_ONE 0x02
+#define SAPMLE_DELAY_ONE_HALF 0x03
+#define TCS_MASK 0x0C
+
+#define NOT_BYPASS_SD 0x02
+#define DISABLE_SDIO_FUNC 0x04
+#define SELECT_1LUN 0x08
+
+#define PWR_GATE_EN 0x01
+#define LDO3318_PWR_MASK 0x06
+#define LDO_ON 0x00
+#define LDO_SUSPEND 0x04
+#define LDO_OFF 0x06
+
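+/* Internal card-reader register addresses */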
+#define SD_CFG1 0xFDA0
+#define SD_CFG2 0xFDA1
+#define SD_CFG3 0xFDA2
+#define SD_STAT1 0xFDA3
+#define SD_STAT2 0xFDA4
+#define SD_BUS_STAT 0xFDA5
+#define SD_PAD_CTL 0xFDA6
+#define SD_SAMPLE_POINT_CTL 0xFDA7
+#define SD_PUSH_POINT_CTL 0xFDA8
+#define SD_CMD0 0xFDA9
+#define SD_CMD1 0xFDAA
+#define SD_CMD2 0xFDAB
+#define SD_CMD3 0xFDAC
+#define SD_CMD4 0xFDAD
+#define SD_CMD5 0xFDAE
+#define SD_BYTE_CNT_L 0xFDAF
+#define SD_BYTE_CNT_H 0xFDB0
+#define SD_BLOCK_CNT_L 0xFDB1
+#define SD_BLOCK_CNT_H 0xFDB2
+#define SD_TRANSFER 0xFDB3
+#define SD_CMD_STATE 0xFDB5
+#define SD_DATA_STATE 0xFDB6
+
+#define DCM_DRP_CTL 0xFC23
+#define DCM_DRP_TRIG 0xFC24
+#define DCM_DRP_CFG 0xFC25
+#define DCM_DRP_WR_DATA_L 0xFC26
+#define DCM_DRP_WR_DATA_H 0xFC27
+#define DCM_DRP_RD_DATA_L 0xFC28
+#define DCM_DRP_RD_DATA_H 0xFC29
+#define SD_VPCLK0_CTL 0xFC2A
+#define SD_VPCLK1_CTL 0xFC2B
+#define SD_DCMPS0_CTL 0xFC2C
+#define SD_DCMPS1_CTL 0xFC2D
+#define SD_VPTX_CTL SD_VPCLK0_CTL
+#define SD_VPRX_CTL SD_VPCLK1_CTL
+#define SD_DCMPS_TX_CTL SD_DCMPS0_CTL
+#define SD_DCMPS_RX_CTL SD_DCMPS1_CTL
+
+#define CARD_CLK_SOURCE 0xFC2E
+
+#define CARD_PWR_CTL 0xFD50
+#define CARD_CLK_SWITCH 0xFD51
+#define CARD_SHARE_MODE 0xFD52
+#define CARD_DRIVE_SEL 0xFD53
+#define CARD_STOP 0xFD54
+#define CARD_OE 0xFD55
+#define CARD_AUTO_BLINK 0xFD56
+#define CARD_GPIO_DIR 0xFD57
+#define CARD_GPIO 0xFD58
+
+#define CARD_DATA_SOURCE 0xFD5B
+#define CARD_SELECT 0xFD5C
+#define SD30_DRIVE_SEL 0xFD5E
+
+#define CARD_CLK_EN 0xFD69
+
+#define SDIO_CTRL 0xFD6B
+
+#define FPDCTL 0xFC00
+#define PDINFO 0xFC01
+
+#define CLK_CTL 0xFC02
+#define CLK_DIV 0xFC03
+#define CLK_SEL 0xFC04
+
+#define SSC_DIV_N_0 0xFC0F
+#define SSC_DIV_N_1 0xFC10
+
+#define RCCTL 0xFC14
+
+#define FPGA_PULL_CTL 0xFC1D
+
+#define CARD_PULL_CTL1 0xFD60
+#define CARD_PULL_CTL2 0xFD61
+#define CARD_PULL_CTL3 0xFD62
+#define CARD_PULL_CTL4 0xFD63
+#define CARD_PULL_CTL5 0xFD64
+#define CARD_PULL_CTL6 0xFD65
+
+#define IRQEN0 0xFE20
+#define IRQSTAT0 0xFE21
+#define IRQEN1 0xFE22
+#define IRQSTAT1 0xFE23
+#define TLPRIEN 0xFE24
+#define TLPRISTAT 0xFE25
+#define TLPTIEN 0xFE26
+#define TLPTISTAT 0xFE27
+#define DMATC0 0xFE28
+#define DMATC1 0xFE29
+#define DMATC2 0xFE2A
+#define DMATC3 0xFE2B
+#define DMACTL 0xFE2C
+#define BCTL 0xFE2D
+#define RBBC0 0xFE2E
+#define RBBC1 0xFE2F
+#define RBDAT 0xFE30
+#define RBCTL 0xFE34
+#define CFGADDR0 0xFE35
+#define CFGADDR1 0xFE36
+#define CFGDATA0 0xFE37
+#define CFGDATA1 0xFE38
+#define CFGDATA2 0xFE39
+#define CFGDATA3 0xFE3A
+#define CFGRWCTL 0xFE3B
+#define PHYRWCTL 0xFE3C
+#define PHYDATA0 0xFE3D
+#define PHYDATA1 0xFE3E
+#define PHYADDR 0xFE3F
+#define MSGRXDATA0 0xFE40
+#define MSGRXDATA1 0xFE41
+#define MSGRXDATA2 0xFE42
+#define MSGRXDATA3 0xFE43
+#define MSGTXDATA0 0xFE44
+#define MSGTXDATA1 0xFE45
+#define MSGTXDATA2 0xFE46
+#define MSGTXDATA3 0xFE47
+#define MSGTXCTL 0xFE48
+#define PETXCFG 0xFE49
+
+#define CDRESUMECTL 0xFE52
+#define WAKE_SEL_CTL 0xFE54
+#define PME_FORCE_CTL 0xFE56
+#define ASPM_FORCE_CTL 0xFE57
+#define PM_CLK_FORCE_CTL 0xFE58
+#define PERST_GLITCH_WIDTH 0xFE5C
+#define CHANGE_LINK_STATE 0xFE5B
+#define RESET_LOAD_REG 0xFE5E
+#define HOST_SLEEP_STATE 0xFE60
+#define MAIN_PWR_OFF_CTL 0xFE70 /* RTS5208 */
+
+#define NFTS_TX_CTRL 0xFE72
+
+#define PWR_GATE_CTRL 0xFE75
+#define PWD_SUSPEND_EN 0xFE76
+
+#define EFUSE_CONTENT 0xFE5F
+
+#define XD_INIT 0xFD10
+#define XD_DTCTL 0xFD11
+#define XD_CTL 0xFD12
+#define XD_TRANSFER 0xFD13
+#define XD_CFG 0xFD14
+#define XD_ADDRESS0 0xFD15
+#define XD_ADDRESS1 0xFD16
+#define XD_ADDRESS2 0xFD17
+#define XD_ADDRESS3 0xFD18
+#define XD_ADDRESS4 0xFD19
+#define XD_DAT 0xFD1A
+#define XD_PAGE_CNT 0xFD1B
+#define XD_PAGE_STATUS 0xFD1C
+#define XD_BLOCK_STATUS 0xFD1D
+#define XD_BLOCK_ADDR1_L 0xFD1E
+#define XD_BLOCK_ADDR1_H 0xFD1F
+#define XD_BLOCK_ADDR2_L 0xFD20
+#define XD_BLOCK_ADDR2_H 0xFD21
+#define XD_BYTE_CNT_L 0xFD22
+#define XD_BYTE_CNT_H 0xFD23
+#define XD_PARITY 0xFD24
+#define XD_ECC_BIT1 0xFD25
+#define XD_ECC_BYTE1 0xFD26
+#define XD_ECC_BIT2 0xFD27
+#define XD_ECC_BYTE2 0xFD28
+#define XD_RESERVED0 0xFD29
+#define XD_RESERVED1 0xFD2A
+#define XD_RESERVED2 0xFD2B
+#define XD_RESERVED3 0xFD2C
+#define XD_CHK_DATA_STATUS 0xFD2D
+#define XD_CATCTL 0xFD2E
+
+#define MS_CFG 0xFD40
+#define MS_TPC 0xFD41
+#define MS_TRANS_CFG 0xFD42
+#define MS_TRANSFER 0xFD43
+#define MS_INT_REG 0xFD44
+#define MS_BYTE_CNT 0xFD45
+#define MS_SECTOR_CNT_L 0xFD46
+#define MS_SECTOR_CNT_H 0xFD47
+#define MS_DBUS_H 0xFD48
+
+#define SSC_CTL1 0xFC11
+#define SSC_CTL2 0xFC12
+
+#define OCPCTL 0xFC15
+#define OCPSTAT 0xFC16
+#define OCPCLR 0xFC17 /* 5208 */
+#define OCPPARA1 0xFC18
+#define OCPPARA2 0xFC19
+
+#define EFUSE_OP 0xFC20
+#define EFUSE_CTRL 0xFC21
+#define EFUSE_DATA 0xFC22
+
+#define SPI_COMMAND 0xFD80
+#define SPI_ADDR0 0xFD81
+#define SPI_ADDR1 0xFD82
+#define SPI_ADDR2 0xFD83
+#define SPI_ADDR3 0xFD84
+#define SPI_CA_NUMBER 0xFD85
+#define SPI_LENGTH0 0xFD86
+#define SPI_LENGTH1 0xFD87
+#define SPI_DATA 0xFD88
+#define SPI_DATA_NUMBER 0xFD89
+#define SPI_TRANSFER0 0xFD90
+#define SPI_TRANSFER1 0xFD91
+#define SPI_CONTROL 0xFD92
+#define SPI_SIG 0xFD93
+#define SPI_TCTL 0xFD94
+#define SPI_SLAVE_NUM 0xFD95
+#define SPI_CLK_DIVIDER0 0xFD96
+#define SPI_CLK_DIVIDER1 0xFD97
+
+#define SRAM_BASE 0xE600
+#define RBUF_BASE 0xF400
+#define PPBUF_BASE1 0xF800
+#define PPBUF_BASE2 0xFA00
+#define IMAGE_FLAG_ADDR0 0xCE80
+#define IMAGE_FLAG_ADDR1 0xCE81
+
+#define READ_OP 1
+#define WRITE_OP 2
+
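+/* PCI config offset of the PCIe Link Control register (ASPM enable bits) */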
+#define LCTLR 0x80
+
+#define POLLING_WAIT_CNT 1
+#define IDLE_MAX_COUNT 10
+#define SDIO_IDLE_COUNT 10
+
+#define DEBOUNCE_CNT 5
+
+void do_remaining_work(struct rtsx_chip *chip);
+void try_to_switch_sdio_ctrl(struct rtsx_chip *chip);
+void do_reset_sd_card(struct rtsx_chip *chip);
+void do_reset_xd_card(struct rtsx_chip *chip);
+void do_reset_ms_card(struct rtsx_chip *chip);
+void rtsx_power_off_card(struct rtsx_chip *chip);
+void rtsx_release_cards(struct rtsx_chip *chip);
+void rtsx_reset_cards(struct rtsx_chip *chip);
+void rtsx_reinit_cards(struct rtsx_chip *chip, int reset_chip);
+void rtsx_init_cards(struct rtsx_chip *chip);
+int switch_ssc_clock(struct rtsx_chip *chip, int clk);
+int switch_normal_clock(struct rtsx_chip *chip, int clk);
+int enable_card_clock(struct rtsx_chip *chip, u8 card);
+int disable_card_clock(struct rtsx_chip *chip, u8 card);
+int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 sec_addr, u16 sec_cnt);
+void trans_dma_enable(enum dma_data_direction dir,
+ struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size);
+void toggle_gpio(struct rtsx_chip *chip, u8 gpio);
+void turn_on_led(struct rtsx_chip *chip, u8 gpio);
+void turn_off_led(struct rtsx_chip *chip, u8 gpio);
+
+int card_share_mode(struct rtsx_chip *chip, int card);
+int select_card(struct rtsx_chip *chip, int card);
+int detect_card_cd(struct rtsx_chip *chip, int card);
+int check_card_exist(struct rtsx_chip *chip, unsigned int lun);
+int check_card_ready(struct rtsx_chip *chip, unsigned int lun);
+int check_card_wp(struct rtsx_chip *chip, unsigned int lun);
+int check_card_fail(struct rtsx_chip *chip, unsigned int lun);
+int check_card_ejected(struct rtsx_chip *chip, unsigned int lun);
+void eject_card(struct rtsx_chip *chip, unsigned int lun);
+u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun);
+
+static inline u32 get_card_size(struct rtsx_chip *chip, unsigned int lun)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+
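+	/* A password-locked SD card reports zero capacity */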
+ if ((get_lun_card(chip, lun) == SD_CARD) &&
+ (sd_card->sd_lock_status & SD_LOCKED))
+ return 0;
+ else
+ return chip->capacity[lun];
+#else
+ return chip->capacity[lun];
+#endif
+}
+
+static inline int switch_clock(struct rtsx_chip *chip, int clk)
+{
+ int retval = 0;
+
+ if (chip->asic_code)
+ retval = switch_ssc_clock(chip, clk);
+ else
+ retval = switch_normal_clock(chip, clk);
+
+ return retval;
+}
+
+int card_power_on(struct rtsx_chip *chip, u8 card);
+int card_power_off(struct rtsx_chip *chip, u8 card);
+
+static inline int card_power_off_all(struct rtsx_chip *chip)
+{
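+	/* 0x0F = MS_POWER_OFF | SD_POWER_OFF: power off both card supplies */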
+ RTSX_WRITE_REG(chip, CARD_PWR_CTL, 0x0F, 0x0F);
+
+ return STATUS_SUCCESS;
+}
+
+static inline void rtsx_clear_xd_error(struct rtsx_chip *chip)
+{
+ rtsx_write_register(chip, CARD_STOP, XD_STOP | XD_CLR_ERR,
+ XD_STOP | XD_CLR_ERR);
+}
+
+static inline void rtsx_clear_sd_error(struct rtsx_chip *chip)
+{
+ rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+}
+
+static inline void rtsx_clear_ms_error(struct rtsx_chip *chip)
+{
+ rtsx_write_register(chip, CARD_STOP, MS_STOP | MS_CLR_ERR,
+ MS_STOP | MS_CLR_ERR);
+}
+
+static inline void rtsx_clear_spi_error(struct rtsx_chip *chip)
+{
+ rtsx_write_register(chip, CARD_STOP, SPI_STOP | SPI_CLR_ERR,
+ SPI_STOP | SPI_CLR_ERR);
+}
+
+#ifdef SUPPORT_SDIO_ASPM
+void dynamic_configure_sdio_aspm(struct rtsx_chip *chip);
+#endif
+
+#endif /* __REALTEK_RTSX_CARD_H */
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
new file mode 100644
index 0000000..6426807
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -0,0 +1,1979 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/vmalloc.h>
+
+#include "rtsx.h"
+#include "rtsx_transport.h"
+#include "rtsx_scsi.h"
+#include "rtsx_card.h"
+#include "rtsx_chip.h"
+#include "rtsx_sys.h"
+#include "general.h"
+
+#include "sd.h"
+#include "xd.h"
+#include "ms.h"
+
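+/*
+ * PHY calibration: vendor-defined PHY register write sequence, run once on
+ * the first driver load for chips older than IC_VER_C (see rtsx_reset_chip).
+ */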
+static void rtsx_calibration(struct rtsx_chip *chip)
+{
+ rtsx_write_phy_register(chip, 0x1B, 0x135E);
+ wait_timeout(10);
+ rtsx_write_phy_register(chip, 0x00, 0x0280);
+ rtsx_write_phy_register(chip, 0x01, 0x7112);
+ rtsx_write_phy_register(chip, 0x01, 0x7110);
+ rtsx_write_phy_register(chip, 0x01, 0x7112);
+ rtsx_write_phy_register(chip, 0x01, 0x7113);
+ rtsx_write_phy_register(chip, 0x00, 0x0288);
+}
+
+void rtsx_disable_card_int(struct rtsx_chip *chip)
+{
+ u32 reg = rtsx_readl(chip, RTSX_BIER);
+
+ reg &= ~(XD_INT_EN | SD_INT_EN | MS_INT_EN);
+ rtsx_writel(chip, RTSX_BIER, reg);
+}
+
+void rtsx_enable_card_int(struct rtsx_chip *chip)
+{
+ u32 reg = rtsx_readl(chip, RTSX_BIER);
+ int i;
+
+ for (i = 0; i <= chip->max_lun; i++) {
+ if (chip->lun2card[i] & XD_CARD)
+ reg |= XD_INT_EN;
+ if (chip->lun2card[i] & SD_CARD)
+ reg |= SD_INT_EN;
+ if (chip->lun2card[i] & MS_CARD)
+ reg |= MS_INT_EN;
+ }
+ if (chip->hw_bypass_sd)
+ reg &= ~((u32)SD_INT_EN);
+
+ rtsx_writel(chip, RTSX_BIER, reg);
+}
+
+void rtsx_enable_bus_int(struct rtsx_chip *chip)
+{
+ u32 reg = 0;
+#ifndef DISABLE_CARD_INT
+ int i;
+#endif
+
+ reg = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN;
+
+#ifndef DISABLE_CARD_INT
+ for (i = 0; i <= chip->max_lun; i++) {
+ RTSX_DEBUGP("lun2card[%d] = 0x%02x\n", i, chip->lun2card[i]);
+
+ if (chip->lun2card[i] & XD_CARD)
+ reg |= XD_INT_EN;
+ if (chip->lun2card[i] & SD_CARD)
+ reg |= SD_INT_EN;
+ if (chip->lun2card[i] & MS_CARD)
+ reg |= MS_INT_EN;
+ }
+ if (chip->hw_bypass_sd)
+ reg &= ~((u32)SD_INT_EN);
+#endif
+
+ if (chip->ic_version >= IC_VER_C)
+ reg |= DELINK_INT_EN;
+#ifdef SUPPORT_OCP
+ reg |= OC_INT_EN;
+#endif
+ if (!chip->adma_mode)
+ reg |= DATA_DONE_INT_EN;
+
+ /* Enable Bus Interrupt */
+ rtsx_writel(chip, RTSX_BIER, reg);
+
+ RTSX_DEBUGP("RTSX_BIER: 0x%08x\n", reg);
+}
+
+void rtsx_disable_bus_int(struct rtsx_chip *chip)
+{
+ rtsx_writel(chip, RTSX_BIER, 0);
+}
+
+static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip)
+{
+ if (chip->ignore_sd && CHK_SDIO_EXIST(chip)) {
+ if (chip->asic_code) {
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
+ } else {
+ RTSX_WRITE_REG(chip, FPGA_PULL_CTL, 0xFF,
+ FPGA_SD_PULL_CTL_EN);
+ }
+ RTSX_WRITE_REG(chip, CARD_SHARE_MODE, 0xFF, CARD_SHARE_48_SD);
+
+ /* Enable SDIO internal clock */
+ RTSX_WRITE_REG(chip, 0xFF2C, 0x01, 0x01);
+
+ RTSX_WRITE_REG(chip, SDIO_CTRL, 0xFF,
+ SDIO_BUS_CTRL | SDIO_CD_CTRL);
+
+ chip->sd_int = 1;
+ chip->sd_io = 1;
+ } else {
+ chip->need_reset |= SD_CARD;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef HW_AUTO_SWITCH_SD_BUS
+static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
+{
+ u8 tmp;
+ int sw_bypass_sd = 0;
+ int retval;
+
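+	/*
+	 * Decide whether SD bus switching is handled in software: on the
+	 * first driver load read the auto-switch bit from the chip-specific
+	 * register (0xFE5A on RTS5288, 0xFE70 on RTS5208); on later loads
+	 * rely on the sdio_in_charge flag instead.
+	 */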
+ if (chip->driver_first_load) {
+ if (CHECK_PID(chip, 0x5288)) {
+ RTSX_READ_REG(chip, 0xFE5A, &tmp);
+ if (tmp & 0x08)
+ sw_bypass_sd = 1;
+ } else if (CHECK_PID(chip, 0x5208)) {
+ RTSX_READ_REG(chip, 0xFE70, &tmp);
+ if (tmp & 0x80)
+ sw_bypass_sd = 1;
+ }
+ } else {
+ if (chip->sdio_in_charge)
+ sw_bypass_sd = 1;
+ }
+ RTSX_DEBUGP("chip->sdio_in_charge = %d\n", chip->sdio_in_charge);
+ RTSX_DEBUGP("chip->driver_first_load = %d\n", chip->driver_first_load);
+ RTSX_DEBUGP("sw_bypass_sd = %d\n", sw_bypass_sd);
+
+ if (sw_bypass_sd) {
+ u8 cd_toggle_mask = 0;
+
+ RTSX_READ_REG(chip, TLPTISTAT, &tmp);
+ cd_toggle_mask = 0x08;
+
+ if (tmp & cd_toggle_mask) {
+ /* Disable sdio_bus_auto_switch */
+ if (CHECK_PID(chip, 0x5288))
+ RTSX_WRITE_REG(chip, 0xFE5A, 0x08, 0x00);
+ else if (CHECK_PID(chip, 0x5208))
+ RTSX_WRITE_REG(chip, 0xFE70, 0x80, 0x00);
+
+ RTSX_WRITE_REG(chip, TLPTISTAT, 0xFF, tmp);
+
+ chip->need_reset |= SD_CARD;
+ } else {
+ RTSX_DEBUGP("Chip inserted with SDIO!\n");
+
+ if (chip->asic_code) {
+ retval = sd_pull_ctl_enable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, FPGA_PULL_CTL,
+ FPGA_SD_PULL_CTL_BIT | 0x20, 0);
+ }
+ retval = card_share_mode(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* Enable sdio_bus_auto_switch */
+ if (CHECK_PID(chip, 0x5288))
+ RTSX_WRITE_REG(chip, 0xFE5A, 0x08, 0x08);
+ else if (CHECK_PID(chip, 0x5208))
+ RTSX_WRITE_REG(chip, 0xFE70, 0x80, 0x80);
+
+ chip->chip_insert_with_sdio = 1;
+ chip->sd_io = 1;
+ }
+ } else {
+ RTSX_WRITE_REG(chip, TLPTISTAT, 0x08, 0x08);
+
+ chip->need_reset |= SD_CARD;
+ }
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+int rtsx_reset_chip(struct rtsx_chip *chip)
+{
+ int retval;
+
+ rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);
+
+ rtsx_disable_aspm(chip);
+
+ RTSX_WRITE_REG(chip, HOST_SLEEP_STATE, 0x03, 0x00);
+
+ /* Disable card clock */
+ RTSX_WRITE_REG(chip, CARD_CLK_EN, 0x1E, 0);
+
+#ifdef SUPPORT_OCP
+ /* SSC power on, OCD power on */
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
+ RTSX_WRITE_REG(chip, FPDCTL, OC_POWER_DOWN, 0);
+ else
+ RTSX_WRITE_REG(chip, FPDCTL, OC_POWER_DOWN, MS_OC_POWER_DOWN);
+
+ RTSX_WRITE_REG(chip, OCPPARA1, OCP_TIME_MASK, OCP_TIME_800);
+ RTSX_WRITE_REG(chip, OCPPARA2, OCP_THD_MASK, OCP_THD_244_946);
+ RTSX_WRITE_REG(chip, OCPCTL, 0xFF, CARD_OC_INT_EN | CARD_DETECT_EN);
+#else
+ /* OC power down */
+ RTSX_WRITE_REG(chip, FPDCTL, OC_POWER_DOWN, OC_POWER_DOWN);
+#endif
+
+ if (!CHECK_PID(chip, 0x5288))
+ RTSX_WRITE_REG(chip, CARD_GPIO_DIR, 0xFF, 0x03);
+
+ /* Turn off LED */
+ RTSX_WRITE_REG(chip, CARD_GPIO, 0xFF, 0x03);
+
+ /* Reset delink mode */
+ RTSX_WRITE_REG(chip, CHANGE_LINK_STATE, 0x0A, 0);
+
+ /* Card driving select */
+ RTSX_WRITE_REG(chip, CARD_DRIVE_SEL, 0xFF, chip->card_drive_sel);
+
+#ifdef LED_AUTO_BLINK
+ RTSX_WRITE_REG(chip, CARD_AUTO_BLINK, 0xFF,
+ LED_BLINK_SPEED | BLINK_EN | LED_GPIO0);
+#endif
+
+ if (chip->asic_code) {
+ /* Enable SSC Clock */
+ RTSX_WRITE_REG(chip, SSC_CTL1, 0xFF, SSC_8X_EN | SSC_SEL_4M);
+ RTSX_WRITE_REG(chip, SSC_CTL2, 0xFF, 0x12);
+ }
+
+	/*
+	 * Disable cd_pwr_save (u_force_rst_core_en=0, u_cd_rst_core_en=0)
+	 * Register 0xFE5B:
+	 *   bit[1] u_cd_rst_core_en         rst_value = 0
+	 *   bit[2] u_force_rst_core_en      rst_value = 0
+	 *   bit[5] u_mac_phy_rst_n_dbg      rst_value = 1
+	 *   bit[4] u_non_sticky_rst_n_dbg   rst_value = 0
+	 */
+ RTSX_WRITE_REG(chip, CHANGE_LINK_STATE, 0x16, 0x10);
+
+ /* Enable ASPM */
+ if (chip->aspm_l0s_l1_en) {
+ if (chip->dynamic_aspm) {
+ if (CHK_SDIO_EXIST(chip)) {
+ if (CHECK_PID(chip, 0x5288)) {
+					retval = rtsx_write_cfg_dw(chip, 2,
+						0xC0, 0xFF,
+						chip->aspm_l0s_l1_en);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ } else {
+ if (CHECK_PID(chip, 0x5208))
+ RTSX_WRITE_REG(chip, ASPM_FORCE_CTL,
+ 0xFF, 0x3F);
+
+ retval = rtsx_write_config_byte(chip, LCTLR,
+ chip->aspm_l0s_l1_en);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ chip->aspm_level[0] = chip->aspm_l0s_l1_en;
+ if (CHK_SDIO_EXIST(chip)) {
+ chip->aspm_level[1] = chip->aspm_l0s_l1_en;
+				if (CHECK_PID(chip, 0x5288))
+					retval = rtsx_write_cfg_dw(chip, 2,
+						0xC0, 0xFF,
+						chip->aspm_l0s_l1_en);
+				else
+					retval = rtsx_write_cfg_dw(chip, 1,
+						0xC0, 0xFF,
+						chip->aspm_l0s_l1_en);
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ }
+
+ chip->aspm_enabled = 1;
+ }
+ } else {
+ if (chip->asic_code && CHECK_PID(chip, 0x5208)) {
+ retval = rtsx_write_phy_register(chip, 0x07, 0x0129);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ retval = rtsx_write_config_byte(chip, LCTLR,
+ chip->aspm_l0s_l1_en);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = rtsx_write_config_byte(chip, 0x81, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_SDIO_EXIST(chip)) {
+ if (CHECK_PID(chip, 0x5288))
+ retval = rtsx_write_cfg_dw(chip, 2, 0xC0,
+ 0xFF00, 0x0100);
+ else
+ retval = rtsx_write_cfg_dw(chip, 1, 0xC0,
+ 0xFF00, 0x0100);
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ }
+
+ if (CHECK_PID(chip, 0x5288)) {
+ if (!CHK_SDIO_EXIST(chip)) {
+ retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFFFF,
+ 0x0103);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = rtsx_write_cfg_dw(chip, 2, 0x84, 0xFF, 0x03);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ }
+ }
+
+ RTSX_WRITE_REG(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);
+
+ RTSX_WRITE_REG(chip, PERST_GLITCH_WIDTH, 0xFF, 0x80);
+
+ /* Enable PCIE interrupt */
+ if (chip->asic_code) {
+ if (CHECK_PID(chip, 0x5208)) {
+ if (chip->phy_debug_mode) {
+ RTSX_WRITE_REG(chip, CDRESUMECTL, 0x77, 0);
+ rtsx_disable_bus_int(chip);
+ } else {
+ rtsx_enable_bus_int(chip);
+ }
+
+ if (chip->ic_version >= IC_VER_D) {
+ u16 reg;
+ retval = rtsx_read_phy_register(chip, 0x00,
+ &reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ reg &= 0xFE7F;
+ reg |= 0x80;
+ retval = rtsx_write_phy_register(chip, 0x00,
+ reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = rtsx_read_phy_register(chip, 0x1C,
+ &reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ reg &= 0xFFF7;
+ retval = rtsx_write_phy_register(chip, 0x1C,
+ reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ }
+
+ if (chip->driver_first_load &&
+ (chip->ic_version < IC_VER_C))
+ rtsx_calibration(chip);
+
+ } else {
+ rtsx_enable_bus_int(chip);
+ }
+ } else {
+ rtsx_enable_bus_int(chip);
+ }
+
+ chip->need_reset = 0;
+
+ chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
+
+ if (chip->hw_bypass_sd)
+ goto NextCard;
+ RTSX_DEBUGP("In rtsx_reset_chip, chip->int_reg = 0x%x\n",
+ chip->int_reg);
+ if (chip->int_reg & SD_EXIST) {
+#ifdef HW_AUTO_SWITCH_SD_BUS
+ if (CHECK_PID(chip, 0x5208) && (chip->ic_version < IC_VER_C))
+ retval = rtsx_pre_handle_sdio_old(chip);
+ else
+ retval = rtsx_pre_handle_sdio_new(chip);
+
+ RTSX_DEBUGP("chip->need_reset = 0x%x (rtsx_reset_chip)\n",
+ (unsigned int)(chip->need_reset));
+#else /* HW_AUTO_SWITCH_SD_BUS */
+ retval = rtsx_pre_handle_sdio_old(chip);
+#endif /* HW_AUTO_SWITCH_SD_BUS */
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ } else {
+ chip->sd_io = 0;
+ RTSX_WRITE_REG(chip, SDIO_CTRL, SDIO_BUS_CTRL | SDIO_CD_CTRL,
+ 0);
+ }
+
+NextCard:
+ if (chip->int_reg & XD_EXIST)
+ chip->need_reset |= XD_CARD;
+ if (chip->int_reg & MS_EXIST)
+ chip->need_reset |= MS_CARD;
+ if (chip->int_reg & CARD_EXIST)
+ RTSX_WRITE_REG(chip, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+
+	RTSX_DEBUGP("In rtsx_reset_chip, chip->need_reset = 0x%x\n",
+ (unsigned int)(chip->need_reset));
+
+ RTSX_WRITE_REG(chip, RCCTL, 0x01, 0x00);
+
+ if (CHECK_PID(chip, 0x5208) || CHECK_PID(chip, 0x5288)) {
+ /* Turn off main power when entering S3/S4 state */
+ RTSX_WRITE_REG(chip, MAIN_PWR_OFF_CTL, 0x03, 0x03);
+ }
+
+ if (chip->remote_wakeup_en && !chip->auto_delink_en) {
+ RTSX_WRITE_REG(chip, WAKE_SEL_CTL, 0x07, 0x07);
+ if (chip->aux_pwr_exist)
+ RTSX_WRITE_REG(chip, PME_FORCE_CTL, 0xFF, 0x33);
+ } else {
+ RTSX_WRITE_REG(chip, WAKE_SEL_CTL, 0x07, 0x04);
+ RTSX_WRITE_REG(chip, PME_FORCE_CTL, 0xFF, 0x30);
+ }
+
+ if (CHECK_PID(chip, 0x5208) && (chip->ic_version >= IC_VER_D))
+ RTSX_WRITE_REG(chip, PETXCFG, 0x1C, 0x14);
+
+ if (chip->asic_code && CHECK_PID(chip, 0x5208)) {
+ retval = rtsx_clr_phy_reg_bit(chip, 0x1C, 2);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (chip->ft2_fast_mode) {
+ RTSX_WRITE_REG(chip, CARD_PWR_CTL, 0xFF,
+ MS_PARTIAL_POWER_ON | SD_PARTIAL_POWER_ON);
+ udelay(chip->pmos_pwr_on_interval);
+ RTSX_WRITE_REG(chip, CARD_PWR_CTL, 0xFF,
+ MS_POWER_ON | SD_POWER_ON);
+
+ wait_timeout(200);
+ }
+
+ /* Reset card */
+ rtsx_reset_detected_cards(chip, 0);
+
+ chip->driver_first_load = 0;
+
+ return STATUS_SUCCESS;
+}
+
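+/* Each byte of sd_speed_prior is a priority index and must lie in 1..4 */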
+static inline int check_sd_speed_prior(u32 sd_speed_prior)
+{
+ int i, fake_para = 0;
+
+ for (i = 0; i < 4; i++) {
+ u8 tmp = (u8)(sd_speed_prior >> (i*8));
+ if ((tmp < 0x01) || (tmp > 0x04)) {
+ fake_para = 1;
+ break;
+ }
+ }
+
+ return !fake_para;
+}
+
+static inline int check_sd_current_prior(u32 sd_current_prior)
+{
+ int i, fake_para = 0;
+
+ for (i = 0; i < 4; i++) {
+ u8 tmp = (u8)(sd_current_prior >> (i*8));
+ if (tmp > 0x03) {
+ fake_para = 1;
+ break;
+ }
+ }
+
+ return !fake_para;
+}
+
+static int rts5208_init(struct rtsx_chip *chip)
+{
+ int retval;
+ u16 reg = 0;
+ u8 val = 0;
+
+ RTSX_WRITE_REG(chip, CLK_SEL, 0x03, 0x03);
+ RTSX_READ_REG(chip, CLK_SEL, &val);
+ if (val == 0)
+ chip->asic_code = 1;
+ else
+ chip->asic_code = 0;
+
+ if (chip->asic_code) {
+ retval = rtsx_read_phy_register(chip, 0x1C, &reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("Value of phy register 0x1C is 0x%x\n", reg);
+ chip->ic_version = (reg >> 4) & 0x07;
+ if (reg & PHY_DEBUG_MODE)
+ chip->phy_debug_mode = 1;
+ else
+ chip->phy_debug_mode = 0;
+
+ } else {
+ RTSX_READ_REG(chip, 0xFE80, &val);
+ chip->ic_version = val;
+ chip->phy_debug_mode = 0;
+ }
+
+ RTSX_READ_REG(chip, PDINFO, &val);
+ RTSX_DEBUGP("PDINFO: 0x%x\n", val);
+ if (val & AUX_PWR_DETECTED)
+ chip->aux_pwr_exist = 1;
+ else
+ chip->aux_pwr_exist = 0;
+
+ RTSX_READ_REG(chip, 0xFE50, &val);
+ if (val & 0x01)
+ chip->hw_bypass_sd = 1;
+ else
+ chip->hw_bypass_sd = 0;
+
+ rtsx_read_config_byte(chip, 0x0E, &val);
+ if (val & 0x80)
+ SET_SDIO_EXIST(chip);
+ else
+ CLR_SDIO_EXIST(chip);
+
+ if (chip->use_hw_setting) {
+ RTSX_READ_REG(chip, CHANGE_LINK_STATE, &val);
+ if (val & 0x80)
+ chip->auto_delink_en = 1;
+ else
+ chip->auto_delink_en = 0;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int rts5288_init(struct rtsx_chip *chip)
+{
+ int retval;
+ u8 val = 0, max_func;
+ u32 lval = 0;
+
+ RTSX_WRITE_REG(chip, CLK_SEL, 0x03, 0x03);
+ RTSX_READ_REG(chip, CLK_SEL, &val);
+ if (val == 0)
+ chip->asic_code = 1;
+ else
+ chip->asic_code = 0;
+
+ chip->ic_version = 0;
+ chip->phy_debug_mode = 0;
+
+ RTSX_READ_REG(chip, PDINFO, &val);
+ RTSX_DEBUGP("PDINFO: 0x%x\n", val);
+ if (val & AUX_PWR_DETECTED)
+ chip->aux_pwr_exist = 1;
+ else
+ chip->aux_pwr_exist = 0;
+
+ RTSX_READ_REG(chip, CARD_SHARE_MODE, &val);
+ RTSX_DEBUGP("CARD_SHARE_MODE: 0x%x\n", val);
+ if (val & 0x04)
+ chip->baro_pkg = QFN;
+ else
+ chip->baro_pkg = LQFP;
+
+ RTSX_READ_REG(chip, 0xFE5A, &val);
+ if (val & 0x10)
+ chip->hw_bypass_sd = 1;
+ else
+ chip->hw_bypass_sd = 0;
+
+ retval = rtsx_read_cfg_dw(chip, 0, 0x718, &lval);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ max_func = (u8)((lval >> 29) & 0x07);
+ RTSX_DEBUGP("Max function number: %d\n", max_func);
+ if (max_func == 0x02)
+ SET_SDIO_EXIST(chip);
+ else
+ CLR_SDIO_EXIST(chip);
+
+ if (chip->use_hw_setting) {
+ RTSX_READ_REG(chip, CHANGE_LINK_STATE, &val);
+ if (val & 0x80)
+ chip->auto_delink_en = 1;
+ else
+ chip->auto_delink_en = 0;
+
+ if (CHECK_BARO_PKG(chip, LQFP))
+ chip->lun_mode = SD_MS_1LUN;
+ else
+ chip->lun_mode = DEFAULT_SINGLE;
+
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_init_chip(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ unsigned int i;
+
+ RTSX_DEBUGP("Vendor ID: 0x%04x, Product ID: 0x%04x\n",
+ chip->vendor_id, chip->product_id);
+
+ chip->ic_version = 0;
+
+#ifdef _MSG_TRACE
+ chip->msg_idx = 0;
+#endif
+
+ memset(xd_card, 0, sizeof(struct xd_info));
+ memset(sd_card, 0, sizeof(struct sd_info));
+ memset(ms_card, 0, sizeof(struct ms_info));
+
+ chip->xd_reset_counter = 0;
+ chip->sd_reset_counter = 0;
+ chip->ms_reset_counter = 0;
+
+ chip->xd_show_cnt = MAX_SHOW_CNT;
+ chip->sd_show_cnt = MAX_SHOW_CNT;
+ chip->ms_show_cnt = MAX_SHOW_CNT;
+
+ chip->sd_io = 0;
+ chip->auto_delink_cnt = 0;
+ chip->auto_delink_allowed = 1;
+ rtsx_set_stat(chip, RTSX_STAT_INIT);
+
+ chip->aspm_enabled = 0;
+ chip->chip_insert_with_sdio = 0;
+ chip->sdio_aspm = 0;
+ chip->sdio_idle = 0;
+ chip->sdio_counter = 0;
+ chip->cur_card = 0;
+ chip->phy_debug_mode = 0;
+ chip->sdio_func_exist = 0;
+ memset(chip->sdio_raw_data, 0, 12);
+
+ for (i = 0; i < MAX_ALLOWED_LUN_CNT; i++) {
+ set_sense_type(chip, i, SENSE_TYPE_NO_SENSE);
+ chip->rw_fail_cnt[i] = 0;
+ }
+
+ if (!check_sd_speed_prior(chip->sd_speed_prior))
+ chip->sd_speed_prior = 0x01040203;
+
+ RTSX_DEBUGP("sd_speed_prior = 0x%08x\n", chip->sd_speed_prior);
+
+ if (!check_sd_current_prior(chip->sd_current_prior))
+ chip->sd_current_prior = 0x00010203;
+
+ RTSX_DEBUGP("sd_current_prior = 0x%08x\n", chip->sd_current_prior);
+
+ if ((chip->sd_ddr_tx_phase > 31) || (chip->sd_ddr_tx_phase < 0))
+ chip->sd_ddr_tx_phase = 0;
+
+ if ((chip->mmc_ddr_tx_phase > 31) || (chip->mmc_ddr_tx_phase < 0))
+ chip->mmc_ddr_tx_phase = 0;
+
+ RTSX_WRITE_REG(chip, FPDCTL, SSC_POWER_DOWN, 0);
+ wait_timeout(200);
+ RTSX_WRITE_REG(chip, CLK_DIV, 0x07, 0x07);
+ RTSX_DEBUGP("chip->use_hw_setting = %d\n", chip->use_hw_setting);
+
+ if (CHECK_PID(chip, 0x5208)) {
+ retval = rts5208_init(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ } else if (CHECK_PID(chip, 0x5288)) {
+ retval = rts5288_init(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ }
+
+ if (chip->ss_en == 2)
+ chip->ss_en = 0;
+
+ RTSX_DEBUGP("chip->asic_code = %d\n", chip->asic_code);
+ RTSX_DEBUGP("chip->ic_version = 0x%x\n", chip->ic_version);
+ RTSX_DEBUGP("chip->phy_debug_mode = %d\n", chip->phy_debug_mode);
+ RTSX_DEBUGP("chip->aux_pwr_exist = %d\n", chip->aux_pwr_exist);
+ RTSX_DEBUGP("chip->sdio_func_exist = %d\n", chip->sdio_func_exist);
+ RTSX_DEBUGP("chip->hw_bypass_sd = %d\n", chip->hw_bypass_sd);
+ RTSX_DEBUGP("chip->aspm_l0s_l1_en = %d\n", chip->aspm_l0s_l1_en);
+ RTSX_DEBUGP("chip->lun_mode = %d\n", chip->lun_mode);
+ RTSX_DEBUGP("chip->auto_delink_en = %d\n", chip->auto_delink_en);
+ RTSX_DEBUGP("chip->ss_en = %d\n", chip->ss_en);
+ RTSX_DEBUGP("chip->baro_pkg = %d\n", chip->baro_pkg);
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
+ chip->card2lun[SD_CARD] = 0;
+ chip->card2lun[MS_CARD] = 1;
+ chip->card2lun[XD_CARD] = 0xFF;
+ chip->lun2card[0] = SD_CARD;
+ chip->lun2card[1] = MS_CARD;
+ chip->max_lun = 1;
+ SET_SDIO_IGNORED(chip);
+ } else if (CHECK_LUN_MODE(chip, SD_MS_1LUN)) {
+ chip->card2lun[SD_CARD] = 0;
+ chip->card2lun[MS_CARD] = 0;
+ chip->card2lun[XD_CARD] = 0xFF;
+ chip->lun2card[0] = SD_CARD | MS_CARD;
+ chip->max_lun = 0;
+ } else {
+ chip->card2lun[XD_CARD] = 0;
+ chip->card2lun[SD_CARD] = 0;
+ chip->card2lun[MS_CARD] = 0;
+ chip->lun2card[0] = XD_CARD | SD_CARD | MS_CARD;
+ chip->max_lun = 0;
+ }
+
+ retval = rtsx_reset_chip(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+void rtsx_release_chip(struct rtsx_chip *chip)
+{
+ xd_free_l2p_tbl(chip);
+ ms_free_l2p_tbl(chip);
+ chip->card_exist = 0;
+ chip->card_ready = 0;
+}
+
+#if !defined(LED_AUTO_BLINK) && defined(REGULAR_BLINK)
+static inline void rtsx_blink_led(struct rtsx_chip *chip)
+{
+ if (chip->card_exist && chip->blink_led) {
+ if (chip->led_toggle_counter < LED_TOGGLE_INTERVAL) {
+ chip->led_toggle_counter++;
+ } else {
+ chip->led_toggle_counter = 0;
+ toggle_gpio(chip, LED_GPIO);
+ }
+ }
+}
+#endif
+
+static void rtsx_monitor_aspm_config(struct rtsx_chip *chip)
+{
+ int maybe_support_aspm, reg_changed;
+ u32 tmp = 0;
+ u8 reg0 = 0, reg1 = 0;
+
+ maybe_support_aspm = 0;
+ reg_changed = 0;
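+	/*
+	 * Track the ASPM setting in PCI config space (and in the SDIO
+	 * function's config space, if present); if it changed, resync the
+	 * ASPM force-control register below.
+	 */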
+ rtsx_read_config_byte(chip, LCTLR, &reg0);
+ if (chip->aspm_level[0] != reg0) {
+ reg_changed = 1;
+ chip->aspm_level[0] = reg0;
+ }
+ if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip)) {
+ rtsx_read_cfg_dw(chip, 1, 0xC0, &tmp);
+ reg1 = (u8)tmp;
+ if (chip->aspm_level[1] != reg1) {
+ reg_changed = 1;
+ chip->aspm_level[1] = reg1;
+ }
+
+ if ((reg0 & 0x03) && (reg1 & 0x03))
+ maybe_support_aspm = 1;
+
+ } else {
+ if (reg0 & 0x03)
+ maybe_support_aspm = 1;
+
+ }
+
+ if (reg_changed) {
+ if (maybe_support_aspm)
+ chip->aspm_l0s_l1_en = 0x03;
+
+ RTSX_DEBUGP("aspm_level[0] = 0x%02x, aspm_level[1] = 0x%02x\n",
+ chip->aspm_level[0], chip->aspm_level[1]);
+
+ if (chip->aspm_l0s_l1_en) {
+ chip->aspm_enabled = 1;
+ } else {
+ chip->aspm_enabled = 0;
+ chip->sdio_aspm = 0;
+ }
+ rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFF,
+ 0x30 | chip->aspm_level[0] |
+ (chip->aspm_level[1] << 2));
+ }
+}
+
+void rtsx_polling_func(struct rtsx_chip *chip)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+#endif
+ int ss_allowed;
+
+ if (rtsx_chk_stat(chip, RTSX_STAT_SUSPEND))
+ return;
+
+ if (rtsx_chk_stat(chip, RTSX_STAT_DELINK))
+ goto Delink_Stage;
+
+ if (chip->polling_config) {
+ u8 val;
+ rtsx_read_config_byte(chip, 0, &val);
+ }
+
+ if (rtsx_chk_stat(chip, RTSX_STAT_SS))
+ return;
+
+#ifdef SUPPORT_OCP
+ if (chip->ocp_int) {
+ rtsx_read_register(chip, OCPSTAT, &(chip->ocp_stat));
+
+ if (chip->card_exist & SD_CARD)
+ sd_power_off_card3v3(chip);
+ else if (chip->card_exist & MS_CARD)
+ ms_power_off_card3v3(chip);
+ else if (chip->card_exist & XD_CARD)
+ xd_power_off_card3v3(chip);
+
+ chip->ocp_int = 0;
+ }
+#endif
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_erase_status) {
+ if (chip->card_exist & SD_CARD) {
+ u8 val;
+ rtsx_read_register(chip, 0xFD30, &val);
+ if (val & 0x02) {
+ sd_card->sd_erase_status = SD_NOT_ERASE;
+ sd_card->sd_lock_notify = 1;
+ chip->need_reinit |= SD_CARD;
+ }
+ } else {
+ sd_card->sd_erase_status = SD_NOT_ERASE;
+ }
+ }
+#endif
+
+ rtsx_init_cards(chip);
+
+ if (chip->ss_en) {
+ ss_allowed = 1;
+
+ if (CHECK_PID(chip, 0x5288)) {
+ ss_allowed = 0;
+ } else {
+ if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip)) {
+ u32 val;
+ rtsx_read_cfg_dw(chip, 1, 0x04, &val);
+ if (val & 0x07)
+ ss_allowed = 0;
+
+ }
+ }
+ } else {
+ ss_allowed = 0;
+ }
+
+ if (ss_allowed && !chip->sd_io) {
+ if (rtsx_get_stat(chip) != RTSX_STAT_IDLE) {
+ chip->ss_counter = 0;
+ } else {
+ if (chip->ss_counter <
+ (chip->ss_idle_period / POLLING_INTERVAL)) {
+ chip->ss_counter++;
+ } else {
+ rtsx_exclusive_enter_ss(chip);
+ return;
+ }
+ }
+ }
+
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_monitor_aspm_config(chip);
+
+#ifdef SUPPORT_SDIO_ASPM
+ if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) &&
+ chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
+ if (chip->sd_io) {
+ dynamic_configure_sdio_aspm(chip);
+ } else {
+ if (!chip->sdio_aspm) {
+ RTSX_DEBUGP("SDIO enter ASPM!\n");
+ rtsx_write_register(chip,
+ ASPM_FORCE_CTL, 0xFC,
+ 0x30 | (chip->aspm_level[1] << 2));
+ chip->sdio_aspm = 1;
+ }
+ }
+ }
+#endif
+ }
+
+ if (chip->idle_counter < IDLE_MAX_COUNT) {
+ chip->idle_counter++;
+ } else {
+ if (rtsx_get_stat(chip) != RTSX_STAT_IDLE) {
+ RTSX_DEBUGP("Idle state!\n");
+ rtsx_set_stat(chip, RTSX_STAT_IDLE);
+
+#if !defined(LED_AUTO_BLINK) && defined(REGULAR_BLINK)
+ chip->led_toggle_counter = 0;
+#endif
+ rtsx_force_power_on(chip, SSC_PDCTL);
+
+ turn_off_led(chip, LED_GPIO);
+
+ if (chip->auto_power_down && !chip->card_ready && !chip->sd_io)
+ rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
+
+ }
+ }
+
+ switch (rtsx_get_stat(chip)) {
+ case RTSX_STAT_RUN:
+#if !defined(LED_AUTO_BLINK) && defined(REGULAR_BLINK)
+ rtsx_blink_led(chip);
+#endif
+ do_remaining_work(chip);
+ break;
+
+ case RTSX_STAT_IDLE:
+ if (chip->sd_io && !chip->sd_int)
+ try_to_switch_sdio_ctrl(chip);
+
+ rtsx_enable_aspm(chip);
+ break;
+
+ default:
+ break;
+ }
+
+
+#ifdef SUPPORT_OCP
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
+#ifdef CONFIG_RTS5208_DEBUG
+ if (chip->ocp_stat &
+ (SD_OC_NOW | SD_OC_EVER | MS_OC_NOW | MS_OC_EVER))
+ RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n",
+ chip->ocp_stat);
+#endif
+
+ if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ if (chip->card_exist & SD_CARD) {
+ rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN,
+ 0);
+ card_power_off(chip, SD_CARD);
+ chip->card_fail |= SD_CARD;
+ }
+ }
+ if (chip->ocp_stat & (MS_OC_NOW | MS_OC_EVER)) {
+ if (chip->card_exist & MS_CARD) {
+ rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN,
+ 0);
+ card_power_off(chip, MS_CARD);
+ chip->card_fail |= MS_CARD;
+ }
+ }
+ } else {
+ if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n",
+ chip->ocp_stat);
+ if (chip->card_exist & SD_CARD) {
+ rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN,
+ 0);
+ chip->card_fail |= SD_CARD;
+ } else if (chip->card_exist & MS_CARD) {
+ rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN,
+ 0);
+ chip->card_fail |= MS_CARD;
+ } else if (chip->card_exist & XD_CARD) {
+ rtsx_write_register(chip, CARD_OE, XD_OUTPUT_EN,
+ 0);
+ chip->card_fail |= XD_CARD;
+ }
+ card_power_off(chip, SD_CARD);
+ }
+ }
+#endif
+
+Delink_Stage:
+ if (chip->auto_delink_en && chip->auto_delink_allowed &&
+ !chip->card_ready && !chip->card_ejected && !chip->sd_io) {
+		int enter_L1 = chip->auto_delink_in_L1 &&
+			(chip->aspm_l0s_l1_en || chip->ss_en);
+ int delink_stage1_cnt = chip->delink_stage1_step;
+ int delink_stage2_cnt = delink_stage1_cnt +
+ chip->delink_stage2_step;
+ int delink_stage3_cnt = delink_stage2_cnt +
+ chip->delink_stage3_step;
+
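+		/*
+		 * Delink state machine: stage 1 requests a delink (forced if
+		 * a false card is detected), stage 2 retries with a forced
+		 * delink, and counting stops after stage 3.
+		 */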
+ if (chip->auto_delink_cnt <= delink_stage3_cnt) {
+ if (chip->auto_delink_cnt == delink_stage1_cnt) {
+ rtsx_set_stat(chip, RTSX_STAT_DELINK);
+
+ if (chip->asic_code && CHECK_PID(chip, 0x5208))
+ rtsx_set_phy_reg_bit(chip, 0x1C, 2);
+
+ if (chip->card_exist) {
+ RTSX_DEBUGP("False card inserted, do force delink\n");
+
+ if (enter_L1)
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 1);
+
+ rtsx_write_register(chip,
+ CHANGE_LINK_STATE, 0x0A,
+ 0x0A);
+
+ if (enter_L1)
+ rtsx_enter_L1(chip);
+
+ chip->auto_delink_cnt = delink_stage3_cnt + 1;
+ } else {
+ RTSX_DEBUGP("No card inserted, do delink\n");
+
+ if (enter_L1)
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 1);
+
+ rtsx_write_register(chip, CHANGE_LINK_STATE, 0x02, 0x02);
+
+ if (enter_L1)
+ rtsx_enter_L1(chip);
+
+ }
+ }
+
+ if (chip->auto_delink_cnt == delink_stage2_cnt) {
+ RTSX_DEBUGP("Try to do force delink\n");
+
+ if (enter_L1)
+ rtsx_exit_L1(chip);
+
+ if (chip->asic_code && CHECK_PID(chip, 0x5208))
+ rtsx_set_phy_reg_bit(chip, 0x1C, 2);
+
+ rtsx_write_register(chip, CHANGE_LINK_STATE,
+ 0x0A, 0x0A);
+ }
+
+ chip->auto_delink_cnt++;
+ }
+ } else {
+ chip->auto_delink_cnt = 0;
+ }
+}
+
+void rtsx_undo_delink(struct rtsx_chip *chip)
+{
+ chip->auto_delink_allowed = 0;
+ rtsx_write_register(chip, CHANGE_LINK_STATE, 0x0A, 0x00);
+}
+
+/**
+ * rtsx_stop_cmd - stop command transfer and DMA transfer
+ * @chip: Realtek's card reader chip
+ * @card: flash card type
+ *
+ * Stop command transfer and DMA transfer.
+ * This function is called in error handler.
+ */
+void rtsx_stop_cmd(struct rtsx_chip *chip, int card)
+{
+ int i;
+
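+	/* Dump the host buffer registers for debugging before stopping */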
+ for (i = 0; i <= 8; i++) {
+ int addr = RTSX_HCBAR + i * 4;
+ u32 reg;
+ reg = rtsx_readl(chip, addr);
+ RTSX_DEBUGP("BAR (0x%02x): 0x%08x\n", addr, reg);
+ }
+ rtsx_writel(chip, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_writel(chip, RTSX_HDBCTLR, STOP_DMA);
+
+ for (i = 0; i < 16; i++) {
+ u16 addr = 0xFE20 + (u16)i;
+ u8 val;
+ rtsx_read_register(chip, addr, &val);
+ RTSX_DEBUGP("0x%04X: 0x%02x\n", addr, val);
+ }
+
+ rtsx_write_register(chip, DMACTL, 0x80, 0x80);
+ rtsx_write_register(chip, RBCTL, 0x80, 0x80);
+}
+
+#define MAX_RW_REG_CNT 1024
+
+int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data)
+{
+ int i;
+ u32 val = 3 << 30;
+
+ val |= (u32)(addr & 0x3FFF) << 16;
+ val |= (u32)mask << 8;
+ val |= (u32)data;
+
+ rtsx_writel(chip, RTSX_HAIMR, val);
+
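+	/*
+	 * Poll HAIMR until the hardware clears the busy bit (bit 31), then
+	 * check that the low byte echoes the data just written.
+	 */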
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ val = rtsx_readl(chip, RTSX_HAIMR);
+ if ((val & (1 << 31)) == 0) {
+ if (data != (u8)val)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+ }
+ }
+
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+}
+
+int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data)
+{
+ u32 val = 2 << 30;
+ int i;
+
+ if (data)
+ *data = 0;
+
+ val |= (u32)(addr & 0x3FFF) << 16;
+
+ rtsx_writel(chip, RTSX_HAIMR, val);
+
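+	/*
+	 * Wait for bit 31 to clear; the low byte of HAIMR then holds the
+	 * value read from the internal register.
+	 */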
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ val = rtsx_readl(chip, RTSX_HAIMR);
+ if ((val & (1 << 31)) == 0)
+ break;
+ }
+
+ if (i >= MAX_RW_REG_CNT)
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+
+ if (data)
+ *data = (u8)(val & 0xFF);
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_write_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 mask,
+ u32 val)
+{
+ u8 mode = 0, tmp;
+ int i;
+
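+	/*
+	 * Load each byte whose mask byte is non-zero into CFGDATA0..3;
+	 * "mode" collects a per-byte write-enable bitmap.
+	 */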
+ for (i = 0; i < 4; i++) {
+ if (mask & 0xFF) {
+ RTSX_WRITE_REG(chip, CFGDATA0 + i,
+ 0xFF, (u8)(val & mask & 0xFF));
+ mode |= (1 << i);
+ }
+ mask >>= 8;
+ val >>= 8;
+ }
+
+ if (mode) {
+ RTSX_WRITE_REG(chip, CFGADDR0, 0xFF, (u8)addr);
+ RTSX_WRITE_REG(chip, CFGADDR1, 0xFF, (u8)(addr >> 8));
+
+ RTSX_WRITE_REG(chip, CFGRWCTL, 0xFF,
+ 0x80 | mode | ((func_no & 0x03) << 4));
+
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ RTSX_READ_REG(chip, CFGRWCTL, &tmp);
+ if ((tmp & 0x80) == 0)
+ break;
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val)
+{
+ int i;
+ u8 tmp;
+ u32 data = 0;
+
+ RTSX_WRITE_REG(chip, CFGADDR0, 0xFF, (u8)addr);
+ RTSX_WRITE_REG(chip, CFGADDR1, 0xFF, (u8)(addr >> 8));
+ RTSX_WRITE_REG(chip, CFGRWCTL, 0xFF, 0x80 | ((func_no & 0x03) << 4));
+
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ RTSX_READ_REG(chip, CFGRWCTL, &tmp);
+ if ((tmp & 0x80) == 0)
+ break;
+ }
+
+ for (i = 0; i < 4; i++) {
+ RTSX_READ_REG(chip, CFGDATA0 + i, &tmp);
+ data |= (u32)tmp << (i * 8);
+ }
+
+ if (val)
+ *val = data;
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_write_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
+ int len)
+{
+ u32 *data, *mask;
+ u16 offset = addr % 4;
+ u16 aligned_addr = addr - offset;
+ int dw_len, i, j;
+ int retval;
+
+ RTSX_DEBUGP("%s\n", __func__);
+
+ if (!buf)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ if ((len + offset) % 4)
+ dw_len = (len + offset) / 4 + 1;
+ else
+ dw_len = (len + offset) / 4;
+
+ RTSX_DEBUGP("dw_len = %d\n", dw_len);
+
+ data = vzalloc(dw_len * 4);
+ if (!data)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ mask = vzalloc(dw_len * 4);
+ if (!mask) {
+ vfree(data);
+ TRACE_RET(chip, STATUS_NOMEM);
+ }
+
+ j = 0;
+ for (i = 0; i < len; i++) {
+ mask[j] |= 0xFF << (offset * 8);
+ data[j] |= buf[i] << (offset * 8);
+ if (++offset == 4) {
+ j++;
+ offset = 0;
+ }
+ }
+
+ RTSX_DUMP(mask, dw_len * 4);
+ RTSX_DUMP(data, dw_len * 4);
+
+ for (i = 0; i < dw_len; i++) {
+ retval = rtsx_write_cfg_dw(chip, func, aligned_addr + i * 4,
+ mask[i], data[i]);
+ if (retval != STATUS_SUCCESS) {
+ vfree(data);
+ vfree(mask);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ vfree(data);
+ vfree(mask);
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_read_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
+ int len)
+{
+ u32 *data;
+ u16 offset = addr % 4;
+ u16 aligned_addr = addr - offset;
+ int dw_len, i, j;
+ int retval;
+
+ RTSX_DEBUGP("%s\n", __func__);
+
+ if ((len + offset) % 4)
+ dw_len = (len + offset) / 4 + 1;
+ else
+ dw_len = (len + offset) / 4;
+
+ RTSX_DEBUGP("dw_len = %d\n", dw_len);
+
+ data = vmalloc(dw_len * 4);
+ if (!data)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ for (i = 0; i < dw_len; i++) {
+ retval = rtsx_read_cfg_dw(chip, func, aligned_addr + i * 4,
+ data + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(data);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (buf) {
+ j = 0;
+
+ for (i = 0; i < len; i++) {
+ buf[i] = (u8)(data[j] >> (offset * 8));
+ if (++offset == 4) {
+ j++;
+ offset = 0;
+ }
+ }
+ }
+
+ vfree(data);
+
+ return STATUS_SUCCESS;
+}
+
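+/* PHY registers are reached indirectly: the address and (for writes)
+ * the 16-bit value are staged in PHYADDR/PHYDATA0/PHYDATA1, then a
+ * command is issued through PHYRWCTL (0x81 = write, 0x80 = read) and
+ * bit 7 of PHYRWCTL is polled until the transaction finishes.
+ */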
+int rtsx_write_phy_register(struct rtsx_chip *chip, u8 addr, u16 val)
+{
+ int i, finished = 0;
+ u8 tmp;
+
+ RTSX_WRITE_REG(chip, PHYDATA0, 0xFF, (u8)val);
+ RTSX_WRITE_REG(chip, PHYDATA1, 0xFF, (u8)(val >> 8));
+ RTSX_WRITE_REG(chip, PHYADDR, 0xFF, addr);
+ RTSX_WRITE_REG(chip, PHYRWCTL, 0xFF, 0x81);
+
+ for (i = 0; i < 100000; i++) {
+ RTSX_READ_REG(chip, PHYRWCTL, &tmp);
+ if (!(tmp & 0x80)) {
+ finished = 1;
+ break;
+ }
+ }
+
+ if (!finished)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_read_phy_register(struct rtsx_chip *chip, u8 addr, u16 *val)
+{
+ int i, finished = 0;
+ u16 data = 0;
+ u8 tmp;
+
+ RTSX_WRITE_REG(chip, PHYADDR, 0xFF, addr);
+ RTSX_WRITE_REG(chip, PHYRWCTL, 0xFF, 0x80);
+
+ for (i = 0; i < 100000; i++) {
+ RTSX_READ_REG(chip, PHYRWCTL, &tmp);
+ if (!(tmp & 0x80)) {
+ finished = 1;
+ break;
+ }
+ }
+
+ if (!finished)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_READ_REG(chip, PHYDATA0, &tmp);
+ data = tmp;
+ RTSX_READ_REG(chip, PHYDATA1, &tmp);
+ data |= (u16)tmp << 8;
+
+ if (val)
+ *val = data;
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val)
+{
+ int i;
+ u8 data = 0;
+
+ RTSX_WRITE_REG(chip, EFUSE_CTRL, 0xFF, 0x80|addr);
+
+ for (i = 0; i < 100; i++) {
+ RTSX_READ_REG(chip, EFUSE_CTRL, &data);
+ if (!(data & 0x80))
+ break;
+ udelay(1);
+ }
+
+ if (data & 0x80)
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+
+ RTSX_READ_REG(chip, EFUSE_DATA, &data);
+ if (val)
+ *val = data;
+
+ return STATUS_SUCCESS;
+}
+
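+/* Writing the efuse is done bit by bit: for every bit of @val that is
+ * zero, the corresponding bit is cleared in EFUSE_DATA and a program
+ * command (0xA0 | addr) is issued through EFUSE_CTRL, polling bit 7
+ * for completion. Bits that are already one in @val are skipped,
+ * which suggests the efuse cells can only be programmed from 1 to 0.
+ */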
+int rtsx_write_efuse(struct rtsx_chip *chip, u8 addr, u8 val)
+{
+ int i, j;
+ u8 data = 0, tmp = 0xFF;
+
+ for (i = 0; i < 8; i++) {
+ if (val & (u8)(1 << i))
+ continue;
+
+ tmp &= (~(u8)(1 << i));
+ RTSX_DEBUGP("Write 0x%x to 0x%x\n", tmp, addr);
+
+ RTSX_WRITE_REG(chip, EFUSE_DATA, 0xFF, tmp);
+ RTSX_WRITE_REG(chip, EFUSE_CTRL, 0xFF, 0xA0|addr);
+
+ for (j = 0; j < 100; j++) {
+ RTSX_READ_REG(chip, EFUSE_CTRL, &data);
+ if (!(data & 0x80))
+ break;
+ wait_timeout(3);
+ }
+
+ if (data & 0x80)
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+
+ wait_timeout(5);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_clr_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
+{
+ int retval;
+ u16 value;
+
+ retval = rtsx_read_phy_register(chip, reg, &value);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (value & (1 << bit)) {
+ value &= ~(1 << bit);
+ retval = rtsx_write_phy_register(chip, reg, value);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
+{
+ int retval;
+ u16 value;
+
+ retval = rtsx_read_phy_register(chip, reg, &value);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (0 == (value & (1 << bit))) {
+ value |= (1 << bit);
+ retval = rtsx_write_phy_register(chip, reg, value);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_check_link_ready(struct rtsx_chip *chip)
+{
+ u8 val;
+
+ RTSX_READ_REG(chip, IRQSTAT0, &val);
+
+ RTSX_DEBUGP("IRQSTAT0: 0x%x\n", val);
+ if (val & LINK_RDY_INT) {
+ RTSX_DEBUGP("Delinked!\n");
+ rtsx_write_register(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);
+ return STATUS_FAIL;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static void rtsx_handle_pm_dstate(struct rtsx_chip *chip, u8 dstate)
+{
+ u32 ultmp;
+
+ RTSX_DEBUGP("%04x set pm_dstate to %d\n", chip->product_id, dstate);
+
+ if (CHK_SDIO_EXIST(chip)) {
+ u8 func_no;
+
+ if (CHECK_PID(chip, 0x5288))
+ func_no = 2;
+ else
+ func_no = 1;
+
+ rtsx_read_cfg_dw(chip, func_no, 0x84, &ultmp);
+ RTSX_DEBUGP("pm_dstate of function %d: 0x%x\n", (int)func_no,
+ ultmp);
+ rtsx_write_cfg_dw(chip, func_no, 0x84, 0xFF, dstate);
+ }
+
+ rtsx_write_config_byte(chip, 0x44, dstate);
+ rtsx_write_config_byte(chip, 0x45, 0);
+}
+
+void rtsx_enter_L1(struct rtsx_chip *chip)
+{
+ rtsx_handle_pm_dstate(chip, 2);
+}
+
+void rtsx_exit_L1(struct rtsx_chip *chip)
+{
+ rtsx_write_config_byte(chip, 0x44, 0);
+ rtsx_write_config_byte(chip, 0x45, 0);
+}
+
+void rtsx_enter_ss(struct rtsx_chip *chip)
+{
+ RTSX_DEBUGP("Enter Selective Suspend State!\n");
+
+ rtsx_write_register(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);
+
+ if (chip->power_down_in_ss) {
+ rtsx_power_off_card(chip);
+ rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
+ }
+
+ if (CHK_SDIO_EXIST(chip)) {
+ if (CHECK_PID(chip, 0x5288))
+ rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF00, 0x0100);
+ else
+ rtsx_write_cfg_dw(chip, 1, 0xC0, 0xFF00, 0x0100);
+ }
+
+ if (chip->auto_delink_en) {
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0x01, 0x01);
+ } else {
+ if (!chip->phy_debug_mode) {
+ u32 tmp;
+ tmp = rtsx_readl(chip, RTSX_BIER);
+ tmp |= CARD_INT;
+ rtsx_writel(chip, RTSX_BIER, tmp);
+ }
+
+ rtsx_write_register(chip, CHANGE_LINK_STATE, 0x02, 0);
+ }
+
+ rtsx_enter_L1(chip);
+
+ RTSX_CLR_DELINK(chip);
+ rtsx_set_stat(chip, RTSX_STAT_SS);
+}
+
+void rtsx_exit_ss(struct rtsx_chip *chip)
+{
+ RTSX_DEBUGP("Exit Selective Suspend State!\n");
+
+ rtsx_exit_L1(chip);
+
+ if (chip->power_down_in_ss) {
+ rtsx_force_power_on(chip, SSC_PDCTL | OC_PDCTL);
+ udelay(1000);
+ }
+
+ if (RTSX_TST_DELINK(chip)) {
+ chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
+ rtsx_reinit_cards(chip, 1);
+ RTSX_CLR_DELINK(chip);
+ } else if (chip->power_down_in_ss) {
+ chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
+ rtsx_reinit_cards(chip, 0);
+ }
+}
+
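+/* Interrupt pre-handling: leave L1/selective suspend if necessary,
+ * read the enabled (RTSX_BIER) and pending (RTSX_BIPR) interrupt
+ * bits, bail out when nothing relevant is pending (or the chip reads
+ * back as 0xFFFFFFFF, i.e. it has disappeared), and translate card
+ * insert/remove events into the need_reset/need_release/need_reinit
+ * bitmaps that the rest of the driver acts on.
+ */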
+int rtsx_pre_handle_interrupt(struct rtsx_chip *chip)
+{
+ u32 status, int_enable;
+ int exit_ss = 0;
+#ifdef SUPPORT_OCP
+ u32 ocp_int = 0;
+
+ ocp_int = OC_INT;
+#endif
+
+ if (chip->ss_en) {
+ chip->ss_counter = 0;
+ if (rtsx_get_stat(chip) == RTSX_STAT_SS) {
+ exit_ss = 1;
+ rtsx_exit_L1(chip);
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ }
+ }
+
+ int_enable = rtsx_readl(chip, RTSX_BIER);
+ chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
+
+ if (((chip->int_reg & int_enable) == 0) ||
+ (chip->int_reg == 0xFFFFFFFF))
+ return STATUS_FAIL;
+
+ status = chip->int_reg &= (int_enable | 0x7FFFFF);
+
+ if (status & CARD_INT) {
+ chip->auto_delink_cnt = 0;
+
+ if (status & SD_INT) {
+ if (status & SD_EXIST) {
+ set_bit(SD_NR, &(chip->need_reset));
+ } else {
+ set_bit(SD_NR, &(chip->need_release));
+ chip->sd_reset_counter = 0;
+ chip->sd_show_cnt = 0;
+ clear_bit(SD_NR, &(chip->need_reset));
+ }
+ } else {
+ /* In multi-LUN mode, it is possible that when one card
+  * is plugged or unplugged, another card still exists in
+  * the slot. In this case, all existing cards should be
+  * reset.
+  */
+ if (exit_ss && (status & SD_EXIST))
+ set_bit(SD_NR, &(chip->need_reinit));
+ }
+ if (!CHECK_PID(chip, 0x5288) || CHECK_BARO_PKG(chip, QFN)) {
+ if (status & XD_INT) {
+ if (status & XD_EXIST) {
+ set_bit(XD_NR, &(chip->need_reset));
+ } else {
+ set_bit(XD_NR, &(chip->need_release));
+ chip->xd_reset_counter = 0;
+ chip->xd_show_cnt = 0;
+ clear_bit(XD_NR, &(chip->need_reset));
+ }
+ } else {
+ if (exit_ss && (status & XD_EXIST))
+ set_bit(XD_NR, &(chip->need_reinit));
+ }
+ }
+ if (status & MS_INT) {
+ if (status & MS_EXIST) {
+ set_bit(MS_NR, &(chip->need_reset));
+ } else {
+ set_bit(MS_NR, &(chip->need_release));
+ chip->ms_reset_counter = 0;
+ chip->ms_show_cnt = 0;
+ clear_bit(MS_NR, &(chip->need_reset));
+ }
+ } else {
+ if (exit_ss && (status & MS_EXIST))
+ set_bit(MS_NR, &(chip->need_reinit));
+ }
+ }
+
+#ifdef SUPPORT_OCP
+ chip->ocp_int = ocp_int & status;
+#endif
+
+ if (chip->sd_io) {
+ if (chip->int_reg & DATA_DONE_INT)
+ chip->int_reg &= ~(u32)DATA_DONE_INT;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+void rtsx_do_before_power_down(struct rtsx_chip *chip, int pm_stat)
+{
+ int retval;
+
+ RTSX_DEBUGP("rtsx_do_before_power_down, pm_stat = %d\n", pm_stat);
+
+ rtsx_set_stat(chip, RTSX_STAT_SUSPEND);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS)
+ return;
+
+ rtsx_release_cards(chip);
+ rtsx_disable_bus_int(chip);
+ turn_off_led(chip, LED_GPIO);
+
+#ifdef HW_AUTO_SWITCH_SD_BUS
+ if (chip->sd_io) {
+ chip->sdio_in_charge = 1;
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_write_register(chip, TLPTISTAT, 0x08, 0x08);
+ /* Enable sdio_bus_auto_switch */
+ rtsx_write_register(chip, 0xFE70, 0x80, 0x80);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ rtsx_write_register(chip, TLPTISTAT, 0x08, 0x08);
+ /* Enable sdio_bus_auto_switch */
+ rtsx_write_register(chip, 0xFE5A, 0x08, 0x08);
+ }
+ }
+#endif
+
+ if (CHECK_PID(chip, 0x5208) && (chip->ic_version >= IC_VER_D)) {
+ /* u_force_clkreq_0 */
+ rtsx_write_register(chip, PETXCFG, 0x08, 0x08);
+ }
+
+ if (pm_stat == PM_S1) {
+ RTSX_DEBUGP("Host enter S1\n");
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03,
+ HOST_ENTER_S1);
+ } else if (pm_stat == PM_S3) {
+ if (chip->s3_pwr_off_delay > 0)
+ wait_timeout(chip->s3_pwr_off_delay);
+
+ RTSX_DEBUGP("Host enter S3\n");
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03,
+ HOST_ENTER_S3);
+ }
+
+ if (chip->do_delink_before_power_down && chip->auto_delink_en)
+ rtsx_write_register(chip, CHANGE_LINK_STATE, 0x02, 2);
+
+ rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
+
+ chip->cur_clk = 0;
+ chip->cur_card = 0;
+ chip->card_exist = 0;
+}
+
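+/* Dynamic ASPM control: when aspm_l0s_l1_en and dynamic_aspm are both
+ * set, ASPM is toggled at runtime. On the RTS5208 this is done through
+ * the ASPM_FORCE_CTL register (plus a PHY register tweak on ASIC
+ * silicon); on other chips the Link Control register (LCTLR) in PCI
+ * config space is written instead. If an SDIO function is present,
+ * its config dword at 0xC0 is updated to match.
+ */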
+void rtsx_enable_aspm(struct rtsx_chip *chip)
+{
+ if (chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
+ if (!chip->aspm_enabled) {
+ RTSX_DEBUGP("Try to enable ASPM\n");
+ chip->aspm_enabled = 1;
+
+ if (chip->asic_code && CHECK_PID(chip, 0x5208))
+ rtsx_write_phy_register(chip, 0x07, 0);
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_write_register(chip, ASPM_FORCE_CTL, 0xF3,
+ 0x30 | chip->aspm_level[0]);
+ } else {
+ rtsx_write_config_byte(chip, LCTLR,
+ chip->aspm_l0s_l1_en);
+ }
+
+ if (CHK_SDIO_EXIST(chip)) {
+ u16 val = chip->aspm_l0s_l1_en | 0x0100;
+ if (CHECK_PID(chip, 0x5288))
+ rtsx_write_cfg_dw(chip, 2, 0xC0,
+ 0xFFFF, val);
+ else
+ rtsx_write_cfg_dw(chip, 1, 0xC0,
+ 0xFFFF, val);
+ }
+ }
+ }
+
+ return;
+}
+
+void rtsx_disable_aspm(struct rtsx_chip *chip)
+{
+ if (CHECK_PID(chip, 0x5208))
+ rtsx_monitor_aspm_config(chip);
+
+ if (chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
+ if (chip->aspm_enabled) {
+ RTSX_DEBUGP("Try to disable ASPM\n");
+ chip->aspm_enabled = 0;
+
+ if (chip->asic_code && CHECK_PID(chip, 0x5208))
+ rtsx_write_phy_register(chip, 0x07, 0x0129);
+ if (CHECK_PID(chip, 0x5208))
+ rtsx_write_register(chip, ASPM_FORCE_CTL,
+ 0xF3, 0x30);
+ else
+ rtsx_write_config_byte(chip, LCTLR, 0x00);
+
+ wait_timeout(1);
+ }
+ }
+
+ return;
+}
+
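+/* The ping-pong buffer (PPBUF) is accessed through the generic
+ * register interface: reads and writes are batched into host command
+ * lists of at most 256 single-byte register accesses starting at
+ * PPBUF_BASE2, and each batch is pushed to the chip with
+ * rtsx_send_cmd().
+ */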
+int rtsx_read_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
+{
+ int retval;
+ int i, j;
+ u16 reg_addr;
+ u8 *ptr;
+
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ ptr = buf;
+ reg_addr = PPBUF_BASE2;
+ for (i = 0; i < buf_len/256; i++) {
+ rtsx_init_cmd(chip);
+
+ for (j = 0; j < 256; j++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr++, 0, 0);
+
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ memcpy(ptr, rtsx_get_cmd_data(chip), 256);
+ ptr += 256;
+ }
+
+ if (buf_len%256) {
+ rtsx_init_cmd(chip);
+
+ for (j = 0; j < buf_len%256; j++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr++, 0, 0);
+
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ memcpy(ptr, rtsx_get_cmd_data(chip), buf_len%256);
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
+{
+ int retval;
+ int i, j;
+ u16 reg_addr;
+ u8 *ptr;
+
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ ptr = buf;
+ reg_addr = PPBUF_BASE2;
+ for (i = 0; i < buf_len/256; i++) {
+ rtsx_init_cmd(chip);
+
+ for (j = 0; j < 256; j++) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, reg_addr++, 0xFF,
+ *ptr);
+ ptr++;
+ }
+
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (buf_len%256) {
+ rtsx_init_cmd(chip);
+
+ for (j = 0; j < buf_len%256; j++) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, reg_addr++, 0xFF,
+ *ptr);
+ ptr++;
+ }
+
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_check_chip_exist(struct rtsx_chip *chip)
+{
+ if (rtsx_readl(chip, 0) == 0xFFFFFFFF)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
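+/* Power-rail control: FPDCTL holds the force-power-down bits for the
+ * individual blocks. rtsx_force_power_on() clears the selected bits
+ * (SSC and, when OCP support is built in, the SD/MS over-current
+ * blocks), while rtsx_force_power_down() sets them.
+ */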
+int rtsx_force_power_on(struct rtsx_chip *chip, u8 ctl)
+{
+ int retval;
+ u8 mask = 0;
+
+ if (ctl & SSC_PDCTL)
+ mask |= SSC_POWER_DOWN;
+
+#ifdef SUPPORT_OCP
+ if (ctl & OC_PDCTL) {
+ mask |= SD_OC_POWER_DOWN;
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
+ mask |= MS_OC_POWER_DOWN;
+ }
+#endif
+
+ if (mask) {
+ retval = rtsx_write_register(chip, FPDCTL, mask, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHECK_PID(chip, 0x5288))
+ wait_timeout(200);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_force_power_down(struct rtsx_chip *chip, u8 ctl)
+{
+ int retval;
+ u8 mask = 0, val = 0;
+
+ if (ctl & SSC_PDCTL)
+ mask |= SSC_POWER_DOWN;
+
+#ifdef SUPPORT_OCP
+ if (ctl & OC_PDCTL) {
+ mask |= SD_OC_POWER_DOWN;
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
+ mask |= MS_OC_POWER_DOWN;
+ }
+#endif
+
+ if (mask) {
+ val = mask;
+ retval = rtsx_write_register(chip, FPDCTL, mask, val);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
diff --git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h
new file mode 100644
index 0000000..c25efcc
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_chip.h
@@ -0,0 +1,1002 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_CHIP_H
+#define __REALTEK_RTSX_CHIP_H
+
+#include "rtsx.h"
+
+#define SUPPORT_CPRM
+#define SUPPORT_OCP
+#define SUPPORT_SDIO_ASPM
+#define SUPPORT_MAGIC_GATE
+#define SUPPORT_MSXC
+#define SUPPORT_SD_LOCK
+/* Hardware switches bus_ctl and cd_ctl automatically */
+#define HW_AUTO_SWITCH_SD_BUS
+/* Enable hardware interrupt write clear */
+#define HW_INT_WRITE_CLR
+/* #define LED_AUTO_BLINK */
+/* #define DISABLE_CARD_INT */
+
+#ifdef SUPPORT_MAGIC_GATE
+ /* Using NORMAL_WRITE instead of AUTO_WRITE to set ICV */
+ #define MG_SET_ICV_SLOW
+ /* HW may miss ERR/CMDNK signal when sampling INT status. */
+ #define MS_SAMPLE_INT_ERR
+ /* HW does not support the Wait_INT function during
+  * READ_BYTES transfer mode */
+ #define READ_BYTES_WAIT_INT
+#endif
+
+#ifdef SUPPORT_MSXC
+#define XC_POWERCLASS
+#define SUPPORT_PCGL_1P18
+#endif
+
+#ifndef LED_AUTO_BLINK
+#define REGULAR_BLINK
+#endif
+
+#define LED_BLINK_SPEED 5
+#define LED_TOGGLE_INTERVAL 6
+#define GPIO_TOGGLE_THRESHOLD 1024
+#define LED_GPIO 0
+
+#define POLLING_INTERVAL 30
+
+#define TRACE_ITEM_CNT 64
+
+#ifndef STATUS_SUCCESS
+#define STATUS_SUCCESS 0
+#endif
+#ifndef STATUS_FAIL
+#define STATUS_FAIL 1
+#endif
+#ifndef STATUS_TIMEDOUT
+#define STATUS_TIMEDOUT 2
+#endif
+#ifndef STATUS_NOMEM
+#define STATUS_NOMEM 3
+#endif
+#ifndef STATUS_READ_FAIL
+#define STATUS_READ_FAIL 4
+#endif
+#ifndef STATUS_WRITE_FAIL
+#define STATUS_WRITE_FAIL 5
+#endif
+#ifndef STATUS_ERROR
+#define STATUS_ERROR 10
+#endif
+
+#define PM_S1 1
+#define PM_S3 3
+
+/*
+ * Transport return codes
+ */
+
+#define TRANSPORT_GOOD 0 /* Transport good, command good */
+#define TRANSPORT_FAILED 1 /* Transport good, command failed */
+#define TRANSPORT_NO_SENSE 2 /* Command failed, no auto-sense */
+#define TRANSPORT_ERROR 3 /* Transport bad (i.e. device dead) */
+
+
+/*-----------------------------------
+ Start-Stop-Unit
+-----------------------------------*/
+#define STOP_MEDIUM 0x00 /* access disabled */
+#define MAKE_MEDIUM_READY 0x01 /* access enabled */
+#define UNLOAD_MEDIUM 0x02 /* unload */
+#define LOAD_MEDIUM 0x03 /* load */
+
+/*-----------------------------------
+ STANDARD_INQUIRY
+-----------------------------------*/
+#define QULIFIRE 0x00
+#define AENC_FNC 0x00
+#define TRML_IOP 0x00
+#define REL_ADR 0x00
+#define WBUS_32 0x00
+#define WBUS_16 0x00
+#define SYNC 0x00
+#define LINKED 0x00
+#define CMD_QUE 0x00
+#define SFT_RE 0x00
+
+#define VEN_ID_LEN 8 /* Vendor ID Length */
+#define PRDCT_ID_LEN 16 /* Product ID Length */
+#define PRDCT_REV_LEN 4 /* Product Revision Length */
+
+/* Dynamic flag definitions: used in set_bit() etc. */
+#define RTSX_FLIDX_TRANS_ACTIVE 18 /* 0x00040000 transfer is active */
+#define RTSX_FLIDX_ABORTING 20 /* 0x00100000 abort is in
+ * progress */
+#define RTSX_FLIDX_DISCONNECTING 21 /* 0x00200000 disconnect
+ * in progress */
+#define ABORTING_OR_DISCONNECTING ((1UL << RTSX_FLIDX_ABORTING) | \
+ (1UL << RTSX_FLIDX_DISCONNECTING))
+#define RTSX_FLIDX_RESETTING 22 /* 0x00400000 device reset
+ * in progress */
+#define RTSX_FLIDX_TIMED_OUT 23 /* 0x00800000 SCSI
+ * midlayer timed out */
+
+#define DRCT_ACCESS_DEV 0x00 /* Direct Access Device */
+#define RMB_DISC 0x80 /* The Device is Removable */
+#define ANSI_SCSI2 0x02 /* Based on ANSI-SCSI2 */
+
+#define SCSI 0x00 /* Interface ID */
+
+#define WRITE_PROTECTED_MEDIA 0x07
+
+/*---- sense key ----*/
+#define ILI 0x20 /* ILI bit is on */
+
+#define NO_SENSE 0x00 /* no sense key present */
+#define RECOVER_ERR 0x01 /* Target/Logical unit recovered */
+#define NOT_READY 0x02 /* Logical unit is not ready */
+#define MEDIA_ERR 0x03 /* medium/data error */
+#define HARDWARE_ERR 0x04 /* hardware error */
+#define ILGAL_REQ 0x05 /* CDB/parameter/identify msg error */
+#define UNIT_ATTENTION 0x06 /* unit attention condition occurred */
+#define DAT_PRTCT 0x07 /* read/write is disabled */
+#define BLNC_CHK 0x08 /* blank/DOF found in read, or */
+ /* write to a non-blank area */
+#define CPY_ABRT 0x0a /* Copy/Compare/Copy&Verify illegal */
+#define ABRT_CMD 0x0b /* Target aborted the command */
+#define EQUAL 0x0c /* Search Data ended with Equal */
+#define VLM_OVRFLW 0x0d /* Some data are left in the buffer */
+#define MISCMP 0x0e /* miscompare detected */
+
+#define READ_ERR -1
+#define WRITE_ERR -2
+
+#define FIRST_RESET 0x01
+#define USED_EXIST 0x02
+
+/*-----------------------------------
+ SENSE_DATA
+-----------------------------------*/
+/*---- valid ----*/
+#define SENSE_VALID 0x80 /* Sense data is valid as SCSI2 */
+#define SENSE_INVALID 0x00 /* Sense data is invalid as SCSI2 */
+
+/*---- error code ----*/
+#define CUR_ERR 0x70 /* current error */
+#define DEF_ERR 0x71 /* deferred error */
+
+/*---- sense key Information ----*/
+#define SNSKEYINFO_LEN 3 /* length of sense key information */
+
+#define SKSV 0x80
+#define CDB_ILLEGAL 0x40
+#define DAT_ILLEGAL 0x00
+#define BPV 0x08
+#define BIT_ILLEGAL0 0 /* bit0 is illegal */
+#define BIT_ILLEGAL1 1 /* bit1 is illegal */
+#define BIT_ILLEGAL2 2 /* bit2 is illegal */
+#define BIT_ILLEGAL3 3 /* bit3 is illegal */
+#define BIT_ILLEGAL4 4 /* bit4 is illegal */
+#define BIT_ILLEGAL5 5 /* bit5 is illegal */
+#define BIT_ILLEGAL6 6 /* bit6 is illegal */
+#define BIT_ILLEGAL7 7 /* bit7 is illegal */
+
+/*---- ASC ----*/
+#define ASC_NO_INFO 0x00
+#define ASC_MISCMP 0x1d
+#define ASC_INVLD_CDB 0x24
+#define ASC_INVLD_PARA 0x26
+#define ASC_LU_NOT_READY 0x04
+#define ASC_WRITE_ERR 0x0c
+#define ASC_READ_ERR 0x11
+#define ASC_LOAD_EJCT_ERR 0x53
+#define ASC_MEDIA_NOT_PRESENT 0x3A
+#define ASC_MEDIA_CHANGED 0x28
+#define ASC_MEDIA_IN_PROCESS 0x04
+#define ASC_WRITE_PROTECT 0x27
+#define ASC_LUN_NOT_SUPPORTED 0x25
+
+/*---- ASQC ----*/
+#define ASCQ_NO_INFO 0x00
+#define ASCQ_MEDIA_IN_PROCESS 0x01
+#define ASCQ_MISCMP 0x00
+#define ASCQ_INVLD_CDB 0x00
+#define ASCQ_INVLD_PARA 0x02
+#define ASCQ_LU_NOT_READY 0x02
+#define ASCQ_WRITE_ERR 0x02
+#define ASCQ_READ_ERR 0x00
+#define ASCQ_LOAD_EJCT_ERR 0x00
+#define ASCQ_WRITE_PROTECT 0x00
+
+
+struct sense_data_t {
+ unsigned char err_code; /* error code */
+ /* bit7 : valid */
+ /* (1 : SCSI2) */
+ /* (0 : Vendor specific) */
+ /* bit6-0 : error code */
+ /* (0x70 : current error) */
+ /* (0x71 : deferred error) */
+ unsigned char seg_no; /* segment No. */
+ unsigned char sense_key; /* byte5 : ILI */
+ /* bit3-0 : sense key */
+ unsigned char info[4]; /* information */
+ unsigned char ad_sense_len; /* additional sense data length */
+ unsigned char cmd_info[4]; /* command specific information */
+ unsigned char asc; /* ASC */
+ unsigned char ascq; /* ASCQ */
+ unsigned char rfu; /* FRU */
+ unsigned char sns_key_info[3];/* sense key specific information */
+};
+
+/* PCI Operation Register Address */
+#define RTSX_HCBAR 0x00
+#define RTSX_HCBCTLR 0x04
+#define RTSX_HDBAR 0x08
+#define RTSX_HDBCTLR 0x0C
+#define RTSX_HAIMR 0x10
+#define RTSX_BIPR 0x14
+#define RTSX_BIER 0x18
+
+/* Host command buffer control register */
+#define STOP_CMD (0x01 << 28)
+
+/* Host data buffer control register */
+#define SDMA_MODE 0x00
+#define ADMA_MODE (0x02 << 26)
+#define STOP_DMA (0x01 << 28)
+#define TRIG_DMA (0x01 << 31)
+
+/* Bus interrupt pending register */
+#define CMD_DONE_INT (1 << 31)
+#define DATA_DONE_INT (1 << 30)
+#define TRANS_OK_INT (1 << 29)
+#define TRANS_FAIL_INT (1 << 28)
+#define XD_INT (1 << 27)
+#define MS_INT (1 << 26)
+#define SD_INT (1 << 25)
+#define GPIO0_INT (1 << 24)
+#define OC_INT (1 << 23)
+#define SD_WRITE_PROTECT (1 << 19)
+#define XD_EXIST (1 << 18)
+#define MS_EXIST (1 << 17)
+#define SD_EXIST (1 << 16)
+#define DELINK_INT GPIO0_INT
+#define MS_OC_INT (1 << 23)
+#define SD_OC_INT (1 << 22)
+
+#define CARD_INT (XD_INT | MS_INT | SD_INT)
+#define NEED_COMPLETE_INT (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT)
+#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | CARD_INT | GPIO0_INT | OC_INT)
+
+#define CARD_EXIST (XD_EXIST | MS_EXIST | SD_EXIST)
+
+/* Bus interrupt enable register */
+#define CMD_DONE_INT_EN (1 << 31)
+#define DATA_DONE_INT_EN (1 << 30)
+#define TRANS_OK_INT_EN (1 << 29)
+#define TRANS_FAIL_INT_EN (1 << 28)
+#define XD_INT_EN (1 << 27)
+#define MS_INT_EN (1 << 26)
+#define SD_INT_EN (1 << 25)
+#define GPIO0_INT_EN (1 << 24)
+#define OC_INT_EN (1 << 23)
+#define DELINK_INT_EN GPIO0_INT_EN
+#define MS_OC_INT_EN (1 << 23)
+#define SD_OC_INT_EN (1 << 22)
+
+
+#define READ_REG_CMD 0
+#define WRITE_REG_CMD 1
+#define CHECK_REG_CMD 2
+
+#define HOST_TO_DEVICE 0
+#define DEVICE_TO_HOST 1
+
+
+#define RTSX_RESV_BUF_LEN 4096
+#define HOST_CMDS_BUF_LEN 1024
+#define HOST_SG_TBL_BUF_LEN (RTSX_RESV_BUF_LEN - HOST_CMDS_BUF_LEN)
+
+#define SD_NR 2
+#define MS_NR 3
+#define XD_NR 4
+#define SPI_NR 7
+#define SD_CARD (1 << SD_NR)
+#define MS_CARD (1 << MS_NR)
+#define XD_CARD (1 << XD_NR)
+#define SPI_CARD (1 << SPI_NR)
+
+#define MAX_ALLOWED_LUN_CNT 8
+
+#define XD_FREE_TABLE_CNT 1200
+#define MS_FREE_TABLE_CNT 512
+
+
+/* Bit Operation */
+#define SET_BIT(data, idx) ((data) |= 1 << (idx))
+#define CLR_BIT(data, idx) ((data) &= ~(1 << (idx)))
+#define CHK_BIT(data, idx) ((data) & (1 << (idx)))
+
+/* SG descriptor */
+#define SG_INT 0x04
+#define SG_END 0x02
+#define SG_VALID 0x01
+
+#define SG_NO_OP 0x00
+#define SG_TRANS_DATA (0x02 << 4)
+#define SG_LINK_DESC (0x03 << 4)
+
+struct rtsx_chip;
+
+typedef int (*card_rw_func)(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 sec_addr, u16 sec_cnt);
+
+/* Supported Clock */
+enum card_clock {CLK_20 = 1, CLK_30, CLK_40, CLK_50, CLK_60,
+ CLK_80, CLK_100, CLK_120, CLK_150, CLK_200};
+
+enum RTSX_STAT {RTSX_STAT_INIT, RTSX_STAT_IDLE, RTSX_STAT_RUN, RTSX_STAT_SS,
+ RTSX_STAT_DELINK, RTSX_STAT_SUSPEND,
+ RTSX_STAT_ABORT, RTSX_STAT_DISCONNECT};
+enum IC_VER {IC_VER_AB, IC_VER_C = 2, IC_VER_D = 3};
+
+#define MAX_RESET_CNT 3
+
+/* For MS Card */
+#define MAX_DEFECTIVE_BLOCK 10
+
+struct zone_entry {
+ u16 *l2p_table;
+ u16 *free_table;
+ u16 defect_list[MAX_DEFECTIVE_BLOCK]; /* For MS card only */
+ int set_index;
+ int get_index;
+ int unused_blk_cnt;
+ int disable_count;
+ /* To indicate whether the L2P table of this zone has been built. */
+ int build_flag;
+};
+
+#define TYPE_SD 0x0000
+#define TYPE_MMC 0x0001
+
+/* TYPE_SD */
+#define SD_HS 0x0100
+#define SD_SDR50 0x0200
+#define SD_DDR50 0x0400
+#define SD_SDR104 0x0800
+#define SD_HCXC 0x1000
+
+/* TYPE_MMC */
+#define MMC_26M 0x0100
+#define MMC_52M 0x0200
+#define MMC_4BIT 0x0400
+#define MMC_8BIT 0x0800
+#define MMC_SECTOR_MODE 0x1000
+#define MMC_DDR52 0x2000
+
+/* SD card */
+#define CHK_SD(sd_card) (((sd_card)->sd_type & 0xFF) == TYPE_SD)
+#define CHK_SD_HS(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HS))
+#define CHK_SD_SDR50(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR50))
+#define CHK_SD_DDR50(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_DDR50))
+#define CHK_SD_SDR104(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR104))
+#define CHK_SD_HCXC(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HCXC))
+#define CHK_SD_HC(sd_card) (CHK_SD_HCXC(sd_card) && ((sd_card)->capacity <= 0x4000000))
+#define CHK_SD_XC(sd_card) (CHK_SD_HCXC(sd_card) && ((sd_card)->capacity > 0x4000000))
+#define CHK_SD30_SPEED(sd_card) (CHK_SD_SDR50(sd_card) || CHK_SD_DDR50(sd_card) || CHK_SD_SDR104(sd_card))
+
+#define SET_SD(sd_card) ((sd_card)->sd_type = TYPE_SD)
+#define SET_SD_HS(sd_card) ((sd_card)->sd_type |= SD_HS)
+#define SET_SD_SDR50(sd_card) ((sd_card)->sd_type |= SD_SDR50)
+#define SET_SD_DDR50(sd_card) ((sd_card)->sd_type |= SD_DDR50)
+#define SET_SD_SDR104(sd_card) ((sd_card)->sd_type |= SD_SDR104)
+#define SET_SD_HCXC(sd_card) ((sd_card)->sd_type |= SD_HCXC)
+
+#define CLR_SD_HS(sd_card) ((sd_card)->sd_type &= ~SD_HS)
+#define CLR_SD_SDR50(sd_card) ((sd_card)->sd_type &= ~SD_SDR50)
+#define CLR_SD_DDR50(sd_card) ((sd_card)->sd_type &= ~SD_DDR50)
+#define CLR_SD_SDR104(sd_card) ((sd_card)->sd_type &= ~SD_SDR104)
+#define CLR_SD_HCXC(sd_card) ((sd_card)->sd_type &= ~SD_HCXC)
+
+/* MMC card */
+#define CHK_MMC(sd_card) (((sd_card)->sd_type & 0xFF) == TYPE_MMC)
+#define CHK_MMC_26M(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_26M))
+#define CHK_MMC_52M(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_52M))
+#define CHK_MMC_4BIT(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_4BIT))
+#define CHK_MMC_8BIT(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_8BIT))
+#define CHK_MMC_SECTOR_MODE(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_SECTOR_MODE))
+#define CHK_MMC_DDR52(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_DDR52))
+
+#define SET_MMC(sd_card) ((sd_card)->sd_type = TYPE_MMC)
+#define SET_MMC_26M(sd_card) ((sd_card)->sd_type |= MMC_26M)
+#define SET_MMC_52M(sd_card) ((sd_card)->sd_type |= MMC_52M)
+#define SET_MMC_4BIT(sd_card) ((sd_card)->sd_type |= MMC_4BIT)
+#define SET_MMC_8BIT(sd_card) ((sd_card)->sd_type |= MMC_8BIT)
+#define SET_MMC_SECTOR_MODE(sd_card) ((sd_card)->sd_type |= MMC_SECTOR_MODE)
+#define SET_MMC_DDR52(sd_card) ((sd_card)->sd_type |= MMC_DDR52)
+
+#define CLR_MMC_26M(sd_card) ((sd_card)->sd_type &= ~MMC_26M)
+#define CLR_MMC_52M(sd_card) ((sd_card)->sd_type &= ~MMC_52M)
+#define CLR_MMC_4BIT(sd_card) ((sd_card)->sd_type &= ~MMC_4BIT)
+#define CLR_MMC_8BIT(sd_card) ((sd_card)->sd_type &= ~MMC_8BIT)
+#define CLR_MMC_SECTOR_MODE(sd_card) ((sd_card)->sd_type &= ~MMC_SECTOR_MODE)
+#define CLR_MMC_DDR52(sd_card) ((sd_card)->sd_type &= ~MMC_DDR52)
+
+#define CHK_MMC_HS(sd_card) (CHK_MMC_52M(sd_card) && CHK_MMC_26M(sd_card))
+#define CLR_MMC_HS(sd_card) \
+do { \
+ CLR_MMC_DDR52(sd_card); \
+ CLR_MMC_52M(sd_card); \
+ CLR_MMC_26M(sd_card); \
+} while (0)
+
+#define SD_SUPPORT_CLASS_TEN 0x01
+#define SD_SUPPORT_1V8 0x02
+
+#define SD_SET_CLASS_TEN(sd_card) ((sd_card)->sd_setting |= SD_SUPPORT_CLASS_TEN)
+#define SD_CHK_CLASS_TEN(sd_card) ((sd_card)->sd_setting & SD_SUPPORT_CLASS_TEN)
+#define SD_CLR_CLASS_TEN(sd_card) ((sd_card)->sd_setting &= ~SD_SUPPORT_CLASS_TEN)
+#define SD_SET_1V8(sd_card) ((sd_card)->sd_setting |= SD_SUPPORT_1V8)
+#define SD_CHK_1V8(sd_card) ((sd_card)->sd_setting & SD_SUPPORT_1V8)
+#define SD_CLR_1V8(sd_card) ((sd_card)->sd_setting &= ~SD_SUPPORT_1V8)
+
+struct sd_info {
+ u16 sd_type;
+ u8 err_code;
+ u8 sd_data_buf_ready;
+ u32 sd_addr;
+ u32 capacity;
+
+ u8 raw_csd[16];
+ u8 raw_scr[8];
+
+ /* Sequential RW */
+ int seq_mode;
+ enum dma_data_direction pre_dir;
+ u32 pre_sec_addr;
+ u16 pre_sec_cnt;
+
+ int cleanup_counter;
+
+ int sd_clock;
+
+ int mmc_dont_switch_bus;
+
+#ifdef SUPPORT_CPRM
+ int sd_pass_thru_en;
+ int pre_cmd_err;
+ u8 last_rsp_type;
+ u8 rsp[17];
+#endif
+
+ u8 func_group1_mask;
+ u8 func_group2_mask;
+ u8 func_group3_mask;
+ u8 func_group4_mask;
+
+ u8 sd_switch_fail;
+ u8 sd_read_phase;
+
+#ifdef SUPPORT_SD_LOCK
+ u8 sd_lock_status;
+ u8 sd_erase_status;
+ u8 sd_lock_notify;
+#endif
+ int need_retune;
+};
+
+struct xd_delay_write_tag {
+ u32 old_phyblock;
+ u32 new_phyblock;
+ u32 logblock;
+ u8 pageoff;
+ u8 delay_write_flag;
+};
+
+struct xd_info {
+ u8 maker_code;
+ u8 device_code;
+ u8 block_shift;
+ u8 page_off;
+ u8 addr_cycle;
+ u16 cis_block;
+ u8 multi_flag;
+ u8 err_code;
+ u32 capacity;
+
+ struct zone_entry *zone;
+ int zone_cnt;
+
+ struct xd_delay_write_tag delay_write;
+ int cleanup_counter;
+
+ int xd_clock;
+};
+
+#define MODE_512_SEQ 0x01
+#define MODE_2K_SEQ 0x02
+
+#define TYPE_MS 0x0000
+#define TYPE_MSPRO 0x0001
+
+#define MS_4BIT 0x0100
+#define MS_8BIT 0x0200
+#define MS_HG 0x0400
+#define MS_XC 0x0800
+
+#define HG8BIT (MS_HG | MS_8BIT)
+
+#define CHK_MSPRO(ms_card) (((ms_card)->ms_type & 0xFF) == TYPE_MSPRO)
+#define CHK_HG8BIT(ms_card) (CHK_MSPRO(ms_card) && (((ms_card)->ms_type & HG8BIT) == HG8BIT))
+#define CHK_MSXC(ms_card) (CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_XC))
+#define CHK_MSHG(ms_card) (CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_HG))
+
+#define CHK_MS8BIT(ms_card) (((ms_card)->ms_type & MS_8BIT))
+#define CHK_MS4BIT(ms_card) (((ms_card)->ms_type & MS_4BIT))
+
+struct ms_delay_write_tag {
+ u16 old_phyblock;
+ u16 new_phyblock;
+ u16 logblock;
+ u8 pageoff;
+ u8 delay_write_flag;
+};
+
+struct ms_info {
+ u16 ms_type;
+ u8 block_shift;
+ u8 page_off;
+ u16 total_block;
+ u16 boot_block;
+ u32 capacity;
+
+ u8 check_ms_flow;
+ u8 switch_8bit_fail;
+ u8 err_code;
+
+ struct zone_entry *segment;
+ int segment_cnt;
+
+ int pro_under_formatting;
+ int format_status;
+ u16 progress;
+ u8 raw_sys_info[96];
+#ifdef SUPPORT_PCGL_1P18
+ u8 raw_model_name[48];
+#endif
+
+ u8 multi_flag;
+
+ /* Sequential RW */
+ u8 seq_mode;
+ enum dma_data_direction pre_dir;
+ u32 pre_sec_addr;
+ u16 pre_sec_cnt;
+ u32 total_sec_cnt;
+
+ struct ms_delay_write_tag delay_write;
+
+ int cleanup_counter;
+
+ int ms_clock;
+
+#ifdef SUPPORT_MAGIC_GATE
+ u8 magic_gate_id[16];
+ u8 mg_entry_num;
+ int mg_auth; /* flag to indicate authentication process */
+#endif
+};
+
+struct spi_info {
+ u8 use_clk;
+ u8 write_en;
+ u16 clk_div;
+ u8 err_code;
+
+ int spi_clock;
+};
+
+
+#ifdef _MSG_TRACE
+struct trace_msg_t {
+ u16 line;
+#define MSG_FUNC_LEN 64
+ char func[MSG_FUNC_LEN];
+#define MSG_FILE_LEN 32
+ char file[MSG_FILE_LEN];
+#define TIME_VAL_LEN 16
+ u8 timeval_buf[TIME_VAL_LEN];
+ u8 valid;
+};
+#endif
+
+/************/
+/* LUN mode */
+/************/
+/* Single LUN, supports xD/SD/MS */
+#define DEFAULT_SINGLE 0
+/* 2-LUN mode, supports SD/MS */
+#define SD_MS_2LUN 1
+/* Single LUN, but only supports SD/MS, for Barossa LQFP */
+#define SD_MS_1LUN 2
+
+#define LAST_LUN_MODE 2
+
+/* Barossa package */
+#define QFN 0
+#define LQFP 1
+
+/******************/
+/* sd_ctl bit map */
+/******************/
+/* SD push point control, bit 0, 1 */
+#define SD_PUSH_POINT_CTL_MASK 0x03
+#define SD_PUSH_POINT_DELAY 0x01
+#define SD_PUSH_POINT_AUTO 0x02
+/* SD sample point control, bit 2, 3 */
+#define SD_SAMPLE_POINT_CTL_MASK 0x0C
+#define SD_SAMPLE_POINT_DELAY 0x04
+#define SD_SAMPLE_POINT_AUTO 0x08
+/* SD DDR Tx phase set by user, bit 4 */
+#define SD_DDR_TX_PHASE_SET_BY_USER 0x10
+/* MMC DDR Tx phase set by user, bit 5 */
+#define MMC_DDR_TX_PHASE_SET_BY_USER 0x20
+/* Support MMC DDR mode, bit 6 */
+#define SUPPORT_MMC_DDR_MODE 0x40
+/* Reset MMC at first */
+#define RESET_MMC_FIRST 0x80
+
+#define SEQ_START_CRITERIA 0x20
+
+/* MS Power Class En */
+#define POWER_CLASS_2_EN 0x02
+#define POWER_CLASS_1_EN 0x01
+
+#define MAX_SHOW_CNT 10
+#define MAX_RESET_CNT 3
+
+#define SDIO_EXIST 0x01
+#define SDIO_IGNORED 0x02
+
+#define CHK_SDIO_EXIST(chip) ((chip)->sdio_func_exist & SDIO_EXIST)
+#define SET_SDIO_EXIST(chip) ((chip)->sdio_func_exist |= SDIO_EXIST)
+#define CLR_SDIO_EXIST(chip) ((chip)->sdio_func_exist &= ~SDIO_EXIST)
+
+#define CHK_SDIO_IGNORED(chip) ((chip)->sdio_func_exist & SDIO_IGNORED)
+#define SET_SDIO_IGNORED(chip) ((chip)->sdio_func_exist |= SDIO_IGNORED)
+#define CLR_SDIO_IGNORED(chip) ((chip)->sdio_func_exist &= ~SDIO_IGNORED)
+
+struct rtsx_chip {
+ rtsx_dev_t *rtsx;
+
+ u32 int_reg; /* Bus interrupt pending register */
+ char max_lun;
+ void *context;
+
+ void *host_cmds_ptr; /* host commands buffer pointer */
+ dma_addr_t host_cmds_addr;
+ int ci; /* Command Index */
+
+ void *host_sg_tbl_ptr; /* SG descriptor table */
+ dma_addr_t host_sg_tbl_addr;
+ int sgi; /* SG entry index */
+
+ struct scsi_cmnd *srb; /* current srb */
+ struct sense_data_t sense_buffer[MAX_ALLOWED_LUN_CNT];
+
+ int cur_clk; /* current card clock */
+
+ /* Current accessed card */
+ int cur_card;
+
+ unsigned long need_release; /* need release bit map */
+ unsigned long need_reset; /* need reset bit map */
+ /* Flag to indicate that this card has just resumed from the SS
+  * state and needs to be released before being reset
+  */
+ unsigned long need_reinit;
+
+ int rw_need_retry;
+
+#ifdef SUPPORT_OCP
+ u32 ocp_int;
+ u8 ocp_stat;
+#endif
+
+ u8 card_exist; /* card exist bit map (physical exist) */
+ u8 card_ready; /* card ready bit map (reset successfully) */
+ u8 card_fail; /* card reset fail bit map */
+ u8 card_ejected; /* card ejected bit map */
+ u8 card_wp; /* card write protected bit map */
+
+ u8 lun_mc; /* flag to indicate whether to answer
+ * MediaChange */
+
+#ifndef LED_AUTO_BLINK
+ int led_toggle_counter;
+#endif
+
+ int sd_reset_counter;
+ int xd_reset_counter;
+ int ms_reset_counter;
+
+ /* card bus width */
+ u8 card_bus_width[MAX_ALLOWED_LUN_CNT];
+ /* card capacity */
+ u32 capacity[MAX_ALLOWED_LUN_CNT];
+ /* read/write card function pointer */
+ card_rw_func rw_card[MAX_ALLOWED_LUN_CNT];
+ /* read/write capacity, used for GPIO Toggle */
+ u32 rw_cap[MAX_ALLOWED_LUN_CNT];
+ /* card to lun mapping table */
+ u8 card2lun[32];
+ /* lun to card mapping table */
+ u8 lun2card[MAX_ALLOWED_LUN_CNT];
+
+ int rw_fail_cnt[MAX_ALLOWED_LUN_CNT];
+
+ int sd_show_cnt;
+ int xd_show_cnt;
+ int ms_show_cnt;
+
+ /* card information */
+ struct sd_info sd_card;
+ struct xd_info xd_card;
+ struct ms_info ms_card;
+
+ struct spi_info spi;
+
+#ifdef _MSG_TRACE
+ struct trace_msg_t trace_msg[TRACE_ITEM_CNT];
+ int msg_idx;
+#endif
+
+ int auto_delink_cnt;
+ int auto_delink_allowed;
+
+ int aspm_enabled;
+
+ int sdio_aspm;
+ int sdio_idle;
+ int sdio_counter;
+ u8 sdio_raw_data[12];
+
+ u8 sd_io;
+ u8 sd_int;
+
+ u8 rtsx_flag;
+
+ int ss_counter;
+ int idle_counter;
+ enum RTSX_STAT rtsx_stat;
+
+ u16 vendor_id;
+ u16 product_id;
+ u8 ic_version;
+
+ int driver_first_load;
+
+#ifdef HW_AUTO_SWITCH_SD_BUS
+ int sdio_in_charge;
+#endif
+
+ u8 aspm_level[2];
+
+ int chip_insert_with_sdio;
+
+ /* Options */
+
+ int adma_mode;
+
+ int auto_delink_en;
+ int ss_en;
+ u8 lun_mode;
+ u8 aspm_l0s_l1_en;
+
+ int power_down_in_ss;
+
+ int sdr104_en;
+ int ddr50_en;
+ int sdr50_en;
+
+ int baro_pkg;
+
+ int asic_code;
+ int phy_debug_mode;
+ int hw_bypass_sd;
+ int sdio_func_exist;
+ int aux_pwr_exist;
+ u8 ms_power_class_en;
+
+ int mspro_formatter_enable;
+
+ int remote_wakeup_en;
+
+ int ignore_sd;
+ int use_hw_setting;
+
+ int ss_idle_period;
+
+ int dynamic_aspm;
+
+ int fpga_sd_sdr104_clk;
+ int fpga_sd_ddr50_clk;
+ int fpga_sd_sdr50_clk;
+ int fpga_sd_hs_clk;
+ int fpga_mmc_52m_clk;
+ int fpga_ms_hg_clk;
+ int fpga_ms_4bit_clk;
+ int fpga_ms_1bit_clk;
+
+ int asic_sd_sdr104_clk;
+ int asic_sd_ddr50_clk;
+ int asic_sd_sdr50_clk;
+ int asic_sd_hs_clk;
+ int asic_mmc_52m_clk;
+ int asic_ms_hg_clk;
+ int asic_ms_4bit_clk;
+ int asic_ms_1bit_clk;
+
+ u8 ssc_depth_sd_sdr104;
+ u8 ssc_depth_sd_ddr50;
+ u8 ssc_depth_sd_sdr50;
+ u8 ssc_depth_sd_hs;
+ u8 ssc_depth_mmc_52m;
+ u8 ssc_depth_ms_hg;
+ u8 ssc_depth_ms_4bit;
+ u8 ssc_depth_low_speed;
+
+ u8 card_drive_sel;
+ u8 sd30_drive_sel_1v8;
+ u8 sd30_drive_sel_3v3;
+
+ u8 sd_400mA_ocp_thd;
+ u8 sd_800mA_ocp_thd;
+ u8 ms_ocp_thd;
+
+ int ssc_en;
+ int msi_en;
+
+ int xd_timeout;
+ int sd_timeout;
+ int ms_timeout;
+ int mspro_timeout;
+
+ int auto_power_down;
+
+ int sd_ddr_tx_phase;
+ int mmc_ddr_tx_phase;
+ int sd_default_tx_phase;
+ int sd_default_rx_phase;
+
+ int pmos_pwr_on_interval;
+ int sd_voltage_switch_delay;
+ int s3_pwr_off_delay;
+
+ int force_clkreq_0;
+ int ft2_fast_mode;
+
+ int do_delink_before_power_down;
+ int polling_config;
+ int sdio_retry_cnt;
+
+ int delink_stage1_step;
+ int delink_stage2_step;
+ int delink_stage3_step;
+
+ int auto_delink_in_L1;
+ int hp_watch_bios_hotplug;
+ int support_ms_8bit;
+
+ u8 blink_led;
+ u8 phy_voltage;
+ u8 max_payload;
+
+ u32 sd_speed_prior;
+ u32 sd_current_prior;
+ u32 sd_ctl;
+};
+
+#define rtsx_set_stat(chip, stat) \
+do { \
+ if ((stat) != RTSX_STAT_IDLE) { \
+ (chip)->idle_counter = 0; \
+ } \
+ (chip)->rtsx_stat = (enum RTSX_STAT)(stat); \
+} while (0)
+#define rtsx_get_stat(chip) ((chip)->rtsx_stat)
+#define rtsx_chk_stat(chip, stat) ((chip)->rtsx_stat == (stat))
+
+#define RTSX_SET_DELINK(chip) ((chip)->rtsx_flag |= 0x01)
+#define RTSX_CLR_DELINK(chip) ((chip)->rtsx_flag &= 0xFE)
+#define RTSX_TST_DELINK(chip) ((chip)->rtsx_flag & 0x01)
+
+#define CHECK_PID(chip, pid) ((chip)->product_id == (pid))
+#define CHECK_BARO_PKG(chip, pkg) ((chip)->baro_pkg == (pkg))
+#define CHECK_LUN_MODE(chip, mode) ((chip)->lun_mode == (mode))
+
+/* Power down control */
+#define SSC_PDCTL 0x01
+#define OC_PDCTL 0x02
+
+int rtsx_force_power_on(struct rtsx_chip *chip, u8 ctl);
+int rtsx_force_power_down(struct rtsx_chip *chip, u8 ctl);
+
+void rtsx_disable_card_int(struct rtsx_chip *chip);
+void rtsx_enable_card_int(struct rtsx_chip *chip);
+void rtsx_enable_bus_int(struct rtsx_chip *chip);
+void rtsx_disable_bus_int(struct rtsx_chip *chip);
+int rtsx_reset_chip(struct rtsx_chip *chip);
+int rtsx_init_chip(struct rtsx_chip *chip);
+void rtsx_release_chip(struct rtsx_chip *chip);
+void rtsx_polling_func(struct rtsx_chip *chip);
+void rtsx_undo_delink(struct rtsx_chip *chip);
+void rtsx_stop_cmd(struct rtsx_chip *chip, int card);
+int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data);
+int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data);
+int rtsx_write_cfg_dw(struct rtsx_chip *chip,
+ u8 func_no, u16 addr, u32 mask, u32 val);
+int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val);
+int rtsx_write_cfg_seq(struct rtsx_chip *chip,
+ u8 func, u16 addr, u8 *buf, int len);
+int rtsx_read_cfg_seq(struct rtsx_chip *chip,
+ u8 func, u16 addr, u8 *buf, int len);
+int rtsx_write_phy_register(struct rtsx_chip *chip, u8 addr, u16 val);
+int rtsx_read_phy_register(struct rtsx_chip *chip, u8 addr, u16 *val);
+int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val);
+int rtsx_write_efuse(struct rtsx_chip *chip, u8 addr, u8 val);
+int rtsx_clr_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit);
+int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit);
+int rtsx_check_link_ready(struct rtsx_chip *chip);
+void rtsx_enter_ss(struct rtsx_chip *chip);
+void rtsx_exit_ss(struct rtsx_chip *chip);
+int rtsx_pre_handle_interrupt(struct rtsx_chip *chip);
+void rtsx_enter_L1(struct rtsx_chip *chip);
+void rtsx_exit_L1(struct rtsx_chip *chip);
+void rtsx_do_before_power_down(struct rtsx_chip *chip, int pm_stat);
+void rtsx_enable_aspm(struct rtsx_chip *chip);
+void rtsx_disable_aspm(struct rtsx_chip *chip);
+int rtsx_read_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len);
+int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len);
+int rtsx_check_chip_exist(struct rtsx_chip *chip);
+
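+/* Note: these helpers hide control flow. On failure they invoke
+ * TRACE_RET(), which in this driver logs the failure and returns the
+ * error status from the *calling* function, so they can only be used
+ * inside functions that return an int status code.
+ */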
+#define RTSX_WRITE_REG(chip, addr, mask, data) \
+ do { \
+ int retval = rtsx_write_register((chip), (addr), (mask), (data)); \
+ if (retval != STATUS_SUCCESS) { \
+ TRACE_RET((chip), retval); \
+ } \
+ } while (0)
+
+#define RTSX_READ_REG(chip, addr, data) \
+ do { \
+ int retval = rtsx_read_register((chip), (addr), (data)); \
+ if (retval != STATUS_SUCCESS) { \
+ TRACE_RET((chip), retval); \
+ } \
+ } while (0)
+
+#endif /* __REALTEK_RTSX_CHIP_H */
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
new file mode 100644
index 0000000..382e73a
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -0,0 +1,3370 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include "rtsx.h"
+#include "rtsx_transport.h"
+#include "rtsx_sys.h"
+#include "rtsx_card.h"
+#include "rtsx_chip.h"
+#include "rtsx_scsi.h"
+#include "sd.h"
+#include "ms.h"
+#include "spi.h"
+
+void scsi_show_command(struct scsi_cmnd *srb)
+{
+ char *what = NULL;
+ int i, unknown_cmd = 0;
+
+ switch (srb->cmnd[0]) {
+ case TEST_UNIT_READY:
+ what = "TEST_UNIT_READY";
+ break;
+ case REZERO_UNIT:
+ what = "REZERO_UNIT";
+ break;
+ case REQUEST_SENSE:
+ what = "REQUEST_SENSE";
+ break;
+ case FORMAT_UNIT:
+ what = "FORMAT_UNIT";
+ break;
+ case READ_BLOCK_LIMITS:
+ what = "READ_BLOCK_LIMITS";
+ break;
+ case REASSIGN_BLOCKS:
+ what = "REASSIGN_BLOCKS";
+ break;
+ case READ_6:
+ what = "READ_6";
+ break;
+ case WRITE_6:
+ what = "WRITE_6";
+ break;
+ case SEEK_6:
+ what = "SEEK_6";
+ break;
+ case READ_REVERSE:
+ what = "READ_REVERSE";
+ break;
+ case WRITE_FILEMARKS:
+ what = "WRITE_FILEMARKS";
+ break;
+ case SPACE:
+ what = "SPACE";
+ break;
+ case INQUIRY:
+ what = "INQUIRY";
+ break;
+ case RECOVER_BUFFERED_DATA:
+ what = "RECOVER_BUFFERED_DATA";
+ break;
+ case MODE_SELECT:
+ what = "MODE_SELECT";
+ break;
+ case RESERVE:
+ what = "RESERVE";
+ break;
+ case RELEASE:
+ what = "RELEASE";
+ break;
+ case COPY:
+ what = "COPY";
+ break;
+ case ERASE:
+ what = "ERASE";
+ break;
+ case MODE_SENSE:
+ what = "MODE_SENSE";
+ break;
+ case START_STOP:
+ what = "START_STOP";
+ break;
+ case RECEIVE_DIAGNOSTIC:
+ what = "RECEIVE_DIAGNOSTIC";
+ break;
+ case SEND_DIAGNOSTIC:
+ what = "SEND_DIAGNOSTIC";
+ break;
+ case ALLOW_MEDIUM_REMOVAL:
+ what = "ALLOW_MEDIUM_REMOVAL";
+ break;
+ case SET_WINDOW:
+ what = "SET_WINDOW";
+ break;
+ case READ_CAPACITY:
+ what = "READ_CAPACITY";
+ break;
+ case READ_10:
+ what = "READ_10";
+ break;
+ case WRITE_10:
+ what = "WRITE_10";
+ break;
+ case SEEK_10:
+ what = "SEEK_10";
+ break;
+ case WRITE_VERIFY:
+ what = "WRITE_VERIFY";
+ break;
+ case VERIFY:
+ what = "VERIFY";
+ break;
+ case SEARCH_HIGH:
+ what = "SEARCH_HIGH";
+ break;
+ case SEARCH_EQUAL:
+ what = "SEARCH_EQUAL";
+ break;
+ case SEARCH_LOW:
+ what = "SEARCH_LOW";
+ break;
+ case SET_LIMITS:
+ what = "SET_LIMITS";
+ break;
+ case READ_POSITION:
+ what = "READ_POSITION";
+ break;
+ case SYNCHRONIZE_CACHE:
+ what = "SYNCHRONIZE_CACHE";
+ break;
+ case LOCK_UNLOCK_CACHE:
+ what = "LOCK_UNLOCK_CACHE";
+ break;
+ case READ_DEFECT_DATA:
+ what = "READ_DEFECT_DATA";
+ break;
+ case MEDIUM_SCAN:
+ what = "MEDIUM_SCAN";
+ break;
+ case COMPARE:
+ what = "COMPARE";
+ break;
+ case COPY_VERIFY:
+ what = "COPY_VERIFY";
+ break;
+ case WRITE_BUFFER:
+ what = "WRITE_BUFFER";
+ break;
+ case READ_BUFFER:
+ what = "READ_BUFFER";
+ break;
+ case UPDATE_BLOCK:
+ what = "UPDATE_BLOCK";
+ break;
+ case READ_LONG:
+ what = "READ_LONG";
+ break;
+ case WRITE_LONG:
+ what = "WRITE_LONG";
+ break;
+ case CHANGE_DEFINITION:
+ what = "CHANGE_DEFINITION";
+ break;
+ case WRITE_SAME:
+ what = "WRITE_SAME";
+ break;
+ case GPCMD_READ_SUBCHANNEL:
+ what = "READ SUBCHANNEL";
+ break;
+ case READ_TOC:
+ what = "READ_TOC";
+ break;
+ case GPCMD_READ_HEADER:
+ what = "READ HEADER";
+ break;
+ case GPCMD_PLAY_AUDIO_10:
+ what = "PLAY AUDIO (10)";
+ break;
+ case GPCMD_PLAY_AUDIO_MSF:
+ what = "PLAY AUDIO MSF";
+ break;
+ case GPCMD_GET_EVENT_STATUS_NOTIFICATION:
+ what = "GET EVENT/STATUS NOTIFICATION";
+ break;
+ case GPCMD_PAUSE_RESUME:
+ what = "PAUSE/RESUME";
+ break;
+ case LOG_SELECT:
+ what = "LOG_SELECT";
+ break;
+ case LOG_SENSE:
+ what = "LOG_SENSE";
+ break;
+ case GPCMD_STOP_PLAY_SCAN:
+ what = "STOP PLAY/SCAN";
+ break;
+ case GPCMD_READ_DISC_INFO:
+ what = "READ DISC INFORMATION";
+ break;
+ case GPCMD_READ_TRACK_RZONE_INFO:
+ what = "READ TRACK INFORMATION";
+ break;
+ case GPCMD_RESERVE_RZONE_TRACK:
+ what = "RESERVE TRACK";
+ break;
+ case GPCMD_SEND_OPC:
+ what = "SEND OPC";
+ break;
+ case MODE_SELECT_10:
+ what = "MODE_SELECT_10";
+ break;
+ case GPCMD_REPAIR_RZONE_TRACK:
+ what = "REPAIR TRACK";
+ break;
+ case 0x59:
+ what = "READ MASTER CUE";
+ break;
+ case MODE_SENSE_10:
+ what = "MODE_SENSE_10";
+ break;
+ case GPCMD_CLOSE_TRACK:
+ what = "CLOSE TRACK/SESSION";
+ break;
+ case 0x5C:
+ what = "READ BUFFER CAPACITY";
+ break;
+ case 0x5D:
+ what = "SEND CUE SHEET";
+ break;
+ case GPCMD_BLANK:
+ what = "BLANK";
+ break;
+ case REPORT_LUNS:
+ what = "REPORT LUNS";
+ break;
+ case MOVE_MEDIUM:
+ what = "MOVE_MEDIUM or PLAY AUDIO (12)";
+ break;
+ case READ_12:
+ what = "READ_12";
+ break;
+ case WRITE_12:
+ what = "WRITE_12";
+ break;
+ case WRITE_VERIFY_12:
+ what = "WRITE_VERIFY_12";
+ break;
+ case SEARCH_HIGH_12:
+ what = "SEARCH_HIGH_12";
+ break;
+ case SEARCH_EQUAL_12:
+ what = "SEARCH_EQUAL_12";
+ break;
+ case SEARCH_LOW_12:
+ what = "SEARCH_LOW_12";
+ break;
+ case SEND_VOLUME_TAG:
+ what = "SEND_VOLUME_TAG";
+ break;
+ case READ_ELEMENT_STATUS:
+ what = "READ_ELEMENT_STATUS";
+ break;
+ case GPCMD_READ_CD_MSF:
+ what = "READ CD MSF";
+ break;
+ case GPCMD_SCAN:
+ what = "SCAN";
+ break;
+ case GPCMD_SET_SPEED:
+ what = "SET CD SPEED";
+ break;
+ case GPCMD_MECHANISM_STATUS:
+ what = "MECHANISM STATUS";
+ break;
+ case GPCMD_READ_CD:
+ what = "READ CD";
+ break;
+ case 0xE1:
+ what = "WRITE CONTINUE";
+ break;
+ case WRITE_LONG_2:
+ what = "WRITE_LONG_2";
+ break;
+ case VENDOR_CMND:
+ what = "Realtek's vendor command";
+ break;
+ default:
+ what = "(unknown command)"; unknown_cmd = 1;
+ break;
+ }
+
+ if (srb->cmnd[0] != TEST_UNIT_READY)
+ RTSX_DEBUGP("Command %s (%d bytes)\n", what, srb->cmd_len);
+
+ if (unknown_cmd) {
+ RTSX_DEBUGP("");
+ for (i = 0; i < srb->cmd_len && i < 16; i++)
+ RTSX_DEBUGPN(" %02x", srb->cmnd[i]);
+ RTSX_DEBUGPN("\n");
+ }
+}
+
+void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type)
+{
+ switch (sense_type) {
+ case SENSE_TYPE_MEDIA_CHANGE:
+ set_sense_data(chip, lun, CUR_ERR, 0x06, 0, 0x28, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_NOT_PRESENT:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x3A, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_LBA_OVER_RANGE:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x21, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x25, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_WRITE_PROTECT:
+ set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x27, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x11, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_WRITE_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x02, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD:
+ set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0,
+ ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
+ break;
+
+ case SENSE_TYPE_FORMAT_IN_PROGRESS:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, 0, 0);
+ break;
+
+ case SENSE_TYPE_FORMAT_CMD_FAILED:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x31, 0x01, 0, 0);
+ break;
+
+#ifdef SUPPORT_MAGIC_GATE
+ case SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x02, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x00, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x30, 0x00, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_WRITE_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x00, 0, 0);
+ break;
+#endif
+
+#ifdef SUPPORT_SD_LOCK
+ case SENSE_TYPE_MEDIA_READ_FORBIDDEN:
+ set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x11, 0x13, 0, 0);
+ break;
+#endif
+
+ case SENSE_TYPE_NO_SENSE:
+ default:
+ set_sense_data(chip, lun, CUR_ERR, 0, 0, 0, 0, 0, 0);
+ break;
+ }
+}
+
+void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
+ u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
+ u16 sns_key_info1)
+{
+ struct sense_data_t *sense = &(chip->sense_buffer[lun]);
+
+ sense->err_code = err_code;
+ sense->sense_key = sense_key;
+ sense->info[0] = (u8)(info >> 24);
+ sense->info[1] = (u8)(info >> 16);
+ sense->info[2] = (u8)(info >> 8);
+ sense->info[3] = (u8)info;
+
+ sense->ad_sense_len = sizeof(struct sense_data_t) - 8;
+ sense->asc = asc;
+ sense->ascq = ascq;
+ if (sns_key_info0 != 0) {
+ sense->sns_key_info[0] = SKSV | sns_key_info0;
+ sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8;
+ sense->sns_key_info[2] = sns_key_info1 & 0x0f;
+ }
+}
+
+static int test_unit_ready(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ return TRANSPORT_FAILED;
+ }
+
+ if (!(CHK_BIT(chip->lun_mc, lun))) {
+ SET_BIT(chip->lun_mc, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+
+#ifdef SUPPORT_SD_LOCK
+ if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) {
+ struct sd_info *sd_card = &(chip->sd_card);
+ if (sd_card->sd_lock_notify) {
+ sd_card->sd_lock_notify = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ } else if (sd_card->sd_lock_status & SD_LOCKED) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ return TRANSPORT_FAILED;
+ }
+ }
+#endif
+
+ return TRANSPORT_GOOD;
+}
+
+static unsigned char formatter_inquiry_str[20] = {
+ 'M', 'E', 'M', 'O', 'R', 'Y', 'S', 'T', 'I', 'C', 'K',
+#ifdef SUPPORT_MAGIC_GATE
+ '-', 'M', 'G', /* Byte[47:49] */
+#else
+ 0x20, 0x20, 0x20, /* Byte[47:49] */
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+ 0x0B, /* Byte[50]: MG, MS, MSPro, MSXC */
+#else
+ 0x09, /* Byte[50]: MS, MSPro, MSXC */
+#endif
+ 0x00, /* Byte[51]: Category Specific Commands */
+ 0x00, /* Byte[52]: Access Control and feature */
+ 0x20, 0x20, 0x20, /* Byte[53:55] */
+};
+
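+/* INQUIRY handling: an 8-byte standard header (inquiry_buf) is
+ * followed by the 28-byte vendor/product/revision string selected by
+ * the current LUN mode. When the MS Pro formatter is enabled and the
+ * LUN holds (or may hold) a Memory Stick, the response is extended to
+ * 56 bytes with formatter_inquiry_str and the Additional Length byte
+ * is raised to 0x33.
+ */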
+static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ char *inquiry_default = (char *)"Generic-xD/SD/M.S. 1.00 ";
+ char *inquiry_sdms = (char *)"Generic-SD/MemoryStick 1.00 ";
+ char *inquiry_sd = (char *)"Generic-SD/MMC 1.00 ";
+ char *inquiry_ms = (char *)"Generic-MemoryStick 1.00 ";
+ char *inquiry_string;
+ unsigned char sendbytes;
+ unsigned char *buf;
+ u8 card = get_lun_card(chip, lun);
+ int pro_formatter_flag = 0;
+ unsigned char inquiry_buf[] = {
+ QULIFIRE|DRCT_ACCESS_DEV,
+ RMB_DISC|0x0D,
+ 0x00,
+ 0x01,
+ 0x1f,
+ 0x02,
+ 0,
+ REL_ADR|WBUS_32|WBUS_16|SYNC|LINKED|CMD_QUE|SFT_RE,
+ };
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
+ if (chip->lun2card[lun] == SD_CARD)
+ inquiry_string = inquiry_sd;
+ else
+ inquiry_string = inquiry_ms;
+
+ } else if (CHECK_LUN_MODE(chip, SD_MS_1LUN)) {
+ inquiry_string = inquiry_sdms;
+ } else {
+ inquiry_string = inquiry_default;
+ }
+
+ buf = vmalloc(scsi_bufflen(srb));
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+#ifdef SUPPORT_MAGIC_GATE
+ if ((chip->mspro_formatter_enable) &&
+ (chip->lun2card[lun] & MS_CARD))
+#else
+ if (chip->mspro_formatter_enable)
+#endif
+ {
+ if (!card || (card == MS_CARD))
+ pro_formatter_flag = 1;
+ }
+
+ if (pro_formatter_flag) {
+ if (scsi_bufflen(srb) < 56)
+ sendbytes = (unsigned char)(scsi_bufflen(srb));
+ else
+ sendbytes = 56;
+
+ } else {
+ if (scsi_bufflen(srb) < 36)
+ sendbytes = (unsigned char)(scsi_bufflen(srb));
+ else
+ sendbytes = 36;
+ }
+
+ if (sendbytes > 8) {
+ memcpy(buf, inquiry_buf, 8);
+ memcpy(buf + 8, inquiry_string, sendbytes - 8);
+ if (pro_formatter_flag) {
+ /* Additional Length */
+ buf[4] = 0x33;
+ }
+ } else {
+ memcpy(buf, inquiry_buf, sendbytes);
+ }
+
+ if (pro_formatter_flag) {
+ if (sendbytes > 36)
+ memcpy(buf + 36, formatter_inquiry_str, sendbytes - 36);
+ }
+
+ scsi_set_resid(srb, 0);
+
+ rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+
+static int start_stop_unit(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+
+ scsi_set_resid(srb, scsi_bufflen(srb));
+
+ if (srb->cmnd[1] == 1)
+ return TRANSPORT_GOOD;
+
+ switch (srb->cmnd[0x4]) {
+ case STOP_MEDIUM:
+ /* Media disabled */
+ return TRANSPORT_GOOD;
+
+ case UNLOAD_MEDIUM:
+ /* Media shall be unloaded */
+ if (check_card_ready(chip, lun))
+ eject_card(chip, lun);
+ return TRANSPORT_GOOD;
+
+ case MAKE_MEDIUM_READY:
+ case LOAD_MEDIUM:
+ if (check_card_ready(chip, lun)) {
+ return TRANSPORT_GOOD;
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ break;
+ }
+
+ TRACE_RET(chip, TRANSPORT_ERROR);
+}
+
+
+static int allow_medium_removal(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int prevent;
+
+ prevent = srb->cmnd[4] & 0x1;
+
+ scsi_set_resid(srb, 0);
+
+ if (prevent) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+
+static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sense_data_t *sense;
+ unsigned int lun = SCSI_LUN(srb);
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned char *tmp, *buf;
+
+ sense = &(chip->sense_buffer[lun]);
+
+ if ((get_lun_card(chip, lun) == MS_CARD) &&
+ ms_card->pro_under_formatting) {
+ if (ms_card->format_status == FORMAT_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ ms_card->pro_under_formatting = 0;
+ ms_card->progress = 0;
+ } else if (ms_card->format_status == FORMAT_IN_PROGRESS) {
+ /* Logical Unit Not Ready, Format in Progress */
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
+ 0, (u16)(ms_card->progress));
+ } else {
+ /* Format Command Failed */
+ set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
+ ms_card->pro_under_formatting = 0;
+ ms_card->progress = 0;
+ }
+
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ }
+
+ buf = vmalloc(scsi_bufflen(srb));
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ tmp = (unsigned char *)sense;
+ /* Copy no more than the sense data we actually hold */
+ memset(buf, 0, scsi_bufflen(srb));
+ memcpy(buf, tmp, min_t(unsigned int, scsi_bufflen(srb), sizeof(*sense)));
+
+ rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ scsi_set_resid(srb, 0);
+ /* Reset Sense Data */
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ return TRANSPORT_GOOD;
+}
+
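+/*
+ * Fill in MODE SENSE(6/10) data for a Memory Stick LUN, including the
+ * vendor page 0x20 and, when available, the card's raw system
+ * information.
+ */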
+static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd,
+ int lun, u8 *buf, int buf_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int sys_info_offset;
+ int data_size = buf_len;
+ int support_format = 0;
+ int i = 0;
+
+ if (cmd == MODE_SENSE) {
+ sys_info_offset = 8;
+ if (data_size > 0x68)
+ data_size = 0x68;
+
+ buf[i++] = 0x67; /* Mode Data Length */
+ } else {
+ sys_info_offset = 12;
+ if (data_size > 0x6C)
+ data_size = 0x6C;
+
+ buf[i++] = 0x00; /* Mode Data Length (MSB) */
+ buf[i++] = 0x6A; /* Mode Data Length (LSB) */
+ }
+
+ /* Medium Type Code */
+ if (check_card_ready(chip, lun)) {
+ if (CHK_MSXC(ms_card)) {
+ support_format = 1;
+ buf[i++] = 0x40;
+ } else if (CHK_MSPRO(ms_card)) {
+ support_format = 1;
+ buf[i++] = 0x20;
+ } else {
+ buf[i++] = 0x10;
+ }
+
+ /* WP */
+ if (check_card_wp(chip, lun))
+ buf[i++] = 0x80;
+ else
+ buf[i++] = 0x00;
+
+ } else {
+ buf[i++] = 0x00; /* MediaType */
+ buf[i++] = 0x00; /* WP */
+ }
+
+ buf[i++] = 0x00; /* Reserved */
+
+ if (cmd == MODE_SENSE_10) {
+ buf[i++] = 0x00; /* Reserved */
+ buf[i++] = 0x00; /* Block descriptor length(MSB) */
+ buf[i++] = 0x00; /* Block descriptor length(LSB) */
+
+ /* The following data is the content of "Page 0x20" */
+ if (data_size >= 9)
+ buf[i++] = 0x20; /* Page Code */
+ if (data_size >= 10)
+ buf[i++] = 0x62; /* Page Length */
+ if (data_size >= 11)
+ buf[i++] = 0x00; /* No Access Control */
+ if (data_size >= 12) {
+ if (support_format)
+ buf[i++] = 0xC0; /* SF, SGM */
+ else
+ buf[i++] = 0x00;
+ }
+ } else {
+ /* The following data is the content of "Page 0x20" */
+ if (data_size >= 5)
+ buf[i++] = 0x20; /* Page Code */
+ if (data_size >= 6)
+ buf[i++] = 0x62; /* Page Length */
+ if (data_size >= 7)
+ buf[i++] = 0x00; /* No Access Control */
+ if (data_size >= 8) {
+ if (support_format)
+ buf[i++] = 0xC0; /* SF, SGM */
+ else
+ buf[i++] = 0x00;
+ }
+ }
+
+ if (data_size > sys_info_offset) {
+ /* 96 Bytes Attribute Data */
+ int len = data_size - sys_info_offset;
+ len = (len < 96) ? len : 96;
+
+ memcpy(buf + sys_info_offset, ms_card->raw_sys_info, len);
+ }
+}
+
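+/*
+ * Handle MODE SENSE(6/10): only pages 0x00, 0x1C, 0x3F and, for the MS
+ * Pro formatter, 0x20 are supported; any other page is rejected with an
+ * invalid-field-in-CDB sense code.
+ */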
+static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned int dataSize;
+ int status;
+ int pro_formatter_flag;
+ unsigned char pageCode, *buf;
+ u8 card = get_lun_card(chip, lun);
+
+#ifndef SUPPORT_MAGIC_GATE
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ scsi_set_resid(srb, scsi_bufflen(srb));
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+#endif
+
+ pro_formatter_flag = 0;
+ dataSize = 8;
+#ifdef SUPPORT_MAGIC_GATE
+ if ((chip->lun2card[lun] & MS_CARD)) {
+ if (!card || (card == MS_CARD)) {
+ dataSize = 108;
+ if (chip->mspro_formatter_enable)
+ pro_formatter_flag = 1;
+ }
+ }
+#else
+ if (card == MS_CARD) {
+ if (chip->mspro_formatter_enable) {
+ pro_formatter_flag = 1;
+ dataSize = 108;
+ }
+ }
+#endif
+
+ buf = kmalloc(dataSize, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ pageCode = srb->cmnd[2] & 0x3f;
+
+ if ((pageCode == 0x3F) || (pageCode == 0x1C) ||
+ (pageCode == 0x00) ||
+ (pro_formatter_flag && (pageCode == 0x20))) {
+ if (srb->cmnd[0] == MODE_SENSE) {
+ if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+ ms_mode_sense(chip, srb->cmnd[0],
+ lun, buf, dataSize);
+ } else {
+ dataSize = 4;
+ buf[0] = 0x03;
+ buf[1] = 0x00;
+ if (check_card_wp(chip, lun))
+ buf[2] = 0x80;
+ else
+ buf[2] = 0x00;
+
+ buf[3] = 0x00;
+ }
+ } else {
+ if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+ ms_mode_sense(chip, srb->cmnd[0],
+ lun, buf, dataSize);
+ } else {
+ dataSize = 8;
+ buf[0] = 0x00;
+ buf[1] = 0x06;
+ buf[2] = 0x00;
+ if (check_card_wp(chip, lun))
+ buf[3] = 0x80;
+ else
+ buf[3] = 0x00;
+ buf[4] = 0x00;
+ buf[5] = 0x00;
+ buf[6] = 0x00;
+ buf[7] = 0x00;
+ }
+ }
+ status = TRANSPORT_GOOD;
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ scsi_set_resid(srb, scsi_bufflen(srb));
+ status = TRANSPORT_FAILED;
+ }
+
+ if (status == TRANSPORT_GOOD) {
+ unsigned int len = min_t(unsigned int, scsi_bufflen(srb),
+ dataSize);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+ }
+ kfree(buf);
+
+ return status;
+}
+
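+/*
+ * Handle READ(6)/WRITE(6), READ(10)/WRITE(10) and the vendor
+ * pass-through read/write commands: validate the LBA range against the
+ * card size and hand the transfer to card_rw().
+ */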
+static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+#endif
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u32 start_sec;
+ u16 sec_cnt;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ if (!check_card_ready(chip, lun) || (get_card_size(chip, lun) == 0)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!(CHK_BIT(chip->lun_mc, lun))) {
+ SET_BIT(chip->lun_mc, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_erase_status) {
+ /* Access to any card is forbidden
+ * until the SD erase procedure has completed
+ */
+ RTSX_DEBUGP("SD card being erased!\n");
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (get_lun_card(chip, lun) == SD_CARD) {
+ if (sd_card->sd_lock_status & SD_LOCKED) {
+ RTSX_DEBUGP("SD card locked!\n");
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#endif
+
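+ /* Decode the start LBA and sector count from the CDB */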
+ if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
+ start_sec = ((u32)srb->cmnd[2] << 24) |
+ ((u32)srb->cmnd[3] << 16) |
+ ((u32)srb->cmnd[4] << 8) | ((u32)srb->cmnd[5]);
+ sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
+ } else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
+ start_sec = ((u32)(srb->cmnd[1] & 0x1F) << 16) |
+ ((u32)srb->cmnd[2] << 8) | ((u32)srb->cmnd[3]);
+ sec_cnt = srb->cmnd[4];
+ } else if ((srb->cmnd[0] == VENDOR_CMND) &&
+ (srb->cmnd[1] == SCSI_APP_CMD) &&
+ ((srb->cmnd[2] == PP_READ10) || (srb->cmnd[2] == PP_WRITE10))) {
+ start_sec = ((u32)srb->cmnd[4] << 24) |
+ ((u32)srb->cmnd[5] << 16) |
+ ((u32)srb->cmnd[6] << 8) | ((u32)srb->cmnd[7]);
+ sec_cnt = ((u16)(srb->cmnd[9]) << 8) | srb->cmnd[10];
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ /* In some tests we receive a start_sec such as 0xFFFFFFFF.
+ * In that case start_sec + sec_cnt would overflow, so check
+ * start_sec on its own first.
+ */
+ if ((start_sec > get_card_size(chip, lun)) ||
+ ((start_sec + sec_cnt) > get_card_size(chip, lun))) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sec_cnt == 0) {
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+ }
+
+ if (chip->rw_fail_cnt[lun] == 3) {
+ RTSX_DEBUGP("read/write failed three times in succession\n");
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ else
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ if (check_card_wp(chip, lun)) {
+ RTSX_DEBUGP("Write protected card!\n");
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ retval = card_rw(srb, chip, start_sec, sec_cnt);
+ if (retval != STATUS_SUCCESS) {
+ if (chip->need_release & chip->lun2card[lun]) {
+ chip->rw_fail_cnt[lun] = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ } else {
+ chip->rw_fail_cnt[lun]++;
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ }
+ retval = TRANSPORT_FAILED;
+ TRACE_GOTO(chip, Exit);
+ } else {
+ chip->rw_fail_cnt[lun] = 0;
+ retval = TRANSPORT_GOOD;
+ }
+
+ scsi_set_resid(srb, 0);
+
+Exit:
+ return retval;
+}
+
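+/*
+ * Handle READ FORMAT CAPACITIES: report the card capacity, adding a
+ * second capacity descriptor when the MS Pro formatter is exposed for
+ * this LUN.
+ */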
+static int read_format_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned char *buf;
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned int buf_len;
+ u8 card = get_lun_card(chip, lun);
+ u32 card_size;
+ int desc_cnt;
+ int i = 0;
+
+ if (!check_card_ready(chip, lun)) {
+ if (!chip->mspro_formatter_enable) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ buf_len = (scsi_bufflen(srb) > 12) ? 0x14 : 12;
+
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ buf[i++] = 0;
+ buf[i++] = 0;
+ buf[i++] = 0;
+
+ /* Capacity List Length */
+ if ((buf_len > 12) && chip->mspro_formatter_enable &&
+ (chip->lun2card[lun] & MS_CARD) &&
+ (!card || (card == MS_CARD))) {
+ buf[i++] = 0x10;
+ desc_cnt = 2;
+ } else {
+ buf[i++] = 0x08;
+ desc_cnt = 1;
+ }
+
+ while (desc_cnt) {
+ if (check_card_ready(chip, lun)) {
+ card_size = get_card_size(chip, lun);
+ buf[i++] = (unsigned char)(card_size >> 24);
+ buf[i++] = (unsigned char)(card_size >> 16);
+ buf[i++] = (unsigned char)(card_size >> 8);
+ buf[i++] = (unsigned char)card_size;
+
+ if (desc_cnt == 2)
+ buf[i++] = 2;
+ else
+ buf[i++] = 0;
+ } else {
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+
+ if (desc_cnt == 2)
+ buf[i++] = 3;
+ else
+ buf[i++] = 0;
+ }
+
+ buf[i++] = 0x00;
+ buf[i++] = 0x02;
+ buf[i++] = 0x00;
+
+ desc_cnt--;
+ }
+
+ buf_len = min_t(unsigned int, scsi_bufflen(srb), buf_len);
+ rtsx_stor_set_xfer_buf(buf, buf_len, srb);
+ kfree(buf);
+
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
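+/*
+ * Handle READ CAPACITY(10): return the last addressable LBA and a fixed
+ * 512-byte block length.
+ */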
+static int read_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned char *buf;
+ unsigned int lun = SCSI_LUN(srb);
+ u32 card_size;
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!(CHK_BIT(chip->lun_mc, lun))) {
+ SET_BIT(chip->lun_mc, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+
+ buf = kmalloc(8, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ card_size = get_card_size(chip, lun);
+ buf[0] = (unsigned char)((card_size - 1) >> 24);
+ buf[1] = (unsigned char)((card_size - 1) >> 16);
+ buf[2] = (unsigned char)((card_size - 1) >> 8);
+ buf[3] = (unsigned char)(card_size - 1);
+
+ buf[4] = 0x00;
+ buf[5] = 0x00;
+ buf[6] = 0x02;
+ buf[7] = 0x00;
+
+ rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ kfree(buf);
+
+ scsi_set_resid(srb, 0);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short len, i;
+ int retval;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len; i++) {
+ retval = spi_read_eeprom(chip, i, buf + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short len, i;
+ int retval;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (len == 511) {
+ retval = spi_erase_eeprom_chip(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ } else {
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb),
+ len);
+ buf = vmalloc(len);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rtsx_stor_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ for (i = 0; i < len; i++) {
+ retval = spi_write_eeprom(chip, i, buf[i]);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ vfree(buf);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = ((u16)srb->cmnd[2] << 8) | srb->cmnd[3];
+ len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ if (addr < 0xFC00) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len; i++) {
+ retval = rtsx_read_register(chip, addr + i, buf + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = ((u16)srb->cmnd[2] << 8) | srb->cmnd[3];
+ len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ if (addr < 0xFC00) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
+ buf = vmalloc(len);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rtsx_stor_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len; i++) {
+ retval = rtsx_write_register(chip, addr + i, 0xFF, buf[i]);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_sd_csd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (get_lun_card(chip, lun) != SD_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ rtsx_stor_set_xfer_buf(sd_card->raw_csd, scsi_bufflen(srb), srb);
+
+ return TRANSPORT_GOOD;
+}
+
+static int toggle_gpio_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ u8 gpio = srb->cmnd[2];
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ if (gpio > 3)
+ gpio = 1;
+ toggle_gpio(chip, gpio);
+
+ return TRANSPORT_GOOD;
+}
+
+#ifdef _MSG_TRACE
+static int trace_msg_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned char *ptr, *buf = NULL;
+ int i, msg_cnt;
+ u8 clear;
+ unsigned int buf_len;
+
+ buf_len = 4 + ((2 + MSG_FUNC_LEN + MSG_FILE_LEN + TIME_VAL_LEN) *
+ TRACE_ITEM_CNT);
+
+ if ((scsi_bufflen(srb) < buf_len) || (scsi_sglist(srb) == NULL)) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ clear = srb->cmnd[2];
+
+ buf = vmalloc(scsi_bufflen(srb));
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ ptr = buf;
+
+ if (chip->trace_msg[chip->msg_idx].valid)
+ msg_cnt = TRACE_ITEM_CNT;
+ else
+ msg_cnt = chip->msg_idx;
+
+ *(ptr++) = (u8)(msg_cnt >> 24);
+ *(ptr++) = (u8)(msg_cnt >> 16);
+ *(ptr++) = (u8)(msg_cnt >> 8);
+ *(ptr++) = (u8)msg_cnt;
+ RTSX_DEBUGP("Trace message count is %d\n", msg_cnt);
+
+ for (i = 1; i <= msg_cnt; i++) {
+ int j, idx;
+
+ idx = chip->msg_idx - i;
+ if (idx < 0)
+ idx += TRACE_ITEM_CNT;
+
+ *(ptr++) = (u8)(chip->trace_msg[idx].line >> 8);
+ *(ptr++) = (u8)(chip->trace_msg[idx].line);
+ for (j = 0; j < MSG_FUNC_LEN; j++)
+ *(ptr++) = chip->trace_msg[idx].func[j];
+
+ for (j = 0; j < MSG_FILE_LEN; j++)
+ *(ptr++) = chip->trace_msg[idx].file[j];
+
+ for (j = 0; j < TIME_VAL_LEN; j++)
+ *(ptr++) = chip->trace_msg[idx].timeval_buf[j];
+ }
+
+ rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ if (clear) {
+ chip->msg_idx = 0;
+ for (i = 0; i < TRACE_ITEM_CNT; i++)
+ chip->trace_msg[i].valid = 0;
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+#endif
+
+static int read_host_reg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ u8 addr, buf[4];
+ u32 val;
+ unsigned int len;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = srb->cmnd[4];
+
+ val = rtsx_readl(chip, addr);
+ RTSX_DEBUGP("Host register (0x%x): 0x%x\n", addr, val);
+
+ buf[0] = (u8)(val >> 24);
+ buf[1] = (u8)(val >> 16);
+ buf[2] = (u8)(val >> 8);
+ buf[3] = (u8)val;
+
+ len = min_t(unsigned int, scsi_bufflen(srb), 4);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_host_reg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ u8 addr, buf[4];
+ u32 val;
+ unsigned int len;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = srb->cmnd[4];
+
+ len = min_t(unsigned int, scsi_bufflen(srb), 4);
+ rtsx_stor_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ val = ((u32)buf[0] << 24) | ((u32)buf[1] << 16) | ((u32)buf[2]
+ << 8) | buf[3];
+
+ rtsx_writel(chip, addr, val);
+
+ return TRANSPORT_GOOD;
+}
+
+static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+
+ if (srb->cmnd[3] == 1) {
+ /* Variable Clock */
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ switch (srb->cmnd[4]) {
+ case XD_CARD:
+ xd_card->xd_clock = srb->cmnd[5];
+ break;
+
+ case SD_CARD:
+ sd_card->sd_clock = srb->cmnd[5];
+ break;
+
+ case MS_CARD:
+ ms_card->ms_clock = srb->cmnd[5];
+ break;
+
+ default:
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ } else if (srb->cmnd[3] == 2) {
+ if (srb->cmnd[4]) {
+ chip->blink_led = 1;
+ } else {
+ int retval;
+
+ chip->blink_led = 0;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en &&
+ (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ turn_off_led(chip, LED_GPIO);
+ }
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+
+ if (srb->cmnd[3] == 1) {
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct ms_info *ms_card = &(chip->ms_card);
+ u8 tmp;
+
+ switch (srb->cmnd[4]) {
+ case XD_CARD:
+ tmp = (u8)(xd_card->xd_clock);
+ break;
+
+ case SD_CARD:
+ tmp = (u8)(sd_card->sd_clock);
+ break;
+
+ case MS_CARD:
+ tmp = (u8)(ms_card->ms_clock);
+ break;
+
+ default:
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ rtsx_stor_set_xfer_buf(&tmp, 1, srb);
+ } else if (srb->cmnd[3] == 2) {
+ u8 tmp = chip->blink_led;
+ rtsx_stor_set_xfer_buf(&tmp, 1, srb);
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ unsigned int lun = SCSI_LUN(srb);
+ u16 len;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ len = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
+ len = min_t(u16, len, scsi_bufflen(srb));
+
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ RTSX_DEBUGP("Read from device\n");
+ else
+ RTSX_DEBUGP("Write to device\n");
+
+ retval = rtsx_transfer_data(chip, 0, scsi_sglist(srb), len,
+ scsi_sg_count(srb), srb->sc_data_direction, 1000);
+ if (retval < 0) {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ scsi_set_resid(srb, 0);
+
+ return TRANSPORT_GOOD;
+}
+
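+/*
+ * Vendor GET_DEV_STATUS command: return a 32-byte status block with the
+ * chip identity, over-current flags and the speed/capacity class of the
+ * inserted card.
+ */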
+static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct ms_info *ms_card = &(chip->ms_card);
+ int buf_len;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 card = get_lun_card(chip, lun);
+ u8 status[32];
+#ifdef SUPPORT_OCP
+ u8 oc_now_mask = 0, oc_ever_mask = 0;
+#endif
+
+ memset(status, 0, 32);
+
+ status[0] = (u8)(chip->product_id);
+ status[1] = chip->ic_version;
+
+ if (chip->auto_delink_en)
+ status[2] = 0x10;
+ else
+ status[2] = 0x00;
+
+ status[3] = 20;
+ status[4] = 10;
+ status[5] = 05;
+ status[6] = 21;
+
+ if (chip->card_wp)
+ status[7] = 0x20;
+ else
+ status[7] = 0x00;
+
+#ifdef SUPPORT_OCP
+ status[8] = 0;
+ if (CHECK_LUN_MODE(chip,
+ SD_MS_2LUN) && (chip->lun2card[lun] == MS_CARD)) {
+ oc_now_mask = MS_OC_NOW;
+ oc_ever_mask = MS_OC_EVER;
+ } else {
+ oc_now_mask = SD_OC_NOW;
+ oc_ever_mask = SD_OC_EVER;
+ }
+
+ if (chip->ocp_stat & oc_now_mask)
+ status[8] |= 0x02;
+
+ if (chip->ocp_stat & oc_ever_mask)
+ status[8] |= 0x01;
+#endif
+
+ if (card == SD_CARD) {
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_HCXC(sd_card)) {
+ if (sd_card->capacity > 0x4000000)
+ status[0x0E] = 0x02;
+ else
+ status[0x0E] = 0x01;
+ } else {
+ status[0x0E] = 0x00;
+ }
+
+ if (CHK_SD_SDR104(sd_card))
+ status[0x0F] = 0x03;
+ else if (CHK_SD_DDR50(sd_card))
+ status[0x0F] = 0x04;
+ else if (CHK_SD_SDR50(sd_card))
+ status[0x0F] = 0x02;
+ else if (CHK_SD_HS(sd_card))
+ status[0x0F] = 0x01;
+ else
+ status[0x0F] = 0x00;
+ } else {
+ if (CHK_MMC_SECTOR_MODE(sd_card))
+ status[0x0E] = 0x01;
+ else
+ status[0x0E] = 0x00;
+
+ if (CHK_MMC_DDR52(sd_card))
+ status[0x0F] = 0x03;
+ else if (CHK_MMC_52M(sd_card))
+ status[0x0F] = 0x02;
+ else if (CHK_MMC_26M(sd_card))
+ status[0x0F] = 0x01;
+ else
+ status[0x0F] = 0x00;
+ }
+ } else if (card == MS_CARD) {
+ if (CHK_MSPRO(ms_card)) {
+ if (CHK_MSXC(ms_card))
+ status[0x0E] = 0x01;
+ else
+ status[0x0E] = 0x00;
+
+ if (CHK_HG8BIT(ms_card))
+ status[0x0F] = 0x01;
+ else
+ status[0x0F] = 0x00;
+ }
+ }
+
+#ifdef SUPPORT_SD_LOCK
+ if (card == SD_CARD) {
+ status[0x17] = 0x80;
+ if (sd_card->sd_erase_status)
+ status[0x17] |= 0x01;
+ if (sd_card->sd_lock_status & SD_LOCKED) {
+ status[0x17] |= 0x02;
+ status[0x07] |= 0x40;
+ }
+ if (sd_card->sd_lock_status & SD_PWD_EXIST)
+ status[0x17] |= 0x04;
+ } else {
+ status[0x17] = 0x00;
+ }
+
+ RTSX_DEBUGP("status[0x17] = 0x%x\n", status[0x17]);
+#endif
+
+ status[0x18] = 0x8A;
+ status[0x1A] = 0x28;
+#ifdef SUPPORT_SD_LOCK
+ status[0x1F] = 0x01;
+#endif
+
+ buf_len = min_t(unsigned int, scsi_bufflen(srb), sizeof(status));
+ rtsx_stor_set_xfer_buf(status, buf_len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int set_chip_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int phy_debug_mode;
+ int retval;
+ u16 reg;
+
+ if (!CHECK_PID(chip, 0x5208)) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ phy_debug_mode = (int)(srb->cmnd[3]);
+
+ if (phy_debug_mode) {
+ chip->phy_debug_mode = 1;
+ retval = rtsx_write_register(chip, CDRESUMECTL, 0x77, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ rtsx_disable_bus_int(chip);
+
+ retval = rtsx_read_phy_register(chip, 0x1C, &reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ reg |= 0x0001;
+ retval = rtsx_write_phy_register(chip, 0x1C, reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ chip->phy_debug_mode = 0;
+ retval = rtsx_write_register(chip, CDRESUMECTL, 0x77, 0x77);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ rtsx_enable_bus_int(chip);
+
+ retval = rtsx_read_phy_register(chip, 0x1C, &reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ reg &= 0xFFFE;
+ retval = rtsx_write_phy_register(chip, 0x1C, reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
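+/*
+ * Vendor batch-command interface: build up an internal command buffer
+ * (INIT/ADD), send it to the chip (SEND) or read back one response byte
+ * (GET_BATCHRSP).
+ */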
+static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval = STATUS_SUCCESS;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 cmd_type, mask, value, idx;
+ u16 addr;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ switch (srb->cmnd[3]) {
+ case INIT_BATCHCMD:
+ rtsx_init_cmd(chip);
+ break;
+
+ case ADD_BATCHCMD:
+ cmd_type = srb->cmnd[4];
+ if (cmd_type > 2) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ addr = (srb->cmnd[5] << 8) | srb->cmnd[6];
+ mask = srb->cmnd[7];
+ value = srb->cmnd[8];
+ rtsx_add_cmd(chip, cmd_type, addr, mask, value);
+ break;
+
+ case SEND_BATCHCMD:
+ retval = rtsx_send_cmd(chip, 0, 1000);
+ break;
+
+ case GET_BATCHRSP:
+ idx = srb->cmnd[4];
+ value = *(rtsx_get_cmd_data(chip) + idx);
+ if (scsi_bufflen(srb) < 1) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ rtsx_stor_set_xfer_buf(&value, 1, srb);
+ scsi_set_resid(srb, 0);
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int suit_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int result;
+
+ switch (srb->cmnd[3]) {
+ case INIT_BATCHCMD:
+ case ADD_BATCHCMD:
+ case SEND_BATCHCMD:
+ case GET_BATCHRSP:
+ result = rw_mem_cmd_buf(srb, chip);
+ break;
+ default:
+ result = TRANSPORT_ERROR;
+ }
+
+ return result;
+}
+
+static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+ u16 val;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+ len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];
+
+ if (len % 2)
+ len -= len % 2;
+
+ if (len) {
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len / 2; i++) {
+ retval = rtsx_read_phy_register(chip, addr + i, &val);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ buf[2*i] = (u8)(val >> 8);
+ buf[2*i+1] = (u8)val;
+ }
+
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb),
+ len);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+ u16 val;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+ len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];
+
+ if (len % 2)
+ len -= len % 2;
+
+ if (len) {
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb),
+ len);
+
+ buf = vmalloc(len);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rtsx_stor_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len / 2; i++) {
+ val = ((u16)buf[2*i] << 8) | buf[2*i+1];
+ retval = rtsx_write_phy_register(chip, addr + i, val);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ vfree(buf);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short addr;
+ int retval;
+ u8 mode;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ mode = srb->cmnd[3];
+ addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ if (mode == 0) {
+ retval = spi_erase_eeprom_chip(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ } else if (mode == 1) {
+ retval = spi_erase_eeprom_byte(chip, addr);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ } else {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+ len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];
+
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len; i++) {
+ retval = spi_read_eeprom(chip, addr + i, buf + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+ len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];
+
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
+ buf = vmalloc(len);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rtsx_stor_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len; i++) {
+ retval = spi_write_eeprom(chip, addr + i, buf[i]);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ u8 addr, len, i;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = srb->cmnd[4];
+ len = srb->cmnd[5];
+
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len; i++) {
+ retval = rtsx_read_efuse(chip, addr + i, buf + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ len = (u8)min_t(unsigned int, scsi_bufflen(srb), len);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval, result = TRANSPORT_GOOD;
+ u16 val;
+ u8 addr, len, i;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = srb->cmnd[4];
+ len = srb->cmnd[5];
+
+ len = (u8)min_t(unsigned int, scsi_bufflen(srb), len);
+ buf = vmalloc(len);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rtsx_stor_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ }
+
+ if (chip->asic_code) {
+ retval = rtsx_read_phy_register(chip, 0x08, &val);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ }
+
+ retval = rtsx_write_register(chip, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, LDO_OFF);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ }
+
+ wait_timeout(600);
+
+ retval = rtsx_write_phy_register(chip, 0x08,
+ 0x4C00 | chip->phy_voltage);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ }
+
+ retval = rtsx_write_register(chip, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, LDO_ON);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ }
+
+ wait_timeout(600);
+ }
+
+ retval = card_power_on(chip, SPI_CARD);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ }
+
+ wait_timeout(50);
+
+ for (i = 0; i < len; i++) {
+ retval = rtsx_write_efuse(chip, addr + i, buf[i]);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ result = TRANSPORT_FAILED;
+ TRACE_GOTO(chip, Exit);
+ }
+ }
+
+Exit:
+ vfree(buf);
+
+ retval = card_power_off(chip, SPI_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ if (chip->asic_code) {
+ retval = rtsx_write_register(chip, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, LDO_OFF);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ wait_timeout(600);
+
+ retval = rtsx_write_phy_register(chip, 0x08, val);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = rtsx_write_register(chip, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, LDO_ON);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ }
+
+ return result;
+}
+
+static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ u8 func, func_max;
+ u16 addr, len;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ func = srb->cmnd[3];
+ addr = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
+ len = ((u16)(srb->cmnd[6]) << 8) | srb->cmnd[7];
+
+ RTSX_DEBUGP("%s: func = %d, addr = 0x%x, len = %d\n", __func__, func,
+ addr, len);
+
+ if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip))
+ func_max = 1;
+ else
+ func_max = 0;
+
+ if (func > func_max) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = rtsx_read_cfg_seq(chip, func, addr, buf, len);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ len = (u16)min_t(unsigned int, scsi_bufflen(srb), len);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ u8 func, func_max;
+ u16 addr, len;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ func = srb->cmnd[3];
+ addr = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
+ len = ((u16)(srb->cmnd[6]) << 8) | srb->cmnd[7];
+
+ RTSX_DEBUGP("%s: func = %d, addr = 0x%x\n", __func__, func, addr);
+
+ if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip))
+ func_max = 1;
+ else
+ func_max = 0;
+
+ if (func > func_max) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rtsx_stor_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ retval = rtsx_write_cfg_seq(chip, func, addr, buf, len);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
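+/*
+ * Dispatch the SCSI_APP_CMD sub-commands (register, PHY, EEPROM, eFuse
+ * and configuration accesses) to their handlers.
+ */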
+static int app_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int result;
+
+ switch (srb->cmnd[2]) {
+ case PP_READ10:
+ case PP_WRITE10:
+ result = read_write(srb, chip);
+ break;
+
+ case READ_HOST_REG:
+ result = read_host_reg(srb, chip);
+ break;
+
+ case WRITE_HOST_REG:
+ result = write_host_reg(srb, chip);
+ break;
+
+ case GET_VAR:
+ result = get_variable(srb, chip);
+ break;
+
+ case SET_VAR:
+ result = set_variable(srb, chip);
+ break;
+
+ case DMA_READ:
+ case DMA_WRITE:
+ result = dma_access_ring_buffer(srb, chip);
+ break;
+
+ case READ_PHY:
+ result = read_phy_register(srb, chip);
+ break;
+
+ case WRITE_PHY:
+ result = write_phy_register(srb, chip);
+ break;
+
+ case ERASE_EEPROM2:
+ result = erase_eeprom2(srb, chip);
+ break;
+
+ case READ_EEPROM2:
+ result = read_eeprom2(srb, chip);
+ break;
+
+ case WRITE_EEPROM2:
+ result = write_eeprom2(srb, chip);
+ break;
+
+ case READ_EFUSE:
+ result = read_efuse(srb, chip);
+ break;
+
+ case WRITE_EFUSE:
+ result = write_efuse(srb, chip);
+ break;
+
+ case READ_CFG:
+ result = read_cfg_byte(srb, chip);
+ break;
+
+ case WRITE_CFG:
+ result = write_cfg_byte(srb, chip);
+ break;
+
+ case SET_CHIP_MODE:
+ result = set_chip_mode(srb, chip);
+ break;
+
+ case SUIT_CMD:
+ result = suit_cmd(srb, chip);
+ break;
+
+ case GET_DEV_STATUS:
+ result = get_dev_status(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+
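+/*
+ * Vendor READ_STATUS command: return a 16-byte block describing the
+ * chip IDs, the LUN mode and the type and state of the inserted card.
+ */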
+static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ u8 rtsx_status[16];
+ int buf_len;
+ unsigned int lun = SCSI_LUN(srb);
+
+ rtsx_status[0] = (u8)(chip->vendor_id >> 8);
+ rtsx_status[1] = (u8)(chip->vendor_id);
+
+ rtsx_status[2] = (u8)(chip->product_id >> 8);
+ rtsx_status[3] = (u8)(chip->product_id);
+
+ rtsx_status[4] = (u8)lun;
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
+ if (chip->lun2card[lun] == SD_CARD)
+ rtsx_status[5] = 2;
+ else
+ rtsx_status[5] = 3;
+ } else {
+ if (chip->card_exist) {
+ if (chip->card_exist & XD_CARD)
+ rtsx_status[5] = 4;
+ else if (chip->card_exist & SD_CARD)
+ rtsx_status[5] = 2;
+ else if (chip->card_exist & MS_CARD)
+ rtsx_status[5] = 3;
+ else
+ rtsx_status[5] = 7;
+ } else {
+ rtsx_status[5] = 7;
+ }
+ }
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
+ rtsx_status[6] = 2;
+ else
+ rtsx_status[6] = 1;
+
+ rtsx_status[7] = (u8)(chip->product_id);
+ rtsx_status[8] = chip->ic_version;
+
+ if (check_card_exist(chip, lun))
+ rtsx_status[9] = 1;
+ else
+ rtsx_status[9] = 0;
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
+ rtsx_status[10] = 0;
+ else
+ rtsx_status[10] = 1;
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
+ if (chip->lun2card[lun] == SD_CARD)
+ rtsx_status[11] = SD_CARD;
+ else
+ rtsx_status[11] = MS_CARD;
+ } else {
+ rtsx_status[11] = XD_CARD | SD_CARD | MS_CARD;
+ }
+
+ if (check_card_ready(chip, lun))
+ rtsx_status[12] = 1;
+ else
+ rtsx_status[12] = 0;
+
+ if (get_lun_card(chip, lun) == XD_CARD) {
+ rtsx_status[13] = 0x40;
+ } else if (get_lun_card(chip, lun) == SD_CARD) {
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ rtsx_status[13] = 0x20;
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_HCXC(sd_card))
+ rtsx_status[13] |= 0x04;
+ if (CHK_SD_HS(sd_card))
+ rtsx_status[13] |= 0x02;
+ } else {
+ rtsx_status[13] |= 0x08;
+ if (CHK_MMC_52M(sd_card))
+ rtsx_status[13] |= 0x02;
+ if (CHK_MMC_SECTOR_MODE(sd_card))
+ rtsx_status[13] |= 0x04;
+ }
+ } else if (get_lun_card(chip, lun) == MS_CARD) {
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ if (CHK_MSPRO(ms_card)) {
+ rtsx_status[13] = 0x38;
+ if (CHK_HG8BIT(ms_card))
+ rtsx_status[13] |= 0x04;
+#ifdef SUPPORT_MSXC
+ if (CHK_MSXC(ms_card))
+ rtsx_status[13] |= 0x01;
+#endif
+ } else {
+ rtsx_status[13] = 0x30;
+ }
+ } else {
+ if (CHECK_LUN_MODE(chip, DEFAULT_SINGLE)) {
+#ifdef SUPPORT_SDIO
+ if (chip->sd_io && chip->sd_int)
+ rtsx_status[13] = 0x60;
+ else
+ rtsx_status[13] = 0x70;
+#else
+ rtsx_status[13] = 0x70;
+#endif
+ } else {
+ if (chip->lun2card[lun] == SD_CARD)
+ rtsx_status[13] = 0x20;
+ else
+ rtsx_status[13] = 0x30;
+ }
+ }
+
+ rtsx_status[14] = 0x78;
+ if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip))
+ rtsx_status[15] = 0x83;
+ else
+ rtsx_status[15] = 0x82;
+
+ buf_len = min_t(unsigned int, scsi_bufflen(srb), sizeof(rtsx_status));
+ rtsx_stor_set_xfer_buf(rtsx_status, buf_len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_card_bus_width(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ u8 card, bus_width;
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ card = get_lun_card(chip, lun);
+ if ((card == SD_CARD) || (card == MS_CARD)) {
+ bus_width = chip->card_bus_width[lun];
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ rtsx_stor_set_xfer_buf(&bus_width, scsi_bufflen(srb), srb);
+
+ return TRANSPORT_GOOD;
+}
+
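+/*
+ * Vendor SPI pass-through: drive the card GPIO direction for SPI
+ * access, dispatch to the SPI flash helpers and restore the original
+ * direction afterwards.
+ */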
+static int spi_vendor_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int result;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 gpio_dir;
+
+ if (CHECK_PID(chip, 0x5208) || CHECK_PID(chip, 0x5288)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ rtsx_force_power_on(chip, SSC_PDCTL);
+
+ rtsx_read_register(chip, CARD_GPIO_DIR, &gpio_dir);
+ rtsx_write_register(chip, CARD_GPIO_DIR, 0x07, gpio_dir & 0x06);
+
+ switch (srb->cmnd[2]) {
+ case SCSI_SPI_GETSTATUS:
+ result = spi_get_status(srb, chip);
+ break;
+
+ case SCSI_SPI_SETPARAMETER:
+ result = spi_set_parameter(srb, chip);
+ break;
+
+ case SCSI_SPI_READFALSHID:
+ result = spi_read_flash_id(srb, chip);
+ break;
+
+ case SCSI_SPI_READFLASH:
+ result = spi_read_flash(srb, chip);
+ break;
+
+ case SCSI_SPI_WRITEFLASH:
+ result = spi_write_flash(srb, chip);
+ break;
+
+ case SCSI_SPI_WRITEFLASHSTATUS:
+ result = spi_write_flash_status(srb, chip);
+ break;
+
+ case SCSI_SPI_ERASEFLASH:
+ result = spi_erase_flash(srb, chip);
+ break;
+
+ default:
+ rtsx_write_register(chip, CARD_GPIO_DIR, 0x07, gpio_dir);
+
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ rtsx_write_register(chip, CARD_GPIO_DIR, 0x07, gpio_dir);
+
+ if (result != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ return TRANSPORT_GOOD;
+}
+
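+/*
+ * Dispatcher for the vendor-specific command set: route the sub-command
+ * code in cmnd[1] to its handler.
+ */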
+static int vendor_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int result;
+
+ switch (srb->cmnd[1]) {
+ case READ_STATUS:
+ result = read_status(srb, chip);
+ break;
+
+ case READ_MEM:
+ result = read_mem(srb, chip);
+ break;
+
+ case WRITE_MEM:
+ result = write_mem(srb, chip);
+ break;
+
+ case READ_EEPROM:
+ result = read_eeprom(srb, chip);
+ break;
+
+ case WRITE_EEPROM:
+ result = write_eeprom(srb, chip);
+ break;
+
+ case TOGGLE_GPIO:
+ result = toggle_gpio_cmd(srb, chip);
+ break;
+
+ case GET_SD_CSD:
+ result = get_sd_csd(srb, chip);
+ break;
+
+ case GET_BUS_WIDTH:
+ result = get_card_bus_width(srb, chip);
+ break;
+
+#ifdef _MSG_TRACE
+ case TRACE_MSG:
+ result = trace_msg_cmd(srb, chip);
+ break;
+#endif
+
+ case SCSI_APP_CMD:
+ result = app_cmd(srb, chip);
+ break;
+
+ case SPI_VENDOR_COMMAND:
+ result = spi_vendor_cmd(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+
+#if !defined(LED_AUTO_BLINK) && !defined(REGULAR_BLINK)
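+/*
+ * Toggle the LED GPIO once the accumulated READ/WRITE sector count for
+ * a LUN reaches GPIO_TOGGLE_THRESHOLD, used when neither automatic nor
+ * regular blinking is compiled in.
+ */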
+void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ u16 sec_cnt;
+
+ if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10))
+ sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
+ else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6))
+ sec_cnt = srb->cmnd[4];
+ else
+ return;
+
+ if (chip->rw_cap[lun] >= GPIO_TOGGLE_THRESHOLD) {
+ toggle_gpio(chip, LED_GPIO);
+ chip->rw_cap[lun] = 0;
+ } else {
+ chip->rw_cap[lun] += sec_cnt;
+ }
+}
+#endif
+
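+/*
+ * Vendor MS_FORMAT command: verify the "MGfmt" signature in the CDB and
+ * start a quick or full MS Pro format via mspro_format().
+ */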
+static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval, quick_format;
+
+ if (get_lun_card(chip, lun) != MS_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47) ||
+ (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) ||
+ (srb->cmnd[7] != 0x74)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+
+ if (!check_card_ready(chip, lun) ||
+ (get_card_size(chip, lun) == 0)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ if (srb->cmnd[8] & 0x01)
+ quick_format = 0;
+ else
+ quick_format = 1;
+
+ if (!(chip->card_ready & MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (chip->card_wp & MS_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ retval = mspro_format(srb, chip, MS_SHORT_DATA_LEN, quick_format);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+
+#ifdef SUPPORT_PCGL_1P18
+static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ u8 dev_info_id, data_len;
+ u8 *buf;
+ unsigned int buf_len;
+ int i;
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) ||
+ (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
+ (srb->cmnd[7] != 0x44)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ dev_info_id = srb->cmnd[3];
+ if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) ||
+ (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
+ !CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (dev_info_id == 0x15)
+ buf_len = data_len = 0x3A;
+ else
+ buf_len = data_len = 0x6A;
+
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ i = 0;
+ /* GET Memory Stick Media Information Response Header */
+ buf[i++] = 0x00; /* Data length MSB */
+ buf[i++] = data_len; /* Data length LSB */
+ /* Device Information Type Code */
+ if (CHK_MSXC(ms_card))
+ buf[i++] = 0x03;
+ else
+ buf[i++] = 0x02;
+
+ /* SGM bit */
+ buf[i++] = 0x01;
+ /* Reserved */
+ buf[i++] = 0x00;
+ buf[i++] = 0x00;
+ buf[i++] = 0x00;
+ /* Number of Device Information */
+ buf[i++] = 0x01;
+
+ /* Device Information Body */
+
+ /* Device Information ID Number */
+ buf[i++] = dev_info_id;
+ /* Device Information Length */
+ if (dev_info_id == 0x15)
+ data_len = 0x31;
+ else
+ data_len = 0x61;
+
+ buf[i++] = 0x00; /* Data length MSB */
+ buf[i++] = data_len; /* Data length LSB */
+ /* Valid Bit */
+ buf[i++] = 0x80;
+ if ((dev_info_id == 0x10) || (dev_info_id == 0x13)) {
+ /* System Information */
+ memcpy(buf + i, ms_card->raw_sys_info, 96);
+ } else {
+ /* Model Name */
+ memcpy(buf + i, ms_card->raw_model_name, 48);
+ }
+
+ rtsx_stor_set_xfer_buf(buf, buf_len, srb);
+
+ if (dev_info_id == 0x15)
+ scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C);
+ else
+ scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C);
+
+ kfree(buf);
+ return TRANSPORT_GOOD;
+}
+#endif
+
+static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval = TRANSPORT_ERROR;
+
+ if (srb->cmnd[2] == MS_FORMAT)
+ retval = ms_format_cmnd(srb, chip);
+#ifdef SUPPORT_PCGL_1P18
+ else if (srb->cmnd[2] == GET_MS_INFORMATION)
+ retval = get_ms_information(srb, chip);
+#endif
+
+ return retval;
+}
+
+#ifdef SUPPORT_CPRM
+static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ int result;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ sd_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != SD_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ switch (srb->cmnd[0]) {
+ case SD_PASS_THRU_MODE:
+ result = sd_pass_thru_mode(srb, chip);
+ break;
+
+ case SD_EXECUTE_NO_DATA:
+ result = sd_execute_no_data(srb, chip);
+ break;
+
+ case SD_EXECUTE_READ:
+ result = sd_execute_read_data(srb, chip);
+ break;
+
+ case SD_EXECUTE_WRITE:
+ result = sd_execute_write_data(srb, chip);
+ break;
+
+ case SD_GET_RSP:
+ result = sd_get_cmd_rsp(srb, chip);
+ break;
+
+ case SD_HW_RST:
+ result = sd_hw_rst(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u8 key_format;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ ms_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->cmnd[7] != KC_MG_R_PRO) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ key_format = srb->cmnd[10] & 0x3F;
+ RTSX_DEBUGP("key_format = 0x%x\n", key_format);
+
+ switch (key_format) {
+ case KF_GET_LOC_EKB:
+ if ((scsi_bufflen(srb) == 0x41C) &&
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x1C)) {
+ retval = mg_get_local_EKB(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_RSP_CHG:
+ if ((scsi_bufflen(srb) == 0x24) &&
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x24)) {
+ retval = mg_get_rsp_chg(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_GET_ICV:
+ ms_card->mg_entry_num = srb->cmnd[5];
+ if ((scsi_bufflen(srb) == 0x404) &&
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x04) &&
+ (srb->cmnd[2] == 0x00) &&
+ (srb->cmnd[3] == 0x00) &&
+ (srb->cmnd[4] == 0x00) &&
+ (srb->cmnd[5] < 32)) {
+ retval = mg_get_ICV(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+
+static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u8 key_format;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ ms_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if (check_card_wp(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->cmnd[7] != KC_MG_R_PRO) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ key_format = srb->cmnd[10] & 0x3F;
+ RTSX_DEBUGP("key_format = 0x%x\n", key_format);
+
+ switch (key_format) {
+ case KF_SET_LEAF_ID:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x0C)) {
+ retval = mg_set_leaf_id(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_CHG_HOST:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x0C)) {
+ retval = mg_chg(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_RSP_HOST:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x0C)) {
+ retval = mg_rsp(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_SET_ICV:
+ ms_card->mg_entry_num = srb->cmnd[5];
+ if ((scsi_bufflen(srb) == 0x404) &&
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x04) &&
+ (srb->cmnd[2] == 0x00) &&
+ (srb->cmnd[3] == 0x00) &&
+ (srb->cmnd[4] == 0x00) &&
+ (srb->cmnd[5] < 32)) {
+ retval = mg_set_ICV(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+#endif
+
+int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+#endif
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int result;
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_erase_status) {
+ /* Block all SCSI commands except for
+ * REQUEST_SENSE and rs_ppstatus
+ */
+ if (!((srb->cmnd[0] == VENDOR_CMND) &&
+ (srb->cmnd[1] == SCSI_APP_CMD) &&
+ (srb->cmnd[2] == GET_DEV_STATUS)) &&
+ (srb->cmnd[0] != REQUEST_SENSE)) {
+ /* Logical Unit Not Ready Format in Progress */
+ set_sense_data(chip, lun, CUR_ERR,
+ 0x02, 0, 0x04, 0x04, 0, 0);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#endif
+
+ if ((get_lun_card(chip, lun) == MS_CARD) &&
+ (ms_card->format_status == FORMAT_IN_PROGRESS)) {
+ if ((srb->cmnd[0] != REQUEST_SENSE) &&
+ (srb->cmnd[0] != INQUIRY)) {
+ /* Logical Unit Not Ready Format in Progress */
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
+ 0, (u16)(ms_card->progress));
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ switch (srb->cmnd[0]) {
+ case READ_10:
+ case WRITE_10:
+ case READ_6:
+ case WRITE_6:
+ result = read_write(srb, chip);
+#if !defined(LED_AUTO_BLINK) && !defined(REGULAR_BLINK)
+ led_shine(srb, chip);
+#endif
+ break;
+
+ case TEST_UNIT_READY:
+ result = test_unit_ready(srb, chip);
+ break;
+
+ case INQUIRY:
+ result = inquiry(srb, chip);
+ break;
+
+ case READ_CAPACITY:
+ result = read_capacity(srb, chip);
+ break;
+
+ case START_STOP:
+ result = start_stop_unit(srb, chip);
+ break;
+
+ case ALLOW_MEDIUM_REMOVAL:
+ result = allow_medium_removal(srb, chip);
+ break;
+
+ case REQUEST_SENSE:
+ result = request_sense(srb, chip);
+ break;
+
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ result = mode_sense(srb, chip);
+ break;
+
+ case 0x23:
+ result = read_format_capacity(srb, chip);
+ break;
+
+ case VENDOR_CMND:
+ result = vendor_cmnd(srb, chip);
+ break;
+
+ case MS_SP_CMND:
+ result = ms_sp_cmnd(srb, chip);
+ break;
+
+#ifdef SUPPORT_CPRM
+ case SD_PASS_THRU_MODE:
+ case SD_EXECUTE_NO_DATA:
+ case SD_EXECUTE_READ:
+ case SD_EXECUTE_WRITE:
+ case SD_GET_RSP:
+ case SD_HW_RST:
+ result = sd_extention_cmnd(srb, chip);
+ break;
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+ case CMD_MSPRO_MG_RKEY:
+ result = mg_report_key(srb, chip);
+ break;
+
+ case CMD_MSPRO_MG_SKEY:
+ result = mg_send_key(srb, chip);
+ break;
+#endif
+
+ case FORMAT_UNIT:
+ case MODE_SELECT:
+ case VERIFY:
+ result = TRANSPORT_GOOD;
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ result = TRANSPORT_FAILED;
+ }
+
+ return result;
+}
diff --git a/drivers/staging/rts5208/rtsx_scsi.h b/drivers/staging/rts5208/rtsx_scsi.h
new file mode 100644
index 0000000..d175057
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_scsi.h
@@ -0,0 +1,143 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_SCSI_H
+#define __REALTEK_RTSX_SCSI_H
+
+#include "rtsx.h"
+#include "rtsx_chip.h"
+
+#define MS_SP_CMND 0xFA
+#define MS_FORMAT 0xA0
+#define GET_MS_INFORMATION 0xB0
+
+#define VENDOR_CMND 0xF0
+
+#define READ_STATUS 0x09
+
+#define READ_EEPROM 0x04
+#define WRITE_EEPROM 0x05
+#define READ_MEM 0x0D
+#define WRITE_MEM 0x0E
+#define GET_BUS_WIDTH 0x13
+#define GET_SD_CSD 0x14
+#define TOGGLE_GPIO 0x15
+#define TRACE_MSG 0x18
+
+#define SCSI_APP_CMD 0x10
+
+#define PP_READ10 0x1A
+#define PP_WRITE10 0x0A
+#define READ_HOST_REG 0x1D
+#define WRITE_HOST_REG 0x0D
+#define SET_VAR 0x05
+#define GET_VAR 0x15
+#define DMA_READ 0x16
+#define DMA_WRITE 0x06
+#define GET_DEV_STATUS 0x10
+#define SET_CHIP_MODE 0x27
+#define SUIT_CMD 0xE0
+#define WRITE_PHY 0x07
+#define READ_PHY 0x17
+#define WRITE_EEPROM2 0x03
+#define READ_EEPROM2 0x13
+#define ERASE_EEPROM2 0x23
+#define WRITE_EFUSE 0x04
+#define READ_EFUSE 0x14
+#define WRITE_CFG 0x0E
+#define READ_CFG 0x1E
+
+#define SPI_VENDOR_COMMAND 0x1C
+
+#define SCSI_SPI_GETSTATUS 0x00
+#define SCSI_SPI_SETPARAMETER 0x01
+#define SCSI_SPI_READFALSHID 0x02
+#define SCSI_SPI_READFLASH 0x03
+#define SCSI_SPI_WRITEFLASH 0x04
+#define SCSI_SPI_WRITEFLASHSTATUS 0x05
+#define SCSI_SPI_ERASEFLASH 0x06
+
+#define INIT_BATCHCMD 0x41
+#define ADD_BATCHCMD 0x42
+#define SEND_BATCHCMD 0x43
+#define GET_BATCHRSP 0x44
+
+#define CHIP_NORMALMODE 0x00
+#define CHIP_DEBUGMODE 0x01
+
+/* SD Pass Through Command Extension */
+#define SD_PASS_THRU_MODE 0xD0
+#define SD_EXECUTE_NO_DATA 0xD1
+#define SD_EXECUTE_READ 0xD2
+#define SD_EXECUTE_WRITE 0xD3
+#define SD_GET_RSP 0xD4
+#define SD_HW_RST 0xD6
+
+#ifdef SUPPORT_MAGIC_GATE
+#define CMD_MSPRO_MG_RKEY 0xA4 /* Report Key Command */
+#define CMD_MSPRO_MG_SKEY 0xA3 /* Send Key Command */
+
+/* CBWCB field: key class */
+#define KC_MG_R_PRO 0xBE /* MG-R PRO */
+
+/* CBWCB field: key format */
+#define KF_SET_LEAF_ID 0x31 /* Set Leaf ID */
+#define KF_GET_LOC_EKB 0x32 /* Get Local EKB */
+#define KF_CHG_HOST 0x33 /* Challenge (host) */
+#define KF_RSP_CHG 0x34 /* Response and Challenge (device) */
+#define KF_RSP_HOST 0x35 /* Response (host) */
+#define KF_GET_ICV 0x36 /* Get ICV */
+#define KF_SET_ICV 0x37 /* Set ICV */
+#endif
+
+/* Sense type */
+#define SENSE_TYPE_NO_SENSE 0
+#define SENSE_TYPE_MEDIA_CHANGE 1
+#define SENSE_TYPE_MEDIA_NOT_PRESENT 2
+#define SENSE_TYPE_MEDIA_LBA_OVER_RANGE 3
+#define SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT 4
+#define SENSE_TYPE_MEDIA_WRITE_PROTECT 5
+#define SENSE_TYPE_MEDIA_INVALID_CMD_FIELD 6
+#define SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR 7
+#define SENSE_TYPE_MEDIA_WRITE_ERR 8
+#define SENSE_TYPE_FORMAT_IN_PROGRESS 9
+#define SENSE_TYPE_FORMAT_CMD_FAILED 10
+#ifdef SUPPORT_MAGIC_GATE
+#define SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB 0x0b
+#define SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN 0x0c
+#define SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM 0x0d
+#define SENSE_TYPE_MG_WRITE_ERR 0x0e
+#endif
+#ifdef SUPPORT_SD_LOCK
+/* For locked SD card */
+#define SENSE_TYPE_MEDIA_READ_FORBIDDEN 0x10
+#endif
+
+void scsi_show_command(struct scsi_cmnd *srb);
+void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type);
+void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
+ u8 sense_key, u32 info, u8 asc, u8 ascq,
+ u8 sns_key_info0, u16 sns_key_info1);
+int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+
+#endif /* __REALTEK_RTSX_SCSI_H */
diff --git a/drivers/staging/rts5208/rtsx_sys.h b/drivers/staging/rts5208/rtsx_sys.h
new file mode 100644
index 0000000..0b6b4d4
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_sys.h
@@ -0,0 +1,50 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __RTSX_SYS_H
+#define __RTSX_SYS_H
+
+#include "rtsx.h"
+#include "rtsx_chip.h"
+#include "rtsx_card.h"
+
+typedef dma_addr_t ULONG_PTR;
+
+static inline void rtsx_exclusive_enter_ss(struct rtsx_chip *chip)
+{
+ struct rtsx_dev *dev = chip->rtsx;
+
+ spin_lock(&(dev->reg_lock));
+ rtsx_enter_ss(chip);
+ spin_unlock(&(dev->reg_lock));
+}
+
+static inline void rtsx_reset_detected_cards(struct rtsx_chip *chip, int flag)
+{
+ rtsx_reset_cards(chip);
+}
+
+#define RTSX_MSG_IN_INT(x)
+
+#endif /* __RTSX_SYS_H */
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
new file mode 100644
index 0000000..97b7b01
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -0,0 +1,769 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+
+#include "rtsx.h"
+#include "rtsx_scsi.h"
+#include "rtsx_transport.h"
+#include "rtsx_chip.h"
+#include "rtsx_card.h"
+#include "debug.h"
+
+/***********************************************************************
+ * Scatter-gather transfer buffer access routines
+ ***********************************************************************/
+
+/* Copy a buffer of length buflen to/from the srb's transfer buffer.
+ * (Note: for scatter-gather transfers (scsi_sg_count(srb) > 0),
+ * scsi_sglist(srb) points to a list of s-g entries and we ignore
+ * scsi_bufflen(srb). For non-scatter-gather transfers, scsi_sglist(srb)
+ * points to the transfer buffer itself and scsi_bufflen(srb) is the
+ * buffer's length.)
+ * Update the *index and *offset variables so that the next copy will
+ * pick up from where this one left off. */
+
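+/* Illustrative usage only (not part of the driver logic): a caller that
+ * copies a reply into the transfer buffer in two chunks keeps index/offset
+ * across calls so the second copy resumes where the first one stopped.
+ * "hdr"/"payload" below are placeholder buffers:
+ *
+ *	unsigned int idx = 0, off = 0;
+ *
+ *	rtsx_stor_access_xfer_buf(hdr, hdr_len, srb, &idx, &off, TO_XFER_BUF);
+ *	rtsx_stor_access_xfer_buf(payload, payload_len, srb, &idx, &off,
+ *				  TO_XFER_BUF);
+ */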
+unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
+ unsigned int *offset, enum xfer_buf_dir dir)
+{
+ unsigned int cnt;
+
+ /* If not using scatter-gather, just transfer the data directly.
+ * Make certain it will fit in the available buffer space. */
+ if (scsi_sg_count(srb) == 0) {
+ if (*offset >= scsi_bufflen(srb))
+ return 0;
+ cnt = min(buflen, scsi_bufflen(srb) - *offset);
+ if (dir == TO_XFER_BUF)
+ memcpy((unsigned char *) scsi_sglist(srb) + *offset,
+ buffer, cnt);
+ else
+ memcpy(buffer, (unsigned char *) scsi_sglist(srb) +
+ *offset, cnt);
+ *offset += cnt;
+
+ /* Using scatter-gather. We have to go through the list one entry
+ * at a time. Each s-g entry contains some number of pages, and
+ * each page has to be kmap()'ed separately. If the page is already
+ * in kernel-addressable memory then kmap() will return its address.
+ * If the page is not directly accessible -- such as a user buffer
+ * located in high memory -- then kmap() will map it to a temporary
+ * position in the kernel's virtual address space. */
+ } else {
+ struct scatterlist *sg =
+ (struct scatterlist *) scsi_sglist(srb)
+ + *index;
+
+ /* This loop handles a single s-g list entry, which may
+ * include multiple pages. Find the initial page structure
+ * and the starting offset within the page, and update
+ * the *offset and *index values for the next loop. */
+ cnt = 0;
+ while (cnt < buflen && *index < scsi_sg_count(srb)) {
+ struct page *page = sg_page(sg) +
+ ((sg->offset + *offset) >> PAGE_SHIFT);
+ unsigned int poff =
+ (sg->offset + *offset) & (PAGE_SIZE-1);
+ unsigned int sglen = sg->length - *offset;
+
+ if (sglen > buflen - cnt) {
+
+ /* Transfer ends within this s-g entry */
+ sglen = buflen - cnt;
+ *offset += sglen;
+ } else {
+
+ /* Transfer continues to next s-g entry */
+ *offset = 0;
+ ++*index;
+ ++sg;
+ }
+
+ /* Transfer the data for all the pages in this
+ * s-g entry. For each page: call kmap(), do the
+ * transfer, and call kunmap() immediately after. */
+ while (sglen > 0) {
+ unsigned int plen = min(sglen, (unsigned int)
+ PAGE_SIZE - poff);
+ unsigned char *ptr = kmap(page);
+
+ if (dir == TO_XFER_BUF)
+ memcpy(ptr + poff, buffer + cnt, plen);
+ else
+ memcpy(buffer + cnt, ptr + poff, plen);
+ kunmap(page);
+
+ /* Start at the beginning of the next page */
+ poff = 0;
+ ++page;
+ cnt += plen;
+ sglen -= plen;
+ }
+ }
+ }
+
+ /* Return the amount actually transferred */
+ return cnt;
+}
+
+/* Store the contents of buffer into srb's transfer buffer and set the
+ * SCSI residue. */
+void rtsx_stor_set_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb)
+{
+ unsigned int index = 0, offset = 0;
+
+ rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
+ TO_XFER_BUF);
+ if (buflen < scsi_bufflen(srb))
+ scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
+}
+
+void rtsx_stor_get_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb)
+{
+ unsigned int index = 0, offset = 0;
+
+ rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
+ FROM_XFER_BUF);
+ if (buflen < scsi_bufflen(srb))
+ scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
+}
+
+
+/***********************************************************************
+ * Transport routines
+ ***********************************************************************/
+
+/* Invoke the transport and basic error-handling/recovery methods
+ *
+ * This is used to send the message to the device and receive the response.
+ */
+void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int result;
+
+ result = rtsx_scsi_handler(srb, chip);
+
+ /* if the command gets aborted by the higher layers, we need to
+ * short-circuit all other processing
+ */
+ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
+ RTSX_DEBUGP("-- command was aborted\n");
+ srb->result = DID_ABORT << 16;
+ goto Handle_Errors;
+ }
+
+ /* if there is a transport error, reset and don't auto-sense */
+ if (result == TRANSPORT_ERROR) {
+ RTSX_DEBUGP("-- transport indicates error, resetting\n");
+ srb->result = DID_ERROR << 16;
+ goto Handle_Errors;
+ }
+
+ srb->result = SAM_STAT_GOOD;
+
+ /*
+ * If we have a failure, we're going to do a REQUEST_SENSE
+ * automatically. Note that we differentiate between a command
+ * "failure" and an "error" in the transport mechanism.
+ */
+ if (result == TRANSPORT_FAILED) {
+ /* set the result so the higher layers expect this data */
+ srb->result = SAM_STAT_CHECK_CONDITION;
+ memcpy(srb->sense_buffer,
+ (unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]),
+ sizeof(struct sense_data_t));
+ }
+
+ return;
+
+ /* Error and abort processing: try to resynchronize with the device
+ * by issuing a port reset. If that fails, try a class-specific
+ * device reset. */
+Handle_Errors:
+ return;
+}
+
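+/* Queue one host command word. As encoded below, each entry packs the
+ * command type into bits 31:30, the register address into bits 29:16,
+ * the mask into bits 15:8 and the data byte into bits 7:0 of a
+ * little-endian 32-bit word in the host command buffer.
+ */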
+void rtsx_add_cmd(struct rtsx_chip *chip,
+ u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
+{
+ u32 *cb = (u32 *)(chip->host_cmds_ptr);
+ u32 val = 0;
+
+ val |= (u32)(cmd_type & 0x03) << 30;
+ val |= (u32)(reg_addr & 0x3FFF) << 16;
+ val |= (u32)mask << 8;
+ val |= (u32)data;
+
+ spin_lock_irq(&chip->rtsx->reg_lock);
+ if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
+ cb[(chip->ci)++] = cpu_to_le32(val);
+
+ spin_unlock_irq(&chip->rtsx->reg_lock);
+}
+
+void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
+{
+ u32 val = 1U << 31;
+
+ rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);
+
+ val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
+ /* Hardware Auto Response */
+ val |= 0x40000000;
+ rtsx_writel(chip, RTSX_HCBCTLR, val);
+}
+
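+/* Kick off the queued host commands and wait for them to finish: the
+ * command buffer address is programmed into RTSX_HCBAR, the length
+ * (chip->ci * 4 bytes) plus the auto-response flag into RTSX_HCBCTLR,
+ * and the caller then waits up to 'timeout' milliseconds on the
+ * completion set up in rtsx->done for TRANS_OK_INT.
+ */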
+int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
+{
+ struct rtsx_dev *rtsx = chip->rtsx;
+ struct completion trans_done;
+ u32 val = 1U << 31;
+ long timeleft;
+ int err = 0;
+
+ if (card == SD_CARD)
+ rtsx->check_card_cd = SD_EXIST;
+ else if (card == MS_CARD)
+ rtsx->check_card_cd = MS_EXIST;
+ else if (card == XD_CARD)
+ rtsx->check_card_cd = XD_EXIST;
+ else
+ rtsx->check_card_cd = 0;
+
+ spin_lock_irq(&rtsx->reg_lock);
+
+ /* set up data structures for the wakeup system */
+ rtsx->done = &trans_done;
+ rtsx->trans_result = TRANS_NOT_READY;
+ init_completion(&trans_done);
+ rtsx->trans_state = STATE_TRANS_CMD;
+
+ rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);
+
+ val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
+ /* Hardware Auto Response */
+ val |= 0x40000000;
+ rtsx_writel(chip, RTSX_HCBCTLR, val);
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ /* Wait for TRANS_OK_INT */
+ timeleft = wait_for_completion_interruptible_timeout(
+ &trans_done, timeout * HZ / 1000);
+ if (timeleft <= 0) {
+ RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
+ err = -ETIMEDOUT;
+ TRACE_GOTO(chip, finish_send_cmd);
+ }
+
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_RESULT_FAIL)
+ err = -EIO;
+ else if (rtsx->trans_result == TRANS_RESULT_OK)
+ err = 0;
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+finish_send_cmd:
+ rtsx->done = NULL;
+ rtsx->trans_state = STATE_TRANS_NONE;
+
+ if (err < 0)
+ rtsx_stop_cmd(chip, card);
+
+ return err;
+}
+
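+/* Append one entry to the host scatter-gather table. Each 64-bit descriptor
+ * holds the DMA address in bits 63:32, the length shifted into the bits
+ * above the option field, and the option flags in the low bits; segments
+ * longer than 0x80000 bytes are split into multiple descriptors with the
+ * SG_END flag deferred to the final chunk.
+ */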
+static inline void rtsx_add_sg_tbl(
+ struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
+{
+ u64 *sgb = (u64 *)(chip->host_sg_tbl_ptr);
+ u64 val = 0;
+ u32 temp_len = 0;
+ u8 temp_opt = 0;
+
+ do {
+ if (len > 0x80000) {
+ temp_len = 0x80000;
+ temp_opt = option & (~SG_END);
+ } else {
+ temp_len = len;
+ temp_opt = option;
+ }
+ val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;
+
+ if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
+ sgb[(chip->sgi)++] = cpu_to_le64(val);
+
+ len -= temp_len;
+ addr += temp_len;
+ } while (len);
+}
+
+static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
+ struct scatterlist *sg, int num_sg, unsigned int *index,
+ unsigned int *offset, int size,
+ enum dma_data_direction dma_dir, int timeout)
+{
+ struct rtsx_dev *rtsx = chip->rtsx;
+ struct completion trans_done;
+ u8 dir;
+ int sg_cnt, i, resid;
+ int err = 0;
+ long timeleft;
+ struct scatterlist *sg_ptr;
+ u32 val = TRIG_DMA;
+
+ if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
+ return -EIO;
+
+ if (dma_dir == DMA_TO_DEVICE)
+ dir = HOST_TO_DEVICE;
+ else if (dma_dir == DMA_FROM_DEVICE)
+ dir = DEVICE_TO_HOST;
+ else
+ return -ENXIO;
+
+ if (card == SD_CARD)
+ rtsx->check_card_cd = SD_EXIST;
+ else if (card == MS_CARD)
+ rtsx->check_card_cd = MS_EXIST;
+ else if (card == XD_CARD)
+ rtsx->check_card_cd = XD_EXIST;
+ else
+ rtsx->check_card_cd = 0;
+
+ spin_lock_irq(&rtsx->reg_lock);
+
+ /* set up data structures for the wakeup system */
+ rtsx->done = &trans_done;
+
+ rtsx->trans_state = STATE_TRANS_SG;
+ rtsx->trans_result = TRANS_NOT_READY;
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+
+ resid = size;
+ sg_ptr = sg;
+ chip->sgi = 0;
+ /* Usually the next entry will be @sg@ + 1, but if this sg element
+ * is part of a chained scatterlist, it could jump to the start of
+ * a new scatterlist array. So here we use sg_next to move to
+ * the proper sg
+ */
+ for (i = 0; i < *index; i++)
+ sg_ptr = sg_next(sg_ptr);
+ for (i = *index; i < sg_cnt; i++) {
+ dma_addr_t addr;
+ unsigned int len;
+ u8 option;
+
+ addr = sg_dma_address(sg_ptr);
+ len = sg_dma_len(sg_ptr);
+
+ RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
+ (unsigned int)addr, len);
+ RTSX_DEBUGP("*index = %d, *offset = %d\n", *index, *offset);
+
+ addr += *offset;
+
+ if ((len - *offset) > resid) {
+ *offset += resid;
+ len = resid;
+ resid = 0;
+ } else {
+ resid -= (len - *offset);
+ len -= *offset;
+ *offset = 0;
+ *index = *index + 1;
+ }
+ if ((i == (sg_cnt - 1)) || !resid)
+ option = SG_VALID | SG_END | SG_TRANS_DATA;
+ else
+ option = SG_VALID | SG_TRANS_DATA;
+
+ rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);
+
+ if (!resid)
+ break;
+
+ sg_ptr = sg_next(sg_ptr);
+ }
+
+ RTSX_DEBUGP("SG table count = %d\n", chip->sgi);
+
+ val |= (u32)(dir & 0x01) << 29;
+ val |= ADMA_MODE;
+
+ spin_lock_irq(&rtsx->reg_lock);
+
+ init_completion(&trans_done);
+
+ rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
+ rtsx_writel(chip, RTSX_HDBCTLR, val);
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ timeleft = wait_for_completion_interruptible_timeout(
+ &trans_done, timeout * HZ / 1000);
+ if (timeleft <= 0) {
+ RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
+ RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_RESULT_FAIL) {
+ err = -EIO;
+ spin_unlock_irq(&rtsx->reg_lock);
+ goto out;
+ }
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ /* Wait for TRANS_OK_INT */
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_NOT_READY) {
+ init_completion(&trans_done);
+ spin_unlock_irq(&rtsx->reg_lock);
+ timeleft = wait_for_completion_interruptible_timeout(
+ &trans_done, timeout * HZ / 1000);
+ if (timeleft <= 0) {
+ RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
+ RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+ } else {
+ spin_unlock_irq(&rtsx->reg_lock);
+ }
+
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_RESULT_FAIL)
+ err = -EIO;
+ else if (rtsx->trans_result == TRANS_RESULT_OK)
+ err = 0;
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+out:
+ rtsx->done = NULL;
+ rtsx->trans_state = STATE_TRANS_NONE;
+ dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+
+ if (err < 0)
+ rtsx_stop_cmd(chip, card);
+
+ return err;
+}
+
+static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
+ struct scatterlist *sg, int num_sg,
+ enum dma_data_direction dma_dir, int timeout)
+{
+ struct rtsx_dev *rtsx = chip->rtsx;
+ struct completion trans_done;
+ u8 dir;
+ int buf_cnt, i;
+ int err = 0;
+ long timeleft;
+ struct scatterlist *sg_ptr;
+
+ if ((sg == NULL) || (num_sg <= 0))
+ return -EIO;
+
+ if (dma_dir == DMA_TO_DEVICE)
+ dir = HOST_TO_DEVICE;
+ else if (dma_dir == DMA_FROM_DEVICE)
+ dir = DEVICE_TO_HOST;
+ else
+ return -ENXIO;
+
+ if (card == SD_CARD)
+ rtsx->check_card_cd = SD_EXIST;
+ else if (card == MS_CARD)
+ rtsx->check_card_cd = MS_EXIST;
+ else if (card == XD_CARD)
+ rtsx->check_card_cd = XD_EXIST;
+ else
+ rtsx->check_card_cd = 0;
+
+ spin_lock_irq(&rtsx->reg_lock);
+
+ /* set up data structures for the wakeup system */
+ rtsx->done = &trans_done;
+
+ rtsx->trans_state = STATE_TRANS_SG;
+ rtsx->trans_result = TRANS_NOT_READY;
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ buf_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+
+ sg_ptr = sg;
+
+ for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
+ u32 val = TRIG_DMA;
+ int sg_cnt, j;
+
+ if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
+ sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
+ else
+ sg_cnt = (HOST_SG_TBL_BUF_LEN / 8);
+
+ chip->sgi = 0;
+ for (j = 0; j < sg_cnt; j++) {
+ dma_addr_t addr = sg_dma_address(sg_ptr);
+ unsigned int len = sg_dma_len(sg_ptr);
+ u8 option;
+
+ RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
+ (unsigned int)addr, len);
+
+ if (j == (sg_cnt - 1))
+ option = SG_VALID | SG_END | SG_TRANS_DATA;
+ else
+ option = SG_VALID | SG_TRANS_DATA;
+
+ rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);
+
+ sg_ptr = sg_next(sg_ptr);
+ }
+
+ RTSX_DEBUGP("SG table count = %d\n", chip->sgi);
+
+ val |= (u32)(dir & 0x01) << 29;
+ val |= ADMA_MODE;
+
+ spin_lock_irq(&rtsx->reg_lock);
+
+ init_completion(&trans_done);
+
+ rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
+ rtsx_writel(chip, RTSX_HDBCTLR, val);
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ timeleft = wait_for_completion_interruptible_timeout(
+ &trans_done, timeout * HZ / 1000);
+ if (timeleft <= 0) {
+ RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
+ RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_RESULT_FAIL) {
+ err = -EIO;
+ spin_unlock_irq(&rtsx->reg_lock);
+ goto out;
+ }
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ sg_ptr += sg_cnt;
+ }
+
+ /* Wait for TRANS_OK_INT */
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_NOT_READY) {
+ init_completion(&trans_done);
+ spin_unlock_irq(&rtsx->reg_lock);
+ timeleft = wait_for_completion_interruptible_timeout(
+ &trans_done, timeout * HZ / 1000);
+ if (timeleft <= 0) {
+ RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
+ RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+ } else {
+ spin_unlock_irq(&rtsx->reg_lock);
+ }
+
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_RESULT_FAIL)
+ err = -EIO;
+ else if (rtsx->trans_result == TRANS_RESULT_OK)
+ err = 0;
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+out:
+ rtsx->done = NULL;
+ rtsx->trans_state = STATE_TRANS_NONE;
+ dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+
+ if (err < 0)
+ rtsx_stop_cmd(chip, card);
+
+ return err;
+}
+
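+/* Transfer a single contiguous buffer by DMA: map it with dma_map_single(),
+ * program the data buffer address into RTSX_HDBAR and the direction/length
+ * word into RTSX_HDBCTLR, then wait up to 'timeout' ms for TRANS_OK_INT.
+ */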
+static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
+ enum dma_data_direction dma_dir, int timeout)
+{
+ struct rtsx_dev *rtsx = chip->rtsx;
+ struct completion trans_done;
+ dma_addr_t addr;
+ u8 dir;
+ int err = 0;
+ u32 val = (1U << 31);
+ long timeleft;
+
+ if ((buf == NULL) || (len <= 0))
+ return -EIO;
+
+ if (dma_dir == DMA_TO_DEVICE)
+ dir = HOST_TO_DEVICE;
+ else if (dma_dir == DMA_FROM_DEVICE)
+ dir = DEVICE_TO_HOST;
+ else
+ return -ENXIO;
+
+ addr = dma_map_single(&(rtsx->pci->dev), buf, len, dma_dir);
+ if (dma_mapping_error(&(rtsx->pci->dev), addr))
+ return -ENOMEM;
+
+ if (card == SD_CARD)
+ rtsx->check_card_cd = SD_EXIST;
+ else if (card == MS_CARD)
+ rtsx->check_card_cd = MS_EXIST;
+ else if (card == XD_CARD)
+ rtsx->check_card_cd = XD_EXIST;
+ else
+ rtsx->check_card_cd = 0;
+
+ val |= (u32)(dir & 0x01) << 29;
+ val |= (u32)(len & 0x00FFFFFF);
+
+ spin_lock_irq(&rtsx->reg_lock);
+
+ /* set up data structures for the wakeup system */
+ rtsx->done = &trans_done;
+
+ init_completion(&trans_done);
+
+ rtsx->trans_state = STATE_TRANS_BUF;
+ rtsx->trans_result = TRANS_NOT_READY;
+
+ rtsx_writel(chip, RTSX_HDBAR, addr);
+ rtsx_writel(chip, RTSX_HDBCTLR, val);
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ /* Wait for TRANS_OK_INT */
+ timeleft = wait_for_completion_interruptible_timeout(
+ &trans_done, timeout * HZ / 1000);
+ if (timeleft <= 0) {
+ RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
+ RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_RESULT_FAIL)
+ err = -EIO;
+ else if (rtsx->trans_result == TRANS_RESULT_OK)
+ err = 0;
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+out:
+ rtsx->done = NULL;
+ rtsx->trans_state = STATE_TRANS_NONE;
+ dma_unmap_single(&(rtsx->pci->dev), addr, len, dma_dir);
+
+ if (err < 0)
+ rtsx_stop_cmd(chip, card);
+
+ return err;
+}
+
+int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
+ void *buf, size_t len, int use_sg, unsigned int *index,
+ unsigned int *offset, enum dma_data_direction dma_dir,
+ int timeout)
+{
+ int err = 0;
+
+ /* don't transfer data during abort processing */
+ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
+ return -EIO;
+
+ if (use_sg) {
+ err = rtsx_transfer_sglist_adma_partial(chip, card,
+ (struct scatterlist *)buf, use_sg,
+ index, offset, (int)len, dma_dir, timeout);
+ } else {
+ err = rtsx_transfer_buf(chip, card,
+ buf, len, dma_dir, timeout);
+ }
+
+ if (err < 0) {
+ if (RTSX_TST_DELINK(chip)) {
+ RTSX_CLR_DELINK(chip);
+ chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
+ rtsx_reinit_cards(chip, 1);
+ }
+ }
+
+ return err;
+}
+
+int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
+ int use_sg, enum dma_data_direction dma_dir, int timeout)
+{
+ int err = 0;
+
+ RTSX_DEBUGP("use_sg = %d\n", use_sg);
+
+ /* don't transfer data during abort processing */
+ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
+ return -EIO;
+
+ if (use_sg) {
+ err = rtsx_transfer_sglist_adma(chip, card,
+ (struct scatterlist *)buf,
+ use_sg, dma_dir, timeout);
+ } else {
+ err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);
+ }
+
+ if (err < 0) {
+ if (RTSX_TST_DELINK(chip)) {
+ RTSX_CLR_DELINK(chip);
+ chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
+ rtsx_reinit_cards(chip, 1);
+ }
+ }
+
+ return err;
+}
diff --git a/drivers/staging/rts5208/rtsx_transport.h b/drivers/staging/rts5208/rtsx_transport.h
new file mode 100644
index 0000000..b4b1123
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_transport.h
@@ -0,0 +1,66 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_TRANSPORT_H
+#define __REALTEK_RTSX_TRANSPORT_H
+
+#include "rtsx.h"
+#include "rtsx_chip.h"
+
+#define WAIT_TIME 2000
+
+unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
+ unsigned int *offset, enum xfer_buf_dir dir);
+void rtsx_stor_set_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb);
+void rtsx_stor_get_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb);
+void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+
+
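+/* Host command batching: a caller builds a command sequence with
+ * rtsx_init_cmd() followed by one rtsx_add_cmd() per register access, then
+ * issues it with rtsx_send_cmd() (blocking) or rtsx_send_cmd_no_wait().
+ * Illustrative example (SOME_REG, some_val and READY_BIT are placeholders):
+ *
+ *	rtsx_init_cmd(chip);
+ *	rtsx_add_cmd(chip, WRITE_REG_CMD, SOME_REG, 0xFF, some_val);
+ *	rtsx_add_cmd(chip, CHECK_REG_CMD, SOME_REG, READY_BIT, READY_BIT);
+ *	retval = rtsx_send_cmd(chip, SD_CARD, 100);
+ */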
+#define rtsx_init_cmd(chip) ((chip)->ci = 0)
+
+void rtsx_add_cmd(struct rtsx_chip *chip,
+ u8 cmd_type, u16 reg_addr, u8 mask, u8 data);
+void rtsx_send_cmd_no_wait(struct rtsx_chip *chip);
+int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout);
+
+static inline u8 *rtsx_get_cmd_data(struct rtsx_chip *chip)
+{
+#ifdef CMD_USING_SG
+ return (u8 *)(chip->host_sg_tbl_ptr);
+#else
+ return (u8 *)(chip->host_cmds_ptr);
+#endif
+}
+
+int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
+ int use_sg, enum dma_data_direction dma_dir, int timeout);
+
+int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
+ void *buf, size_t len,
+ int use_sg, unsigned int *index, unsigned int *offset,
+ enum dma_data_direction dma_dir, int timeout);
+
+#endif /* __REALTEK_RTSX_TRANSPORT_H */
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
new file mode 100644
index 0000000..c7c1f54
--- /dev/null
+++ b/drivers/staging/rts5208/sd.c
@@ -0,0 +1,4525 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+
+#include "rtsx.h"
+#include "rtsx_transport.h"
+#include "rtsx_scsi.h"
+#include "rtsx_card.h"
+#include "sd.h"
+
+#define SD_MAX_RETRY_COUNT 3
+
+static u16 REG_SD_CFG1;
+static u16 REG_SD_CFG2;
+static u16 REG_SD_CFG3;
+static u16 REG_SD_STAT1;
+static u16 REG_SD_STAT2;
+static u16 REG_SD_BUS_STAT;
+static u16 REG_SD_PAD_CTL;
+static u16 REG_SD_SAMPLE_POINT_CTL;
+static u16 REG_SD_PUSH_POINT_CTL;
+static u16 REG_SD_CMD0;
+static u16 REG_SD_CMD1;
+static u16 REG_SD_CMD2;
+static u16 REG_SD_CMD3;
+static u16 REG_SD_CMD4;
+static u16 REG_SD_CMD5;
+static u16 REG_SD_BYTE_CNT_L;
+static u16 REG_SD_BYTE_CNT_H;
+static u16 REG_SD_BLOCK_CNT_L;
+static u16 REG_SD_BLOCK_CNT_H;
+static u16 REG_SD_TRANSFER;
+static u16 REG_SD_VPCLK0_CTL;
+static u16 REG_SD_VPCLK1_CTL;
+static u16 REG_SD_DCMPS0_CTL;
+static u16 REG_SD_DCMPS1_CTL;
+
+static inline void sd_set_err_code(struct rtsx_chip *chip, u8 err_code)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ sd_card->err_code |= err_code;
+}
+
+static inline void sd_clr_err_code(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ sd_card->err_code = 0;
+}
+
+static inline int sd_check_err_code(struct rtsx_chip *chip, u8 err_code)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ return sd_card->err_code & err_code;
+}
+
+static void sd_init_reg_addr(struct rtsx_chip *chip)
+{
+ REG_SD_CFG1 = 0xFD31;
+ REG_SD_CFG2 = 0xFD33;
+ REG_SD_CFG3 = 0xFD3E;
+ REG_SD_STAT1 = 0xFD30;
+ REG_SD_STAT2 = 0;
+ REG_SD_BUS_STAT = 0;
+ REG_SD_PAD_CTL = 0;
+ REG_SD_SAMPLE_POINT_CTL = 0;
+ REG_SD_PUSH_POINT_CTL = 0;
+ REG_SD_CMD0 = 0xFD34;
+ REG_SD_CMD1 = 0xFD35;
+ REG_SD_CMD2 = 0xFD36;
+ REG_SD_CMD3 = 0xFD37;
+ REG_SD_CMD4 = 0xFD38;
+ REG_SD_CMD5 = 0xFD5A;
+ REG_SD_BYTE_CNT_L = 0xFD39;
+ REG_SD_BYTE_CNT_H = 0xFD3A;
+ REG_SD_BLOCK_CNT_L = 0xFD3B;
+ REG_SD_BLOCK_CNT_H = 0xFD3C;
+ REG_SD_TRANSFER = 0xFD32;
+ REG_SD_VPCLK0_CTL = 0;
+ REG_SD_VPCLK1_CTL = 0;
+ REG_SD_DCMPS0_CTL = 0;
+ REG_SD_DCMPS1_CTL = 0;
+}
+
+static int sd_check_data0_status(struct rtsx_chip *chip)
+{
+ u8 stat;
+
+ RTSX_READ_REG(chip, REG_SD_STAT1, &stat);
+
+ if (!(stat & SD_DAT0_STATUS)) {
+ sd_set_err_code(chip, SD_BUSY);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
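+/* Send a single SD/MMC command through the ping-pong buffer and read back
+ * the response: R2 responses are fetched as 16 bytes from PPBUF_BASE2,
+ * other response types as 5 bytes from REG_SD_CMD0..REG_SD_CMD4, and R0
+ * (no response) skips the readback entirely. CRC7 errors are retried up to
+ * SD_MAX_RETRY_COUNT times, except for WRITE_MULTIPLE_BLOCK.
+ */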
+static int sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
+ u32 arg, u8 rsp_type, u8 *rsp, int rsp_len)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int timeout = 100;
+ u16 reg_addr;
+ u8 *ptr;
+ int stat_idx = 0;
+ int rty_cnt = 0;
+
+ sd_clr_err_code(chip);
+
+ RTSX_DEBUGP("SD/MMC CMD %d, arg = 0x%08x\n", cmd_idx, arg);
+
+ if (rsp_type == SD_RSP_TYPE_R1b)
+ timeout = 3000;
+
+RTY_SEND_CMD:
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 0x40 | cmd_idx);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, (u8)(arg >> 24));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, (u8)(arg >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, (u8)(arg >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, (u8)arg);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
+ 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
+ SD_TRANSFER_END | SD_STAT_IDLE, SD_TRANSFER_END | SD_STAT_IDLE);
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
+ reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
+
+ stat_idx = 16;
+ } else if (rsp_type != SD_RSP_TYPE_R0) {
+ for (reg_addr = REG_SD_CMD0; reg_addr <= REG_SD_CMD4;
+ reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
+
+ stat_idx = 5;
+ }
+
+ rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_STAT1, 0, 0);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, timeout);
+ if (retval < 0) {
+ u8 val;
+
+ rtsx_read_register(chip, REG_SD_STAT1, &val);
+ RTSX_DEBUGP("SD_STAT1: 0x%x\n", val);
+
+ rtsx_read_register(chip, REG_SD_CFG3, &val);
+ RTSX_DEBUGP("SD_CFG3: 0x%x\n", val);
+
+ if (retval == -ETIMEDOUT) {
+ if (rsp_type & SD_WAIT_BUSY_END) {
+ retval = sd_check_data0_status(chip);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, retval);
+ }
+ } else {
+ sd_set_err_code(chip, SD_TO_ERR);
+ }
+ retval = STATUS_TIMEDOUT;
+ } else {
+ retval = STATUS_FAIL;
+ }
+ rtsx_clear_sd_error(chip);
+
+ TRACE_RET(chip, retval);
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R0)
+ return STATUS_SUCCESS;
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+
+ if ((ptr[0] & 0xC0) != 0) {
+ sd_set_err_code(chip, SD_STS_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (!(rsp_type & SD_NO_CHECK_CRC7)) {
+ if (ptr[stat_idx] & SD_CRC7_ERR) {
+ if (cmd_idx == WRITE_MULTIPLE_BLOCK) {
+ sd_set_err_code(chip, SD_CRC_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (rty_cnt < SD_MAX_RETRY_COUNT) {
+ wait_timeout(20);
+ rty_cnt++;
+ goto RTY_SEND_CMD;
+ } else {
+ sd_set_err_code(chip, SD_CRC_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ if ((rsp_type == SD_RSP_TYPE_R1) || (rsp_type == SD_RSP_TYPE_R1b)) {
+ if ((cmd_idx != SEND_RELATIVE_ADDR) &&
+ (cmd_idx != SEND_IF_COND)) {
+ if (cmd_idx != STOP_TRANSMISSION) {
+ if (ptr[1] & 0x80)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#ifdef SUPPORT_SD_LOCK
+ if (ptr[1] & 0x7D)
+#else
+ if (ptr[1] & 0x7F)
+#endif
+ {
+ RTSX_DEBUGP("ptr[1]: 0x%02x\n", ptr[1]);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (ptr[2] & 0xFF) {
+ RTSX_DEBUGP("ptr[2]: 0x%02x\n", ptr[2]);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (ptr[3] & 0x80) {
+ RTSX_DEBUGP("ptr[3]: 0x%02x\n", ptr[3]);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (ptr[3] & 0x01)
+ sd_card->sd_data_buf_ready = 1;
+ else
+ sd_card->sd_data_buf_ready = 0;
+ }
+ }
+
+ if (rsp && rsp_len)
+ memcpy(rsp, ptr, rsp_len);
+
+ return STATUS_SUCCESS;
+}
+
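+/* Issue an (optional) SD command and read back up to 512 bytes of data
+ * through the ping-pong buffer; buffers larger than 512 bytes are rejected.
+ */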
+static int sd_read_data(struct rtsx_chip *chip,
+ u8 trans_mode, u8 *cmd, int cmd_len, u16 byte_cnt,
+ u16 blk_cnt, u8 bus_width, u8 *buf, int buf_len,
+ int timeout)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+
+ sd_clr_err_code(chip);
+
+ if (!buf)
+ buf_len = 0;
+
+ if (buf_len > 512)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ if (cmd_len) {
+ RTSX_DEBUGP("SD/MMC CMD %d\n", cmd[0] - 0x40);
+ for (i = 0; i < (cmd_len < 6 ? cmd_len : 6); i++)
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0 + i,
+ 0xFF, cmd[i]);
+ }
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
+ (u8)byte_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
+ (u8)(byte_cnt >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
+ (u8)blk_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
+ (u8)(blk_cnt >> 8));
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ if (trans_mode != SD_TM_AUTO_TUNING)
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ trans_mode | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, timeout);
+ if (retval < 0) {
+ if (retval == -ETIMEDOUT) {
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (buf && buf_len) {
+ retval = rtsx_read_ppbuf(chip, buf, buf_len);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
+ u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt, u8 bus_width,
+ u8 *buf, int buf_len, int timeout)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+
+ sd_clr_err_code(chip);
+
+ if (!buf)
+ buf_len = 0;
+
+ if (buf_len > 512) {
+ /* This function can't write more than one page of data */
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (buf && buf_len) {
+ retval = rtsx_write_ppbuf(chip, buf, buf_len);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_init_cmd(chip);
+
+ if (cmd_len) {
+ RTSX_DEBUGP("SD/MMC CMD %d\n", cmd[0] - 0x40);
+ for (i = 0; i < (cmd_len < 6 ? cmd_len : 6); i++) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ REG_SD_CMD0 + i, 0xFF, cmd[i]);
+ }
+ }
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
+ (u8)byte_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
+ (u8)(byte_cnt >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
+ (u8)blk_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
+ (u8)(blk_cnt >> 8));
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ trans_mode | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, timeout);
+ if (retval < 0) {
+ if (retval == -ETIMEDOUT) {
+ sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+ u8 csd_ver, trans_speed;
+ u8 rsp[16];
+
+ for (i = 0; i < 6; i++) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_CSD, sd_card->sd_addr,
+ SD_RSP_TYPE_R2, rsp, 16);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+
+ if (i == 6)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ memcpy(sd_card->raw_csd, rsp + 1, 15);
+
+ RTSX_DEBUGP("CSD Response:\n");
+ RTSX_DUMP(sd_card->raw_csd, 16);
+
+ csd_ver = (rsp[1] & 0xc0) >> 6;
+ RTSX_DEBUGP("csd_ver = %d\n", csd_ver);
+
+ trans_speed = rsp[4];
+ if ((trans_speed & 0x07) == 0x02) {
+ if ((trans_speed & 0xf8) >= 0x30) {
+ if (chip->asic_code)
+ sd_card->sd_clock = 47;
+ else
+ sd_card->sd_clock = CLK_50;
+
+ } else if ((trans_speed & 0xf8) == 0x28) {
+ if (chip->asic_code)
+ sd_card->sd_clock = 39;
+ else
+ sd_card->sd_clock = CLK_40;
+
+ } else if ((trans_speed & 0xf8) == 0x20) {
+ if (chip->asic_code)
+ sd_card->sd_clock = 29;
+ else
+ sd_card->sd_clock = CLK_30;
+
+ } else if ((trans_speed & 0xf8) >= 0x10) {
+ if (chip->asic_code)
+ sd_card->sd_clock = 23;
+ else
+ sd_card->sd_clock = CLK_20;
+
+ } else if ((trans_speed & 0x08) >= 0x08) {
+ if (chip->asic_code)
+ sd_card->sd_clock = 19;
+ else
+ sd_card->sd_clock = CLK_20;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (CHK_MMC_SECTOR_MODE(sd_card)) {
+ sd_card->capacity = 0;
+ } else {
+ if ((!CHK_SD_HCXC(sd_card)) || (csd_ver == 0)) {
+ u8 blk_size, c_size_mult;
+ u16 c_size;
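+ /* Standard-capacity CSD: capacity in 512-byte sectors is
+ * (C_SIZE + 1) * 2^(C_SIZE_MULT + 2) * 2^(READ_BL_LEN - 9),
+ * which is what the shifts below compute.
+ */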
+ blk_size = rsp[6] & 0x0F;
+ c_size = ((u16)(rsp[7] & 0x03) << 10)
+ + ((u16)rsp[8] << 2)
+ + ((u16)(rsp[9] & 0xC0) >> 6);
+ c_size_mult = (u8)((rsp[10] & 0x03) << 1);
+ c_size_mult += (rsp[11] & 0x80) >> 7;
+ sd_card->capacity = (((u32)(c_size + 1)) *
+ (1 << (c_size_mult + 2)))
+ << (blk_size - 9);
+ } else {
+ u32 total_sector = 0;
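+ /* High-capacity (CSD v2.0) card: C_SIZE counts 512 KB units,
+ * so capacity = (C_SIZE + 1) * 1024 sectors.
+ */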
+ total_sector = (((u32)rsp[8] & 0x3f) << 16) |
+ ((u32)rsp[9] << 8) | (u32)rsp[10];
+ sd_card->capacity = (total_sector + 1) << 10;
+ }
+ }
+
+ if (check_wp) {
+ if (rsp[15] & 0x30)
+ chip->card_wp |= SD_CARD;
+
+ RTSX_DEBUGP("CSD WP Status: 0x%x\n", rsp[15]);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_set_sample_push_timing(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ u8 val = 0;
+
+ if ((chip->sd_ctl & SD_PUSH_POINT_CTL_MASK) == SD_PUSH_POINT_DELAY)
+ val |= 0x10;
+
+ if ((chip->sd_ctl & SD_SAMPLE_POINT_CTL_MASK) == SD_SAMPLE_POINT_AUTO) {
+ if (chip->asic_code) {
+ if (CHK_SD_HS(sd_card) || CHK_MMC_52M(sd_card)) {
+ if (val & 0x10)
+ val |= 0x04;
+ else
+ val |= 0x08;
+ }
+ } else {
+ if (val & 0x10)
+ val |= 0x04;
+ else
+ val |= 0x08;
+ }
+ } else if ((chip->sd_ctl & SD_SAMPLE_POINT_CTL_MASK) ==
+ SD_SAMPLE_POINT_DELAY) {
+ if (val & 0x10)
+ val |= 0x04;
+ else
+ val |= 0x08;
+ }
+
+ RTSX_WRITE_REG(chip, REG_SD_CFG1, 0x1C, val);
+
+ return STATUS_SUCCESS;
+}
+
+static void sd_choose_proper_clock(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ if (CHK_SD_SDR104(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->asic_sd_sdr104_clk;
+ else
+ sd_card->sd_clock = chip->fpga_sd_sdr104_clk;
+
+ } else if (CHK_SD_DDR50(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->asic_sd_ddr50_clk;
+ else
+ sd_card->sd_clock = chip->fpga_sd_ddr50_clk;
+
+ } else if (CHK_SD_SDR50(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->asic_sd_sdr50_clk;
+ else
+ sd_card->sd_clock = chip->fpga_sd_sdr50_clk;
+
+ } else if (CHK_SD_HS(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->asic_sd_hs_clk;
+ else
+ sd_card->sd_clock = chip->fpga_sd_hs_clk;
+
+ } else if (CHK_MMC_52M(sd_card) || CHK_MMC_DDR52(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->asic_mmc_52m_clk;
+ else
+ sd_card->sd_clock = chip->fpga_mmc_52m_clk;
+
+ } else if (CHK_MMC_26M(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = 48;
+ else
+ sd_card->sd_clock = CLK_50;
+ }
+}
+
+static int sd_set_clock_divider(struct rtsx_chip *chip, u8 clk_div)
+{
+ u8 mask = 0, val = 0;
+
+ mask = 0x60;
+ if (clk_div == SD_CLK_DIVIDE_0)
+ val = 0x00;
+ else if (clk_div == SD_CLK_DIVIDE_128)
+ val = 0x40;
+ else if (clk_div == SD_CLK_DIVIDE_256)
+ val = 0x20;
+
+ RTSX_WRITE_REG(chip, REG_SD_CFG1, mask, val);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_set_init_para(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ retval = sd_set_sample_push_timing(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ sd_choose_proper_clock(chip);
+
+ retval = switch_clock(chip, sd_card->sd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int sd_select_card(struct rtsx_chip *chip, int select)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd_idx, cmd_type;
+ u32 addr;
+
+ if (select) {
+ cmd_idx = SELECT_CARD;
+ cmd_type = SD_RSP_TYPE_R1;
+ addr = sd_card->sd_addr;
+ } else {
+ cmd_idx = DESELECT_CARD;
+ cmd_type = SD_RSP_TYPE_R0;
+ addr = 0;
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, cmd_idx, addr, cmd_type, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef SUPPORT_SD_LOCK
+static int sd_update_lock_status(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 rsp[5];
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, rsp, 5);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (rsp[1] & 0x02)
+ sd_card->sd_lock_status |= SD_LOCKED;
+ else
+ sd_card->sd_lock_status &= ~SD_LOCKED;
+
+ RTSX_DEBUGP("sd_card->sd_lock_status = 0x%x\n",
+ sd_card->sd_lock_status);
+
+ if (rsp[1] & 0x01)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+static int sd_wait_state_data_ready(struct rtsx_chip *chip, u8 state,
+ u8 data_ready, int polling_cnt)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval, i;
+ u8 rsp[5];
+
+ for (i = 0; i < polling_cnt; i++) {
+ retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr, SD_RSP_TYPE_R1, rsp,
+ 5);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (((rsp[3] & 0x1E) == state) &&
+ ((rsp[3] & 0x01) == data_ready))
+ return STATUS_SUCCESS;
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
+{
+ int retval;
+
+ if (voltage == SD_IO_3V3) {
+ if (chip->asic_code) {
+ retval = rtsx_write_phy_register(chip, 0x08,
+ 0x4FC0 |
+ chip->phy_voltage);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, SD_PAD_CTL, SD_IO_USING_1V8, 0);
+ }
+ } else if (voltage == SD_IO_1V8) {
+ if (chip->asic_code) {
+ retval = rtsx_write_phy_register(chip, 0x08,
+ 0x4C40 |
+ chip->phy_voltage);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, SD_PAD_CTL, SD_IO_USING_1V8,
+ SD_IO_USING_1V8);
+ }
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_voltage_switch(struct rtsx_chip *chip)
+{
+ int retval;
+ u8 stat;
+
+ RTSX_WRITE_REG(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP,
+ SD_CLK_TOGGLE_EN);
+
+ retval = sd_send_cmd_get_rsp(chip, VOLTAGE_SWITCH, 0, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ udelay(chip->sd_voltage_switch_delay);
+
+ RTSX_READ_REG(chip, SD_BUS_STAT, &stat);
+ if (stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS)) {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_WRITE_REG(chip, SD_BUS_STAT, 0xFF, SD_CLK_FORCE_STOP);
+ retval = sd_change_bank_voltage(chip, SD_IO_1V8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(50);
+
+ RTSX_WRITE_REG(chip, SD_BUS_STAT, 0xFF, SD_CLK_TOGGLE_EN);
+ wait_timeout(10);
+
+ RTSX_READ_REG(chip, SD_BUS_STAT, &stat);
+ if ((stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS)) !=
+ (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS)) {
+ RTSX_DEBUGP("SD_BUS_STAT: 0x%x\n", stat);
+ rtsx_write_register(chip, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
+ rtsx_write_register(chip, CARD_CLK_EN, 0xFF, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_WRITE_REG(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP,
+ 0);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_reset_dcm(struct rtsx_chip *chip, u8 tune_dir)
+{
+ if (tune_dir == TUNE_RX) {
+ RTSX_WRITE_REG(chip, DCM_DRP_CTL, 0xFF, DCM_RESET | DCM_RX);
+ RTSX_WRITE_REG(chip, DCM_DRP_CTL, 0xFF, DCM_RX);
+ } else {
+ RTSX_WRITE_REG(chip, DCM_DRP_CTL, 0xFF, DCM_RESET | DCM_TX);
+ RTSX_WRITE_REG(chip, DCM_DRP_CTL, 0xFF, DCM_TX);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ u16 SD_VP_CTL, SD_DCMPS_CTL;
+ u8 val;
+ int retval;
+ int ddr_rx = 0;
+
+ RTSX_DEBUGP("sd_change_phase (sample_point = %d, tune_dir = %d)\n",
+ sample_point, tune_dir);
+
+ if (tune_dir == TUNE_RX) {
+ SD_VP_CTL = SD_VPRX_CTL;
+ SD_DCMPS_CTL = SD_DCMPS_RX_CTL;
+ if (CHK_SD_DDR50(sd_card))
+ ddr_rx = 1;
+ } else {
+ SD_VP_CTL = SD_VPTX_CTL;
+ SD_DCMPS_CTL = SD_DCMPS_TX_CTL;
+ }
+
+ if (chip->asic_code) {
+ RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
+ RTSX_WRITE_REG(chip, SD_VP_CTL, 0x1F, sample_point);
+ RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET,
+ PHASE_NOT_RESET);
+ RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, 0);
+ } else {
+#ifdef CONFIG_RTS5208_DEBUG
+ rtsx_read_register(chip, SD_VP_CTL, &val);
+ RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val);
+ rtsx_read_register(chip, SD_DCMPS_CTL, &val);
+ RTSX_DEBUGP("SD_DCMPS_CTL: 0x%x\n", val);
+#endif
+
+ if (ddr_rx) {
+ RTSX_WRITE_REG(chip, SD_VP_CTL, PHASE_CHANGE,
+ PHASE_CHANGE);
+ udelay(50);
+ RTSX_WRITE_REG(chip, SD_VP_CTL, 0xFF,
+ PHASE_CHANGE | PHASE_NOT_RESET | sample_point);
+ } else {
+ RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
+ udelay(50);
+ RTSX_WRITE_REG(chip, SD_VP_CTL, 0xFF,
+ PHASE_NOT_RESET | sample_point);
+ }
+ udelay(100);
+
+ rtsx_init_cmd(chip);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SD_DCMPS_CTL, DCMPS_CHANGE,
+ DCMPS_CHANGE);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SD_DCMPS_CTL,
+ DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE);
+ retval = rtsx_send_cmd(chip, SD_CARD, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, Fail);
+
+ val = *rtsx_get_cmd_data(chip);
+ if (val & DCMPS_ERROR)
+ TRACE_GOTO(chip, Fail);
+
+ if ((val & DCMPS_CURRENT_PHASE) != sample_point)
+ TRACE_GOTO(chip, Fail);
+
+ RTSX_WRITE_REG(chip, SD_DCMPS_CTL, DCMPS_CHANGE, 0);
+ if (ddr_rx)
+ RTSX_WRITE_REG(chip, SD_VP_CTL, PHASE_CHANGE, 0);
+ else
+ RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, 0);
+
+ udelay(50);
+ }
+
+ RTSX_WRITE_REG(chip, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
+
+ return STATUS_SUCCESS;
+
+Fail:
+#ifdef CONFIG_RTS5208_DEBUG
+ rtsx_read_register(chip, SD_VP_CTL, &val);
+ RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val);
+ rtsx_read_register(chip, SD_DCMPS_CTL, &val);
+ RTSX_DEBUGP("SD_DCMPS_CTL: 0x%x\n", val);
+#endif
+
+ rtsx_write_register(chip, SD_DCMPS_CTL, DCMPS_CHANGE, 0);
+ rtsx_write_register(chip, SD_VP_CTL, PHASE_CHANGE, 0);
+ wait_timeout(10);
+ sd_reset_dcm(chip, tune_dir);
+ return STATUS_FAIL;
+}
+
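+/*
+ * Read the 8-byte SCR (ACMD51) and make sure the card implements at least
+ * the SD 1.1 command set (SD_SPEC != 0) before CMD6 switching is attempted.
+ */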
+static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5], buf[8];
+
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ cmd[0] = 0x40 | SEND_SCR;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 8, 1, bus_width,
+ buf, 8, 250);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ memcpy(sd_card->raw_scr, buf, 8);
+
+ if ((buf[0] & 0x0F) == 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
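+/*
+ * Parse the 64-byte SWITCH (CMD6) status block for one function group:
+ * check that the requested function is advertised as supported and was
+ * actually selected (group 1 only), and, for status structure version 1,
+ * that its busy bit is clear.
+ */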
+static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
+ u8 func_to_switch, u8 *buf, int buf_len)
+{
+ u8 support_mask = 0, query_switch = 0, switch_busy = 0;
+ int support_offset = 0, query_switch_offset = 0, check_busy_offset = 0;
+
+ if (func_group == SD_FUNC_GROUP_1) {
+ support_offset = FUNCTION_GROUP1_SUPPORT_OFFSET;
+ query_switch_offset = FUNCTION_GROUP1_QUERY_SWITCH_OFFSET;
+ check_busy_offset = FUNCTION_GROUP1_CHECK_BUSY_OFFSET;
+
+ switch (func_to_switch) {
+ case HS_SUPPORT:
+ support_mask = HS_SUPPORT_MASK;
+ query_switch = HS_QUERY_SWITCH_OK;
+ switch_busy = HS_SWITCH_BUSY;
+ break;
+
+ case SDR50_SUPPORT:
+ support_mask = SDR50_SUPPORT_MASK;
+ query_switch = SDR50_QUERY_SWITCH_OK;
+ switch_busy = SDR50_SWITCH_BUSY;
+ break;
+
+ case SDR104_SUPPORT:
+ support_mask = SDR104_SUPPORT_MASK;
+ query_switch = SDR104_QUERY_SWITCH_OK;
+ switch_busy = SDR104_SWITCH_BUSY;
+ break;
+
+ case DDR50_SUPPORT:
+ support_mask = DDR50_SUPPORT_MASK;
+ query_switch = DDR50_QUERY_SWITCH_OK;
+ switch_busy = DDR50_SWITCH_BUSY;
+ break;
+
+ default:
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else if (func_group == SD_FUNC_GROUP_3) {
+ support_offset = FUNCTION_GROUP3_SUPPORT_OFFSET;
+ query_switch_offset = FUNCTION_GROUP3_QUERY_SWITCH_OFFSET;
+ check_busy_offset = FUNCTION_GROUP3_CHECK_BUSY_OFFSET;
+
+ switch (func_to_switch) {
+ case DRIVING_TYPE_A:
+ support_mask = DRIVING_TYPE_A_MASK;
+ query_switch = TYPE_A_QUERY_SWITCH_OK;
+ switch_busy = TYPE_A_SWITCH_BUSY;
+ break;
+
+ case DRIVING_TYPE_C:
+ support_mask = DRIVING_TYPE_C_MASK;
+ query_switch = TYPE_C_QUERY_SWITCH_OK;
+ switch_busy = TYPE_C_SWITCH_BUSY;
+ break;
+
+ case DRIVING_TYPE_D:
+ support_mask = DRIVING_TYPE_D_MASK;
+ query_switch = TYPE_D_QUERY_SWITCH_OK;
+ switch_busy = TYPE_D_SWITCH_BUSY;
+ break;
+
+ default:
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else if (func_group == SD_FUNC_GROUP_4) {
+ support_offset = FUNCTION_GROUP4_SUPPORT_OFFSET;
+ query_switch_offset = FUNCTION_GROUP4_QUERY_SWITCH_OFFSET;
+ check_busy_offset = FUNCTION_GROUP4_CHECK_BUSY_OFFSET;
+
+ switch (func_to_switch) {
+ case CURRENT_LIMIT_400:
+ support_mask = CURRENT_LIMIT_400_MASK;
+ query_switch = CURRENT_LIMIT_400_QUERY_SWITCH_OK;
+ switch_busy = CURRENT_LIMIT_400_SWITCH_BUSY;
+ break;
+
+ case CURRENT_LIMIT_600:
+ support_mask = CURRENT_LIMIT_600_MASK;
+ query_switch = CURRENT_LIMIT_600_QUERY_SWITCH_OK;
+ switch_busy = CURRENT_LIMIT_600_SWITCH_BUSY;
+ break;
+
+ case CURRENT_LIMIT_800:
+ support_mask = CURRENT_LIMIT_800_MASK;
+ query_switch = CURRENT_LIMIT_800_QUERY_SWITCH_OK;
+ switch_busy = CURRENT_LIMIT_800_SWITCH_BUSY;
+ break;
+
+ default:
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (func_group == SD_FUNC_GROUP_1) {
+ if (!(buf[support_offset] & support_mask) ||
+ ((buf[query_switch_offset] & 0x0F) != query_switch)) {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ /* Check 'Busy Status' */
+ if ((buf[DATA_STRUCTURE_VER_OFFSET] == 0x01) &&
+ ((buf[check_busy_offset] & switch_busy) == switch_busy)) {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
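+/*
+ * Issue CMD6 in check or switch mode.  With NO_ARGUMENT the call only
+ * records the per-group support masks; otherwise the maximum current
+ * consumption reported in the first two bytes is validated and the
+ * over-current threshold is raised for limits above 400mA.
+ */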
+static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode,
+ u8 func_group, u8 func_to_switch, u8 bus_width)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5], buf[64];
+
+ RTSX_DEBUGP("sd_check_switch_mode (mode = %d, func_group = %d, func_to_switch = %d)\n",
+ mode, func_group, func_to_switch);
+
+ cmd[0] = 0x40 | SWITCH;
+ cmd[1] = mode;
+
+ if (func_group == SD_FUNC_GROUP_1) {
+ cmd[2] = 0xFF;
+ cmd[3] = 0xFF;
+ cmd[4] = 0xF0 + func_to_switch;
+ } else if (func_group == SD_FUNC_GROUP_3) {
+ cmd[2] = 0xFF;
+ cmd[3] = 0xF0 + func_to_switch;
+ cmd[4] = 0xFF;
+ } else if (func_group == SD_FUNC_GROUP_4) {
+ cmd[2] = 0xFF;
+ cmd[3] = 0x0F + (func_to_switch << 4);
+ cmd[4] = 0xFF;
+ } else {
+ cmd[1] = SD_CHECK_MODE;
+ cmd[2] = 0xFF;
+ cmd[3] = 0xFF;
+ cmd[4] = 0xFF;
+ }
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, bus_width,
+ buf, 64, 250);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_DUMP(buf, 64);
+
+ if (func_group == NO_ARGUMENT) {
+ sd_card->func_group1_mask = buf[0x0D];
+ sd_card->func_group2_mask = buf[0x0B];
+ sd_card->func_group3_mask = buf[0x09];
+ sd_card->func_group4_mask = buf[0x07];
+
+ RTSX_DEBUGP("func_group1_mask = 0x%02x\n", buf[0x0D]);
+ RTSX_DEBUGP("func_group2_mask = 0x%02x\n", buf[0x0B]);
+ RTSX_DEBUGP("func_group3_mask = 0x%02x\n", buf[0x09]);
+ RTSX_DEBUGP("func_group4_mask = 0x%02x\n", buf[0x07]);
+ } else {
+ /* Maximum current consumption, check whether current is
+ * acceptable; bit[511:496] = 0x0000 means some error happened.
+ */
+ u16 cc = ((u16)buf[0] << 8) | buf[1];
+ RTSX_DEBUGP("Maximum current consumption: %dmA\n", cc);
+ if ((cc == 0) || (cc > 800))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_query_switch_result(chip, func_group,
+ func_to_switch, buf, 64);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((cc > 400) || (func_to_switch > CURRENT_LIMIT_400)) {
+ RTSX_WRITE_REG(chip, OCPPARA2, SD_OCP_THD_MASK,
+ chip->sd_800mA_ocp_thd);
+ RTSX_WRITE_REG(chip, CARD_PWR_CTL, PMOS_STRG_MASK,
+ PMOS_STRG_800mA);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
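+/*
+ * Fall back to the next lower access mode (group 1) or current limit
+ * (group 4) after a failed switch attempt.
+ */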
+static u8 downgrade_switch_mode(u8 func_group, u8 func_to_switch)
+{
+ if (func_group == SD_FUNC_GROUP_1) {
+ if (func_to_switch > HS_SUPPORT)
+ func_to_switch--;
+
+ } else if (func_group == SD_FUNC_GROUP_4) {
+ if (func_to_switch > CURRENT_LIMIT_200)
+ func_to_switch--;
+ }
+
+ return func_to_switch;
+}
+
+static int sd_check_switch(struct rtsx_chip *chip,
+ u8 func_group, u8 func_to_switch, u8 bus_width)
+{
+ int retval;
+ int i;
+ int switch_good = 0;
+
+ for (i = 0; i < 3; i++) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_check_switch_mode(chip, SD_CHECK_MODE, func_group,
+ func_to_switch, bus_width);
+ if (retval == STATUS_SUCCESS) {
+ u8 stat;
+
+ retval = sd_check_switch_mode(chip, SD_SWITCH_MODE,
+ func_group, func_to_switch, bus_width);
+ if (retval == STATUS_SUCCESS) {
+ switch_good = 1;
+ break;
+ }
+
+ RTSX_READ_REG(chip, SD_STAT1, &stat);
+ if (stat & SD_CRC16_ERR) {
+ RTSX_DEBUGP("SD CRC16 error when switching mode\n");
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ func_to_switch = downgrade_switch_mode(func_group,
+ func_to_switch);
+
+ wait_timeout(20);
+ }
+
+ if (!switch_good)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+ u8 func_to_switch = 0;
+
+ /* Get supported functions */
+ retval = sd_check_switch_mode(chip, SD_CHECK_MODE,
+ NO_ARGUMENT, NO_ARGUMENT, bus_width);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ sd_card->func_group1_mask &= ~(sd_card->sd_switch_fail);
+
+ /* Function Group 1: Access Mode */
+ for (i = 0; i < 4; i++) {
+ switch ((u8)(chip->sd_speed_prior >> (i*8))) {
+ case SDR104_SUPPORT:
+ if ((sd_card->func_group1_mask & SDR104_SUPPORT_MASK)
+ && chip->sdr104_en) {
+ func_to_switch = SDR104_SUPPORT;
+ }
+ break;
+
+ case DDR50_SUPPORT:
+ if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK)
+ && chip->ddr50_en) {
+ func_to_switch = DDR50_SUPPORT;
+ }
+ break;
+
+ case SDR50_SUPPORT:
+ if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK)
+ && chip->sdr50_en) {
+ func_to_switch = SDR50_SUPPORT;
+ }
+ break;
+
+ case HS_SUPPORT:
+ if (sd_card->func_group1_mask & HS_SUPPORT_MASK)
+ func_to_switch = HS_SUPPORT;
+
+ break;
+
+ default:
+ continue;
+ }
+
+ if (func_to_switch)
+ break;
+ }
+ RTSX_DEBUGP("SD_FUNC_GROUP_1: func_to_switch = 0x%02x", func_to_switch);
+
+#ifdef SUPPORT_SD_LOCK
+ if ((sd_card->sd_lock_status & SD_SDR_RST)
+ && (DDR50_SUPPORT == func_to_switch)
+ && (sd_card->func_group1_mask & SDR50_SUPPORT_MASK)) {
+ func_to_switch = SDR50_SUPPORT;
+ RTSX_DEBUGP("Using SDR50 instead of DDR50 for SD Lock\n");
+ }
+#endif
+
+ if (func_to_switch) {
+ retval = sd_check_switch(chip, SD_FUNC_GROUP_1, func_to_switch,
+ bus_width);
+ if (retval != STATUS_SUCCESS) {
+ if (func_to_switch == SDR104_SUPPORT) {
+ sd_card->sd_switch_fail = SDR104_SUPPORT_MASK;
+ } else if (func_to_switch == DDR50_SUPPORT) {
+ sd_card->sd_switch_fail = SDR104_SUPPORT_MASK |
+ DDR50_SUPPORT_MASK;
+ } else if (func_to_switch == SDR50_SUPPORT) {
+ sd_card->sd_switch_fail = SDR104_SUPPORT_MASK |
+ DDR50_SUPPORT_MASK | SDR50_SUPPORT_MASK;
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (func_to_switch == SDR104_SUPPORT)
+ SET_SD_SDR104(sd_card);
+ else if (func_to_switch == DDR50_SUPPORT)
+ SET_SD_DDR50(sd_card);
+ else if (func_to_switch == SDR50_SUPPORT)
+ SET_SD_SDR50(sd_card);
+ else
+ SET_SD_HS(sd_card);
+ }
+
+ if (CHK_SD_DDR50(sd_card)) {
+ RTSX_WRITE_REG(chip, SD_PUSH_POINT_CTL, 0x06, 0x04);
+ retval = sd_set_sample_push_timing(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (!func_to_switch || (func_to_switch == HS_SUPPORT)) {
+ /* Do not try to switch the current limit if the card doesn't
+ * support UHS mode or UHS support is disabled on the host side
+ */
+ return STATUS_SUCCESS;
+ }
+
+ /* Function Group 4: Current Limit */
+ func_to_switch = 0xFF;
+
+ for (i = 0; i < 4; i++) {
+ switch ((u8)(chip->sd_current_prior >> (i*8))) {
+ case CURRENT_LIMIT_800:
+ if (sd_card->func_group4_mask & CURRENT_LIMIT_800_MASK)
+ func_to_switch = CURRENT_LIMIT_800;
+
+ break;
+
+ case CURRENT_LIMIT_600:
+ if (sd_card->func_group4_mask & CURRENT_LIMIT_600_MASK)
+ func_to_switch = CURRENT_LIMIT_600;
+
+ break;
+
+ case CURRENT_LIMIT_400:
+ if (sd_card->func_group4_mask & CURRENT_LIMIT_400_MASK)
+ func_to_switch = CURRENT_LIMIT_400;
+
+ break;
+
+ case CURRENT_LIMIT_200:
+ if (sd_card->func_group4_mask & CURRENT_LIMIT_200_MASK)
+ func_to_switch = CURRENT_LIMIT_200;
+
+ break;
+
+ default:
+ continue;
+ }
+
+ if (func_to_switch != 0xFF)
+ break;
+ }
+
+ RTSX_DEBUGP("SD_FUNC_GROUP_4: func_to_switch = 0x%02x", func_to_switch);
+
+ if (func_to_switch <= CURRENT_LIMIT_800) {
+ retval = sd_check_switch(chip, SD_FUNC_GROUP_4, func_to_switch,
+ bus_width);
+ if (retval != STATUS_SUCCESS) {
+ if (sd_check_err_code(chip, SD_NO_CARD))
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ RTSX_DEBUGP("Switch current limit finished! (%d)\n", retval);
+ }
+
+ if (CHK_SD_DDR50(sd_card))
+ RTSX_WRITE_REG(chip, SD_PUSH_POINT_CTL, 0x06, 0);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_wait_data_idle(struct rtsx_chip *chip)
+{
+ int retval = STATUS_TIMEDOUT;
+ int i;
+ u8 val = 0;
+
+ for (i = 0; i < 100; i++) {
+ RTSX_READ_REG(chip, SD_DATA_STATE, &val);
+ if (val & SD_DATA_IDLE) {
+ retval = STATUS_SUCCESS;
+ break;
+ }
+ udelay(100);
+ }
+ RTSX_DEBUGP("SD_DATA_STATE: 0x%02x\n", val);
+
+ return retval;
+}
+
+static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
+{
+ int retval;
+ u8 cmd[5];
+
+ retval = sd_change_phase(chip, sample_point, TUNE_RX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ cmd[0] = 0x40 | SEND_TUNING_PATTERN;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_read_data(chip, SD_TM_AUTO_TUNING,
+ cmd, 5, 0x40, 1, SD_BUS_WIDTH_4, NULL, 0, 100);
+ if (retval != STATUS_SUCCESS) {
+ (void)sd_wait_data_idle(chip);
+
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5];
+
+ retval = sd_change_phase(chip, sample_point, TUNE_RX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("sd ddr tuning rx\n");
+
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ cmd[0] = 0x40 | SD_STATUS;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ,
+ cmd, 5, 64, 1, SD_BUS_WIDTH_4, NULL, 0, 100);
+ if (retval != STATUS_SUCCESS) {
+ (void)sd_wait_data_idle(chip);
+
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5], bus_width;
+
+ if (CHK_MMC_8BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_8;
+ else if (CHK_MMC_4BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_4;
+ else
+ bus_width = SD_BUS_WIDTH_1;
+
+ retval = sd_change_phase(chip, sample_point, TUNE_RX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("mmc ddr tuning rx\n");
+
+ cmd[0] = 0x40 | SEND_EXT_CSD;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ,
+ cmd, 5, 0x200, 1, bus_width, NULL, 0, 100);
+ if (retval != STATUS_SUCCESS) {
+ (void)sd_wait_data_idle(chip);
+
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ retval = sd_change_phase(chip, sample_point, TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
+ SD_RSP_80CLK_TIMEOUT_EN);
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS) {
+ if (sd_check_err_code(chip, SD_RSP_TIMEOUT)) {
+ rtsx_write_register(chip, SD_CFG3,
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5], bus_width;
+
+ retval = sd_change_phase(chip, sample_point, TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_SD(sd_card)) {
+ bus_width = SD_BUS_WIDTH_4;
+ } else {
+ if (CHK_MMC_8BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_8;
+ else if (CHK_MMC_4BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_4;
+ else
+ bus_width = SD_BUS_WIDTH_1;
+ }
+
+ retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
+ SD_RSP_80CLK_TIMEOUT_EN);
+
+ cmd[0] = 0x40 | PROGRAM_CSD;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2,
+ cmd, 5, 16, 1, bus_width, sd_card->raw_csd, 16, 100);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_sd_error(chip);
+ rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
+
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
+
+ return STATUS_SUCCESS;
+}
+
+static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
+ u8 tune_dir)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct timing_phase_path path[MAX_PHASE + 1];
+ int i, j, cont_path_cnt;
+ int new_block, max_len, final_path_idx;
+ u8 final_phase = 0xFF;
+
+ if (phase_map == 0xFFFFFFFF) {
+ if (tune_dir == TUNE_RX)
+ final_phase = (u8)chip->sd_default_rx_phase;
+ else
+ final_phase = (u8)chip->sd_default_tx_phase;
+
+ goto Search_Finish;
+ }
+
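+ /*
+  * Collect contiguous runs of working phase points, record each run's
+  * length and midpoint, merge a run that wraps past MAX_PHASE back to
+  * phase 0, and pick the midpoint of the longest run; for SDR50/SDR104
+  * TX tuning the chosen phase is shifted away from the plain midpoint
+  * when the window is wide.
+  */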
+ cont_path_cnt = 0;
+ new_block = 1;
+ j = 0;
+ for (i = 0; i < MAX_PHASE + 1; i++) {
+ if (phase_map & (1 << i)) {
+ if (new_block) {
+ new_block = 0;
+ j = cont_path_cnt++;
+ path[j].start = i;
+ path[j].end = i;
+ } else {
+ path[j].end = i;
+ }
+ } else {
+ new_block = 1;
+ if (cont_path_cnt) {
+ int idx = cont_path_cnt - 1;
+ path[idx].len = path[idx].end -
+ path[idx].start + 1;
+ path[idx].mid = path[idx].start +
+ path[idx].len / 2;
+ }
+ }
+ }
+
+ if (cont_path_cnt == 0) {
+ RTSX_DEBUGP("No continuous phase path\n");
+ goto Search_Finish;
+ } else {
+ int idx = cont_path_cnt - 1;
+ path[idx].len = path[idx].end - path[idx].start + 1;
+ path[idx].mid = path[idx].start + path[idx].len / 2;
+ }
+
+ if ((path[0].start == 0) &&
+ (path[cont_path_cnt - 1].end == MAX_PHASE)) {
+ path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1;
+ path[0].len += path[cont_path_cnt - 1].len;
+ path[0].mid = path[0].start + path[0].len / 2;
+ if (path[0].mid < 0)
+ path[0].mid += MAX_PHASE + 1;
+
+ cont_path_cnt--;
+ }
+
+ max_len = 0;
+ final_phase = 0;
+ final_path_idx = 0;
+ for (i = 0; i < cont_path_cnt; i++) {
+ if (path[i].len > max_len) {
+ max_len = path[i].len;
+ final_phase = (u8)path[i].mid;
+ final_path_idx = i;
+ }
+
+ RTSX_DEBUGP("path[%d].start = %d\n", i, path[i].start);
+ RTSX_DEBUGP("path[%d].end = %d\n", i, path[i].end);
+ RTSX_DEBUGP("path[%d].len = %d\n", i, path[i].len);
+ RTSX_DEBUGP("path[%d].mid = %d\n", i, path[i].mid);
+ RTSX_DEBUGP("\n");
+ }
+
+ if (tune_dir == TUNE_TX) {
+ if (CHK_SD_SDR104(sd_card)) {
+ if (max_len > 15) {
+ int temp_mid = (max_len - 16) / 2;
+ int temp_final_phase =
+ path[final_path_idx].end -
+ (max_len - (6 + temp_mid));
+
+ if (temp_final_phase < 0)
+ final_phase = (u8)(temp_final_phase +
+ MAX_PHASE + 1);
+ else
+ final_phase = (u8)temp_final_phase;
+ }
+ } else if (CHK_SD_SDR50(sd_card)) {
+ if (max_len > 12) {
+ int temp_mid = (max_len - 13) / 2;
+ int temp_final_phase =
+ path[final_path_idx].end -
+ (max_len - (3 + temp_mid));
+
+ if (temp_final_phase < 0)
+ final_phase = (u8)(temp_final_phase +
+ MAX_PHASE + 1);
+ else
+ final_phase = (u8)temp_final_phase;
+ }
+ }
+ }
+
+Search_Finish:
+ RTSX_DEBUGP("Final chosen phase: %d\n", final_phase);
+ return final_phase;
+}
+
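+/*
+ * RX tuning: run the tuning command at every sample phase in three passes,
+ * AND the three bitmaps so only consistently good phases survive, then
+ * program the phase picked by sd_search_final_phase().
+ */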
+static int sd_tuning_rx(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i, j;
+ u32 raw_phase_map[3], phase_map;
+ u8 final_phase;
+ int (*tuning_cmd)(struct rtsx_chip *chip, u8 sample_point);
+
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_DDR50(sd_card))
+ tuning_cmd = sd_ddr_tuning_rx_cmd;
+ else
+ tuning_cmd = sd_sdr_tuning_rx_cmd;
+
+ } else {
+ if (CHK_MMC_DDR52(sd_card))
+ tuning_cmd = mmc_ddr_tunning_rx_cmd;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ for (i = 0; i < 3; i++) {
+ raw_phase_map[i] = 0;
+ for (j = MAX_PHASE; j >= 0; j--) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = tuning_cmd(chip, (u8)j);
+ if (retval == STATUS_SUCCESS)
+ raw_phase_map[i] |= 1 << j;
+ }
+ }
+
+ phase_map = raw_phase_map[0] & raw_phase_map[1] & raw_phase_map[2];
+ for (i = 0; i < 3; i++)
+ RTSX_DEBUGP("RX raw_phase_map[%d] = 0x%08x\n", i,
+ raw_phase_map[i]);
+
+ RTSX_DEBUGP("RX phase_map = 0x%08x\n", phase_map);
+
+ final_phase = sd_search_final_phase(chip, phase_map, TUNE_RX);
+ if (final_phase == 0xFF)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_change_phase(chip, final_phase, TUNE_RX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
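+/*
+ * Coarse TX phase scan for DDR: with the 80-clock response timeout enabled,
+ * try SEND_STATUS at every push phase; a phase counts as usable when the
+ * command succeeds or fails with anything other than a response timeout.
+ */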
+static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+ u32 phase_map;
+ u8 final_phase;
+
+ RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
+ SD_RSP_80CLK_TIMEOUT_EN);
+
+ phase_map = 0;
+ for (i = MAX_PHASE; i >= 0; i--) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ rtsx_write_register(chip, SD_CFG3,
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_change_phase(chip, (u8)i, TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ continue;
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr, SD_RSP_TYPE_R1, NULL,
+ 0);
+ if ((retval == STATUS_SUCCESS) ||
+ !sd_check_err_code(chip, SD_RSP_TIMEOUT))
+ phase_map |= 1 << i;
+ }
+
+ RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
+
+ RTSX_DEBUGP("DDR TX pre tune phase_map = 0x%08x\n", phase_map);
+
+ final_phase = sd_search_final_phase(chip, phase_map, TUNE_TX);
+ if (final_phase == 0xFF)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_change_phase(chip, final_phase, TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("DDR TX pre tune phase: %d\n", (int)final_phase);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_tuning_tx(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i, j;
+ u32 raw_phase_map[3], phase_map;
+ u8 final_phase;
+ int (*tuning_cmd)(struct rtsx_chip *chip, u8 sample_point);
+
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_DDR50(sd_card))
+ tuning_cmd = sd_ddr_tuning_tx_cmd;
+ else
+ tuning_cmd = sd_sdr_tuning_tx_cmd;
+
+ } else {
+ if (CHK_MMC_DDR52(sd_card))
+ tuning_cmd = sd_ddr_tuning_tx_cmd;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ for (i = 0; i < 3; i++) {
+ raw_phase_map[i] = 0;
+ for (j = MAX_PHASE; j >= 0; j--) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ rtsx_write_register(chip, SD_CFG3,
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = tuning_cmd(chip, (u8)j);
+ if (retval == STATUS_SUCCESS)
+ raw_phase_map[i] |= 1 << j;
+ }
+ }
+
+ phase_map = raw_phase_map[0] & raw_phase_map[1] & raw_phase_map[2];
+ for (i = 0; i < 3; i++)
+ RTSX_DEBUGP("TX raw_phase_map[%d] = 0x%08x\n",
+ i, raw_phase_map[i]);
+
+ RTSX_DEBUGP("TX phase_map = 0x%08x\n", phase_map);
+
+ final_phase = sd_search_final_phase(chip, phase_map, TUNE_TX);
+ if (final_phase == 0xFF)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_change_phase(chip, final_phase, TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_sdr_tuning(struct rtsx_chip *chip)
+{
+ int retval;
+
+ retval = sd_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_tuning_rx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_ddr_tuning(struct rtsx_chip *chip)
+{
+ int retval;
+
+ if (!(chip->sd_ctl & SD_DDR_TX_PHASE_SET_BY_USER)) {
+ retval = sd_ddr_pre_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ retval = sd_change_phase(chip, (u8)chip->sd_ddr_tx_phase,
+ TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_tuning_rx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!(chip->sd_ctl & SD_DDR_TX_PHASE_SET_BY_USER)) {
+ retval = sd_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int mmc_ddr_tuning(struct rtsx_chip *chip)
+{
+ int retval;
+
+ if (!(chip->sd_ctl & MMC_DDR_TX_PHASE_SET_BY_USER)) {
+ retval = sd_ddr_pre_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ retval = sd_change_phase(chip, (u8)chip->mmc_ddr_tx_phase,
+ TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_tuning_rx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!(chip->sd_ctl & MMC_DDR_TX_PHASE_SET_BY_USER)) {
+ retval = sd_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int sd_switch_clock(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int re_tuning = 0;
+
+ retval = select_card(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = switch_clock(chip, sd_card->sd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (re_tuning) {
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_DDR50(sd_card))
+ retval = sd_ddr_tuning(chip);
+ else
+ retval = sd_sdr_tuning(chip);
+ } else {
+ if (CHK_MMC_DDR52(sd_card))
+ retval = mmc_ddr_tuning(chip);
+ }
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_prepare_reset(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ if (chip->asic_code)
+ sd_card->sd_clock = 29;
+ else
+ sd_card->sd_clock = CLK_30;
+
+ sd_card->sd_type = 0;
+ sd_card->seq_mode = 0;
+ sd_card->sd_data_buf_ready = 0;
+ sd_card->capacity = 0;
+
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status = 0;
+ sd_card->sd_erase_status = 0;
+#endif
+
+ chip->capacity[chip->card2lun[SD_CARD]] = 0;
+ chip->sd_io = 0;
+
+ retval = sd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTSX_WRITE_REG(chip, REG_SD_CFG1, 0xFF, 0x40);
+
+ RTSX_WRITE_REG(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ retval = select_card(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_pull_ctl_disable(struct rtsx_chip *chip)
+{
+ if (CHECK_PID(chip, 0x5208)) {
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF,
+ XD_D3_PD | SD_D7_PD | SD_CLK_PD | SD_D5_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF,
+ SD_D6_PD | SD_D0_PD | SD_D1_PD | XD_D5_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF,
+ SD_D4_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF,
+ XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL6, 0xFF, MS_D5_PD | MS_D4_PD);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF, 0x55);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF, 0x55);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF, 0x4B);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF, 0x69);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int sd_pull_ctl_enable(struct rtsx_chip *chip)
+{
+ int retval;
+
+ rtsx_init_cmd(chip);
+
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
+ XD_D3_PD | SD_DAT7_PU | SD_CLK_NP | SD_D5_PU);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
+ SD_D6_PU | SD_D0_PU | SD_D1_PU | XD_D5_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
+ SD_D4_PU | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
+ XD_RDY_PD | SD_D3_PU | SD_D2_PU | XD_ALE_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
+ MS_D5_PD | MS_D4_PD);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
+ 0xA8);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
+ 0x5A);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
+ 0x95);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
+ 0xAA);
+ }
+ }
+
+ retval = rtsx_send_cmd(chip, SD_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
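+/*
+ * Power cycle the SD slot: drop the 3.3V supply, enable the card clock and
+ * pull-ups, power the card back on (skipped in FT2 fast mode), check for
+ * over-current and finally enable the output drivers.
+ */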
+static int sd_init_power(struct rtsx_chip *chip)
+{
+ int retval;
+
+ retval = sd_power_off_card3v3(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!chip->ft2_fast_mode)
+ wait_timeout(250);
+
+ retval = enable_card_clock(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (chip->asic_code) {
+ retval = sd_pull_ctl_enable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, FPGA_PULL_CTL, FPGA_SD_PULL_CTL_BIT | 0x20,
+ 0);
+ }
+
+ if (!chip->ft2_fast_mode) {
+ retval = card_power_on(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(260);
+
+#ifdef SUPPORT_OCP
+ if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n",
+ chip->ocp_stat);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ }
+
+ RTSX_WRITE_REG(chip, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_dummy_clock(struct rtsx_chip *chip)
+{
+ RTSX_WRITE_REG(chip, REG_SD_CFG3, 0x01, 0x01);
+ wait_timeout(5);
+ RTSX_WRITE_REG(chip, REG_SD_CFG3, 0x01, 0);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_read_lba0(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5], bus_width;
+
+ cmd[0] = 0x40 | READ_SINGLE_BLOCK;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ if (CHK_SD(sd_card)) {
+ bus_width = SD_BUS_WIDTH_4;
+ } else {
+ if (CHK_MMC_8BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_8;
+ else if (CHK_MMC_4BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_4;
+ else
+ bus_width = SD_BUS_WIDTH_1;
+ }
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd,
+ 5, 512, 1, bus_width, NULL, 0, 100);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
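+/*
+ * Read the SD status block (ACMD13); a card type of 0x0001 or 0x0002 marks
+ * a ROM/OTP card as permanently write protected.  The mechanical
+ * write-protect switch is reported separately through RTSX_BIPR.
+ */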
+static int sd_check_wp_state(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u32 val;
+ u16 sd_card_type;
+ u8 cmd[5], buf[64];
+
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD,
+ sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ cmd[0] = 0x40 | SD_STATUS;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1,
+ SD_BUS_WIDTH_4, buf, 64, 250);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_sd_error(chip);
+
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_DEBUGP("ACMD13:\n");
+ RTSX_DUMP(buf, 64);
+
+ sd_card_type = ((u16)buf[2] << 8) | buf[3];
+ RTSX_DEBUGP("sd_card_type = 0x%04x\n", sd_card_type);
+ if ((sd_card_type == 0x0001) || (sd_card_type == 0x0002)) {
+ /* ROM card or OTP */
+ chip->card_wp |= SD_CARD;
+ }
+
+ /* Check the SD mechanical write-protect switch */
+ val = rtsx_readl(chip, RTSX_BIPR);
+ if (val & SD_WRITE_PROTECT)
+ chip->card_wp |= SD_CARD;
+
+ return STATUS_SUCCESS;
+}
+
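+/*
+ * Full SD initialization: CMD0/CMD8 to probe SD 2.0 and high capacity,
+ * ACMD41 loop until the card leaves busy, CMD2/CMD3 to obtain the RCA,
+ * CSD read and card select, optional lock handling, ACMD6 to 4-bit bus,
+ * CMD16 block length, CMD6 function switch and, for SD 3.0 speeds, phase
+ * tuning with a fall-back to plain SD 2.0 mode if tuning or the LBA0 test
+ * read fails.
+ */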
+static int reset_sd(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval, i = 0, j = 0, k = 0, hi_cap_flow = 0;
+ int sd_dont_switch = 0;
+ int support_1v8 = 0;
+ int try_sdio = 1;
+ u8 rsp[16];
+ u8 switch_bus_width;
+ u32 voltage = 0;
+ int sd20_mode = 0;
+
+ SET_SD(sd_card);
+
+Switch_Fail:
+
+ i = 0;
+ j = 0;
+ k = 0;
+ hi_cap_flow = 0;
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON)
+ goto SD_UNLOCK_ENTRY;
+#endif
+
+ retval = sd_prepare_reset(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_dummy_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) && try_sdio) {
+ int rty_cnt = 0;
+
+ for (; rty_cnt < chip->sdio_retry_cnt; rty_cnt++) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0,
+ SD_RSP_TYPE_R4, rsp, 5);
+ if (retval == STATUS_SUCCESS) {
+ int func_num = (rsp[1] >> 4) & 0x07;
+ if (func_num) {
+ RTSX_DEBUGP("SD_IO card (Function number: %d)!\n", func_num);
+ chip->sd_io = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ break;
+ }
+
+ sd_init_power(chip);
+
+ sd_dummy_clock(chip);
+ }
+
+ RTSX_DEBUGP("Normal card!\n");
+ }
+
+ /* Start Initialization Process of SD Card */
+RTY_SD_RST:
+ retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(20);
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_IF_COND, 0x000001AA,
+ SD_RSP_TYPE_R7, rsp, 5);
+ if (retval == STATUS_SUCCESS) {
+ if ((rsp[4] == 0xAA) && ((rsp[3] & 0x0f) == 0x01)) {
+ hi_cap_flow = 1;
+ voltage = SUPPORT_VOLTAGE | 0x40000000;
+ }
+ }
+
+ if (!hi_cap_flow) {
+ voltage = SUPPORT_VOLTAGE;
+
+ retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0,
+ SD_RSP_TYPE_R0, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(20);
+ }
+
+ do {
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, 0, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ j++;
+ if (j < 3)
+ goto RTY_SD_RST;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, SD_APP_OP_COND, voltage,
+ SD_RSP_TYPE_R3, rsp, 5);
+ if (retval != STATUS_SUCCESS) {
+ k++;
+ if (k < 3)
+ goto RTY_SD_RST;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ i++;
+ wait_timeout(20);
+ } while (!(rsp[1] & 0x80) && (i < 255));
+
+ if (i == 255)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (hi_cap_flow) {
+ if (rsp[1] & 0x40)
+ SET_SD_HCXC(sd_card);
+ else
+ CLR_SD_HCXC(sd_card);
+
+ support_1v8 = 0;
+ } else {
+ CLR_SD_HCXC(sd_card);
+ support_1v8 = 0;
+ }
+ RTSX_DEBUGP("support_1v8 = %d\n", support_1v8);
+
+ if (support_1v8) {
+ retval = sd_voltage_switch(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ for (i = 0; i < 3; i++) {
+ retval = sd_send_cmd_get_rsp(chip, SEND_RELATIVE_ADDR, 0,
+ SD_RSP_TYPE_R6, rsp, 5);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ sd_card->sd_addr = (u32)rsp[1] << 24;
+ sd_card->sd_addr += (u32)rsp[2] << 16;
+
+ if (sd_card->sd_addr)
+ break;
+ }
+
+ retval = sd_check_csd(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+#ifdef SUPPORT_SD_LOCK
+SD_UNLOCK_ENTRY:
+ retval = sd_update_lock_status(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (sd_card->sd_lock_status & SD_LOCKED) {
+ sd_card->sd_lock_status |= (SD_LOCK_1BIT_MODE | SD_PWD_EXIST);
+ return STATUS_SUCCESS;
+ } else if (!(sd_card->sd_lock_status & SD_UNLOCK_POW_ON)) {
+ sd_card->sd_lock_status &= ~SD_PWD_EXIST;
+ }
+#endif
+
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_send_cmd_get_rsp(chip, SET_CLR_CARD_DETECT, 0,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (support_1v8) {
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ switch_bus_width = SD_BUS_WIDTH_4;
+ } else {
+ switch_bus_width = SD_BUS_WIDTH_1;
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!(sd_card->raw_csd[4] & 0x40))
+ sd_dont_switch = 1;
+
+ if (!sd_dont_switch) {
+ if (sd20_mode) {
+ /* Set sd_switch_fail here, because there is no need
+ * to switch to UHS mode
+ */
+ sd_card->sd_switch_fail = SDR104_SUPPORT_MASK |
+ DDR50_SUPPORT_MASK | SDR50_SUPPORT_MASK;
+ }
+
+ /* Check whether the card follows the SD 1.1 spec or higher */
+ retval = sd_check_spec(chip, switch_bus_width);
+ if (retval == STATUS_SUCCESS) {
+ retval = sd_switch_function(chip, switch_bus_width);
+ if (retval != STATUS_SUCCESS) {
+ sd_init_power(chip);
+ sd_dont_switch = 1;
+ try_sdio = 0;
+
+ goto Switch_Fail;
+ }
+ } else {
+ if (support_1v8) {
+ sd_init_power(chip);
+ sd_dont_switch = 1;
+ try_sdio = 0;
+
+ goto Switch_Fail;
+ }
+ }
+ }
+
+ if (!support_1v8) {
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
+#endif
+
+ if (!sd20_mode && CHK_SD30_SPEED(sd_card)) {
+ int read_lba0 = 1;
+
+ RTSX_WRITE_REG(chip, SD30_DRIVE_SEL, 0x07,
+ chip->sd30_drive_sel_1v8);
+
+ retval = sd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_SD_DDR50(sd_card))
+ retval = sd_ddr_tuning(chip);
+ else
+ retval = sd_sdr_tuning(chip);
+
+ if (retval != STATUS_SUCCESS) {
+ if (sd20_mode) {
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ try_sdio = 0;
+ sd20_mode = 1;
+ goto Switch_Fail;
+ }
+ }
+
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+
+ if (CHK_SD_DDR50(sd_card)) {
+ retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
+ if (retval != STATUS_SUCCESS)
+ read_lba0 = 0;
+ }
+
+ if (read_lba0) {
+ retval = sd_read_lba0(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (sd20_mode) {
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ try_sdio = 0;
+ sd20_mode = 1;
+ goto Switch_Fail;
+ }
+ }
+ }
+ }
+
+ retval = sd_check_wp_state(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON) {
+ RTSX_WRITE_REG(chip, REG_SD_BLOCK_CNT_H, 0xFF, 0x02);
+ RTSX_WRITE_REG(chip, REG_SD_BLOCK_CNT_L, 0xFF, 0x00);
+ }
+#endif
+
+ return STATUS_SUCCESS;
+}
+
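+/*
+ * MMC bus test: write a test pattern with BUSTEST_W, read it back with
+ * BUSTEST_R through the ping-pong buffer and, if the card returns the
+ * inverted pattern, issue CMD6 to switch BUS_WIDTH in the EXT_CSD (the
+ * argument also selects DDR when the card is running in DDR52 mode).
+ */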
+static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 buf[8] = {0}, bus_width, *ptr;
+ u16 byte_cnt;
+ int len;
+
+ retval = sd_send_cmd_get_rsp(chip, BUSTEST_W, 0, SD_RSP_TYPE_R1, NULL,
+ 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, SWITCH_FAIL);
+
+ if (width == MMC_8BIT_BUS) {
+ buf[0] = 0x55;
+ buf[1] = 0xAA;
+ len = 8;
+ byte_cnt = 8;
+ bus_width = SD_BUS_WIDTH_8;
+ } else {
+ buf[0] = 0x5A;
+ len = 4;
+ byte_cnt = 4;
+ bus_width = SD_BUS_WIDTH_4;
+ }
+
+ retval = rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0x02);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, SWITCH_ERR);
+
+ retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3,
+ NULL, 0, byte_cnt, 1, bus_width, buf, len, 100);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_sd_error(chip);
+ rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
+ TRACE_RET(chip, SWITCH_ERR);
+ }
+
+ retval = rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, SWITCH_ERR);
+
+ RTSX_DEBUGP("SD/MMC CMD %d\n", BUSTEST_R);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 0x40 | BUSTEST_R);
+
+ if (width == MMC_8BIT_BUS)
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
+ 0xFF, 0x08);
+ else
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
+ 0xFF, 0x04);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_NO_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_NORMAL_READ | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2, 0, 0);
+ if (width == MMC_8BIT_BUS)
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 1, 0, 0);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, 100);
+ if (retval < 0) {
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, SWITCH_ERR);
+ }
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+
+ if (width == MMC_8BIT_BUS) {
+ RTSX_DEBUGP("BUSTEST_R [8bits]: 0x%02x 0x%02x\n", ptr[0],
+ ptr[1]);
+ if ((ptr[0] == 0xAA) && (ptr[1] == 0x55)) {
+ u8 rsp[5];
+ u32 arg;
+
+ if (CHK_MMC_DDR52(sd_card))
+ arg = 0x03B70600;
+ else
+ arg = 0x03B70200;
+
+ retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
+ SD_RSP_TYPE_R1b, rsp, 5);
+ if ((retval == STATUS_SUCCESS) &&
+ !(rsp[4] & MMC_SWITCH_ERR))
+ return SWITCH_SUCCESS;
+ }
+ } else {
+ RTSX_DEBUGP("BUSTEST_R [4bits]: 0x%02x\n", ptr[0]);
+ if (ptr[0] == 0xA5) {
+ u8 rsp[5];
+ u32 arg;
+
+ if (CHK_MMC_DDR52(sd_card))
+ arg = 0x03B70500;
+ else
+ arg = 0x03B70100;
+
+ retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
+ SD_RSP_TYPE_R1b, rsp, 5);
+ if ((retval == STATUS_SUCCESS) &&
+ !(rsp[4] & MMC_SWITCH_ERR))
+ return SWITCH_SUCCESS;
+ }
+ }
+
+ TRACE_RET(chip, SWITCH_FAIL);
+}
+
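+/*
+ * Read the EXT_CSD through the ping-pong buffer to get the sector-mode
+ * capacity and card type, switch HS_TIMING on via CMD6, re-select the
+ * clock and then probe the widest working bus (8-bit first, then 4-bit).
+ */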
+static int mmc_switch_timing_bus(struct rtsx_chip *chip, int switch_ddr)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 *ptr, card_type, card_type_mask = 0;
+
+ CLR_MMC_HS(sd_card);
+
+ RTSX_DEBUGP("SD/MMC CMD %d\n", SEND_EXT_CSD);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
+ 0x40 | SEND_EXT_CSD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, 0);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 2);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_NORMAL_READ | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 196, 0xFF, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 212, 0xFF, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 213, 0xFF, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 214, 0xFF, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 215, 0xFF, 0);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, 1000);
+ if (retval < 0) {
+ if (retval == -ETIMEDOUT) {
+ rtsx_clear_sd_error(chip);
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ptr = rtsx_get_cmd_data(chip);
+ if (ptr[0] & SD_TRANSFER_ERR) {
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (CHK_MMC_SECTOR_MODE(sd_card)) {
+ sd_card->capacity = ((u32)ptr[5] << 24) | ((u32)ptr[4] << 16) |
+ ((u32)ptr[3] << 8) | ((u32)ptr[2]);
+ }
+
+ card_type_mask = 0x03;
+ card_type = ptr[1] & card_type_mask;
+ if (card_type) {
+ u8 rsp[5];
+
+ if (card_type & 0x04) {
+ if (switch_ddr)
+ SET_MMC_DDR52(sd_card);
+ else
+ SET_MMC_52M(sd_card);
+ } else if (card_type & 0x02) {
+ SET_MMC_52M(sd_card);
+ } else {
+ SET_MMC_26M(sd_card);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, SWITCH,
+ 0x03B90100, SD_RSP_TYPE_R1b, rsp, 5);
+ if ((retval != STATUS_SUCCESS) || (rsp[4] & MMC_SWITCH_ERR))
+ CLR_MMC_HS(sd_card);
+ }
+
+ sd_choose_proper_clock(chip);
+ retval = switch_clock(chip, sd_card->sd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* Test Bus Procedure */
+ retval = mmc_test_switch_bus(chip, MMC_8BIT_BUS);
+ if (retval == SWITCH_SUCCESS) {
+ SET_MMC_8BIT(sd_card);
+ chip->card_bus_width[chip->card2lun[SD_CARD]] = 8;
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
+#endif
+ } else if (retval == SWITCH_FAIL) {
+ retval = mmc_test_switch_bus(chip, MMC_4BIT_BUS);
+ if (retval == SWITCH_SUCCESS) {
+ SET_MMC_4BIT(sd_card);
+ chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
+#endif
+ } else if (retval == SWITCH_FAIL) {
+ CLR_MMC_8BIT(sd_card);
+ CLR_MMC_4BIT(sd_card);
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
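+/*
+ * MMC initialization: CMD0, CMD1 loop (retrying on busy/timeout) to detect
+ * sector addressing, CMD2, CMD3 to assign a fixed RCA, CSD read and card
+ * select, CMD16 block length, then for MMC 4.x cards the timing/bus switch
+ * and an optional DDR tuning pass, each with a fall-back that restarts the
+ * sequence in a more conservative mode.
+ */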
+static int reset_mmc(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval, i = 0, j = 0, k = 0;
+ int switch_ddr = 1;
+ u8 rsp[16];
+ u8 spec_ver = 0;
+ u32 temp;
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON)
+ goto MMC_UNLOCK_ENTRY;
+#endif
+
+Switch_Fail:
+ retval = sd_prepare_reset(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ SET_MMC(sd_card);
+
+RTY_MMC_RST:
+ retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ do {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_OP_COND,
+ (SUPPORT_VOLTAGE | 0x40000000),
+ SD_RSP_TYPE_R3, rsp, 5);
+ if (retval != STATUS_SUCCESS) {
+ if (sd_check_err_code(chip, SD_BUSY) ||
+ sd_check_err_code(chip, SD_TO_ERR)) {
+ k++;
+ if (k < 20) {
+ sd_clr_err_code(chip);
+ goto RTY_MMC_RST;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ j++;
+ if (j < 100) {
+ sd_clr_err_code(chip);
+ goto RTY_MMC_RST;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ wait_timeout(20);
+ i++;
+ } while (!(rsp[1] & 0x80) && (i < 255));
+
+ if (i == 255)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((rsp[1] & 0x60) == 0x40)
+ SET_MMC_SECTOR_MODE(sd_card);
+ else
+ CLR_MMC_SECTOR_MODE(sd_card);
+
+ retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ sd_card->sd_addr = 0x00100000;
+ retval = sd_send_cmd_get_rsp(chip, SET_RELATIVE_ADDR, sd_card->sd_addr,
+ SD_RSP_TYPE_R6, rsp, 5);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_check_csd(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ spec_ver = (sd_card->raw_csd[0] & 0x3C) >> 2;
+
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+#ifdef SUPPORT_SD_LOCK
+MMC_UNLOCK_ENTRY:
+ retval = sd_update_lock_status(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+#endif
+
+ retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ chip->card_bus_width[chip->card2lun[SD_CARD]] = 1;
+
+ if (!sd_card->mmc_dont_switch_bus) {
+ if (spec_ver == 4) {
+ /* MMC 4.x Cards */
+ retval = mmc_switch_timing_bus(chip, switch_ddr);
+ if (retval != STATUS_SUCCESS) {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ sd_card->mmc_dont_switch_bus = 1;
+ TRACE_GOTO(chip, Switch_Fail);
+ }
+ }
+
+ if (CHK_MMC_SECTOR_MODE(sd_card) && (sd_card->capacity == 0))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (switch_ddr && CHK_MMC_DDR52(sd_card)) {
+ retval = sd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = mmc_ddr_tuning(chip);
+ if (retval != STATUS_SUCCESS) {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ switch_ddr = 0;
+ TRACE_GOTO(chip, Switch_Fail);
+ }
+
+ retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
+ if (retval == STATUS_SUCCESS) {
+ retval = sd_read_lba0(chip);
+ if (retval != STATUS_SUCCESS) {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ switch_ddr = 0;
+ TRACE_GOTO(chip, Switch_Fail);
+ }
+ }
+ }
+ }
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON) {
+ RTSX_WRITE_REG(chip, REG_SD_BLOCK_CNT_H, 0xFF, 0x02);
+ RTSX_WRITE_REG(chip, REG_SD_BLOCK_CNT_L, 0xFF, 0x00);
+ }
+#endif
+
+ temp = rtsx_readl(chip, RTSX_BIPR);
+ if (temp & SD_WRITE_PROTECT)
+ chip->card_wp |= SD_CARD;
+
+ return STATUS_SUCCESS;
+}
+
+int reset_sd_card(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ sd_init_reg_addr(chip);
+
+ memset(sd_card, 0, sizeof(struct sd_info));
+ chip->capacity[chip->card2lun[SD_CARD]] = 0;
+
+ retval = enable_card_clock(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (chip->ignore_sd && CHK_SDIO_EXIST(chip) &&
+ !CHK_SDIO_IGNORED(chip)) {
+ if (chip->asic_code) {
+ retval = sd_pull_ctl_enable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ retval = rtsx_write_register(chip, FPGA_PULL_CTL,
+ FPGA_SD_PULL_CTL_BIT | 0x20, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ retval = card_share_mode(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ chip->sd_io = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (chip->sd_ctl & RESET_MMC_FIRST) {
+ retval = reset_mmc(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (sd_check_err_code(chip, SD_NO_CARD))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = reset_sd(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ retval = reset_sd(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (sd_check_err_code(chip, SD_NO_CARD))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (chip->sd_io) {
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ retval = reset_mmc(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, REG_SD_BYTE_CNT_L, 0xFF, 0);
+ RTSX_WRITE_REG(chip, REG_SD_BYTE_CNT_H, 0xFF, 2);
+
+ chip->capacity[chip->card2lun[SD_CARD]] = sd_card->capacity;
+
+ retval = sd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("sd_card->sd_type = 0x%x\n", sd_card->sd_type);
+
+ return STATUS_SUCCESS;
+}
+
+static int reset_mmc_only(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ sd_card->sd_type = 0;
+ sd_card->seq_mode = 0;
+ sd_card->sd_data_buf_ready = 0;
+ sd_card->capacity = 0;
+ sd_card->sd_switch_fail = 0;
+
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status = 0;
+ sd_card->sd_erase_status = 0;
+#endif
+
+ chip->capacity[chip->card2lun[SD_CARD]] = sd_card->capacity = 0;
+
+ retval = enable_card_clock(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = reset_mmc(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, REG_SD_BYTE_CNT_L, 0xFF, 0);
+ RTSX_WRITE_REG(chip, REG_SD_BYTE_CNT_H, 0xFF, 2);
+
+ chip->capacity[chip->card2lun[SD_CARD]] = sd_card->capacity;
+
+ retval = sd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("In reset_mmc_only, sd_card->sd_type = 0x%x\n",
+ sd_card->sd_type);
+
+ return STATUS_SUCCESS;
+}
+
+#define WAIT_DATA_READY_RTY_CNT 255
+
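+/*
+ * Poll SEND_STATUS until sd_card->sd_data_buf_ready is set, rechecking
+ * card presence on every pass and giving up after WAIT_DATA_READY_RTY_CNT
+ * attempts.
+ */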
+static int wait_data_buf_ready(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int i, retval;
+
+ for (i = 0; i < WAIT_DATA_READY_RTY_CNT; i++) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ sd_card->sd_data_buf_ready = 0;
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (sd_card->sd_data_buf_ready) {
+ return sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+ }
+ }
+
+ sd_set_err_code(chip, SD_TO_ERR);
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+void sd_stop_seq_mode(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ if (sd_card->seq_mode) {
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ return;
+
+ retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+ SD_RSP_TYPE_R1b, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ sd_set_err_code(chip, SD_STS_ERR);
+
+ retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
+ if (retval != STATUS_SUCCESS)
+ sd_set_err_code(chip, SD_STS_ERR);
+
+ sd_card->seq_mode = 0;
+
+ rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+ }
+}
+
+static inline int sd_auto_tune_clock(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ if (chip->asic_code) {
+ if (sd_card->sd_clock > 30)
+ sd_card->sd_clock -= 20;
+ } else {
+ switch (sd_card->sd_clock) {
+ case CLK_200:
+ sd_card->sd_clock = CLK_150;
+ break;
+
+ case CLK_150:
+ sd_card->sd_clock = CLK_120;
+ break;
+
+ case CLK_120:
+ sd_card->sd_clock = CLK_100;
+ break;
+
+ case CLK_100:
+ sd_card->sd_clock = CLK_80;
+ break;
+
+ case CLK_80:
+ sd_card->sd_clock = CLK_60;
+ break;
+
+ case CLK_60:
+ sd_card->sd_clock = CLK_50;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
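+/*
+ * Main read/write path.  A request that simply continues the previous one
+ * (same direction, contiguous sectors) stays in sequential mode and reuses
+ * the open multiple-block transfer; otherwise the previous transfer is
+ * stopped with CMD12, the ring buffer is flushed and a fresh multiple-block
+ * command is set up.
+ */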
+int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
+ u16 sector_cnt)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ u32 data_addr;
+ u8 cfg2;
+ int retval;
+
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ RTSX_DEBUGP("sd_rw: Read %d %s from 0x%x\n", sector_cnt,
+ (sector_cnt > 1) ? "sectors" : "sector", start_sector);
+ } else {
+ RTSX_DEBUGP("sd_rw: Write %d %s to 0x%x\n", sector_cnt,
+ (sector_cnt > 1) ? "sectors" : "sector", start_sector);
+ }
+
+ sd_card->cleanup_counter = 0;
+
+ if (!(chip->card_ready & SD_CARD)) {
+ sd_card->seq_mode = 0;
+
+ retval = reset_sd_card(chip);
+ if (retval == STATUS_SUCCESS) {
+ chip->card_ready |= SD_CARD;
+ chip->card_fail &= ~SD_CARD;
+ } else {
+ chip->card_ready &= ~SD_CARD;
+ chip->card_fail |= SD_CARD;
+ chip->capacity[chip->card2lun[SD_CARD]] = 0;
+ chip->rw_need_retry = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (!CHK_SD_HCXC(sd_card) && !CHK_MMC_SECTOR_MODE(sd_card))
+ data_addr = start_sector << 9;
+ else
+ data_addr = start_sector;
+
+ sd_clr_err_code(chip);
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_IO_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
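+ /* End sequential mode if this request does not continue the previous
+  * transfer (different direction or non-contiguous start sector).
+  */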
+ if (sd_card->seq_mode &&
+ ((sd_card->pre_dir != srb->sc_data_direction) ||
+ ((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) !=
+ start_sector))) {
+ if ((sd_card->pre_sec_cnt < 0x80)
+ && (sd_card->pre_dir == DMA_FROM_DEVICE)
+ && !CHK_SD30_SPEED(sd_card)
+ && !CHK_SD_HS(sd_card)
+ && !CHK_MMC_HS(sd_card)) {
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
+ 0, SD_RSP_TYPE_R1b, NULL, 0);
+ if (retval != STATUS_SUCCESS) {
+ chip->rw_need_retry = 1;
+ sd_set_err_code(chip, SD_STS_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ sd_card->seq_mode = 0;
+
+ retval = rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+ if (retval != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_IO_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ if ((sd_card->pre_sec_cnt < 0x80)
+ && !CHK_SD30_SPEED(sd_card)
+ && !CHK_SD_HS(sd_card)
+ && !CHK_MMC_HS(sd_card)) {
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ }
+ }
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0x00);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 0x02);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
+ (u8)sector_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
+ (u8)(sector_cnt >> 8));
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+
+ if (CHK_MMC_8BIT(sd_card))
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
+ 0x03, SD_BUS_WIDTH_8);
+ else if (CHK_MMC_4BIT(sd_card) || CHK_SD(sd_card))
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
+ 0x03, SD_BUS_WIDTH_4);
+ else
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
+ 0x03, SD_BUS_WIDTH_1);
+
+ if (sd_card->seq_mode) {
+ cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 |
+ SD_RSP_LEN_0;
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, cfg2);
+
+ trans_dma_enable(srb->sc_data_direction, chip, sector_cnt * 512,
+ DMA_512);
+
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_READ_3 | SD_TRANSFER_START);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+ }
+
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+ } else {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ RTSX_DEBUGP("SD/MMC CMD %d\n", READ_MULTIPLE_BLOCK);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
+ 0x40 | READ_MULTIPLE_BLOCK);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF,
+ (u8)(data_addr >> 24));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF,
+ (u8)(data_addr >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF,
+ (u8)(data_addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF,
+ (u8)data_addr);
+
+ cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 |
+ SD_RSP_LEN_6;
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
+ cfg2);
+
+ trans_dma_enable(srb->sc_data_direction, chip,
+ sector_cnt * 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+ } else {
+ retval = rtsx_send_cmd(chip, SD_CARD, 50);
+ if (retval < 0) {
+ rtsx_clear_sd_error(chip);
+
+ chip->rw_need_retry = 1;
+ sd_set_err_code(chip, SD_TO_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ retval = wait_data_buf_ready(chip);
+ if (retval != STATUS_SUCCESS) {
+ chip->rw_need_retry = 1;
+ sd_set_err_code(chip, SD_TO_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, WRITE_MULTIPLE_BLOCK,
+ data_addr, SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS) {
+ chip->rw_need_retry = 1;
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ rtsx_init_cmd(chip);
+
+ cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END |
+ SD_NO_CHECK_CRC7 | SD_RSP_LEN_0;
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
+ cfg2);
+
+ trans_dma_enable(srb->sc_data_direction, chip,
+ sector_cnt * 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+ }
+
+ sd_card->seq_mode = 1;
+ }
+
+ retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
+ scsi_bufflen(srb), scsi_sg_count(srb),
+ srb->sc_data_direction, chip->sd_timeout);
+ if (retval < 0) {
+ u8 stat = 0;
+ int err;
+
+ sd_card->seq_mode = 0;
+
+ if (retval == -ETIMEDOUT)
+ err = STATUS_TIMEDOUT;
+ else
+ err = STATUS_FAIL;
+
+ rtsx_read_register(chip, REG_SD_STAT1, &stat);
+ rtsx_clear_sd_error(chip);
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ chip->rw_need_retry = 0;
+ RTSX_DEBUGP("No card exist, exit sd_rw\n");
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ chip->rw_need_retry = 1;
+
+ retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+ SD_RSP_TYPE_R1b, NULL, 0);
+ if (retval != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_STS_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ if (stat & (SD_CRC7_ERR | SD_CRC16_ERR | SD_CRC_WRITE_ERR)) {
+ RTSX_DEBUGP("SD CRC error, tune clock!\n");
+ sd_set_err_code(chip, SD_CRC_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ if (err == STATUS_TIMEDOUT) {
+ sd_set_err_code(chip, SD_TO_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ TRACE_RET(chip, err);
+ }
+
+ sd_card->pre_sec_addr = start_sector;
+ sd_card->pre_sec_cnt = sector_cnt;
+ sd_card->pre_dir = srb->sc_data_direction;
+
+ return STATUS_SUCCESS;
+
+RW_FAIL:
+ sd_card->seq_mode = 0;
+
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ chip->rw_need_retry = 0;
+ RTSX_DEBUGP("No card exist, exit sd_rw\n");
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (sd_check_err_code(chip, SD_CRC_ERR)) {
+ if (CHK_MMC_4BIT(sd_card) || CHK_MMC_8BIT(sd_card)) {
+ sd_card->mmc_dont_switch_bus = 1;
+ reset_mmc_only(chip);
+ sd_card->mmc_dont_switch_bus = 0;
+ } else {
+ sd_card->need_retune = 1;
+ sd_auto_tune_clock(chip);
+ }
+ } else if (sd_check_err_code(chip, SD_TO_ERR | SD_STS_ERR)) {
+ retval = reset_sd_card(chip);
+ if (retval != STATUS_SUCCESS) {
+ chip->card_ready &= ~SD_CARD;
+ chip->card_fail |= SD_CARD;
+ chip->capacity[chip->card2lun[SD_CARD]] = 0;
+ }
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+#ifdef SUPPORT_CPRM
+int soft_reset_sd_card(struct rtsx_chip *chip)
+{
+ return reset_sd(chip);
+}
+
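+/* Send a pass-through command via the ping-pong buffer and read back the
+ * response registers; R1b commands get a longer timeout, and responses with
+ * CRC7 errors are retried a limited number of times.
+ */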
+int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
+ u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, int special_check)
+{
+ int retval;
+ int timeout = 100;
+ u16 reg_addr;
+ u8 *ptr;
+ int stat_idx = 0;
+ int rty_cnt = 0;
+
+ RTSX_DEBUGP("EXT SD/MMC CMD %d\n", cmd_idx);
+
+ if (rsp_type == SD_RSP_TYPE_R1b)
+ timeout = 3000;
+
+RTY_SEND_CMD:
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 0x40 | cmd_idx);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, (u8)(arg >> 24));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, (u8)(arg >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, (u8)(arg >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, (u8)arg);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
+ 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
+ reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
+
+ stat_idx = 17;
+ } else if (rsp_type != SD_RSP_TYPE_R0) {
+ for (reg_addr = REG_SD_CMD0; reg_addr <= REG_SD_CMD4;
+ reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
+
+ stat_idx = 6;
+ }
+ rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_CMD5, 0, 0);
+
+ rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_STAT1, 0, 0);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, timeout);
+ if (retval < 0) {
+ if (retval == -ETIMEDOUT) {
+ rtsx_clear_sd_error(chip);
+
+ if (rsp_type & SD_WAIT_BUSY_END) {
+ retval = sd_check_data0_status(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ } else {
+ sd_set_err_code(chip, SD_TO_ERR);
+ }
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R0)
+ return STATUS_SUCCESS;
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+
+ if ((ptr[0] & 0xC0) != 0) {
+ sd_set_err_code(chip, SD_STS_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (!(rsp_type & SD_NO_CHECK_CRC7)) {
+ if (ptr[stat_idx] & SD_CRC7_ERR) {
+ if (cmd_idx == WRITE_MULTIPLE_BLOCK) {
+ sd_set_err_code(chip, SD_CRC_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (rty_cnt < SD_MAX_RETRY_COUNT) {
+ wait_timeout(20);
+ rty_cnt++;
+ goto RTY_SEND_CMD;
+ } else {
+ sd_set_err_code(chip, SD_CRC_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ if ((cmd_idx == SELECT_CARD) || (cmd_idx == APP_CMD) ||
+ (cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) {
+ if ((cmd_idx != STOP_TRANSMISSION) && (special_check == 0)) {
+ if (ptr[1] & 0x80)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#ifdef SUPPORT_SD_LOCK
+ if (ptr[1] & 0x7D)
+#else
+ if (ptr[1] & 0x7F)
+#endif
+ {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (ptr[2] & 0xF8)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (cmd_idx == SELECT_CARD) {
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ if ((ptr[3] & 0x1E) != 0x04)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ } else if (rsp_type == SD_RSP_TYPE_R0) {
+ if ((ptr[3] & 0x1E) != 0x03)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ if (rsp && rsp_len)
+ memcpy(rsp, ptr, rsp_len);
+
+ return STATUS_SUCCESS;
+}
+
+int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type)
+{
+ int retval, rsp_len;
+ u16 reg_addr;
+
+ if (rsp_type == SD_RSP_TYPE_R0)
+ return STATUS_SUCCESS;
+
+ rtsx_init_cmd(chip);
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
+ reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0xFF, 0);
+
+ rsp_len = 17;
+ } else if (rsp_type != SD_RSP_TYPE_R0) {
+ for (reg_addr = REG_SD_CMD0; reg_addr <= REG_SD_CMD4;
+ reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0xFF, 0);
+
+ rsp_len = 6;
+ }
+ rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_CMD5, 0xFF, 0);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (rsp) {
+ int min_len = (rsp_len < len) ? rsp_len : len;
+
+ memcpy(rsp, rtsx_get_cmd_data(chip), min_len);
+
+ RTSX_DEBUGP("min_len = %d\n", min_len);
+ RTSX_DEBUGP("Response in cmd buf: 0x%x 0x%x 0x%x 0x%x\n",
+ rsp[0], rsp[1], rsp[2], rsp[3]);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int len;
+ u8 buf[18] = {
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x0E,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x53,
+ 0x44,
+ 0x20,
+ 0x43,
+ 0x61,
+ 0x72,
+ 0x64,
+ 0x00,
+ 0x00,
+ 0x00,
+ };
+
+ sd_card->pre_cmd_err = 0;
+
+ if (!(CHK_BIT(chip->lun_mc, lun))) {
+ SET_BIT(chip->lun_mc, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
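+ /* Bytes 2-8 of the CDB must carry the ASCII signature "SD Card". */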
+ if ((0x53 != srb->cmnd[2]) || (0x44 != srb->cmnd[3]) ||
+ (0x20 != srb->cmnd[4]) || (0x43 != srb->cmnd[5]) ||
+ (0x61 != srb->cmnd[6]) || (0x72 != srb->cmnd[7]) ||
+ (0x64 != srb->cmnd[8])) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ switch (srb->cmnd[1] & 0x0F) {
+ case 0:
+ sd_card->sd_pass_thru_en = 0;
+ break;
+
+ case 1:
+ sd_card->sd_pass_thru_en = 1;
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ buf[5] = (1 == CHK_SD(sd_card)) ? 0x01 : 0x02;
+ if (chip->card_wp & SD_CARD)
+ buf[5] |= 0x80;
+
+ buf[6] = (u8)(sd_card->sd_addr >> 16);
+ buf[7] = (u8)(sd_card->sd_addr >> 24);
+
+ buf[15] = chip->max_lun;
+
+ len = min_t(int, 18, scsi_bufflen(srb));
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+
+ return TRANSPORT_GOOD;
+}
+
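+/* Map the response-type code in CDB byte 10 to an SD response type and its
+ * length in bytes.
+ */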
+static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type,
+ int *rsp_len)
+{
+ if (!rsp_type || !rsp_len)
+ return STATUS_FAIL;
+
+ switch (srb->cmnd[10]) {
+ case 0x03:
+ *rsp_type = SD_RSP_TYPE_R0;
+ *rsp_len = 0;
+ break;
+
+ case 0x04:
+ *rsp_type = SD_RSP_TYPE_R1;
+ *rsp_len = 6;
+ break;
+
+ case 0x05:
+ *rsp_type = SD_RSP_TYPE_R1b;
+ *rsp_len = 6;
+ break;
+
+ case 0x06:
+ *rsp_type = SD_RSP_TYPE_R2;
+ *rsp_len = 17;
+ break;
+
+ case 0x07:
+ *rsp_type = SD_RSP_TYPE_R3;
+ *rsp_len = 6;
+ break;
+
+ default:
+ return STATUS_FAIL;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval, rsp_len;
+ u8 cmd_idx, rsp_type;
+ u8 standby = 0, acmd = 0;
+ u32 arg;
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ cmd_idx = srb->cmnd[2] & 0x3F;
+ if (srb->cmnd[1] & 0x02)
+ standby = 1;
+
+ if (srb->cmnd[1] & 0x01)
+ acmd = 1;
+
+ arg = ((u32)srb->cmnd[3] << 24) | ((u32)srb->cmnd[4] << 16) |
+ ((u32)srb->cmnd[5] << 8) | srb->cmnd[6];
+
+ retval = get_rsp_type(srb, &rsp_type, &rsp_len);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ sd_card->last_rsp_type = rsp_type;
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+#ifdef SUPPORT_SD_LOCK
+ if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
+ if (CHK_MMC_8BIT(sd_card)) {
+ retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
+ SD_BUS_WIDTH_8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
+ retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
+ SD_BUS_WIDTH_4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#else
+ retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+#endif
+
+ if (standby) {
+ retval = sd_select_card(chip, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+ }
+
+ if (acmd) {
+ retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+ }
+
+ retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
+ sd_card->rsp, rsp_len, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+
+ if (standby) {
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+ }
+
+#ifdef SUPPORT_SD_LOCK
+ retval = sd_update_lock_status(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+#endif
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+
+SD_Execute_Cmd_Failed:
+ sd_card->pre_cmd_err = 1;
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ release_sd_card(chip);
+ do_reset_sd_card(chip);
+ if (!(chip->card_ready & SD_CARD))
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+
+ TRACE_RET(chip, TRANSPORT_FAILED);
+}
+
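+/* Pass-through read: transfers of up to 512 bytes go through a temporary
+ * buffer, larger 512-byte-aligned transfers are DMAed straight into the
+ * scatter-gather list.
+ */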
+int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval, rsp_len, i;
+ int cmd13_checkbit = 0, read_err = 0;
+ u8 cmd_idx, rsp_type, bus_width;
+ u8 send_cmd12 = 0, standby = 0, acmd = 0;
+ u32 data_len;
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ cmd_idx = srb->cmnd[2] & 0x3F;
+ if (srb->cmnd[1] & 0x04)
+ send_cmd12 = 1;
+
+ if (srb->cmnd[1] & 0x02)
+ standby = 1;
+
+ if (srb->cmnd[1] & 0x01)
+ acmd = 1;
+
+ data_len = ((u32)srb->cmnd[7] << 16) | ((u32)srb->cmnd[8]
+ << 8) | srb->cmnd[9];
+
+ retval = get_rsp_type(srb, &rsp_type, &rsp_len);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ sd_card->last_rsp_type = rsp_type;
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+#ifdef SUPPORT_SD_LOCK
+ if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
+ if (CHK_MMC_8BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_8;
+ else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_4;
+ else
+ bus_width = SD_BUS_WIDTH_1;
+ } else {
+ bus_width = SD_BUS_WIDTH_4;
+ }
+ RTSX_DEBUGP("bus_width = %d\n", bus_width);
+#else
+ bus_width = SD_BUS_WIDTH_4;
+#endif
+
+ if (data_len < 512) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (standby) {
+ retval = sd_select_card(chip, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (acmd) {
+ retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (data_len <= 512) {
+ int min_len;
+ u8 *buf;
+ u16 byte_cnt, blk_cnt;
+ u8 cmd[5];
+
+ byte_cnt = ((u16)(srb->cmnd[8] & 0x03) << 8) | srb->cmnd[9];
+ blk_cnt = 1;
+
+ cmd[0] = 0x40 | cmd_idx;
+ cmd[1] = srb->cmnd[3];
+ cmd[2] = srb->cmnd[4];
+ cmd[3] = srb->cmnd[5];
+ cmd[4] = srb->cmnd[6];
+
+ buf = kmalloc(data_len, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, byte_cnt,
+ blk_cnt, bus_width, buf, data_len, 2000);
+ if (retval != STATUS_SUCCESS) {
+ read_err = 1;
+ kfree(buf);
+ rtsx_clear_sd_error(chip);
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ min_len = min(data_len, scsi_bufflen(srb));
+ rtsx_stor_set_xfer_buf(buf, min_len, srb);
+
+ kfree(buf);
+ } else if (!(data_len & 0x1FF)) {
+ rtsx_init_cmd(chip);
+
+ trans_dma_enable(DMA_FROM_DEVICE, chip, data_len, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
+ 0x02);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
+ 0x00);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H,
+ 0xFF, (srb->cmnd[7] & 0xFE) >> 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L,
+ 0xFF, (u8)((data_len & 0x0001FE00) >> 9));
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
+ 0x40 | cmd_idx);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF,
+ srb->cmnd[3]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF,
+ srb->cmnd[4]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF,
+ srb->cmnd[5]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF,
+ srb->cmnd[6]);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
+ 0xFF, SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
+ scsi_bufflen(srb), scsi_sg_count(srb),
+ DMA_FROM_DEVICE, 10000);
+ if (retval < 0) {
+ read_err = 1;
+ rtsx_clear_sd_error(chip);
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ } else {
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ retval = ext_sd_get_rsp(chip, rsp_len, sd_card->rsp, rsp_type);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+
+ if (standby) {
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (send_cmd12) {
+ retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
+ 0, SD_RSP_TYPE_R1b, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (data_len < 512) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+
+ retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+
+ retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if ((srb->cmnd[1] & 0x02) || (srb->cmnd[1] & 0x04))
+ cmd13_checkbit = 1;
+
+ for (i = 0; i < 3; i++) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0,
+ cmd13_checkbit);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+
+SD_Execute_Read_Cmd_Failed:
+ sd_card->pre_cmd_err = 1;
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ if (read_err)
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+
+ release_sd_card(chip);
+ do_reset_sd_card(chip);
+ if (!(chip->card_ready & SD_CARD))
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+
+ TRACE_RET(chip, TRANSPORT_FAILED);
+}
+
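+/* Pass-through write: payloads of up to 512 bytes are staged in the
+ * ping-pong buffer, larger 512-byte-aligned payloads are DMAed from the
+ * scatter-gather list.
+ */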
+int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval, rsp_len, i;
+ int cmd13_checkbit = 0, write_err = 0;
+ u8 cmd_idx, rsp_type;
+ u8 send_cmd12 = 0, standby = 0, acmd = 0;
+ u32 data_len, arg;
+#ifdef SUPPORT_SD_LOCK
+ int lock_cmd_fail = 0;
+ u8 sd_lock_state = 0;
+ u8 lock_cmd_type = 0;
+#endif
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ cmd_idx = srb->cmnd[2] & 0x3F;
+ if (srb->cmnd[1] & 0x04)
+ send_cmd12 = 1;
+
+ if (srb->cmnd[1] & 0x02)
+ standby = 1;
+
+ if (srb->cmnd[1] & 0x01)
+ acmd = 1;
+
+ data_len = ((u32)srb->cmnd[7] << 16) | ((u32)srb->cmnd[8]
+ << 8) | srb->cmnd[9];
+ arg = ((u32)srb->cmnd[3] << 24) | ((u32)srb->cmnd[4] << 16) |
+ ((u32)srb->cmnd[5] << 8) | srb->cmnd[6];
+
+#ifdef SUPPORT_SD_LOCK
+ if (cmd_idx == LOCK_UNLOCK) {
+ sd_lock_state = sd_card->sd_lock_status;
+ sd_lock_state &= SD_LOCKED;
+ }
+#endif
+
+ retval = get_rsp_type(srb, &rsp_type, &rsp_len);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ sd_card->last_rsp_type = rsp_type;
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+#ifdef SUPPORT_SD_LOCK
+ if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
+ if (CHK_MMC_8BIT(sd_card)) {
+ retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
+ SD_BUS_WIDTH_8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
+ retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
+ SD_BUS_WIDTH_4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#else
+ retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+#endif
+
+ if (data_len < 512) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (standby) {
+ retval = sd_select_card(chip, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (acmd) {
+ retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
+ sd_card->rsp, rsp_len, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+
+ if (data_len <= 512) {
+ u16 i;
+ u8 *buf;
+
+ buf = kmalloc(data_len, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rtsx_stor_get_xfer_buf(buf, data_len, srb);
+
+#ifdef SUPPORT_SD_LOCK
+ if (cmd_idx == LOCK_UNLOCK)
+ lock_cmd_type = buf[0] & 0x0F;
+#endif
+
+ if (data_len > 256) {
+ rtsx_init_cmd(chip);
+ for (i = 0; i < 256; i++) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ PPBUF_BASE2 + i, 0xFF, buf[i]);
+ }
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ rtsx_init_cmd(chip);
+ for (i = 256; i < data_len; i++) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ PPBUF_BASE2 + i, 0xFF, buf[i]);
+ }
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+ } else {
+ rtsx_init_cmd(chip);
+ for (i = 0; i < data_len; i++) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ PPBUF_BASE2 + i, 0xFF, buf[i]);
+ }
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+ }
+
+ kfree(buf);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
+ srb->cmnd[8] & 0x03);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
+ srb->cmnd[9]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
+ 0x00);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
+ 0x01);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, 250);
+ } else if (!(data_len & 0x1FF)) {
+ rtsx_init_cmd(chip);
+
+ trans_dma_enable(DMA_TO_DEVICE, chip, data_len, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
+ 0x02);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
+ 0x00);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H,
+ 0xFF, (srb->cmnd[7] & 0xFE) >> 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L,
+ 0xFF, (u8)((data_len & 0x0001FE00) >> 9));
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
+ scsi_bufflen(srb), scsi_sg_count(srb),
+ DMA_TO_DEVICE, 10000);
+
+ } else {
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (retval < 0) {
+ write_err = 1;
+ rtsx_clear_sd_error(chip);
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+#ifdef SUPPORT_SD_LOCK
+ if (cmd_idx == LOCK_UNLOCK) {
+ if (lock_cmd_type == SD_ERASE) {
+ sd_card->sd_erase_status = SD_UNDER_ERASING;
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+ }
+
+ rtsx_init_cmd(chip);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, 0xFD30, 0x02, 0x02);
+
+ rtsx_send_cmd(chip, SD_CARD, 250);
+
+ retval = sd_update_lock_status(chip);
+ if (retval != STATUS_SUCCESS) {
+ RTSX_DEBUGP("Lock command fail!\n");
+ lock_cmd_fail = 1;
+ }
+ }
+#endif /* SUPPORT_SD_LOCK */
+
+ if (standby) {
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (send_cmd12) {
+ retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
+ 0, SD_RSP_TYPE_R1b, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (data_len < 512) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+
+ retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+
+ retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if ((srb->cmnd[1] & 0x02) || (srb->cmnd[1] & 0x04))
+ cmd13_checkbit = 1;
+
+ for (i = 0; i < 3; i++) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0,
+ cmd13_checkbit);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+
+#ifdef SUPPORT_SD_LOCK
+ if (cmd_idx == LOCK_UNLOCK) {
+ if (!lock_cmd_fail) {
+ RTSX_DEBUGP("lock_cmd_type = 0x%x\n", lock_cmd_type);
+ if (lock_cmd_type & SD_CLR_PWD)
+ sd_card->sd_lock_status &= ~SD_PWD_EXIST;
+
+ if (lock_cmd_type & SD_SET_PWD)
+ sd_card->sd_lock_status |= SD_PWD_EXIST;
+ }
+
+ RTSX_DEBUGP("sd_lock_state = 0x%x, sd_card->sd_lock_status = 0x%x\n",
+ sd_lock_state, sd_card->sd_lock_status);
+ if (sd_lock_state ^ (sd_card->sd_lock_status & SD_LOCKED)) {
+ sd_card->sd_lock_notify = 1;
+ if (sd_lock_state) {
+ if (sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) {
+ sd_card->sd_lock_status |= (SD_UNLOCK_POW_ON | SD_SDR_RST);
+ if (CHK_SD(sd_card)) {
+ retval = reset_sd(chip);
+ if (retval != STATUS_SUCCESS) {
+ sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST);
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+ }
+
+ sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST);
+ }
+ }
+ }
+ }
+
+ if (lock_cmd_fail) {
+ scsi_set_resid(srb, 0);
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+#endif /* SUPPORT_SD_LOCK */
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+
+SD_Execute_Write_Cmd_Failed:
+ sd_card->pre_cmd_err = 1;
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ if (write_err)
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+
+ release_sd_card(chip);
+ do_reset_sd_card(chip);
+ if (!(chip->card_ready & SD_CARD))
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+
+ TRACE_RET(chip, TRANSPORT_FAILED);
+}
+
+int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int count;
+ u16 data_len;
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ data_len = ((u16)srb->cmnd[7] << 8) | srb->cmnd[8];
+
+ if (sd_card->last_rsp_type == SD_RSP_TYPE_R0) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else if (sd_card->last_rsp_type == SD_RSP_TYPE_R2) {
+ count = (data_len < 17) ? data_len : 17;
+ } else {
+ count = (data_len < 6) ? data_len : 6;
+ }
+ rtsx_stor_set_xfer_buf(sd_card->rsp, count, srb);
+
+ RTSX_DEBUGP("Response length: %d\n", data_len);
+ RTSX_DEBUGP("Response: 0x%x 0x%x 0x%x 0x%x\n", sd_card->rsp[0],
+ sd_card->rsp[1], sd_card->rsp[2], sd_card->rsp[3]);
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+
+int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((0x53 != srb->cmnd[2]) || (0x44 != srb->cmnd[3]) ||
+ (0x20 != srb->cmnd[4]) || (0x43 != srb->cmnd[5]) ||
+ (0x61 != srb->cmnd[6]) || (0x72 != srb->cmnd[7]) ||
+ (0x64 != srb->cmnd[8])) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ switch (srb->cmnd[1] & 0x0F) {
+ case 0:
+#ifdef SUPPORT_SD_LOCK
+ if (0x64 == srb->cmnd[9])
+ sd_card->sd_lock_status |= SD_SDR_RST;
+#endif
+ retval = reset_sd_card(chip);
+ if (retval != STATUS_SUCCESS) {
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status &= ~SD_SDR_RST;
+#endif
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ sd_card->pre_cmd_err = 1;
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status &= ~SD_SDR_RST;
+#endif
+ break;
+
+ case 1:
+ retval = soft_reset_sd_card(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ sd_card->pre_cmd_err = 1;
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+#endif
+
+void sd_cleanup_work(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ if (sd_card->seq_mode) {
+ RTSX_DEBUGP("SD: stop transmission\n");
+ sd_stop_seq_mode(chip);
+ sd_card->cleanup_counter = 0;
+ }
+}
+
+int sd_power_off_card3v3(struct rtsx_chip *chip)
+{
+ int retval;
+
+ retval = disable_card_clock(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_OE, SD_OUTPUT_EN, 0);
+
+ if (!chip->ft2_fast_mode) {
+ retval = card_power_off(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(50);
+ }
+
+ if (chip->asic_code) {
+ retval = sd_pull_ctl_disable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, FPGA_PULL_CTL,
+ FPGA_SD_PULL_CTL_BIT | 0x20, FPGA_SD_PULL_CTL_BIT);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int release_sd_card(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ RTSX_DEBUGP("release_sd_card\n");
+
+ chip->card_ready &= ~SD_CARD;
+ chip->card_fail &= ~SD_CARD;
+ chip->card_wp &= ~SD_CARD;
+
+ chip->sd_io = 0;
+ chip->sd_int = 0;
+
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status = 0;
+ sd_card->sd_erase_status = 0;
+#endif
+
+ memset(sd_card->raw_csd, 0, 16);
+ memset(sd_card->raw_scr, 0, 8);
+
+ retval = sd_power_off_card3v3(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
diff --git a/drivers/staging/rts5208/sd.h b/drivers/staging/rts5208/sd.h
new file mode 100644
index 0000000..735b2d0
--- /dev/null
+++ b/drivers/staging/rts5208/sd.h
@@ -0,0 +1,301 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_SD_H
+#define __REALTEK_RTSX_SD_H
+
+#include "rtsx_chip.h"
+
+#define SUPPORT_VOLTAGE 0x003C0000
+
+/* Error Code */
+#define SD_NO_ERROR 0x0
+#define SD_CRC_ERR 0x80
+#define SD_TO_ERR 0x40
+#define SD_NO_CARD 0x20
+#define SD_BUSY 0x10
+#define SD_STS_ERR 0x08
+#define SD_RSP_TIMEOUT 0x04
+#define SD_IO_ERR 0x02
+
+/* Return code for MMC switch bus */
+#define SWITCH_SUCCESS 0
+#define SWITCH_ERR 1
+#define SWITCH_FAIL 2
+
+/* MMC/SD Command Index */
+/* Basic command (class 0) */
+#define GO_IDLE_STATE 0
+#define SEND_OP_COND 1
+#define ALL_SEND_CID 2
+#define SET_RELATIVE_ADDR 3
+#define SEND_RELATIVE_ADDR 3
+#define SET_DSR 4
+#define IO_SEND_OP_COND 5
+#define SWITCH 6
+#define SELECT_CARD 7
+#define DESELECT_CARD 7
+/* CMD8 is "SEND_EXT_CSD" in the MMC 4.x spec,
+ * but "SEND_IF_COND" in the SD 2.0 spec.
+ */
+#define SEND_EXT_CSD 8
+#define SEND_IF_COND 8
+
+#define SEND_CSD 9
+#define SEND_CID 10
+#define VOLTAGE_SWITCH 11
+#define READ_DAT_UTIL_STOP 11
+#define STOP_TRANSMISSION 12
+#define SEND_STATUS 13
+#define GO_INACTIVE_STATE 15
+
+#define SET_BLOCKLEN 16
+#define READ_SINGLE_BLOCK 17
+#define READ_MULTIPLE_BLOCK 18
+#define SEND_TUNING_PATTERN 19
+
+#define BUSTEST_R 14
+#define BUSTEST_W 19
+
+#define WRITE_BLOCK 24
+#define WRITE_MULTIPLE_BLOCK 25
+#define PROGRAM_CSD 27
+
+#define ERASE_WR_BLK_START 32
+#define ERASE_WR_BLK_END 33
+#define ERASE_CMD 38
+
+#define LOCK_UNLOCK 42
+#define IO_RW_DIRECT 52
+
+#define APP_CMD 55
+#define GEN_CMD 56
+
+#define SET_BUS_WIDTH 6
+#define SD_STATUS 13
+#define SEND_NUM_WR_BLOCKS 22
+#define SET_WR_BLK_ERASE_COUNT 23
+#define SD_APP_OP_COND 41
+#define SET_CLR_CARD_DETECT 42
+#define SEND_SCR 51
+
+#define SD_READ_COMPLETE 0x00
+#define SD_READ_TO 0x01
+#define SD_READ_ADVENCE 0x02
+
+#define SD_CHECK_MODE 0x00
+#define SD_SWITCH_MODE 0x80
+#define SD_FUNC_GROUP_1 0x01
+#define SD_FUNC_GROUP_2 0x02
+#define SD_FUNC_GROUP_3 0x03
+#define SD_FUNC_GROUP_4 0x04
+#define SD_CHECK_SPEC_V1_1 0xFF
+
+#define NO_ARGUMENT 0x00
+#define CHECK_PATTERN 0x000000AA
+#define VOLTAGE_SUPPLY_RANGE 0x00000100
+#define SUPPORT_HIGH_AND_EXTENDED_CAPACITY 0x40000000
+#define SUPPORT_MAX_POWER_PERMANCE 0x10000000
+#define SUPPORT_1V8 0x01000000
+
+#define SWTICH_NO_ERR 0x00
+#define CARD_NOT_EXIST 0x01
+#define SPEC_NOT_SUPPORT 0x02
+#define CHECK_MODE_ERR 0x03
+#define CHECK_NOT_READY 0x04
+#define SWITCH_CRC_ERR 0x05
+#define SWITCH_MODE_ERR 0x06
+#define SWITCH_PASS 0x07
+
+#ifdef SUPPORT_SD_LOCK
+#define SD_ERASE 0x08
+#define SD_LOCK 0x04
+#define SD_UNLOCK 0x00
+#define SD_CLR_PWD 0x02
+#define SD_SET_PWD 0x01
+
+#define SD_PWD_LEN 0x10
+
+#define SD_LOCKED 0x80
+#define SD_LOCK_1BIT_MODE 0x40
+#define SD_PWD_EXIST 0x20
+#define SD_UNLOCK_POW_ON 0x01
+#define SD_SDR_RST 0x02
+
+#define SD_NOT_ERASE 0x00
+#define SD_UNDER_ERASING 0x01
+#define SD_COMPLETE_ERASE 0x02
+
+#define SD_RW_FORBIDDEN 0x0F
+
+#endif
+
+#define HS_SUPPORT 0x01
+#define SDR50_SUPPORT 0x02
+#define SDR104_SUPPORT 0x03
+#define DDR50_SUPPORT 0x04
+
+#define HS_SUPPORT_MASK 0x02
+#define SDR50_SUPPORT_MASK 0x04
+#define SDR104_SUPPORT_MASK 0x08
+#define DDR50_SUPPORT_MASK 0x10
+
+#define HS_QUERY_SWITCH_OK 0x01
+#define SDR50_QUERY_SWITCH_OK 0x02
+#define SDR104_QUERY_SWITCH_OK 0x03
+#define DDR50_QUERY_SWITCH_OK 0x04
+
+#define HS_SWITCH_BUSY 0x02
+#define SDR50_SWITCH_BUSY 0x04
+#define SDR104_SWITCH_BUSY 0x08
+#define DDR50_SWITCH_BUSY 0x10
+
+#define FUNCTION_GROUP1_SUPPORT_OFFSET 0x0D
+#define FUNCTION_GROUP1_QUERY_SWITCH_OFFSET 0x10
+#define FUNCTION_GROUP1_CHECK_BUSY_OFFSET 0x1D
+
+#define DRIVING_TYPE_A 0x01
+#define DRIVING_TYPE_B 0x00
+#define DRIVING_TYPE_C 0x02
+#define DRIVING_TYPE_D 0x03
+
+#define DRIVING_TYPE_A_MASK 0x02
+#define DRIVING_TYPE_B_MASK 0x01
+#define DRIVING_TYPE_C_MASK 0x04
+#define DRIVING_TYPE_D_MASK 0x08
+
+#define TYPE_A_QUERY_SWITCH_OK 0x01
+#define TYPE_B_QUERY_SWITCH_OK 0x00
+#define TYPE_C_QUERY_SWITCH_OK 0x02
+#define TYPE_D_QUERY_SWITCH_OK 0x03
+
+#define TYPE_A_SWITCH_BUSY 0x02
+#define TYPE_B_SWITCH_BUSY 0x01
+#define TYPE_C_SWITCH_BUSY 0x04
+#define TYPE_D_SWITCH_BUSY 0x08
+
+#define FUNCTION_GROUP3_SUPPORT_OFFSET 0x09
+#define FUNCTION_GROUP3_QUERY_SWITCH_OFFSET 0x0F
+#define FUNCTION_GROUP3_CHECK_BUSY_OFFSET 0x19
+
+#define CURRENT_LIMIT_200 0x00
+#define CURRENT_LIMIT_400 0x01
+#define CURRENT_LIMIT_600 0x02
+#define CURRENT_LIMIT_800 0x03
+
+#define CURRENT_LIMIT_200_MASK 0x01
+#define CURRENT_LIMIT_400_MASK 0x02
+#define CURRENT_LIMIT_600_MASK 0x04
+#define CURRENT_LIMIT_800_MASK 0x08
+
+#define CURRENT_LIMIT_200_QUERY_SWITCH_OK 0x00
+#define CURRENT_LIMIT_400_QUERY_SWITCH_OK 0x01
+#define CURRENT_LIMIT_600_QUERY_SWITCH_OK 0x02
+#define CURRENT_LIMIT_800_QUERY_SWITCH_OK 0x03
+
+#define CURRENT_LIMIT_200_SWITCH_BUSY 0x01
+#define CURRENT_LIMIT_400_SWITCH_BUSY 0x02
+#define CURRENT_LIMIT_600_SWITCH_BUSY 0x04
+#define CURRENT_LIMIT_800_SWITCH_BUSY 0x08
+
+#define FUNCTION_GROUP4_SUPPORT_OFFSET 0x07
+#define FUNCTION_GROUP4_QUERY_SWITCH_OFFSET 0x0F
+#define FUNCTION_GROUP4_CHECK_BUSY_OFFSET 0x17
+
+#define DATA_STRUCTURE_VER_OFFSET 0x11
+
+#define MAX_PHASE 31
+
+#define MMC_8BIT_BUS 0x0010
+#define MMC_4BIT_BUS 0x0020
+
+#define MMC_SWITCH_ERR 0x80
+
+#define SD_IO_3V3 0
+#define SD_IO_1V8 1
+
+#define TUNE_TX 0x00
+#define TUNE_RX 0x01
+
+#define CHANGE_TX 0x00
+#define CHANGE_RX 0x01
+
+#define DCM_HIGH_FREQUENCY_MODE 0x00
+#define DCM_LOW_FREQUENCY_MODE 0x01
+
+#define DCM_HIGH_FREQUENCY_MODE_SET 0x0C
+#define DCM_Low_FREQUENCY_MODE_SET 0x00
+
+#define MULTIPLY_BY_1 0x00
+#define MULTIPLY_BY_2 0x01
+#define MULTIPLY_BY_3 0x02
+#define MULTIPLY_BY_4 0x03
+#define MULTIPLY_BY_5 0x04
+#define MULTIPLY_BY_6 0x05
+#define MULTIPLY_BY_7 0x06
+#define MULTIPLY_BY_8 0x07
+#define MULTIPLY_BY_9 0x08
+#define MULTIPLY_BY_10 0x09
+
+#define DIVIDE_BY_2 0x01
+#define DIVIDE_BY_3 0x02
+#define DIVIDE_BY_4 0x03
+#define DIVIDE_BY_5 0x04
+#define DIVIDE_BY_6 0x05
+#define DIVIDE_BY_7 0x06
+#define DIVIDE_BY_8 0x07
+#define DIVIDE_BY_9 0x08
+#define DIVIDE_BY_10 0x09
+
+struct timing_phase_path {
+ int start;
+ int end;
+ int mid;
+ int len;
+};
+
+int sd_select_card(struct rtsx_chip *chip, int select);
+int sd_pull_ctl_enable(struct rtsx_chip *chip);
+int reset_sd_card(struct rtsx_chip *chip);
+int sd_switch_clock(struct rtsx_chip *chip);
+void sd_stop_seq_mode(struct rtsx_chip *chip);
+int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 start_sector, u16 sector_cnt);
+void sd_cleanup_work(struct rtsx_chip *chip);
+int sd_power_off_card3v3(struct rtsx_chip *chip);
+int release_sd_card(struct rtsx_chip *chip);
+#ifdef SUPPORT_CPRM
+int soft_reset_sd_card(struct rtsx_chip *chip);
+int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
+ u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, int special_check);
+int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type);
+
+int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+#endif
+
+#endif /* __REALTEK_RTSX_SD_H */
diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c
new file mode 100644
index 0000000..312b9f9
--- /dev/null
+++ b/drivers/staging/rts5208/spi.c
@@ -0,0 +1,877 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+
+#include "rtsx.h"
+#include "rtsx_transport.h"
+#include "rtsx_scsi.h"
+#include "rtsx_card.h"
+#include "spi.h"
+
+static inline void spi_set_err_code(struct rtsx_chip *chip, u8 err_code)
+{
+ struct spi_info *spi = &(chip->spi);
+
+ spi->err_code = err_code;
+}
+
+static int spi_init(struct rtsx_chip *chip)
+{
+ RTSX_WRITE_REG(chip, SPI_CONTROL, 0xFF,
+ CS_POLARITY_LOW | DTO_MSB_FIRST | SPI_MASTER | SPI_MODE0 |
+ SPI_AUTO);
+ RTSX_WRITE_REG(chip, SPI_TCTL, EDO_TIMING_MASK, SAMPLE_DELAY_HALF);
+
+ return STATUS_SUCCESS;
+}
+
+static int spi_set_init_para(struct rtsx_chip *chip)
+{
+ struct spi_info *spi = &(chip->spi);
+ int retval;
+
+ RTSX_WRITE_REG(chip, SPI_CLK_DIVIDER1, 0xFF, (u8)(spi->clk_div >> 8));
+ RTSX_WRITE_REG(chip, SPI_CLK_DIVIDER0, 0xFF, (u8)(spi->clk_div));
+
+ retval = switch_clock(chip, spi->spi_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = select_card(chip, SPI_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_CLK_EN, SPI_CLK_EN, SPI_CLK_EN);
+ RTSX_WRITE_REG(chip, CARD_OE, SPI_OUTPUT_EN, SPI_OUTPUT_EN);
+
+ wait_timeout(10);
+
+ retval = spi_init(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sf_polling_status(struct rtsx_chip *chip, int msec)
+{
+ int retval;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, SPI_RDSR);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_POLLING_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, msec);
+ if (retval < 0) {
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_BUSY_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
+{
+ struct spi_info *spi = &(chip->spi);
+ int retval;
+
+ if (!spi->write_en)
+ return STATUS_SUCCESS;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_C_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0) {
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
+{
+ struct spi_info *spi = &(chip->spi);
+ int retval;
+
+ if (!spi->write_en)
+ return STATUS_SUCCESS;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_C_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0) {
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
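+/* Queue the command, transfer length and (optionally) 24-bit address
+ * registers for a serial-flash data-out transfer; the caller sends the
+ * batch.
+ */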
+static void sf_program(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr,
+ u16 len)
+{
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, (u8)len);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, (u8)(len >> 8));
+ if (addr_mode) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
+ (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
+ (u8)(addr >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CADO_MODE0);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CDO_MODE0);
+ }
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+}
+
+static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr)
+{
+ int retval;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ if (addr_mode) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
+ (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
+ (u8)(addr >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_C_MODE0);
+ }
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0) {
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int spi_init_eeprom(struct rtsx_chip *chip)
+{
+ int retval;
+ int clk;
+
+ if (chip->asic_code)
+ clk = 30;
+ else
+ clk = CLK_30;
+
+ RTSX_WRITE_REG(chip, SPI_CLK_DIVIDER1, 0xFF, 0x00);
+ RTSX_WRITE_REG(chip, SPI_CLK_DIVIDER0, 0xFF, 0x27);
+
+ retval = switch_clock(chip, clk);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = select_card(chip, SPI_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_CLK_EN, SPI_CLK_EN, SPI_CLK_EN);
+ RTSX_WRITE_REG(chip, CARD_OE, SPI_OUTPUT_EN, SPI_OUTPUT_EN);
+
+ wait_timeout(10);
+
+ RTSX_WRITE_REG(chip, SPI_CONTROL, 0xFF,
+ CS_POLARITY_HIGH | SPI_EEPROM_AUTO);
+ RTSX_WRITE_REG(chip, SPI_TCTL, EDO_TIMING_MASK, SAMPLE_DELAY_HALF);
+
+ return STATUS_SUCCESS;
+}
+
+static int spi_eeprom_program_enable(struct rtsx_chip *chip)
+{
+ int retval;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x86);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x13);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int spi_erase_eeprom_chip(struct rtsx_chip *chip)
+{
+ int retval;
+
+ retval = spi_init_eeprom(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = spi_eeprom_program_enable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_GPIO_DIR, 0x01, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x12);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x84);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_GPIO_DIR, 0x01, 0x01);
+
+ return STATUS_SUCCESS;
+}
+
+int spi_erase_eeprom_byte(struct rtsx_chip *chip, u16 addr)
+{
+ int retval;
+
+ retval = spi_init_eeprom(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = spi_eeprom_program_enable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_GPIO_DIR, 0x01, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x07);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_GPIO_DIR, 0x01, 0x01);
+
+ return STATUS_SUCCESS;
+}
+
+
+int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val)
+{
+ int retval;
+ u8 data;
+
+ retval = spi_init_eeprom(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_GPIO_DIR, 0x01, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x06);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CADI_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(5);
+ RTSX_READ_REG(chip, SPI_DATA, &data);
+
+ if (val)
+ *val = data;
+
+ RTSX_WRITE_REG(chip, CARD_GPIO_DIR, 0x01, 0x01);
+
+ return STATUS_SUCCESS;
+}
+
+int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val)
+{
+ int retval;
+
+ retval = spi_init_eeprom(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = spi_eeprom_program_enable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_GPIO_DIR, 0x01, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x05);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, val);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x4E);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_GPIO_DIR, 0x01, 0x01);
+
+ return STATUS_SUCCESS;
+}
+
+
+int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct spi_info *spi = &(chip->spi);
+
+ RTSX_DEBUGP("spi_get_status: err_code = 0x%x\n", spi->err_code);
+ rtsx_stor_set_xfer_buf(&(spi->err_code),
+ min_t(int, scsi_bufflen(srb), 1), srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - 1);
+
+ return STATUS_SUCCESS;
+}
+
+int spi_set_parameter(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct spi_info *spi = &(chip->spi);
+
+ spi_set_err_code(chip, SPI_NO_ERR);
+
+ if (chip->asic_code)
+ spi->spi_clock = ((u16)(srb->cmnd[8]) << 8) | srb->cmnd[9];
+ else
+ spi->spi_clock = srb->cmnd[3];
+
+ spi->clk_div = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
+ spi->write_en = srb->cmnd[6];
+
+ RTSX_DEBUGP("spi_set_parameter: spi_clock = %d, clk_div = %d, write_en = %d\n",
+ spi->spi_clock, spi->clk_div, spi->write_en);
+
+ return STATUS_SUCCESS;
+}
+
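+/*
+ * Vendor command: issue an identify/read instruction and return up to
+ * 512 bytes through the ping-pong buffer; cmnd[9] selects whether an
+ * address phase is sent.
+ */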
+int spi_read_flash_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ u16 len;
+ u8 *buf;
+
+ spi_set_err_code(chip, SPI_NO_ERR);
+
+ len = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
+ if (len > 512) {
+ spi_set_err_code(chip, SPI_INVALID_COMMAND);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = spi_set_init_para(chip);
+ if (retval != STATUS_SUCCESS) {
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, srb->cmnd[3]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, srb->cmnd[4]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, srb->cmnd[5]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, srb->cmnd[6]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, srb->cmnd[7]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, srb->cmnd[8]);
+
+ if (len == 0) {
+ if (srb->cmnd[9]) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0,
+ 0xFF, SPI_TRANSFER0_START | SPI_CA_MODE0);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0,
+ 0xFF, SPI_TRANSFER0_START | SPI_C_MODE0);
+ }
+ } else {
+ if (srb->cmnd[9]) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CADI_MODE0);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CDI_MODE0);
+ }
+ }
+
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0) {
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (len) {
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ retval = rtsx_read_ppbuf(chip, buf, len);
+ if (retval != STATUS_SUCCESS) {
+ spi_set_err_code(chip, SPI_READ_ERR);
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ scsi_set_resid(srb, 0);
+
+ kfree(buf);
+ }
+
+ return STATUS_SUCCESS;
+}
+
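+/*
+ * Read len bytes from the serial flash in SF_PAGE_LEN-sized DMA
+ * chunks; slow_read selects a 24-bit address phase, otherwise a
+ * 32-bit address field is shifted out (fast read).
+ */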
+int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ unsigned int index = 0, offset = 0;
+ u8 ins, slow_read;
+ u32 addr;
+ u16 len;
+ u8 *buf;
+
+ spi_set_err_code(chip, SPI_NO_ERR);
+
+ ins = srb->cmnd[3];
+ addr = ((u32)(srb->cmnd[4]) << 16) | ((u32)(srb->cmnd[5])
+ << 8) | srb->cmnd[6];
+ len = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
+ slow_read = srb->cmnd[9];
+
+ retval = spi_set_init_para(chip);
+ if (retval != STATUS_SUCCESS) {
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ buf = kmalloc(SF_PAGE_LEN, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ while (len) {
+ u16 pagelen = SF_PAGE_LEN - (u8)addr;
+
+ if (pagelen > len)
+ pagelen = len;
+
+ rtsx_init_cmd(chip);
+
+ trans_dma_enable(DMA_FROM_DEVICE, chip, 256, DMA_256);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
+
+ if (slow_read) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF,
+ (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
+ (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
+ (u8)(addr >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
+ (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
+ (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR3, 0xFF,
+ (u8)(addr >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_32);
+ }
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF,
+ (u8)(pagelen >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF,
+ (u8)pagelen);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CADI_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0,
+ SPI_TRANSFER0_END, SPI_TRANSFER0_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0,
+ DMA_FROM_DEVICE, 10000);
+ if (retval < 0) {
+ kfree(buf);
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index, &offset,
+ TO_XFER_BUF);
+
+ addr += pagelen;
+ len -= pagelen;
+ }
+
+ scsi_set_resid(srb, 0);
+ kfree(buf);
+
+ return STATUS_SUCCESS;
+}
+
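+/*
+ * Program the serial flash in one of three modes: byte program, AAI
+ * (auto address increment; the address is sent only with the first
+ * byte) or page program over DMA.
+ */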
+int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ u8 ins, program_mode;
+ u32 addr;
+ u16 len;
+ u8 *buf;
+ unsigned int index = 0, offset = 0;
+
+ spi_set_err_code(chip, SPI_NO_ERR);
+
+ ins = srb->cmnd[3];
+ addr = ((u32)(srb->cmnd[4]) << 16) | ((u32)(srb->cmnd[5])
+ << 8) | srb->cmnd[6];
+ len = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
+ program_mode = srb->cmnd[9];
+
+ retval = spi_set_init_para(chip);
+ if (retval != STATUS_SUCCESS) {
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (program_mode == BYTE_PROGRAM) {
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ while (len) {
+ retval = sf_enable_write(chip, SPI_WREN);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset,
+ FROM_XFER_BUF);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF,
+ buf[0]);
+ sf_program(chip, ins, 1, addr, 1);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0) {
+ kfree(buf);
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sf_polling_status(chip, 100);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ addr++;
+ len--;
+ }
+
+ kfree(buf);
+
+ } else if (program_mode == AAI_PROGRAM) {
+ int first_byte = 1;
+
+ retval = sf_enable_write(chip, SPI_WREN);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ while (len) {
+ rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset,
+ FROM_XFER_BUF);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF,
+ buf[0]);
+ if (first_byte) {
+ sf_program(chip, ins, 1, addr, 1);
+ first_byte = 0;
+ } else {
+ sf_program(chip, ins, 0, 0, 1);
+ }
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0) {
+ kfree(buf);
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sf_polling_status(chip, 100);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ len--;
+ }
+
+ kfree(buf);
+
+ retval = sf_disable_write(chip, SPI_WRDI);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sf_polling_status(chip, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else if (program_mode == PAGE_PROGRAM) {
+ buf = kmalloc(SF_PAGE_LEN, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ while (len) {
+ u16 pagelen = SF_PAGE_LEN - (u8)addr;
+
+ if (pagelen > len)
+ pagelen = len;
+
+ retval = sf_enable_write(chip, SPI_WREN);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_init_cmd(chip);
+
+ trans_dma_enable(DMA_TO_DEVICE, chip, 256, DMA_256);
+ sf_program(chip, ins, 1, addr, pagelen);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index,
+ &offset, FROM_XFER_BUF);
+
+ retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0,
+ DMA_TO_DEVICE, 100);
+ if (retval < 0) {
+ kfree(buf);
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sf_polling_status(chip, 100);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ addr += pagelen;
+ len -= pagelen;
+ }
+
+ kfree(buf);
+ } else {
+ spi_set_err_code(chip, SPI_INVALID_COMMAND);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
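+/* Erase one addressed region (PAGE_ERASE) or the entire chip (CHIP_ERASE). */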
+int spi_erase_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ u8 ins, erase_mode;
+ u32 addr;
+
+ spi_set_err_code(chip, SPI_NO_ERR);
+
+ ins = srb->cmnd[3];
+ addr = ((u32)(srb->cmnd[4]) << 16) | ((u32)(srb->cmnd[5])
+ << 8) | srb->cmnd[6];
+ erase_mode = srb->cmnd[9];
+
+ retval = spi_set_init_para(chip);
+ if (retval != STATUS_SUCCESS) {
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (erase_mode == PAGE_ERASE) {
+ retval = sf_enable_write(chip, SPI_WREN);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sf_erase(chip, ins, 1, addr);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else if (erase_mode == CHIP_ERASE) {
+ retval = sf_enable_write(chip, SPI_WREN);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sf_erase(chip, ins, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ spi_set_err_code(chip, SPI_INVALID_COMMAND);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
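+/*
+ * Write the serial flash status register; the new value is staged in
+ * the ping-pong buffer before the transfer is started.
+ */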
+int spi_write_flash_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ u8 ins, status, ewsr;
+
+ ins = srb->cmnd[3];
+ status = srb->cmnd[4];
+ ewsr = srb->cmnd[5];
+
+ retval = spi_set_init_para(chip);
+ if (retval != STATUS_SUCCESS) {
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sf_enable_write(chip, ewsr);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF, status);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CDO_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
diff --git a/drivers/staging/rts5208/spi.h b/drivers/staging/rts5208/spi.h
new file mode 100644
index 0000000..fc824b5
--- /dev/null
+++ b/drivers/staging/rts5208/spi.h
@@ -0,0 +1,65 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_SPI_H
+#define __REALTEK_RTSX_SPI_H
+
+/* SPI operation error */
+#define SPI_NO_ERR 0x00
+#define SPI_HW_ERR 0x01
+#define SPI_INVALID_COMMAND 0x02
+#define SPI_READ_ERR 0x03
+#define SPI_WRITE_ERR 0x04
+#define SPI_ERASE_ERR 0x05
+#define SPI_BUSY_ERR 0x06
+
+/* Serial flash instruction */
+#define SPI_READ 0x03
+#define SPI_FAST_READ 0x0B
+#define SPI_WREN 0x06
+#define SPI_WRDI 0x04
+#define SPI_RDSR 0x05
+
+#define SF_PAGE_LEN 256
+
+#define BYTE_PROGRAM 0
+#define AAI_PROGRAM 1
+#define PAGE_PROGRAM 2
+
+#define PAGE_ERASE 0
+#define CHIP_ERASE 1
+
+int spi_erase_eeprom_chip(struct rtsx_chip *chip);
+int spi_erase_eeprom_byte(struct rtsx_chip *chip, u16 addr);
+int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val);
+int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val);
+int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int spi_set_parameter(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int spi_read_flash_id(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int spi_erase_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int spi_write_flash_status(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+
+#endif /* __REALTEK_RTSX_SPI_H */
diff --git a/drivers/staging/rts5208/trace.h b/drivers/staging/rts5208/trace.h
new file mode 100644
index 0000000..0f177fb
--- /dev/null
+++ b/drivers/staging/rts5208/trace.h
@@ -0,0 +1,93 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_TRACE_H
+#define __REALTEK_RTSX_TRACE_H
+
+#define _MSG_TRACE
+
+#ifdef _MSG_TRACE
+static inline char *filename(char *path)
+{
+ char *ptr;
+
+ if (path == NULL)
+ return NULL;
+
+ ptr = path;
+
+ while (*ptr != '\0') {
+ if ((*ptr == '\\') || (*ptr == '/'))
+ path = ptr + 1;
+
+ ptr++;
+ }
+
+ return path;
+}
+
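+/*
+ * Record the failing file, function and line in chip->trace_msg[], a
+ * circular buffer of TRACE_ITEM_CNT entries, before returning or
+ * jumping to the error label.
+ */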
+#define TRACE_RET(chip, ret) \
+ do { \
+ char *_file = filename(__FILE__); \
+ RTSX_DEBUGP("[%s][%s]:[%d]\n", _file, __func__, __LINE__); \
+ (chip)->trace_msg[(chip)->msg_idx].line = (u16)(__LINE__); \
+ strncpy((chip)->trace_msg[(chip)->msg_idx].func, __func__, MSG_FUNC_LEN-1); \
+ strncpy((chip)->trace_msg[(chip)->msg_idx].file, _file, MSG_FILE_LEN-1); \
+ get_current_time((chip)->trace_msg[(chip)->msg_idx].timeval_buf, TIME_VAL_LEN); \
+ (chip)->trace_msg[(chip)->msg_idx].valid = 1; \
+ (chip)->msg_idx++; \
+ if ((chip)->msg_idx >= TRACE_ITEM_CNT) { \
+ (chip)->msg_idx = 0; \
+ } \
+ return ret; \
+ } while (0)
+
+#define TRACE_GOTO(chip, label) \
+ do { \
+ char *_file = filename(__FILE__); \
+ RTSX_DEBUGP("[%s][%s]:[%d]\n", _file, __func__, __LINE__); \
+ (chip)->trace_msg[(chip)->msg_idx].line = (u16)(__LINE__); \
+ strncpy((chip)->trace_msg[(chip)->msg_idx].func, __func__, MSG_FUNC_LEN-1); \
+ strncpy((chip)->trace_msg[(chip)->msg_idx].file, _file, MSG_FILE_LEN-1); \
+ get_current_time((chip)->trace_msg[(chip)->msg_idx].timeval_buf, TIME_VAL_LEN); \
+ (chip)->trace_msg[(chip)->msg_idx].valid = 1; \
+ (chip)->msg_idx++; \
+ if ((chip)->msg_idx >= TRACE_ITEM_CNT) { \
+ (chip)->msg_idx = 0; \
+ } \
+ goto label; \
+ } while (0)
+#else
+#define TRACE_RET(chip, ret) return ret
+#define TRACE_GOTO(chip, label) goto label
+#endif
+
+#ifdef CONFIG_RTS5208_DEBUG
+#define RTSX_DUMP(buf, buf_len) \
+ print_hex_dump(KERN_DEBUG, RTSX_STOR, DUMP_PREFIX_NONE, \
+ 16, 1, (buf), (buf_len), false)
+#else
+#define RTSX_DUMP(buf, buf_len)
+#endif
+
+#endif /* __REALTEK_RTSX_TRACE_H */
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
new file mode 100644
index 0000000..6aef53d
--- /dev/null
+++ b/drivers/staging/rts5208/xd.c
@@ -0,0 +1,2088 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include "rtsx.h"
+#include "rtsx_transport.h"
+#include "rtsx_scsi.h"
+#include "rtsx_card.h"
+#include "xd.h"
+
+static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no);
+static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk, u16 logoff,
+ u8 start_page, u8 end_page);
+
+static inline void xd_set_err_code(struct rtsx_chip *chip, u8 err_code)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+
+ xd_card->err_code = err_code;
+}
+
+static inline int xd_check_err_code(struct rtsx_chip *chip, u8 err_code)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+
+ return (xd_card->err_code == err_code);
+}
+
+static int xd_set_init_para(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+
+ if (chip->asic_code)
+ xd_card->xd_clock = 47;
+ else
+ xd_card->xd_clock = CLK_50;
+
+ retval = switch_clock(chip, xd_card->xd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_switch_clock(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+
+ retval = select_card(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = switch_clock(chip, xd_card->xd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len)
+{
+ int retval, i;
+ u8 *ptr;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, id_cmd);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_READ_ID);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
+ XD_TRANSFER_END);
+
+ for (i = 0; i < 4; i++)
+ rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_ADDRESS1 + i), 0, 0);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 20);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+ if (id_buf && buf_len) {
+ if (buf_len > 4)
+ buf_len = 4;
+ memcpy(id_buf, ptr, buf_len);
+ }
+
+ return STATUS_SUCCESS;
+}
+
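+/* Queue the address-cycle register writes for a read/write or erase access. */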
+static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+
+ switch (mode) {
+ case XD_RW_ADDR:
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 0xFF, (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
+ 0xFF, (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS3,
+ 0xFF, (u8)(addr >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
+ xd_card->addr_cycle | XD_CALC_ECC | XD_BA_NO_TRANSFORM);
+ break;
+
+ case XD_ERASE_ADDR:
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1,
+ 0xFF, (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
+ 0xFF, (u8)(addr >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
+ (xd_card->addr_cycle - 1) | XD_CALC_ECC |
+ XD_BA_NO_TRANSFORM);
+ break;
+
+ default:
+ break;
+ }
+}
+
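+/* Read a page's 11 redundant-area bytes (status, block address, parity). */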
+static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
+ u8 *buf, int buf_len)
+{
+ int retval, i;
+
+ rtsx_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
+ 0xFF, XD_TRANSFER_START | XD_READ_REDUNDANT);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ for (i = 0; i < 6; i++)
+ rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_PAGE_STATUS + i),
+ 0, 0);
+ for (i = 0; i < 4; i++)
+ rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_RESERVED0 + i),
+ 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, XD_PARITY, 0, 0);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 500);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (buf && buf_len) {
+ u8 *ptr = rtsx_get_cmd_data(chip) + 1;
+
+ if (buf_len > 11)
+ buf_len = 11;
+ memcpy(buf, ptr, buf_len);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
+ u8 *buf, int buf_len)
+{
+ int retval, i;
+
+ if (!buf || (buf_len < 0))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ for (i = 0; i < buf_len; i++)
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + offset + i,
+ 0, 0);
+
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ memcpy(buf, rtsx_get_cmd_data(chip), buf_len);
+
+ return STATUS_SUCCESS;
+}
+
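+/*
+ * Read one page into the ping-pong buffer and fetch the CIS bytes,
+ * applying single-bit ECC correction; the second 256-byte half is
+ * used when the first half is uncorrectable.
+ */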
+static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
+ int buf_len)
+{
+ int retval;
+ u8 reg;
+
+ if (!buf || (buf_len < 10))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
+ XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_READ_PAGES);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
+ XD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 250);
+ if (retval == -ETIMEDOUT) {
+ rtsx_clear_xd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_READ_REG(chip, XD_PAGE_STATUS, &reg);
+ if (reg != XD_GPG) {
+ rtsx_clear_xd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_READ_REG(chip, XD_CTL, &reg);
+ if (!(reg & XD_ECC1_ERROR) || !(reg & XD_ECC1_UNCORRECTABLE)) {
+ retval = xd_read_data_from_ppb(chip, 0, buf, buf_len);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ if (reg & XD_ECC1_ERROR) {
+ u8 ecc_bit, ecc_byte;
+
+ RTSX_READ_REG(chip, XD_ECC_BIT1, &ecc_bit);
+ RTSX_READ_REG(chip, XD_ECC_BYTE1, &ecc_byte);
+
+ RTSX_DEBUGP("ECC_BIT1 = 0x%x, ECC_BYTE1 = 0x%x\n",
+ ecc_bit, ecc_byte);
+ if (ecc_byte < buf_len) {
+ RTSX_DEBUGP("Before correct: 0x%x\n",
+ buf[ecc_byte]);
+ buf[ecc_byte] ^= (1 << ecc_bit);
+ RTSX_DEBUGP("After correct: 0x%x\n",
+ buf[ecc_byte]);
+ }
+ }
+ } else if (!(reg & XD_ECC2_ERROR) || !(reg & XD_ECC2_UNCORRECTABLE)) {
+ rtsx_clear_xd_error(chip);
+
+ retval = xd_read_data_from_ppb(chip, 256, buf, buf_len);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ if (reg & XD_ECC2_ERROR) {
+ u8 ecc_bit, ecc_byte;
+
+ RTSX_READ_REG(chip, XD_ECC_BIT2, &ecc_bit);
+ RTSX_READ_REG(chip, XD_ECC_BYTE2, &ecc_byte);
+
+ RTSX_DEBUGP("ECC_BIT2 = 0x%x, ECC_BYTE2 = 0x%x\n",
+ ecc_bit, ecc_byte);
+ if (ecc_byte < buf_len) {
+ RTSX_DEBUGP("Before correct: 0x%x\n",
+ buf[ecc_byte]);
+ buf[ecc_byte] ^= (1 << ecc_bit);
+ RTSX_DEBUGP("After correct: 0x%x\n",
+ buf[ecc_byte]);
+ }
+ }
+ } else {
+ rtsx_clear_xd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static void xd_fill_pull_ctl_disable(struct rtsx_chip *chip)
+{
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
+ XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
+ XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
+ XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
+ XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
+ MS_D5_PD | MS_D4_PD);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
+ 0xFF, 0x55);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
+ 0xFF, 0x55);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
+ 0xFF, 0x4B);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
+ 0xFF, 0x69);
+ }
+ }
+}
+
+static void xd_fill_pull_ctl_stage1_barossa(struct rtsx_chip *chip)
+{
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x4B);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ }
+}
+
+static void xd_fill_pull_ctl_enable(struct rtsx_chip *chip)
+{
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
+ XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
+ XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
+ XD_WP_PD | XD_CE_PU | XD_CLE_PD | XD_CD_PU);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
+ XD_RDY_PU | XD_WE_PU | XD_RE_PU | XD_ALE_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
+ MS_D5_PD | MS_D4_PD);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
+ 0xFF, 0x55);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
+ 0xFF, 0x55);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
+ 0xFF, 0x53);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
+ 0xFF, 0xA9);
+ }
+ }
+}
+
+static int xd_pull_ctl_disable(struct rtsx_chip *chip)
+{
+ if (CHECK_PID(chip, 0x5208)) {
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF,
+ XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF,
+ XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF,
+ XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF,
+ XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL6, 0xFF, MS_D5_PD | MS_D4_PD);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF, 0x55);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF, 0x55);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF, 0x4B);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF, 0x69);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
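+/*
+ * Power-cycle and configure the xD interface, probe the interface
+ * timing via READ_ID, derive the card geometry from the device code
+ * and locate the CIS block.
+ */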
+static int reset_xd(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval, i, j;
+ u8 *ptr, id_buf[4], redunt[11];
+
+ retval = select_card(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 0xFF,
+ XD_PGSTS_NOT_FF);
+ if (chip->asic_code) {
+ if (!CHECK_PID(chip, 0x5288))
+ xd_fill_pull_ctl_disable(chip);
+ else
+ xd_fill_pull_ctl_stage1_barossa(chip);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
+ (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3) | 0x20);
+ }
+
+ if (!chip->ft2_fast_mode)
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_INIT,
+ XD_NO_AUTO_PWR_OFF, 0);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!chip->ft2_fast_mode) {
+ retval = card_power_off(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(250);
+
+ rtsx_init_cmd(chip);
+
+ if (chip->asic_code) {
+ xd_fill_pull_ctl_enable(chip);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
+ (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2) |
+ 0x20);
+ }
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = card_power_on(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+#ifdef SUPPORT_OCP
+ wait_timeout(50);
+ if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n",
+ chip->ocp_stat);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ }
+
+ rtsx_init_cmd(chip);
+
+ if (chip->ft2_fast_mode) {
+ if (chip->asic_code) {
+ xd_fill_pull_ctl_enable(chip);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
+ (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2) |
+ 0x20);
+ }
+ }
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, XD_OUTPUT_EN);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CTL, XD_CE_DISEN, XD_CE_DISEN);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!chip->ft2_fast_mode)
+ wait_timeout(200);
+
+ retval = xd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* Read ID to check if the timing setting is right */
+ for (i = 0; i < 4; i++) {
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DTCTL, 0xFF,
+ XD_TIME_SETUP_STEP * 3 +
+ XD_TIME_RW_STEP * (2 + i) + XD_TIME_RWN_STEP * i);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CATCTL, 0xFF,
+ XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP * (4 + i) +
+ XD_TIME_RWN_STEP * (3 + i));
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_RESET);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+
+ RTSX_DEBUGP("XD_DAT: 0x%x, XD_CTL: 0x%x\n", ptr[0], ptr[1]);
+
+ if (((ptr[0] & READY_FLAG) != READY_STATE) ||
+ !(ptr[1] & XD_RDY))
+ continue;
+
+ retval = xd_read_id(chip, READ_ID, id_buf, 4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("READ_ID: 0x%x 0x%x 0x%x 0x%x\n",
+ id_buf[0], id_buf[1], id_buf[2], id_buf[3]);
+
+ xd_card->device_code = id_buf[1];
+
+ /* Check if the xD card is supported */
+ switch (xd_card->device_code) {
+ case XD_4M_X8_512_1:
+ case XD_4M_X8_512_2:
+ xd_card->block_shift = 4;
+ xd_card->page_off = 0x0F;
+ xd_card->addr_cycle = 3;
+ xd_card->zone_cnt = 1;
+ xd_card->capacity = 8000;
+ XD_SET_4MB(xd_card);
+ break;
+ case XD_8M_X8_512:
+ xd_card->block_shift = 4;
+ xd_card->page_off = 0x0F;
+ xd_card->addr_cycle = 3;
+ xd_card->zone_cnt = 1;
+ xd_card->capacity = 16000;
+ break;
+ case XD_16M_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 3;
+ xd_card->zone_cnt = 1;
+ xd_card->capacity = 32000;
+ break;
+ case XD_32M_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 3;
+ xd_card->zone_cnt = 2;
+ xd_card->capacity = 64000;
+ break;
+ case XD_64M_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 4;
+ xd_card->capacity = 128000;
+ break;
+ case XD_128M_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 8;
+ xd_card->capacity = 256000;
+ break;
+ case XD_256M_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 16;
+ xd_card->capacity = 512000;
+ break;
+ case XD_512M_X8:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 32;
+ xd_card->capacity = 1024000;
+ break;
+ case xD_1G_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 64;
+ xd_card->capacity = 2048000;
+ break;
+ case xD_2G_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 128;
+ xd_card->capacity = 4096000;
+ break;
+ default:
+ continue;
+ }
+
+ /* Confirm timing setting */
+ for (j = 0; j < 10; j++) {
+ retval = xd_read_id(chip, READ_ID, id_buf, 4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (id_buf[1] != xd_card->device_code)
+ break;
+ }
+
+ if (j == 10)
+ break;
+ }
+
+ if (i == 4) {
+ xd_card->block_shift = 0;
+ xd_card->page_off = 0;
+ xd_card->addr_cycle = 0;
+ xd_card->capacity = 0;
+
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = xd_read_id(chip, READ_xD_ID, id_buf, 4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ RTSX_DEBUGP("READ_xD_ID: 0x%x 0x%x 0x%x 0x%x\n",
+ id_buf[0], id_buf[1], id_buf[2], id_buf[3]);
+ if (id_buf[2] != XD_ID_CODE)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* Search CIS block */
+ for (i = 0; i < 24; i++) {
+ u32 page_addr;
+
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ page_addr = (u32)i << xd_card->block_shift;
+
+ for (j = 0; j < 3; j++) {
+ retval = xd_read_redundant(chip, page_addr, redunt, 11);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (j == 3)
+ continue;
+
+ if (redunt[BLOCK_STATUS] != XD_GBLK)
+ continue;
+
+ j = 0;
+ if (redunt[PAGE_STATUS] != XD_GPG) {
+ for (j = 1; j <= 8; j++) {
+ retval = xd_read_redundant(chip, page_addr + j,
+ redunt, 11);
+ if (retval == STATUS_SUCCESS) {
+ if (redunt[PAGE_STATUS] == XD_GPG)
+ break;
+ }
+ }
+
+ if (j == 9)
+ break;
+ }
+
+ /* Check CIS data */
+ if ((redunt[BLOCK_STATUS] == XD_GBLK) &&
+ (redunt[PARITY] & XD_BA1_ALL0)) {
+ u8 buf[10];
+
+ page_addr += j;
+
+ retval = xd_read_cis(chip, page_addr, buf, 10);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((buf[0] == 0x01) && (buf[1] == 0x03) &&
+ (buf[2] == 0xD9)
+ && (buf[3] == 0x01) && (buf[4] == 0xFF)
+ && (buf[5] == 0x18) && (buf[6] == 0x02)
+ && (buf[7] == 0xDF) && (buf[8] == 0x01)
+ && (buf[9] == 0x20)) {
+ xd_card->cis_block = (u16)i;
+ }
+ }
+
+ break;
+ }
+
+ RTSX_DEBUGP("CIS block: 0x%x\n", xd_card->cis_block);
+ if (xd_card->cis_block == 0xFFFF)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ chip->capacity[chip->card2lun[XD_CARD]] = xd_card->capacity;
+
+ return STATUS_SUCCESS;
+}
+
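+/* Return 1 if the redundant area describes a blank (fully erased) page. */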
+static int xd_check_data_blank(u8 *redunt)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (redunt[PAGE_STATUS + i] != 0xFF)
+ return 0;
+ }
+
+ if ((redunt[PARITY] & (XD_ECC1_ALL1 | XD_ECC2_ALL1))
+ != (XD_ECC1_ALL1 | XD_ECC2_ALL1))
+ return 0;
+
+ for (i = 0; i < 4; i++) {
+ if (redunt[RESERVED0 + i] != 0xFF)
+ return 0;
+ }
+
+ return 1;
+}
+
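+/* Extract the logical block address from redundant data, preferring BA1. */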
+static u16 xd_load_log_block_addr(u8 *redunt)
+{
+ u16 addr = 0xFFFF;
+
+ if (redunt[PARITY] & XD_BA1_BA2_EQL)
+ addr = ((u16)redunt[BLOCK_ADDR1_H] << 8) |
+ redunt[BLOCK_ADDR1_L];
+ else if (redunt[PARITY] & XD_BA1_VALID)
+ addr = ((u16)redunt[BLOCK_ADDR1_H] << 8) |
+ redunt[BLOCK_ADDR1_L];
+ else if (redunt[PARITY] & XD_BA2_VALID)
+ addr = ((u16)redunt[BLOCK_ADDR2_H] << 8) |
+ redunt[BLOCK_ADDR2_L];
+
+ return addr;
+}
+
+static int xd_init_l2p_tbl(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int size, i;
+
+ RTSX_DEBUGP("xd_init_l2p_tbl: zone_cnt = %d\n", xd_card->zone_cnt);
+
+ if (xd_card->zone_cnt < 1)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ size = xd_card->zone_cnt * sizeof(struct zone_entry);
+ RTSX_DEBUGP("Buffer size for l2p table is %d\n", size);
+
+ xd_card->zone = vmalloc(size);
+ if (!xd_card->zone)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ for (i = 0; i < xd_card->zone_cnt; i++) {
+ xd_card->zone[i].build_flag = 0;
+ xd_card->zone[i].l2p_table = NULL;
+ xd_card->zone[i].free_table = NULL;
+ xd_card->zone[i].get_index = 0;
+ xd_card->zone[i].set_index = 0;
+ xd_card->zone[i].unused_blk_cnt = 0;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static inline void free_zone(struct zone_entry *zone)
+{
+ RTSX_DEBUGP("free_zone\n");
+
+ if (!zone)
+ return;
+
+ zone->build_flag = 0;
+ zone->set_index = 0;
+ zone->get_index = 0;
+ zone->unused_blk_cnt = 0;
+ if (zone->l2p_table) {
+ vfree(zone->l2p_table);
+ zone->l2p_table = NULL;
+ }
+ if (zone->free_table) {
+ vfree(zone->free_table);
+ zone->free_table = NULL;
+ }
+}
+
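+/* Push a physical block onto the zone's free-block ring buffer. */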
+static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+ int zone_no;
+
+ zone_no = (int)phy_blk >> 10;
+ if (zone_no >= xd_card->zone_cnt) {
+ RTSX_DEBUGP("Set unused block to invalid zone (zone_no = %d, zone_cnt = %d)\n",
+ zone_no, xd_card->zone_cnt);
+ return;
+ }
+ zone = &(xd_card->zone[zone_no]);
+
+ if (zone->free_table == NULL) {
+ if (xd_build_l2p_tbl(chip, zone_no) != STATUS_SUCCESS)
+ return;
+ }
+
+ if ((zone->set_index >= XD_FREE_TABLE_CNT)
+ || (zone->set_index < 0)) {
+ free_zone(zone);
+ RTSX_DEBUGP("Set unused block fail, invalid set_index\n");
+ return;
+ }
+
+ RTSX_DEBUGP("Set unused block to index %d\n", zone->set_index);
+
+ zone->free_table[zone->set_index++] = (u16) (phy_blk & 0x3ff);
+ if (zone->set_index >= XD_FREE_TABLE_CNT)
+ zone->set_index = 0;
+ zone->unused_blk_cnt++;
+}
+
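+/* Pop a free physical block from the zone's ring buffer, or BLK_NOT_FOUND. */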
+static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+ u32 phy_blk;
+
+ if (zone_no >= xd_card->zone_cnt) {
+ RTSX_DEBUGP("Get unused block from invalid zone (zone_no = %d, zone_cnt = %d)\n",
+ zone_no, xd_card->zone_cnt);
+ return BLK_NOT_FOUND;
+ }
+ zone = &(xd_card->zone[zone_no]);
+
+ if ((zone->unused_blk_cnt == 0) ||
+ (zone->set_index == zone->get_index)) {
+ free_zone(zone);
+ RTSX_DEBUGP("Get unused block fail, no unused block available\n");
+ return BLK_NOT_FOUND;
+ }
+ if ((zone->get_index >= XD_FREE_TABLE_CNT) || (zone->get_index < 0)) {
+ free_zone(zone);
+ RTSX_DEBUGP("Get unused block fail, invalid get_index\n");
+ return BLK_NOT_FOUND;
+ }
+
+ RTSX_DEBUGP("Get unused block from index %d\n", zone->get_index);
+
+ phy_blk = zone->free_table[zone->get_index];
+ zone->free_table[zone->get_index++] = 0xFFFF;
+ if (zone->get_index >= XD_FREE_TABLE_CNT)
+ zone->get_index = 0;
+ zone->unused_blk_cnt--;
+
+ phy_blk += ((u32)(zone_no) << 10);
+ return phy_blk;
+}
+
+static void xd_set_l2p_tbl(struct rtsx_chip *chip,
+ int zone_no, u16 log_off, u16 phy_off)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+
+ zone = &(xd_card->zone[zone_no]);
+ zone->l2p_table[log_off] = phy_off;
+}
+
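+/*
+ * Look up the physical block mapped to a logical offset; if the entry
+ * is unmapped, claim a free block, initialise its redundant area and
+ * record the new mapping.
+ */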
+static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+ int retval;
+
+ zone = &(xd_card->zone[zone_no]);
+ if (zone->l2p_table[log_off] == 0xFFFF) {
+ u32 phy_blk = 0;
+ int i;
+
+#ifdef XD_DELAY_WRITE
+ retval = xd_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ RTSX_DEBUGP("In xd_get_l2p_tbl, delay write fail!\n");
+ return BLK_NOT_FOUND;
+ }
+#endif
+
+ if (zone->unused_blk_cnt <= 0) {
+ RTSX_DEBUGP("No unused block!\n");
+ return BLK_NOT_FOUND;
+ }
+
+ for (i = 0; i < zone->unused_blk_cnt; i++) {
+ phy_blk = xd_get_unused_block(chip, zone_no);
+ if (phy_blk == BLK_NOT_FOUND) {
+ RTSX_DEBUGP("No unused block available!\n");
+ return BLK_NOT_FOUND;
+ }
+
+ retval = xd_init_page(chip, phy_blk, log_off,
+ 0, xd_card->page_off + 1);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i >= zone->unused_blk_cnt) {
+ RTSX_DEBUGP("No good unused block available!\n");
+ return BLK_NOT_FOUND;
+ }
+
+ xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(phy_blk & 0x3FF));
+ return phy_blk;
+ }
+
+ return (u32)zone->l2p_table[log_off] + ((u32)(zone_no) << 10);
+}
+
+int reset_xd_card(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+
+ memset(xd_card, 0, sizeof(struct xd_info));
+
+ xd_card->block_shift = 0;
+ xd_card->page_off = 0;
+ xd_card->addr_cycle = 0;
+ xd_card->capacity = 0;
+ xd_card->zone_cnt = 0;
+ xd_card->cis_block = 0xFFFF;
+ xd_card->delay_write.delay_write_flag = 0;
+
+ retval = enable_card_clock(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = reset_xd(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = xd_init_l2p_tbl(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
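+/* Mark a block bad by writing XD_LATER_BBLK into its redundant area. */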
+static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+ u32 page_addr;
+ u8 reg = 0;
+
+ RTSX_DEBUGP("mark block 0x%x as bad block\n", phy_blk);
+
+ if (phy_blk == BLK_NOT_FOUND)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_LATER_BBLK);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR2_H, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR2_L, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED0, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED1, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED2, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED3, 0xFF, 0xFF);
+
+ page_addr = phy_blk << xd_card->block_shift;
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF,
+ xd_card->page_off + 1);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_WRITE_REDUNDANT);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 500);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+ rtsx_read_register(chip, XD_DAT, &reg);
+ if (reg & PROGRAM_ERROR)
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ else
+ xd_set_err_code(chip, XD_TO_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
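+/* Stamp the logical offset into the redundant area of pages start..end-1. */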
+static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
+ u16 logoff, u8 start_page, u8 end_page)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+ u32 page_addr;
+ u8 reg = 0;
+
+ RTSX_DEBUGP("Init block 0x%x\n", phy_blk);
+
+ if (start_page > end_page)
+ TRACE_RET(chip, STATUS_FAIL);
+ if (phy_blk == BLK_NOT_FOUND)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
+ 0xFF, (u8)(logoff >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)logoff);
+
+ page_addr = (phy_blk << xd_card->block_shift) + start_page;
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG,
+ XD_BA_TRANSFORM, XD_BA_TRANSFORM);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT,
+ 0xFF, (end_page - start_page));
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
+ 0xFF, XD_TRANSFER_START | XD_WRITE_REDUNDANT);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 500);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+ rtsx_read_register(chip, XD_DAT, &reg);
+ if (reg & PROGRAM_ERROR) {
+ xd_mark_bad_block(chip, phy_blk);
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ } else {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
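+/*
+ * Copy pages one at a time from old_blk to new_blk through the
+ * ping-pong buffer, tracking ECC failures on the source block and
+ * program failures on the destination block.
+ */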
+static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
+ u8 start_page, u8 end_page)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ u32 old_page, new_page;
+ u8 i, reg = 0;
+ int retval;
+
+ RTSX_DEBUGP("Copy page from block 0x%x to block 0x%x\n",
+ old_blk, new_blk);
+
+ if (start_page > end_page)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((old_blk == BLK_NOT_FOUND) || (new_blk == BLK_NOT_FOUND))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ old_page = (old_blk << xd_card->block_shift) + start_page;
+ new_page = (new_blk << xd_card->block_shift) + start_page;
+
+ XD_CLR_BAD_NEWBLK(xd_card);
+
+ RTSX_WRITE_REG(chip, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
+
+ for (i = start_page; i < end_page; i++) {
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ rtsx_clear_xd_error(chip);
+ xd_set_err_code(chip, XD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, old_page, XD_RW_ADDR);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
+ XD_AUTO_CHK_DATA_STATUS, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_READ_PAGES);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 500);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+ reg = 0;
+ rtsx_read_register(chip, XD_CTL, &reg);
+ if (reg & (XD_ECC1_ERROR | XD_ECC2_ERROR)) {
+ wait_timeout(100);
+
+ if (detect_card_cd(chip,
+ XD_CARD) != STATUS_SUCCESS) {
+ xd_set_err_code(chip, XD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (((reg & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ==
+ (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
+ || ((reg & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
+ (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
+ rtsx_write_register(chip,
+ XD_PAGE_STATUS, 0xFF,
+ XD_BPG);
+ rtsx_write_register(chip,
+ XD_BLOCK_STATUS, 0xFF,
+ XD_GBLK);
+ XD_SET_BAD_OLDBLK(xd_card);
+ RTSX_DEBUGP("old block 0x%x ecc error\n", old_blk);
+ }
+ } else {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (XD_CHK_BAD_OLDBLK(xd_card))
+ rtsx_clear_xd_error(chip);
+
+ rtsx_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, new_page, XD_RW_ADDR);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_WRITE_PAGES);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 300);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+ reg = 0;
+ rtsx_read_register(chip, XD_DAT, &reg);
+ if (reg & PROGRAM_ERROR) {
+ xd_mark_bad_block(chip, new_blk);
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ XD_SET_BAD_NEWBLK(xd_card);
+ } else {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ old_page++;
+ new_page++;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_reset_cmd(struct rtsx_chip *chip)
+{
+ int retval;
+ u8 *ptr;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
+ 0xFF, XD_TRANSFER_START | XD_RESET);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+ rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+ if (((ptr[0] & READY_FLAG) == READY_STATE) && (ptr[1] & XD_RDY))
+ return STATUS_SUCCESS;
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
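+/* Erase a block, retrying up to three times and marking it bad on failure. */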
+static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ u32 page_addr;
+ u8 reg = 0, *ptr;
+ int i, retval;
+
+ if (phy_blk == BLK_NOT_FOUND)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ page_addr = phy_blk << xd_card->block_shift;
+
+ for (i = 0; i < 3; i++) {
+ rtsx_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, page_addr, XD_ERASE_ADDR);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_ERASE);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+ rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 250);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+ rtsx_read_register(chip, XD_DAT, &reg);
+ if (reg & PROGRAM_ERROR) {
+ xd_mark_bad_block(chip, phy_blk);
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ xd_set_err_code(chip, XD_ERASE_FAIL);
+ }
+ retval = xd_reset_cmd(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ continue;
+ }
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+ if (*ptr & PROGRAM_ERROR) {
+ xd_mark_bad_block(chip, phy_blk);
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+ }
+
+ xd_mark_bad_block(chip, phy_blk);
+ xd_set_err_code(chip, XD_ERASE_FAIL);
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
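+/*
+ * Build the logical-to-physical table for one zone by scanning each
+ * block's redundant area; blank blocks go to the free list and stale
+ * duplicates of a logical address are erased.
+ */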
+static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+ int retval;
+ u32 start, end, i;
+ u16 max_logoff, cur_fst_page_logoff;
+ u16 cur_lst_page_logoff, ent_lst_page_logoff;
+ u8 redunt[11];
+
+ RTSX_DEBUGP("xd_build_l2p_tbl: %d\n", zone_no);
+
+ if (xd_card->zone == NULL) {
+ retval = xd_init_l2p_tbl(chip);
+ if (retval != STATUS_SUCCESS)
+ return retval;
+ }
+
+ if (xd_card->zone[zone_no].build_flag) {
+ RTSX_DEBUGP("l2p table of zone %d has been built\n", zone_no);
+ return STATUS_SUCCESS;
+ }
+
+ zone = &(xd_card->zone[zone_no]);
+
+ if (zone->l2p_table == NULL) {
+ zone->l2p_table = vmalloc(2000);
+ if (zone->l2p_table == NULL)
+ TRACE_GOTO(chip, Build_Fail);
+ }
+ memset((u8 *)(zone->l2p_table), 0xff, 2000);
+
+ if (zone->free_table == NULL) {
+ zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2);
+ if (zone->free_table == NULL)
+ TRACE_GOTO(chip, Build_Fail);
+ }
+ memset((u8 *)(zone->free_table), 0xff, XD_FREE_TABLE_CNT * 2);
+
+ if (zone_no == 0) {
+ if (xd_card->cis_block == 0xFFFF)
+ start = 0;
+ else
+ start = xd_card->cis_block + 1;
+ if (XD_CHK_4MB(xd_card)) {
+ end = 0x200;
+ max_logoff = 499;
+ } else {
+ end = 0x400;
+ max_logoff = 999;
+ }
+ } else {
+ start = (u32)(zone_no) << 10;
+ end = (u32)(zone_no + 1) << 10;
+ max_logoff = 999;
+ }
+
+ RTSX_DEBUGP("start block 0x%x, end block 0x%x\n", start, end);
+
+ zone->set_index = zone->get_index = 0;
+ zone->unused_blk_cnt = 0;
+
+ for (i = start; i < end; i++) {
+ u32 page_addr = i << xd_card->block_shift;
+ u32 phy_block;
+
+ retval = xd_read_redundant(chip, page_addr, redunt, 11);
+ if (retval != STATUS_SUCCESS)
+ continue;
+
+ if (redunt[BLOCK_STATUS] != 0xFF) {
+ RTSX_DEBUGP("bad block\n");
+ continue;
+ }
+
+ if (xd_check_data_blank(redunt)) {
+ RTSX_DEBUGP("blank block\n");
+ xd_set_unused_block(chip, i);
+ continue;
+ }
+
+ cur_fst_page_logoff = xd_load_log_block_addr(redunt);
+ if ((cur_fst_page_logoff == 0xFFFF) ||
+ (cur_fst_page_logoff > max_logoff)) {
+ retval = xd_erase_block(chip, i);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, i);
+ continue;
+ }
+
+ if ((zone_no == 0) && (cur_fst_page_logoff == 0) &&
+ (redunt[PAGE_STATUS] != XD_GPG))
+ XD_SET_MBR_FAIL(xd_card);
+
+ if (zone->l2p_table[cur_fst_page_logoff] == 0xFFFF) {
+ zone->l2p_table[cur_fst_page_logoff] = (u16)(i & 0x3FF);
+ continue;
+ }
+
+ phy_block = zone->l2p_table[cur_fst_page_logoff] +
+ ((u32)((zone_no) << 10));
+
+ page_addr = ((i + 1) << xd_card->block_shift) - 1;
+
+ retval = xd_read_redundant(chip, page_addr, redunt, 11);
+ if (retval != STATUS_SUCCESS)
+ continue;
+
+ cur_lst_page_logoff = xd_load_log_block_addr(redunt);
+ if (cur_lst_page_logoff == cur_fst_page_logoff) {
+ int m;
+
+ page_addr = ((phy_block + 1) <<
+ xd_card->block_shift) - 1;
+
+ for (m = 0; m < 3; m++) {
+ retval = xd_read_redundant(chip, page_addr,
+ redunt, 11);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+
+ if (m == 3) {
+ zone->l2p_table[cur_fst_page_logoff] =
+ (u16)(i & 0x3FF);
+ retval = xd_erase_block(chip, phy_block);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, phy_block);
+ continue;
+ }
+
+ ent_lst_page_logoff = xd_load_log_block_addr(redunt);
+ if (ent_lst_page_logoff != cur_fst_page_logoff) {
+ zone->l2p_table[cur_fst_page_logoff] =
+ (u16)(i & 0x3FF);
+ retval = xd_erase_block(chip, phy_block);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, phy_block);
+ continue;
+ } else {
+ retval = xd_erase_block(chip, i);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, i);
+ }
+ } else {
+ retval = xd_erase_block(chip, i);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, i);
+ }
+ }
+
+ if (XD_CHK_4MB(xd_card))
+ end = 500;
+ else
+ end = 1000;
+
+ i = 0;
+ for (start = 0; start < end; start++) {
+ if (zone->l2p_table[start] == 0xFFFF)
+ i++;
+ }
+
+ RTSX_DEBUGP("Block count %d, invalid L2P entry %d\n", end, i);
+ RTSX_DEBUGP("Total unused block: %d\n", zone->unused_blk_cnt);
+
+ if ((zone->unused_blk_cnt - i) < 1)
+ chip->card_wp |= XD_CARD;
+
+ zone->build_flag = 1;
+
+ return STATUS_SUCCESS;
+
+Build_Fail:
+ if (zone->l2p_table) {
+ vfree(zone->l2p_table);
+ zone->l2p_table = NULL;
+ }
+ if (zone->free_table) {
+ vfree(zone->free_table);
+ zone->free_table = NULL;
+ }
+
+ return STATUS_FAIL;
+}
+
+static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd)
+{
+ int retval;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, cmd);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_SET_CMD);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 200);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
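+/*
+ * DMA-read page_cnt pages from phy_blk. On an uncorrectable ECC error
+ * the block is copied to a fresh block, the L2P entry is remapped and
+ * the old block is retired as bad.
+ */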
+static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
+ u32 log_blk, u8 start_page, u8 end_page,
+ u8 *buf, unsigned int *index,
+ unsigned int *offset)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ u32 page_addr, new_blk;
+ u16 log_off;
+ u8 reg_val, page_cnt;
+ int zone_no, retval, i;
+
+ if (start_page > end_page)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ page_cnt = end_page - start_page;
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16)(log_blk % 1000);
+
+ if ((phy_blk & 0x3FF) == 0x3FF) {
+ for (i = 0; i < 256; i++) {
+ page_addr = ((u32)i) << xd_card->block_shift;
+
+ retval = xd_read_redundant(chip, page_addr, NULL, 0);
+ if (retval == STATUS_SUCCESS)
+ break;
+
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ xd_set_err_code(chip, XD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ page_addr = (phy_blk << xd_card->block_shift) + start_page;
+
+ rtsx_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_PPB_TO_SIE, XD_PPB_TO_SIE);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
+ XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
+
+ trans_dma_enable(chip->srb->sc_data_direction, chip,
+ page_cnt * 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_READ_PAGES);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END | XD_PPB_EMPTY, XD_TRANSFER_END | XD_PPB_EMPTY);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
+ scsi_sg_count(chip->srb),
+ index, offset, DMA_FROM_DEVICE,
+ chip->xd_timeout);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+
+ if (retval == -ETIMEDOUT) {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ TRACE_GOTO(chip, Fail);
+ }
+ }
+
+ return STATUS_SUCCESS;
+
+Fail:
+ RTSX_READ_REG(chip, XD_PAGE_STATUS, &reg_val);
+
+ if (reg_val != XD_GPG)
+ xd_set_err_code(chip, XD_PRG_ERROR);
+
+ RTSX_READ_REG(chip, XD_CTL, &reg_val);
+
+ if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
+ == (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
+ || ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))
+ == (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
+ wait_timeout(100);
+
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ xd_set_err_code(chip, XD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ xd_set_err_code(chip, XD_ECC_ERROR);
+
+ new_blk = xd_get_unused_block(chip, zone_no);
+ if (new_blk == NO_NEW_BLK) {
+ XD_CLR_BAD_OLDBLK(xd_card);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = xd_copy_page(chip, phy_blk, new_blk, 0,
+ xd_card->page_off + 1);
+ if (retval != STATUS_SUCCESS) {
+ if (!XD_CHK_BAD_NEWBLK(xd_card)) {
+ retval = xd_erase_block(chip, new_blk);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, new_blk);
+ } else {
+ XD_CLR_BAD_NEWBLK(xd_card);
+ }
+ XD_CLR_BAD_OLDBLK(xd_card);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
+ xd_erase_block(chip, phy_blk);
+ xd_mark_bad_block(chip, phy_blk);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
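+/* Complete a (possibly deferred) write: initialize or copy the remaining
+ * pages of new_blk, recycle or retire old_blk, and point the L2P entry
+ * at new_blk.
+ */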
+static int xd_finish_write(struct rtsx_chip *chip,
+ u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval, zone_no;
+ u16 log_off;
+
+ RTSX_DEBUGP("xd_finish_write, old_blk = 0x%x, new_blk = 0x%x, log_blk = 0x%x\n",
+ old_blk, new_blk, log_blk);
+
+ if (page_off > xd_card->page_off)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16)(log_blk % 1000);
+
+ if (old_blk == BLK_NOT_FOUND) {
+ retval = xd_init_page(chip, new_blk, log_off,
+ page_off, xd_card->page_off + 1);
+ if (retval != STATUS_SUCCESS) {
+ retval = xd_erase_block(chip, new_blk);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, new_blk);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ retval = xd_copy_page(chip, old_blk, new_blk,
+ page_off, xd_card->page_off + 1);
+ if (retval != STATUS_SUCCESS) {
+ if (!XD_CHK_BAD_NEWBLK(xd_card)) {
+ retval = xd_erase_block(chip, new_blk);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, new_blk);
+ }
+ XD_CLR_BAD_NEWBLK(xd_card);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = xd_erase_block(chip, old_blk);
+ if (retval == STATUS_SUCCESS) {
+ if (XD_CHK_BAD_OLDBLK(xd_card)) {
+ xd_mark_bad_block(chip, old_blk);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ } else {
+ xd_set_unused_block(chip, old_blk);
+ }
+ } else {
+ xd_set_err_code(chip, XD_NO_ERROR);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ }
+ }
+
+ xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
+
+ return STATUS_SUCCESS;
+}
+
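+/* Copy pages [0, page_off) from the old block into the new one so the
+ * pages in front of the write are preserved.
+ */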
+static int xd_prepare_write(struct rtsx_chip *chip,
+ u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
+{
+ int retval;
+
+ RTSX_DEBUGP("%s, old_blk = 0x%x, new_blk = 0x%x, log_blk = 0x%x, page_off = %d\n",
+ __func__, old_blk, new_blk, log_blk, (int)page_off);
+
+ if (page_off) {
+ retval = xd_copy_page(chip, old_blk, new_blk, 0, page_off);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
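+/* Program pages [start_page, end_page) of new_blk via DMA. Once the last
+ * page of the block has been written, old_blk is erased or retired and
+ * the L2P entry is switched to new_blk.
+ */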
+static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
+ u32 new_blk, u32 log_blk, u8 start_page,
+ u8 end_page, u8 *buf, unsigned int *index,
+ unsigned int *offset)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ u32 page_addr;
+ int zone_no, retval;
+ u16 log_off;
+ u8 page_cnt, reg_val;
+
+ RTSX_DEBUGP("%s, old_blk = 0x%x, new_blk = 0x%x, log_blk = 0x%x\n",
+ __func__, old_blk, new_blk, log_blk);
+
+ if (start_page > end_page)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ page_cnt = end_page - start_page;
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16)(log_blk % 1000);
+
+ page_addr = (new_blk << xd_card->block_shift) + start_page;
+
+ retval = xd_send_cmd(chip, READ1_1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
+ 0xFF, (u8)(log_off >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)log_off);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_GBLK);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_BA_TRANSFORM,
+ XD_BA_TRANSFORM);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+
+ trans_dma_enable(chip->srb->sc_data_direction, chip,
+ page_cnt * 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
+ 0xFF, XD_TRANSFER_START | XD_WRITE_PAGES);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
+ scsi_sg_count(chip->srb),
+ index, offset, DMA_TO_DEVICE, chip->xd_timeout);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+
+ if (retval == -ETIMEDOUT) {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ TRACE_GOTO(chip, Fail);
+ }
+ }
+
+ if (end_page == (xd_card->page_off + 1)) {
+ xd_card->delay_write.delay_write_flag = 0;
+
+ if (old_blk != BLK_NOT_FOUND) {
+ retval = xd_erase_block(chip, old_blk);
+ if (retval == STATUS_SUCCESS) {
+ if (XD_CHK_BAD_OLDBLK(xd_card)) {
+ xd_mark_bad_block(chip, old_blk);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ } else {
+ xd_set_unused_block(chip, old_blk);
+ }
+ } else {
+ xd_set_err_code(chip, XD_NO_ERROR);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ }
+ }
+ xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
+ }
+
+ return STATUS_SUCCESS;
+
+Fail:
+ RTSX_READ_REG(chip, XD_DAT, &reg_val);
+ if (reg_val & PROGRAM_ERROR) {
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ xd_mark_bad_block(chip, new_blk);
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+#ifdef XD_DELAY_WRITE
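+/* Flush a pending deferred write by completing the interrupted block
+ * update recorded in xd_card->delay_write.
+ */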
+int xd_delay_write(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
+ int retval;
+
+ if (delay_write->delay_write_flag) {
+ RTSX_DEBUGP("xd_delay_write\n");
+ retval = xd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ delay_write->delay_write_flag = 0;
+ retval = xd_finish_write(chip,
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ delay_write->logblock, delay_write->pageoff);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+#endif
+
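+/* Top-level xD read/write entry: converts start_sector into a logical
+ * block and page offset, builds the zone L2P table on demand and
+ * transfers the request block by block, deferring the final partial
+ * block update when XD_DELAY_WRITE is enabled.
+ */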
+int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 start_sector, u16 sector_cnt)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ unsigned int lun = SCSI_LUN(srb);
+#ifdef XD_DELAY_WRITE
+ struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
+#endif
+ int retval, zone_no;
+ unsigned int index = 0, offset = 0;
+ u32 log_blk, old_blk = 0, new_blk = 0;
+ u16 log_off, total_sec_cnt = sector_cnt;
+ u8 start_page, end_page = 0, page_cnt;
+ u8 *ptr;
+
+ xd_set_err_code(chip, XD_NO_ERROR);
+
+ xd_card->cleanup_counter = 0;
+
+ RTSX_DEBUGP("xd_rw: scsi_sg_count = %d\n", scsi_sg_count(srb));
+
+ ptr = (u8 *)scsi_sglist(srb);
+
+ retval = xd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ chip->card_fail |= XD_CARD;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ log_blk = start_sector >> xd_card->block_shift;
+ start_page = (u8)start_sector & xd_card->page_off;
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16)(log_blk % 1000);
+
+ if (xd_card->zone[zone_no].build_flag == 0) {
+ retval = xd_build_l2p_tbl(chip, zone_no);
+ if (retval != STATUS_SUCCESS) {
+ chip->card_fail |= XD_CARD;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
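+	/* For writes, merge with a deferred write pending on the same
+	 * logical block when possible; otherwise flush it and pick a
+	 * fresh physical block for the new data.
+	 */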
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+#ifdef XD_DELAY_WRITE
+ if (delay_write->delay_write_flag &&
+ (delay_write->logblock == log_blk) &&
+ (start_page > delay_write->pageoff)) {
+ delay_write->delay_write_flag = 0;
+ if (delay_write->old_phyblock != BLK_NOT_FOUND) {
+ retval = xd_copy_page(chip,
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ delay_write->pageoff, start_page);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ old_blk = delay_write->old_phyblock;
+ new_blk = delay_write->new_phyblock;
+ } else if (delay_write->delay_write_flag &&
+ (delay_write->logblock == log_blk) &&
+ (start_page == delay_write->pageoff)) {
+ delay_write->delay_write_flag = 0;
+ old_blk = delay_write->old_phyblock;
+ new_blk = delay_write->new_phyblock;
+ } else {
+ retval = xd_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
+ new_blk = xd_get_unused_block(chip, zone_no);
+ if ((old_blk == BLK_NOT_FOUND) ||
+ (new_blk == BLK_NOT_FOUND)) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = xd_prepare_write(chip, old_blk, new_blk,
+ log_blk, start_page);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, XD_CARD) !=
+ STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#ifdef XD_DELAY_WRITE
+ }
+#endif
+ } else {
+#ifdef XD_DELAY_WRITE
+ retval = xd_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+
+ old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
+ if (old_blk == BLK_NOT_FOUND) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ RTSX_DEBUGP("old_blk = 0x%x\n", old_blk);
+
+ while (total_sec_cnt) {
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ chip->card_fail |= XD_CARD;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if ((start_page + total_sec_cnt) > (xd_card->page_off + 1))
+ end_page = xd_card->page_off + 1;
+ else
+ end_page = start_page + (u8)total_sec_cnt;
+
+ page_cnt = end_page - start_page;
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ retval = xd_read_multiple_pages(chip, old_blk, log_blk,
+ start_page, end_page, ptr,
+ &index, &offset);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ retval = xd_write_multiple_pages(chip, old_blk,
+ new_blk, log_blk,
+ start_page, end_page, ptr,
+ &index, &offset);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ total_sec_cnt -= page_cnt;
+ if (scsi_sg_count(srb) == 0)
+ ptr += page_cnt * 512;
+
+ if (total_sec_cnt == 0)
+ break;
+
+ log_blk++;
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16)(log_blk % 1000);
+
+ if (xd_card->zone[zone_no].build_flag == 0) {
+ retval = xd_build_l2p_tbl(chip, zone_no);
+ if (retval != STATUS_SUCCESS) {
+ chip->card_fail |= XD_CARD;
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
+ if (old_blk == BLK_NOT_FOUND) {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ new_blk = xd_get_unused_block(chip, zone_no);
+ if (new_blk == BLK_NOT_FOUND) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ start_page = 0;
+ }
+
+ if ((srb->sc_data_direction == DMA_TO_DEVICE) &&
+ (end_page != (xd_card->page_off + 1))) {
+#ifdef XD_DELAY_WRITE
+ delay_write->delay_write_flag = 1;
+ delay_write->old_phyblock = old_blk;
+ delay_write->new_phyblock = new_blk;
+ delay_write->logblock = log_blk;
+ delay_write->pageoff = end_page;
+#else
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ chip->card_fail |= XD_CARD;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = xd_finish_write(chip, old_blk, new_blk,
+ log_blk, end_page);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ }
+
+ scsi_set_resid(srb, 0);
+
+ return STATUS_SUCCESS;
+}
+
+void xd_free_l2p_tbl(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int i = 0;
+
+ if (xd_card->zone != NULL) {
+ for (i = 0; i < xd_card->zone_cnt; i++) {
+ if (xd_card->zone[i].l2p_table != NULL) {
+ vfree(xd_card->zone[i].l2p_table);
+ xd_card->zone[i].l2p_table = NULL;
+ }
+ if (xd_card->zone[i].free_table != NULL) {
+ vfree(xd_card->zone[i].free_table);
+ xd_card->zone[i].free_table = NULL;
+ }
+ }
+ vfree(xd_card->zone);
+ xd_card->zone = NULL;
+ }
+}
+
+void xd_cleanup_work(struct rtsx_chip *chip)
+{
+#ifdef XD_DELAY_WRITE
+ struct xd_info *xd_card = &(chip->xd_card);
+
+ if (xd_card->delay_write.delay_write_flag) {
+ RTSX_DEBUGP("xD: delay write\n");
+ xd_delay_write(chip);
+ xd_card->cleanup_counter = 0;
+ }
+#endif
+}
+
+int xd_power_off_card3v3(struct rtsx_chip *chip)
+{
+ int retval;
+
+ retval = disable_card_clock(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_OE, XD_OUTPUT_EN, 0);
+
+ if (!chip->ft2_fast_mode) {
+ retval = card_power_off(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(50);
+ }
+
+ if (chip->asic_code) {
+ retval = xd_pull_ctl_disable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, FPGA_PULL_CTL, 0xFF, 0xDF);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int release_xd_card(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+
+ RTSX_DEBUGP("release_xd_card\n");
+
+ chip->card_ready &= ~XD_CARD;
+ chip->card_fail &= ~XD_CARD;
+ chip->card_wp &= ~XD_CARD;
+
+ xd_card->delay_write.delay_write_flag = 0;
+
+ xd_free_l2p_tbl(chip);
+
+ retval = xd_power_off_card3v3(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
diff --git a/drivers/staging/rts5208/xd.h b/drivers/staging/rts5208/xd.h
new file mode 100644
index 0000000..938138c
--- /dev/null
+++ b/drivers/staging/rts5208/xd.h
@@ -0,0 +1,188 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_XD_H
+#define __REALTEK_RTSX_XD_H
+
+#define XD_DELAY_WRITE
+
+/* Error Codes */
+#define XD_NO_ERROR 0x00
+#define XD_NO_MEMORY 0x80
+#define XD_PRG_ERROR 0x40
+#define XD_NO_CARD 0x20
+#define XD_READ_FAIL 0x10
+#define XD_ERASE_FAIL 0x08
+#define XD_WRITE_FAIL 0x04
+#define XD_ECC_ERROR 0x02
+#define XD_TO_ERROR 0x01
+
+/* XD Commands */
+#define READ1_1 0x00
+#define READ1_2 0x01
+#define READ2 0x50
+#define READ_ID 0x90
+#define RESET 0xff
+#define PAGE_PRG_1 0x80
+#define PAGE_PRG_2 0x10
+#define BLK_ERASE_1 0x60
+#define BLK_ERASE_2 0xD0
+#define READ_STS 0x70
+#define READ_xD_ID 0x9A
+#define COPY_BACK_512 0x8A
+#define COPY_BACK_2K 0x85
+#define READ1_1_2 0x30
+#define READ1_1_3 0x35
+#define CHG_DAT_OUT_1 0x05
+#define RDM_DAT_OUT_1 0x05
+#define CHG_DAT_OUT_2 0xE0
+#define RDM_DAT_OUT_2 0xE0
+#define CHG_DAT_IN_1 0x85
+#define CACHE_PRG 0x15
+
+/* Redundant Area Related */
+#define XD_EXTRA_SIZE 0x10
+#define XD_2K_EXTRA_SIZE 0x40
+
+#define NOT_WRITE_PROTECTED 0x80
+#define READY_STATE 0x40
+#define PROGRAM_ERROR 0x01
+#define PROGRAM_ERROR_N_1 0x02
+#define INTERNAL_READY 0x20
+#define READY_FLAG 0x5F
+
+#define XD_8M_X8_512 0xE6
+#define XD_16M_X8_512 0x73
+#define XD_32M_X8_512 0x75
+#define XD_64M_X8_512 0x76
+#define XD_128M_X8_512 0x79
+#define XD_256M_X8_512 0x71
+#define XD_128M_X8_2048 0xF1
+#define XD_256M_X8_2048 0xDA
+#define XD_512M_X8 0xDC
+#define XD_128M_X16_2048 0xC1
+#define XD_4M_X8_512_1 0xE3
+#define XD_4M_X8_512_2 0xE5
+#define xD_1G_X8_512 0xD3
+#define xD_2G_X8_512 0xD5
+
+#define XD_ID_CODE 0xB5
+
+#define VENDOR_BLOCK 0xEFFF
+#define CIS_BLOCK 0xDFFF
+
+#define BLK_NOT_FOUND 0xFFFFFFFF
+
+#define NO_NEW_BLK 0xFFFFFFFF
+
+#define PAGE_CORRECTABLE 0x0
+#define PAGE_NOTCORRECTABLE 0x1
+
+#define NO_OFFSET 0x0
+#define WITH_OFFSET 0x1
+
+#define Sect_Per_Page 4
+#define XD_ADDR_MODE_2C XD_ADDR_MODE_2A
+
+#define ZONE0_BAD_BLOCK 23
+#define NOT_ZONE0_BAD_BLOCK 24
+
+#define XD_RW_ADDR 0x01
+#define XD_ERASE_ADDR 0x02
+
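+/* Geometry for 512-byte-page cards: 32 pages per block, page mask 0x1F */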
+#define XD_PAGE_512(xd_card) \
+do { \
+ (xd_card)->block_shift = 5; \
+ (xd_card)->page_off = 0x1F; \
+} while (0)
+
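+/* Per-operation state bits kept in xd_card->multi_flag */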
+#define XD_SET_BAD_NEWBLK(xd_card) ((xd_card)->multi_flag |= 0x01)
+#define XD_CLR_BAD_NEWBLK(xd_card) ((xd_card)->multi_flag &= ~0x01)
+#define XD_CHK_BAD_NEWBLK(xd_card) ((xd_card)->multi_flag & 0x01)
+
+#define XD_SET_BAD_OLDBLK(xd_card) ((xd_card)->multi_flag |= 0x02)
+#define XD_CLR_BAD_OLDBLK(xd_card) ((xd_card)->multi_flag &= ~0x02)
+#define XD_CHK_BAD_OLDBLK(xd_card) ((xd_card)->multi_flag & 0x02)
+
+#define XD_SET_MBR_FAIL(xd_card) ((xd_card)->multi_flag |= 0x04)
+#define XD_CLR_MBR_FAIL(xd_card) ((xd_card)->multi_flag &= ~0x04)
+#define XD_CHK_MBR_FAIL(xd_card) ((xd_card)->multi_flag & 0x04)
+
+#define XD_SET_ECC_FLD_ERR(xd_card) ((xd_card)->multi_flag |= 0x08)
+#define XD_CLR_ECC_FLD_ERR(xd_card) ((xd_card)->multi_flag &= ~0x08)
+#define XD_CHK_ECC_FLD_ERR(xd_card) ((xd_card)->multi_flag & 0x08)
+
+#define XD_SET_4MB(xd_card) ((xd_card)->multi_flag |= 0x10)
+#define XD_CLR_4MB(xd_card) ((xd_card)->multi_flag &= ~0x10)
+#define XD_CHK_4MB(xd_card) ((xd_card)->multi_flag & 0x10)
+
+#define XD_SET_ECC_ERR(xd_card) ((xd_card)->multi_flag |= 0x40)
+#define XD_CLR_ECC_ERR(xd_card) ((xd_card)->multi_flag &= ~0x40)
+#define XD_CHK_ECC_ERR(xd_card) ((xd_card)->multi_flag & 0x40)
+
+#define PAGE_STATUS 0
+#define BLOCK_STATUS 1
+#define BLOCK_ADDR1_L 2
+#define BLOCK_ADDR1_H 3
+#define BLOCK_ADDR2_L 4
+#define BLOCK_ADDR2_H 5
+#define RESERVED0 6
+#define RESERVED1 7
+#define RESERVED2 8
+#define RESERVED3 9
+#define PARITY 10
+
+#define CIS0_0 0
+#define CIS0_1 1
+#define CIS0_2 2
+#define CIS0_3 3
+#define CIS0_4 4
+#define CIS0_5 5
+#define CIS0_6 6
+#define CIS0_7 7
+#define CIS0_8 8
+#define CIS0_9 9
+#define CIS1_0 256
+#define CIS1_1 (256 + 1)
+#define CIS1_2 (256 + 2)
+#define CIS1_3 (256 + 3)
+#define CIS1_4 (256 + 4)
+#define CIS1_5 (256 + 5)
+#define CIS1_6 (256 + 6)
+#define CIS1_7 (256 + 7)
+#define CIS1_8 (256 + 8)
+#define CIS1_9 (256 + 9)
+
+int reset_xd_card(struct rtsx_chip *chip);
+#ifdef XD_DELAY_WRITE
+int xd_delay_write(struct rtsx_chip *chip);
+#endif
+int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 start_sector, u16 sector_cnt);
+void xd_free_l2p_tbl(struct rtsx_chip *chip);
+void xd_cleanup_work(struct rtsx_chip *chip);
+int xd_power_off_card3v3(struct rtsx_chip *chip);
+int release_xd_card(struct rtsx_chip *chip);
+
+#endif /* __REALTEK_RTSX_XD_H */
diff --git a/drivers/staging/sb105x/sb_mp_register.h b/drivers/staging/sb105x/sb_mp_register.h
index 16de497..276c1bb 100644
--- a/drivers/staging/sb105x/sb_mp_register.h
+++ b/drivers/staging/sb105x/sb_mp_register.h
@@ -116,10 +116,10 @@
#define SB105X_FCR_TXFR 0x04 /* TX FIFO Reset */
#define SB105X_FCR_DMS 0x08 /* DMA Mode Select */
-#define SB105X_FCR_RTR08 0x00 /* Receice Trigger Level set at 8 */
-#define SB105X_FCR_RTR16 0x40 /* Receice Trigger Level set at 16 */
-#define SB105X_FCR_RTR56 0x80 /* Receice Trigger Level set at 56 */
-#define SB105X_FCR_RTR60 0xc0 /* Receice Trigger Level set at 60 */
+#define SB105X_FCR_RTR08 0x00 /* Receive Trigger Level set at 8 */
+#define SB105X_FCR_RTR16 0x40 /* Receive Trigger Level set at 16 */
+#define SB105X_FCR_RTR56 0x80 /* Receive Trigger Level set at 56 */
+#define SB105X_FCR_RTR60 0xc0 /* Receive Trigger Level set at 60 */
#define SB105X_FCR_TTR08 0x00 /* Transmit Trigger Level set at 8 */
#define SB105X_FCR_TTR16 0x10 /* Transmit Trigger Level set at 16 */
#define SB105X_FCR_TTR32 0x20 /* Transmit Trigger Level set at 32 */
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
index 5cd3eff..c9d6ee3 100644
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ b/drivers/staging/sb105x/sb_pci_mp.c
@@ -182,7 +182,7 @@ static int sb1054_get_register(struct sb_uart_port *port, int page, int reg)
if( page <= 0)
{
- printk(" page 0 can not use this fuction\n");
+ printk(" page 0 can not use this function\n");
return -1;
}
@@ -243,7 +243,7 @@ static int sb1054_set_register(struct sb_uart_port *port, int page, int reg, int
if( page <= 0)
{
- printk(" page 0 can not use this fuction\n");
+ printk(" page 0 can not use this function\n");
return -1;
}
switch(page)
diff --git a/drivers/staging/sb105x/sb_pci_mp.h b/drivers/staging/sb105x/sb_pci_mp.h
index 11d9299..80ae4ab 100644
--- a/drivers/staging/sb105x/sb_pci_mp.h
+++ b/drivers/staging/sb105x/sb_pci_mp.h
@@ -9,7 +9,6 @@
#include <linux/sched.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/tty_driver.h>
diff --git a/drivers/staging/sbe-2t3e3/ctrl.c b/drivers/staging/sbe-2t3e3/ctrl.c
index a5825d7f..d280bcf 100644
--- a/drivers/staging/sbe-2t3e3/ctrl.c
+++ b/drivers/staging/sbe-2t3e3/ctrl.c
@@ -31,7 +31,7 @@ void t3e3_set_frame_type(struct channel *sc, u32 mode)
sc->p.frame_type = mode;
}
-void t3e3_set_loopback(struct channel *sc, u32 mode)
+static void t3e3_set_loopback(struct channel *sc, u32 mode)
{
u32 tx, rx;
@@ -95,7 +95,7 @@ void t3e3_set_loopback(struct channel *sc, u32 mode)
}
-void t3e3_reg_read(struct channel *sc, u32 *reg, u32 *val)
+static void t3e3_reg_read(struct channel *sc, u32 *reg, u32 *val)
{
u32 i;
@@ -132,7 +132,7 @@ void t3e3_reg_read(struct channel *sc, u32 *reg, u32 *val)
}
}
-void t3e3_reg_write(struct channel *sc, u32 *reg)
+static void t3e3_reg_write(struct channel *sc, u32 *reg)
{
u32 i;
@@ -164,12 +164,12 @@ void t3e3_reg_write(struct channel *sc, u32 *reg)
}
}
-void t3e3_port_get(struct channel *sc, t3e3_param_t *param)
+static void t3e3_port_get(struct channel *sc, t3e3_param_t *param)
{
memcpy(param, &(sc->p), sizeof(t3e3_param_t));
}
-void t3e3_port_set(struct channel *sc, t3e3_param_t *param)
+static void t3e3_port_set(struct channel *sc, t3e3_param_t *param)
{
if (param->frame_mode != 0xff)
cpld_set_frame_mode(sc, param->frame_mode);
@@ -216,7 +216,7 @@ void t3e3_port_set(struct channel *sc, t3e3_param_t *param)
cpld_set_scrambler(sc, param->scrambler);
}
-void t3e3_port_get_stats(struct channel *sc,
+static void t3e3_port_get_stats(struct channel *sc,
t3e3_stats_t *stats)
{
u32 result;
@@ -282,7 +282,7 @@ void t3e3_port_get_stats(struct channel *sc,
memcpy(stats, &(sc->s), sizeof(t3e3_stats_t));
}
-void t3e3_port_del_stats(struct channel *sc)
+static void t3e3_port_del_stats(struct channel *sc)
{
memset(&(sc->s), 0, sizeof(t3e3_stats_t));
}
diff --git a/drivers/staging/sep/sep_crypto.c b/drivers/staging/sep/sep_crypto.c
index b9262a7..7fc2675 100644
--- a/drivers/staging/sep/sep_crypto.c
+++ b/drivers/staging/sep/sep_crypto.c
@@ -32,7 +32,6 @@
*/
/* #define DEBUG */
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
@@ -3927,6 +3926,7 @@ int sep_crypto_setup(void)
err_algs:
for (k = 0; k < i; k++)
crypto_unregister_ahash(&hash_algs[k]);
+ destroy_workqueue(sep_dev->workqueue);
return err;
err_crypto_algs:
@@ -3945,6 +3945,7 @@ void sep_crypto_takedown(void)
for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
crypto_unregister_alg(&crypto_algs[i]);
+ destroy_workqueue(sep_dev->workqueue);
tasklet_kill(&sep_dev->finish_tasklet);
}
diff --git a/drivers/staging/sep/sep_main.c b/drivers/staging/sep/sep_main.c
index 1e80a40..122614c 100644
--- a/drivers/staging/sep/sep_main.c
+++ b/drivers/staging/sep/sep_main.c
@@ -39,7 +39,6 @@
/* #define DEBUG */
/* #define SEP_PERF_DEBUG */
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
@@ -4292,7 +4291,7 @@ static void sep_remove(struct pci_dev *pdev)
}
/* Initialize struct pci_device_id for our driver */
-static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
+static const struct pci_device_id sep_pci_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
{0}
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index 73fc3cc..f0fcbf7 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -5,7 +5,6 @@
*/
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -970,17 +969,11 @@ static void qt_block_until_empty(struct tty_struct *tty,
{
int timeout = HZ / 10;
int wait = 30;
- int count;
-
- while (1) {
-
- count = qt_chars_in_buffer(tty);
-
- if (count <= 0)
- return;
-
- interruptible_sleep_on_timeout(&qt_port->wait, timeout);
+ /* returns if we get a signal, an error, or the buffer is empty */
+ while (wait_event_interruptible_timeout(qt_port->wait,
+ qt_chars_in_buffer(tty) <= 0,
+ timeout) == 0) {
wait--;
if (wait == 0) {
dev_dbg(&qt_port->port->dev, "%s - TIMEOUT", __func__);
@@ -1137,7 +1130,10 @@ static int qt_ioctl(struct tty_struct *tty,
if (cmd == TIOCMIWAIT) {
while (qt_port != NULL) {
+#if 0
+ /* this never wakes up */
interruptible_sleep_on(&qt_port->msr_wait);
+#endif
if (signal_pending(current))
return -ERESTARTSYS;
else {
diff --git a/drivers/staging/silicom/bpctl_mod.c b/drivers/staging/silicom/bpctl_mod.c
index 39dc92a..20325f5 100644
--- a/drivers/staging/silicom/bpctl_mod.c
+++ b/drivers/staging/silicom/bpctl_mod.c
@@ -135,8 +135,6 @@ static int bp_get_dev_idx_bsf(struct net_device *dev, int *index)
else
return -EOPNOTSUPP;
- if (!drvinfo.bus_info)
- return -ENODATA;
if (!strcmp(drvinfo.bus_info, "N/A"))
return -ENODATA;
diff --git a/drivers/staging/silicom/bypasslib/bypass.c b/drivers/staging/silicom/bypasslib/bypass.c
index ba0d23a..09e00da 100644
--- a/drivers/staging/silicom/bypasslib/bypass.c
+++ b/drivers/staging/silicom/bypasslib/bypass.c
@@ -7,11 +7,11 @@
/* the Free Software Foundation, located in the file LICENSE. */
/* */
/* */
-/* bypass.c */
+/* bypass.c */
/* */
/******************************************************************************/
-#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#if defined(CONFIG_SMP) && !defined(__SMP__)
#define __SMP__
#endif
@@ -22,7 +22,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
-#include <linux/netdevice.h> // struct device, and other headers
+#include <linux/netdevice.h> /* struct device, and other headers */
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/rtnetlink.h>
@@ -40,20 +40,17 @@ MODULE_AUTHOR("www.silicom.co.il");
MODULE_LICENSE("GPL");
-int init_lib_module(void);
-void cleanup_lib_module(void);
-
static int do_cmd(struct net_device *dev, struct ifreq *ifr, int cmd, int *data)
{
int ret = -1;
struct if_bypass *bypass_cb;
- static int (*ioctl) (struct net_device *, struct ifreq *, int);
bypass_cb = (struct if_bypass *)ifr;
bypass_cb->cmd = cmd;
bypass_cb->data = *data;
- if ((dev->netdev_ops) && (ioctl = dev->netdev_ops->ndo_do_ioctl)) {
- ret = ioctl(dev, ifr, SIOCGIFBYPASS);
+
+ if (dev->netdev_ops && dev->netdev_ops->ndo_do_ioctl) {
+ ret = dev->netdev_ops->ndo_do_ioctl(dev, ifr, SIOCGIFBYPASS);
*data = bypass_cb->data;
}
@@ -66,13 +63,12 @@ static int doit(int cmd, int if_index, int *data)
int ret = -1;
struct net_device *dev;
struct net_device *n;
- for_each_netdev_safe(&init_net, dev, n) {
+ for_each_netdev_safe(&init_net, dev, n) {
if (dev->ifindex == if_index) {
ret = do_cmd(dev, &ifr, cmd, data);
if (ret < 0)
ret = -1;
-
}
}
@@ -82,56 +78,65 @@ static int doit(int cmd, int if_index, int *data)
#define bp_symbol_get(fn_name) symbol_get(fn_name)
#define bp_symbol_put(fn_name) symbol_put(fn_name)
-#define SET_BPLIB_INT_FN(fn_name, arg_type, arg, ret) \
- ({ int (* fn_ex)(arg_type)=NULL; \
- fn_ex=bp_symbol_get(fn_name##_sd); \
- if(fn_ex) { \
- ret= fn_ex(arg); \
- bp_symbol_put(fn_name##_sd); \
- } else ret=-1; \
- })
-
-#define SET_BPLIB_INT_FN2(fn_name, arg_type, arg, arg_type1, arg1, ret) \
- ({ int (* fn_ex)(arg_type,arg_type1)=NULL; \
- fn_ex=bp_symbol_get(fn_name##_sd); \
- if(fn_ex) { \
- ret= fn_ex(arg,arg1); \
- bp_symbol_put(fn_name##_sd); \
- } else ret=-1; \
- })
-#define SET_BPLIB_INT_FN3(fn_name, arg_type, arg, arg_type1, arg1,arg_type2, arg2, ret) \
- ({ int (* fn_ex)(arg_type,arg_type1, arg_type2)=NULL; \
- fn_ex=bp_symbol_get(fn_name##_sd); \
- if(fn_ex) { \
- ret= fn_ex(arg,arg1,arg2); \
- bp_symbol_put(fn_name##_sd); \
- } else ret=-1; \
- })
-
-#define DO_BPLIB_GET_ARG_FN(fn_name,ioctl_val, if_index) \
- ({ int data, ret=0; \
- if(is_dev_sd(if_index)){ \
- SET_BPLIB_INT_FN(fn_name, int, if_index, ret); \
- return ret; \
- } \
- return doit(ioctl_val,if_index, &data); \
- })
-
-#define DO_BPLIB_SET_ARG_FN(fn_name,ioctl_val,if_index,arg) \
- ({ int data, ret=0; \
- if(is_dev_sd(if_index)){ \
- SET_BPLIB_INT_FN2(fn_name, int, if_index, int, arg, ret); \
- return ret; \
- } \
- data=arg; \
- return doit(ioctl_val,if_index, &data); \
- })
+#define SET_BPLIB_INT_FN(fn_name, arg_type, arg, ret) \
+({ int (*fn_ex)(arg_type) = NULL; \
+ fn_ex = bp_symbol_get(fn_name##_sd); \
+ if (fn_ex) { \
+ ret = fn_ex(arg); \
+ bp_symbol_put(fn_name##_sd); \
+ } else { \
+ ret = -1; \
+ } \
+})
+
+#define SET_BPLIB_INT_FN2(fn_name, arg_type, arg, arg_type1, arg1, ret)\
+({ int (*fn_ex)(arg_type, arg_type1) = NULL; \
+ fn_ex = bp_symbol_get(fn_name##_sd); \
+ if (fn_ex) { \
+ ret = fn_ex(arg, arg1); \
+ bp_symbol_put(fn_name##_sd); \
+ } else { \
+ ret = -1; \
+ } \
+})
+
+#define SET_BPLIB_INT_FN3(fn_name, arg_type, arg, arg_type1, arg1, \
+ arg_type2, arg2, ret) \
+({ int (*fn_ex)(arg_type, arg_type1, arg_type2) = NULL; \
+ fn_ex = bp_symbol_get(fn_name##_sd); \
+ if (fn_ex) { \
+ ret = fn_ex(arg, arg1, arg2); \
+ bp_symbol_put(fn_name##_sd); \
+ } else { \
+ ret = -1; \
+ } \
+})
+
+#define DO_BPLIB_GET_ARG_FN(fn_name, ioctl_val, if_index) \
+({ int data, ret = 0; \
+ if (is_dev_sd(if_index)) { \
+ SET_BPLIB_INT_FN(fn_name, int, if_index, ret); \
+ return ret; \
+ } \
+ return doit(ioctl_val, if_index, &data); \
+})
+
+#define DO_BPLIB_SET_ARG_FN(fn_name, ioctl_val, if_index, arg) \
+({ int data, ret = 0; \
+ if (is_dev_sd(if_index)) { \
+ SET_BPLIB_INT_FN2(fn_name, int, if_index, int, \
+ arg, ret); \
+ return ret; \
+ } \
+ data = arg; \
+ return doit(ioctl_val, if_index, &data); \
+})
static int is_dev_sd(int if_index)
{
int ret = 0;
SET_BPLIB_INT_FN(is_bypass, int, if_index, ret);
- return (ret >= 0 ? 1 : 0);
+ return ret >= 0 ? 1 : 0;
}
static int is_bypass_dev(int if_index)
@@ -139,16 +144,19 @@ static int is_bypass_dev(int if_index)
struct pci_dev *pdev = NULL;
struct net_device *dev = NULL;
struct ifreq ifr;
- int ret = 0, data = 0;
+ int ret = 0;
+ int data = 0;
while ((pdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) {
- if ((dev = pci_get_drvdata(pdev)) != NULL)
- if (((dev = pci_get_drvdata(pdev)) != NULL) &&
- (dev->ifindex == if_index)) {
+ dev = pci_get_drvdata(pdev);
+ if (dev != NULL) {
+ dev = pci_get_drvdata(pdev);
+ if ((dev != NULL) && (dev->ifindex == if_index)) {
if ((pdev->vendor == SILICOM_VID) &&
(pdev->device >= SILICOM_BP_PID_MIN) &&
- (pdev->device <= SILICOM_BP_PID_MAX))
+ (pdev->device <= SILICOM_BP_PID_MAX)) {
goto send_cmd;
+ }
#if defined(BP_VENDOR_SUPPORT) && defined(ETHTOOL_GDRVINFO)
else {
struct ethtool_drvinfo info;
@@ -173,10 +181,11 @@ static int is_bypass_dev(int if_index)
#endif
return -1;
}
+ }
}
send_cmd:
ret = do_cmd(dev, &ifr, IS_BYPASS, &data);
- return (ret < 0 ? -1 : ret);
+ return ret < 0 ? -1 : ret;
}
static int is_bypass(int if_index)
@@ -267,11 +276,13 @@ EXPORT_SYMBOL(get_bypass_pwup);
static int set_bypass_wd(int if_index, int ms_timeout, int *ms_timeout_set)
{
- int data = ms_timeout, ret = 0;
- if (is_dev_sd(if_index))
+ int data = ms_timeout;
+ int ret = 0;
+
+ if (is_dev_sd(if_index)) {
SET_BPLIB_INT_FN3(set_bypass_wd, int, if_index, int, ms_timeout,
int *, ms_timeout_set, ret);
- else {
+ } else {
ret = doit(SET_BYPASS_WD, if_index, &data);
if (ret > 0) {
*ms_timeout_set = ret;
@@ -284,7 +295,9 @@ EXPORT_SYMBOL(set_bypass_wd);
static int get_bypass_wd(int if_index, int *ms_timeout_set)
{
- int *data = ms_timeout_set, ret = 0;
+ int *data = ms_timeout_set;
+ int ret = 0;
+
if (is_dev_sd(if_index))
SET_BPLIB_INT_FN2(get_bypass_wd, int, if_index, int *,
ms_timeout_set, ret);
@@ -297,10 +310,11 @@ EXPORT_SYMBOL(get_bypass_wd);
static int get_wd_expire_time(int if_index, int *ms_time_left)
{
int *data = ms_time_left, ret = 0;
- if (is_dev_sd(if_index))
+
+ if (is_dev_sd(if_index)) {
SET_BPLIB_INT_FN2(get_wd_expire_time, int, if_index, int *,
ms_time_left, ret);
- else {
+ } else {
ret = doit(GET_WD_EXPIRE_TIME, if_index, data);
if ((ret == 0) && (*data != 0))
ret = 1;
@@ -476,14 +490,14 @@ EXPORT_SYMBOL(get_bp_hw_reset);
static int get_bypass_info(int if_index, struct bp_info *bp_info)
{
int ret = 0;
+
if (is_dev_sd(if_index)) {
SET_BPLIB_INT_FN2(get_bypass_info, int, if_index,
struct bp_info *, bp_info, ret);
} else {
- static int (*ioctl) (struct net_device *, struct ifreq *, int);
struct net_device *dev;
-
struct net_device *n;
+
for_each_netdev_safe(&init_net, dev, n) {
if (dev->ifindex == if_index) {
struct if_bypass_info *bypass_cb;
@@ -493,17 +507,16 @@ static int get_bypass_info(int if_index, struct bp_info *bp_info)
bypass_cb = (struct if_bypass_info *)&ifr;
bypass_cb->cmd = GET_BYPASS_INFO;
- if ((dev->netdev_ops) &&
- (ioctl = dev->netdev_ops->ndo_do_ioctl)) {
- ret = ioctl(dev, &ifr, SIOCGIFBYPASS);
- }
-
+ if (dev->netdev_ops &&
+ dev->netdev_ops->ndo_do_ioctl)
+ ret = dev->netdev_ops->ndo_do_ioctl(dev,
+ &ifr, SIOCGIFBYPASS);
else
ret = -1;
if (ret == 0)
memcpy(bp_info, &bypass_cb->bp_info,
sizeof(struct bp_info));
- ret = (ret < 0 ? -1 : 0);
+ ret = ret < 0 ? -1 : 0;
break;
}
}
@@ -512,14 +525,13 @@ static int get_bypass_info(int if_index, struct bp_info *bp_info)
}
EXPORT_SYMBOL(get_bypass_info);
-int init_lib_module(void)
+static int __init init_lib_module(void)
{
-
printk(VERSION);
return 0;
}
-void cleanup_lib_module(void)
+static void __exit cleanup_lib_module(void)
{
}
diff --git a/drivers/staging/slicoss/README b/drivers/staging/slicoss/README
index cb04a87..53052c4 100644
--- a/drivers/staging/slicoss/README
+++ b/drivers/staging/slicoss/README
@@ -14,7 +14,6 @@ TODO:
- use net_device_ops
- use dev->stats rather than adapter->stats
- don't cast netdev_priv it is already void
- - use compare_ether_addr
- GET RID OF MACROS
- work on all architectures
- without CONFIG_X86_64 confusion
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 652272b..1426ca4 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -136,7 +136,7 @@ MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
module_param(intagg_delay, int, 0);
MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");
-static DEFINE_PCI_DEVICE_TABLE(slic_pci_tbl) = {
+static const struct pci_device_id slic_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_1GB_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_2GB_DEVICE_ID) },
{ 0 }
@@ -595,15 +595,12 @@ static void slic_adapter_set_hwaddr(struct adapter *adapter)
memcpy(adapter->macaddr,
card->config.MacInfo[adapter->functionnumber].macaddrA,
sizeof(struct slic_config_mac));
- if (!(adapter->currmacaddr[0] || adapter->currmacaddr[1] ||
- adapter->currmacaddr[2] || adapter->currmacaddr[3] ||
- adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
- memcpy(adapter->currmacaddr, adapter->macaddr, 6);
- }
- if (adapter->netdev) {
+ if (is_zero_ether_addr(adapter->currmacaddr))
+ memcpy(adapter->currmacaddr, adapter->macaddr,
+ ETH_ALEN);
+ if (adapter->netdev)
memcpy(adapter->netdev->dev_addr, adapter->currmacaddr,
- 6);
- }
+ ETH_ALEN);
}
}
@@ -767,13 +764,11 @@ static bool slic_mac_filter(struct adapter *adapter,
{
struct net_device *netdev = adapter->netdev;
u32 opts = adapter->macopts;
- u32 *dhost4 = (u32 *)&ether_frame->ether_dhost[0];
- u16 *dhost2 = (u16 *)&ether_frame->ether_dhost[4];
if (opts & MAC_PROMISC)
return true;
- if ((*dhost4 == 0xFFFFFFFF) && (*dhost2 == 0xFFFF)) {
+ if (is_broadcast_ether_addr(ether_frame->ether_dhost)) {
if (opts & MAC_BCAST) {
adapter->rcv_broadcasts++;
return true;
@@ -782,7 +777,7 @@ static bool slic_mac_filter(struct adapter *adapter,
}
}
- if (ether_frame->ether_dhost[0] & 0x01) {
+ if (is_multicast_ether_addr(ether_frame->ether_dhost)) {
if (opts & MAC_ALLMCAST) {
adapter->rcv_multicasts++;
netdev->stats.multicast++;
@@ -2335,7 +2330,7 @@ static int slic_mcast_add_list(struct adapter *adapter, char *address)
if (mcaddr == NULL)
return 1;
- memcpy(mcaddr->address, address, 6);
+ memcpy(mcaddr->address, address, ETH_ALEN);
mcaddr->next = adapter->mcastaddrs;
adapter->mcastaddrs = mcaddr;
diff --git a/drivers/staging/sm7xxfb/sm7xxfb.c b/drivers/staging/sm7xxfb/sm7xxfb.c
index ba199ff..6176d98 100644
--- a/drivers/staging/sm7xxfb/sm7xxfb.c
+++ b/drivers/staging/sm7xxfb/sm7xxfb.c
@@ -585,7 +585,7 @@ static void smtc_set_timing(struct smtcfb_info *sfb)
}
}
-void smtcfb_setmode(struct smtcfb_info *sfb)
+static void smtcfb_setmode(struct smtcfb_info *sfb)
{
switch (sfb->fb.var.bits_per_pixel) {
case 32:
@@ -920,7 +920,7 @@ failed_free:
* 0x712 (LynxEM+)
* 0x720 (Lynx3DM, Lynx3DM+)
*/
-static DEFINE_PCI_DEVICE_TABLE(smtcfb_pci_table) = {
+static const struct pci_device_id smtcfb_pci_table[] = {
{ PCI_DEVICE(0x126f, 0x710), },
{ PCI_DEVICE(0x126f, 0x712), },
{ PCI_DEVICE(0x126f, 0x720), },
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 47502fa..ef5933b 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -37,8 +37,6 @@
#include <linux/input.h>
#include <linux/kmod.h>
-#include <linux/bootmem.h> /* for alloc_bootmem */
-
/* speakup_*_selection */
#include <linux/module.h>
#include <linux/sched.h>
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index 1354288..4e18fb4 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -6,6 +6,10 @@
#include "spk_priv.h"
#include "serialio.h"
+#ifndef SERIAL_PORT_DFNS
+#define SERIAL_PORT_DFNS
+#endif
+
static void start_serial_interrupt(int irq);
static const struct old_serial_port rs_table[] = {
diff --git a/drivers/staging/speakup/serialio.h b/drivers/staging/speakup/serialio.h
index 55d68b5..0a93773 100644
--- a/drivers/staging/speakup/serialio.h
+++ b/drivers/staging/speakup/serialio.h
@@ -36,30 +36,4 @@ struct old_serial_port {
#define spk_serial_tx_busy() ((inb(speakup_info.port_tts + UART_LSR) & BOTH_EMPTY) != BOTH_EMPTY)
-/* 2.6.22 doesn't have them any more, hardcode it for now (these values should
- * be fine for 99% cases) */
-#ifndef BASE_BAUD
-#define BASE_BAUD (1843200 / 16)
-#endif
-#ifndef STD_COM_FLAGS
-#ifdef CONFIG_SERIAL_DETECT_IRQ
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
-#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
-#else
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
-#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
-#endif
-#endif
-#ifndef SERIAL_PORT_DFNS
-#define SERIAL_PORT_DFNS \
- /* UART CLK PORT IRQ FLAGS */ \
- { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
- { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
- { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
- { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
-#endif
-#ifndef IRQF_SHARED
-#define IRQF_SHARED SA_SHIRQ
-#endif
-
#endif
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
index 8c8c92a..adb21c5 100644
--- a/drivers/staging/tidspbridge/Makefile
+++ b/drivers/staging/tidspbridge/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge.o
-libgen = gen/gh.o gen/uuidutil.o
+libgen = gen/gh.o
libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
core/tiomap3430_pwr.o core/tiomap_io.o \
core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o
diff --git a/drivers/staging/tidspbridge/gen/gh.c b/drivers/staging/tidspbridge/gen/gh.c
index 25eaef7..936470c 100644
--- a/drivers/staging/tidspbridge/gen/gh.c
+++ b/drivers/staging/tidspbridge/gen/gh.c
@@ -14,56 +14,45 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
-#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/slab.h>
-#include <dspbridge/host_os.h>
-#include <dspbridge/gh.h>
-
-struct element {
- struct element *next;
- u8 data[1];
+struct gh_node {
+ struct hlist_node hl;
+ u8 data[0];
};
+#define GH_HASH_ORDER 8
+
struct gh_t_hash_tab {
- u16 max_bucket;
- u16 val_size;
- struct element **buckets;
- u16(*hash) (void *, u16);
- bool(*match) (void *, void *);
- void (*delete) (void *);
+ u32 val_size;
+ DECLARE_HASHTABLE(hash_table, GH_HASH_ORDER);
+ u32 (*hash)(const void *key);
+ bool (*match)(const void *key, const void *value);
+ void (*delete)(void *key);
};
-static void noop(void *p);
-
/*
* ======== gh_create ========
*/
-struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
- u16(*hash) (void *, u16), bool(*match) (void *,
- void *),
- void (*delete) (void *))
+struct gh_t_hash_tab *gh_create(u32 val_size, u32 (*hash)(const void *),
+ bool (*match)(const void *, const void *),
+ void (*delete)(void *))
{
struct gh_t_hash_tab *hash_tab;
- u16 i;
+
hash_tab = kzalloc(sizeof(struct gh_t_hash_tab), GFP_KERNEL);
- if (hash_tab == NULL)
- return NULL;
- hash_tab->max_bucket = max_bucket;
+ if (!hash_tab)
+ return ERR_PTR(-ENOMEM);
+
+ hash_init(hash_tab->hash_table);
+
hash_tab->val_size = val_size;
hash_tab->hash = hash;
hash_tab->match = match;
- hash_tab->delete = delete == NULL ? noop : delete;
-
- hash_tab->buckets =
- kzalloc(sizeof(struct element *) * max_bucket, GFP_KERNEL);
- if (hash_tab->buckets == NULL) {
- gh_delete(hash_tab);
- return NULL;
- }
-
- for (i = 0; i < max_bucket; i++)
- hash_tab->buckets[i] = NULL;
+ hash_tab->delete = delete;
return hash_tab;
}
@@ -73,21 +62,16 @@ struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
*/
void gh_delete(struct gh_t_hash_tab *hash_tab)
{
- struct element *elem, *next;
- u16 i;
-
- if (hash_tab != NULL) {
- if (hash_tab->buckets != NULL) {
- for (i = 0; i < hash_tab->max_bucket; i++) {
- for (elem = hash_tab->buckets[i]; elem != NULL;
- elem = next) {
- next = elem->next;
- (*hash_tab->delete) (elem->data);
- kfree(elem);
- }
- }
-
- kfree(hash_tab->buckets);
+ struct gh_node *n;
+ struct hlist_node *tmp;
+ u32 i;
+
+ if (hash_tab) {
+ hash_for_each_safe(hash_tab->hash_table, i, tmp, n, hl) {
+ hash_del(&n->hl);
+ if (hash_tab->delete)
+ hash_tab->delete(n->data);
+ kfree(n);
}
kfree(hash_tab);
@@ -98,56 +82,39 @@ void gh_delete(struct gh_t_hash_tab *hash_tab)
* ======== gh_find ========
*/
-void *gh_find(struct gh_t_hash_tab *hash_tab, void *key)
+void *gh_find(struct gh_t_hash_tab *hash_tab, const void *key)
{
- struct element *elem;
+ struct gh_node *n;
+ u32 key_hash = hash_tab->hash(key);
- elem = hash_tab->buckets[(*hash_tab->hash) (key, hash_tab->max_bucket)];
-
- for (; elem; elem = elem->next) {
- if ((*hash_tab->match) (key, elem->data))
- return elem->data;
+ hash_for_each_possible(hash_tab->hash_table, n, hl, key_hash) {
+ if (hash_tab->match(key, n->data))
+ return n->data;
}
- return NULL;
+ return ERR_PTR(-ENODATA);
}
/*
* ======== gh_insert ========
*/
-void *gh_insert(struct gh_t_hash_tab *hash_tab, void *key, void *value)
+void *gh_insert(struct gh_t_hash_tab *hash_tab, const void *key,
+ const void *value)
{
- struct element *elem;
- u16 i;
- char *src, *dst;
+ struct gh_node *n;
- elem = kzalloc(sizeof(struct element) - 1 + hash_tab->val_size,
+ n = kmalloc(sizeof(struct gh_node) + hash_tab->val_size,
GFP_KERNEL);
- if (elem != NULL) {
-
- dst = (char *)elem->data;
- src = (char *)value;
- for (i = 0; i < hash_tab->val_size; i++)
- *dst++ = *src++;
- i = (*hash_tab->hash) (key, hash_tab->max_bucket);
- elem->next = hash_tab->buckets[i];
- hash_tab->buckets[i] = elem;
+ if (!n)
+ return ERR_PTR(-ENOMEM);
- return elem->data;
- }
-
- return NULL;
-}
+ INIT_HLIST_NODE(&n->hl);
+ hash_add(hash_tab->hash_table, &n->hl, hash_tab->hash(key));
+ memcpy(n->data, value, hash_tab->val_size);
-/*
- * ======== noop ========
- */
-/* ARGSUSED */
-static void noop(void *p)
-{
- p = p; /* stifle compiler warning */
+ return n->data;
}
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
@@ -162,16 +129,13 @@ static void noop(void *p)
void gh_iterate(struct gh_t_hash_tab *hash_tab,
void (*callback)(void *, void *), void *user_data)
{
- struct element *elem;
+ struct gh_node *n;
u32 i;
- if (hash_tab && hash_tab->buckets)
- for (i = 0; i < hash_tab->max_bucket; i++) {
- elem = hash_tab->buckets[i];
- while (elem) {
- callback(&elem->data, user_data);
- elem = elem->next;
- }
- }
+ if (!hash_tab)
+ return;
+
+ hash_for_each(hash_tab->hash_table, i, n, hl)
+ callback(&n->data, user_data);
}
#endif
diff --git a/drivers/staging/tidspbridge/gen/uuidutil.c b/drivers/staging/tidspbridge/gen/uuidutil.c
deleted file mode 100644
index b7d8313..0000000
--- a/drivers/staging/tidspbridge/gen/uuidutil.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * uuidutil.c
- *
- * DSP-BIOS Bridge driver support functions for TI OMAP processors.
- *
- * This file contains the implementation of UUID helper functions.
- *
- * Copyright (C) 2005-2006 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-#include <linux/types.h>
-
-/* ----------------------------------- Host OS */
-#include <dspbridge/host_os.h>
-
-/* ----------------------------------- DSP/BIOS Bridge */
-#include <dspbridge/dbdefs.h>
-
-/* ----------------------------------- This */
-#include <dspbridge/uuidutil.h>
-
-static s32 uuid_hex_to_bin(char *buf, s32 len)
-{
- s32 i;
- s32 result = 0;
- int value;
-
- for (i = 0; i < len; i++) {
- value = hex_to_bin(*buf++);
- result *= 16;
- if (value > 0)
- result += value;
- }
-
- return result;
-}
-
-/*
- * ======== uuid_uuid_from_string ========
- * Purpose:
- * Converts a string to a struct dsp_uuid.
- */
-void uuid_uuid_from_string(char *sz_uuid, struct dsp_uuid *uuid_obj)
-{
- s32 j;
-
- uuid_obj->data1 = uuid_hex_to_bin(sz_uuid, 8);
- sz_uuid += 8;
-
- /* Step over underscore */
- sz_uuid++;
-
- uuid_obj->data2 = (u16) uuid_hex_to_bin(sz_uuid, 4);
- sz_uuid += 4;
-
- /* Step over underscore */
- sz_uuid++;
-
- uuid_obj->data3 = (u16) uuid_hex_to_bin(sz_uuid, 4);
- sz_uuid += 4;
-
- /* Step over underscore */
- sz_uuid++;
-
- uuid_obj->data4 = (u8) uuid_hex_to_bin(sz_uuid, 2);
- sz_uuid += 2;
-
- uuid_obj->data5 = (u8) uuid_hex_to_bin(sz_uuid, 2);
- sz_uuid += 2;
-
- /* Step over underscore */
- sz_uuid++;
-
- for (j = 0; j < 6; j++) {
- uuid_obj->data6[j] = (u8) uuid_hex_to_bin(sz_uuid, 2);
- sz_uuid += 2;
- }
-}
diff --git a/drivers/staging/tidspbridge/include/dspbridge/gh.h b/drivers/staging/tidspbridge/include/dspbridge/gh.h
index da85079..e4303b4 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/gh.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/gh.h
@@ -18,13 +18,13 @@
#define GH_
#include <dspbridge/host_os.h>
-extern struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
- u16(*hash) (void *, u16),
- bool(*match) (void *, void *),
- void (*delete) (void *));
+extern struct gh_t_hash_tab *gh_create(u32 val_size,
+ u32 (*hash)(const void *), bool (*match)(const void *,
+ const void *), void (*delete) (void *));
extern void gh_delete(struct gh_t_hash_tab *hash_tab);
-extern void *gh_find(struct gh_t_hash_tab *hash_tab, void *key);
-extern void *gh_insert(struct gh_t_hash_tab *hash_tab, void *key, void *value);
+extern void *gh_find(struct gh_t_hash_tab *hash_tab, const void *key);
+extern void *gh_insert(struct gh_t_hash_tab *hash_tab, const void *key,
+ const void *value);
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
void gh_iterate(struct gh_t_hash_tab *hash_tab,
void (*callback)(void *, void *), void *user_data);
diff --git a/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h b/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h
index 414bf71..b4951a1 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h
@@ -21,22 +21,4 @@
#define MAXUUIDLEN 37
-/*
- * ======== uuid_uuid_from_string ========
- * Purpose:
- * Converts an ANSI string to a dsp_uuid.
- * Parameters:
- * sz_uuid: Pointer to a string that represents a dsp_uuid object.
- * uuid_obj: Pointer to a dsp_uuid object.
- * Returns:
- * Requires:
- * uuid_obj & sz_uuid are non-NULL values.
- * Ensures:
- * Details:
- * We assume the string representation of a UUID has the following format:
- * "12345678_1234_1234_1234_123456789abc".
- */
-extern void uuid_uuid_from_string(char *sz_uuid,
- struct dsp_uuid *uuid_obj);
-
#endif /* UUIDUTIL_ */
diff --git a/drivers/staging/tidspbridge/pmgr/cmm.c b/drivers/staging/tidspbridge/pmgr/cmm.c
index 4a800da..f961e0e 100644
--- a/drivers/staging/tidspbridge/pmgr/cmm.c
+++ b/drivers/staging/tidspbridge/pmgr/cmm.c
@@ -359,7 +359,7 @@ int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
* Return the communication memory manager object for this device.
* This is typically called from the client process.
*/
-int cmm_get_handle(void *hprocessor, struct cmm_object ** ph_cmm_mgr)
+int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
int status = 0;
struct dev_object *hdev_obj;
@@ -449,8 +449,7 @@ int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
struct cmm_mnode *new_node;
s32 slot_seg;
- dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
- "dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
+ dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
__func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
dw_dsp_base, ul_dsp_size, gpp_base_va);
@@ -828,7 +827,7 @@ int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
* Purpose:
* Set/Get translator info.
*/
-int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 ** paddr,
+int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
u32 ul_size, u32 segm_id, bool set_info)
{
struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
index 41e88ab..8e21d1e 100644
--- a/drivers/staging/tidspbridge/pmgr/dbll.c
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -33,9 +33,6 @@
#include <dspbridge/dbll.h>
#include <dspbridge/rmm.h>
-/* Number of buckets for symbol hash table */
-#define MAXBUCKETS 211
-
/* Max buffer length */
#define MAXEXPR 128
@@ -183,8 +180,8 @@ static int execute(struct dynamic_loader_initialize *this, ldr_addr start);
static void release(struct dynamic_loader_initialize *this);
/* symbol table hash functions */
-static u16 name_hash(void *key, u16 max_bucket);
-static bool name_match(void *key, void *sp);
+static u32 name_hash(const void *key);
+static bool name_match(const void *key, const void *sp);
static void sym_delete(void *value);
/* Symbol Redefinition */
@@ -277,17 +274,16 @@ bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name,
struct dbll_sym_val **sym_val)
{
struct dbll_symbol *sym;
- bool status = false;
sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, name);
- if (sym != NULL) {
- *sym_val = &sym->value;
- status = true;
- }
+ if (IS_ERR(sym))
+ return false;
- dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p, status 0x%x\n",
- __func__, zl_lib, name, sym_val, status);
- return status;
+ *sym_val = &sym->value;
+
+ dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p\n",
+ __func__, zl_lib, name, sym_val);
+ return true;
}
/*
@@ -312,7 +308,6 @@ bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name,
{
struct dbll_symbol *sym;
char cname[MAXEXPR + 1];
- bool status = false;
cname[0] = '_';
@@ -321,13 +316,12 @@ bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name,
/* Check for C name, if not found */
sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, cname);
+ if (IS_ERR(sym))
+ return false;
- if (sym != NULL) {
- *sym_val = &sym->value;
- status = true;
- }
+ *sym_val = &sym->value;
- return status;
+ return true;
}
/*
@@ -378,8 +372,8 @@ int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
opened_doff = false;
}
- dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p psize: %p, "
- "status 0x%x\n", __func__, lib, name, paddr, psize, status);
+ dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p psize: %p, status 0x%x\n",
+ __func__, lib, name, paddr, psize, status);
return status;
}
@@ -416,12 +410,13 @@ int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
/* Create a hash table for symbols if not already created */
if (zl_lib->sym_tab == NULL) {
got_symbols = false;
- zl_lib->sym_tab = gh_create(MAXBUCKETS,
- sizeof(struct dbll_symbol),
+ zl_lib->sym_tab = gh_create(sizeof(struct dbll_symbol),
name_hash,
name_match, sym_delete);
- if (zl_lib->sym_tab == NULL)
- status = -ENOMEM;
+ if (IS_ERR(zl_lib->sym_tab)) {
+ status = PTR_ERR(zl_lib->sym_tab);
+ zl_lib->sym_tab = NULL;
+ }
}
/*
@@ -593,10 +588,11 @@ int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags,
goto func_cont;
zl_lib->sym_tab =
- gh_create(MAXBUCKETS, sizeof(struct dbll_symbol), name_hash,
- name_match, sym_delete);
- if (zl_lib->sym_tab == NULL) {
- status = -ENOMEM;
+ gh_create(sizeof(struct dbll_symbol), name_hash, name_match,
+ sym_delete);
+ if (IS_ERR(zl_lib->sym_tab)) {
+ status = PTR_ERR(zl_lib->sym_tab);
+ zl_lib->sym_tab = NULL;
} else {
/* Do a fake load to get symbols - set write func to no_op */
zl_lib->init.dl_init.writemem = no_op;
@@ -705,8 +701,8 @@ func_cont:
opened_doff = false;
}
- dev_dbg(bridge, "%s: lib: %p name: %s buf: %p size: 0x%x, "
- "status 0x%x\n", __func__, lib, name, buf, size, status);
+ dev_dbg(bridge, "%s: lib: %p name: %s buf: %p size: 0x%x, status 0x%x\n",
+ __func__, lib, name, buf, size, status);
return status;
}
@@ -793,11 +789,10 @@ static int dof_open(struct dbll_library_obj *zl_lib)
/*
* ======== name_hash ========
*/
-static u16 name_hash(void *key, u16 max_bucket)
+static u32 name_hash(const void *key)
{
- u16 ret;
- u16 hash;
- char *name = (char *)key;
+ u32 hash;
+ const char *name = key;
hash = 0;
@@ -806,19 +801,16 @@ static u16 name_hash(void *key, u16 max_bucket)
hash ^= *name++;
}
- ret = hash % max_bucket;
-
- return ret;
+ return hash;
}
/*
* ======== name_match ========
*/
-static bool name_match(void *key, void *sp)
+static bool name_match(const void *key, const void *sp)
{
if ((key != NULL) && (sp != NULL)) {
- if (strcmp((char *)key, ((struct dbll_symbol *)sp)->name) ==
- 0)
+ if (strcmp(key, ((struct dbll_symbol *)sp)->name) == 0)
return true;
}
return false;
@@ -915,10 +907,10 @@ static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
status = dbll_get_addr((struct dbll_library_obj *)lib,
(char *)name, &dbll_sym);
if (!status) {
- status =
- dbll_get_c_addr((struct dbll_library_obj *)
- lib, (char *)name,
- &dbll_sym);
+ status = dbll_get_c_addr(
+ (struct dbll_library_obj *)
+ lib, (char *)name,
+ &dbll_sym);
}
}
}
@@ -937,7 +929,6 @@ static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
*this, const char *name,
unsigned moduleid)
{
- struct dynload_symbol *ret_sym;
struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
struct dbll_library_obj *lib;
struct dbll_symbol *sym;
@@ -945,8 +936,10 @@ static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
lib = ldr_sym->lib;
sym = (struct dbll_symbol *)gh_find(lib->sym_tab, (char *)name);
- ret_sym = (struct dynload_symbol *)&sym->value;
- return ret_sym;
+ if (IS_ERR(sym))
+ return NULL;
+
+ return (struct dynload_symbol *)&sym->value;
}
/*
@@ -991,8 +984,10 @@ static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym
sym_ptr =
(struct dbll_symbol *)gh_insert(lib->sym_tab, (void *)name,
(void *)&symbol);
- if (sym_ptr == NULL)
+ if (IS_ERR(sym_ptr)) {
kfree(symbol.name);
+ sym_ptr = NULL;
+ }
}
if (sym_ptr != NULL)
@@ -1172,8 +1167,7 @@ func_cont:
if (!run_addr_flag)
info->run_addr = info->load_addr;
info->context = (u32) rmm_addr_obj.segid;
- dev_dbg(bridge, "%s: %s base = 0x%x len = 0x%x, "
- "info->run_addr 0x%x, info->load_addr 0x%x\n",
+ dev_dbg(bridge, "%s: %s base = 0x%x len = 0x%x, info->run_addr 0x%x, info->load_addr 0x%x\n",
__func__, info->name, info->load_addr / DSPWORDSIZE,
info->size / DSPWORDSIZE, info->run_addr,
info->load_addr);
@@ -1399,7 +1393,7 @@ void find_symbol_callback(void *elem, void *user_data)
* @sym_addr_output: Symbol Output address
* @name_output: String with the dsp symbol
*
- * This function retrieves the dsp symbol from the dsp binary.
+ * This function retrieves the dsp symbol from the dsp binary.
*/
bool dbll_find_dsp_symbol(struct dbll_library_obj *zl_lib, u32 address,
u32 offset_range, u32 *sym_addr_output,
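
The dbll.c hunks above convert the gen-hash helpers (gh_create/gh_find/gh_insert) from NULL-on-failure to the kernel's ERR_PTR convention. Below is a minimal, stand-alone sketch of that pattern using user-space stand-ins for ERR_PTR/IS_ERR/PTR_ERR; the gh_create_demo() name and its unconditional -ENOMEM failure are invented purely for illustration.

/* User-space stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Invented stand-in for gh_create(); always fails to exercise the error path. */
static void *gh_create_demo(void)
{
	return ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *sym_tab = gh_create_demo();
	int status = 0;

	if (IS_ERR(sym_tab)) {		/* same shape as the dbll_load() hunk above */
		status = PTR_ERR(sym_tab);
		sym_tab = NULL;		/* keep later NULL checks working */
	}

	printf("status %d, sym_tab %p\n", status, sym_tab);
	return 0;
}

Resetting the pointer to NULL after decoding the error is what keeps the existing "sym_tab == NULL" checks elsewhere in the driver valid.
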
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
index 6234ffb..616dc1f 100644
--- a/drivers/staging/tidspbridge/pmgr/dev.c
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -606,7 +606,7 @@ int dev_get_node_manager(struct dev_object *hdev_obj,
* ======== dev_get_symbol ========
*/
int dev_get_symbol(struct dev_object *hdev_obj,
- const char *str_sym, u32 * pul_value)
+ const char *str_sym, u32 *pul_value)
{
int status = 0;
struct cod_manager *cod_mgr;
@@ -916,8 +916,8 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
/* Local helper macro: */
#define STORE_FXN(cast, pfn) \
- (intf_fxns->pfn = ((drv_fxns->pfn != NULL) ? drv_fxns->pfn : \
- (cast)fxn_not_implemented))
+ (intf_fxns->pfn = ((drv_fxns->pfn != NULL) ? drv_fxns->pfn : \
+ (cast)fxn_not_implemented))
bridge_version = MAKEVERSION(drv_fxns->brd_api_major_version,
drv_fxns->brd_api_minor_version);
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c
index 7c9f839..fcf564a 100644
--- a/drivers/staging/tidspbridge/pmgr/dmm.c
+++ b/drivers/staging/tidspbridge/pmgr/dmm.c
@@ -217,8 +217,8 @@ int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
status = -ENOENT;
spin_unlock(&dmm_obj->dmm_lock);
- dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
- "chunk %p", __func__, dmm_mgr, addr, size, status, chunk);
+ dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, chunk %p",
+ __func__, dmm_mgr, addr, size, status, chunk);
return status;
}
@@ -268,9 +268,9 @@ int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
spin_unlock(&dmm_obj->dmm_lock);
- dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
- "rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
- prsv_addr, status, rsv_addr, rsv_size);
+ dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, rsv_addr %x, rsv_size %x\n",
+ __func__, dmm_mgr, size,
+ prsv_addr, status, rsv_addr, rsv_size);
return status;
}
@@ -299,8 +299,8 @@ int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
}
spin_unlock(&dmm_obj->dmm_lock);
- dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
- "chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);
+ dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, chunk %p\n",
+ __func__, dmm_mgr, addr, psize, status, chunk);
return status;
}
@@ -475,11 +475,11 @@ u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
}
}
spin_unlock(&dmm_mgr->dmm_lock);
- printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
+ dev_info(bridge, "Total DSP VA FREE memory = %d Mbytes\n",
freemem / (1024 * 1024));
- printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n",
+ dev_info(bridge, "Total DSP VA USED memory= %d Mbytes\n",
(((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
- printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n",
+ dev_info(bridge, "DSP VA - Biggest FREE block = %d Mbytes\n",
(bigsize * PG_SIZE4K / (1024 * 1024)));
return 0;
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 70db4ff..b7d5c8c 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -162,7 +162,7 @@ static u8 size_cmd[] = {
ARRAY_SIZE(cmm_cmd),
};
-static inline void _cp_fm_usr(void *to, const void __user * from,
+static inline void _cp_fm_usr(void *to, const void __user *from,
int *err, unsigned long bytes)
{
if (*err)
@@ -507,7 +507,7 @@ u32 mgrwrap_wait_for_bridge_events(union trapped_args *args, void *pr_ctxt)
/*
* ======== MGRWRAP_GetProcessResourceInfo ========
*/
-u32 __deprecated mgrwrap_get_process_resources_info(union trapped_args * args,
+u32 __deprecated mgrwrap_get_process_resources_info(union trapped_args *args,
void *pr_ctxt)
{
pr_err("%s: deprecated dspbridge ioctl\n", __func__);
@@ -581,7 +581,7 @@ func_end:
/*
* ======== procwrap_detach ========
*/
-u32 __deprecated procwrap_detach(union trapped_args * args, void *pr_ctxt)
+u32 __deprecated procwrap_detach(union trapped_args *args, void *pr_ctxt)
{
/* proc_detach called at bridge_release only */
pr_err("%s: deprecated dspbridge ioctl\n", __func__);
@@ -1564,7 +1564,7 @@ u32 strmwrap_free_buffer(union trapped_args *args, void *pr_ctxt)
/*
* ======== strmwrap_get_event_handle ========
*/
-u32 __deprecated strmwrap_get_event_handle(union trapped_args * args,
+u32 __deprecated strmwrap_get_event_handle(union trapped_args *args,
void *pr_ctxt)
{
pr_err("%s: deprecated dspbridge ioctl\n", __func__);
@@ -1793,7 +1793,7 @@ u32 strmwrap_select(union trapped_args *args, void *pr_ctxt)
/*
* ======== cmmwrap_calloc_buf ========
*/
-u32 __deprecated cmmwrap_calloc_buf(union trapped_args * args, void *pr_ctxt)
+u32 __deprecated cmmwrap_calloc_buf(union trapped_args *args, void *pr_ctxt)
{
/* This operation is done in kernel */
pr_err("%s: deprecated dspbridge ioctl\n", __func__);
@@ -1803,7 +1803,7 @@ u32 __deprecated cmmwrap_calloc_buf(union trapped_args * args, void *pr_ctxt)
/*
* ======== cmmwrap_free_buf ========
*/
-u32 __deprecated cmmwrap_free_buf(union trapped_args * args, void *pr_ctxt)
+u32 __deprecated cmmwrap_free_buf(union trapped_args *args, void *pr_ctxt)
{
/* This operation is done in kernel */
pr_err("%s: deprecated dspbridge ioctl\n", __func__);
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
index 3d2a26f..190ca3f 100644
--- a/drivers/staging/tidspbridge/rmgr/dbdcd.c
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -74,6 +74,47 @@ static int get_dep_lib_info(struct dcd_manager *hdcd_mgr,
enum nldr_phase phase);
/*
+ * ======== dcd_uuid_from_string ========
+ * Purpose:
+ * Converts an ANSI string to a dsp_uuid.
+ * Parameters:
+ * sz_uuid: Pointer to a string that represents a dsp_uuid object.
+ * uuid_obj: Pointer to a dsp_uuid object.
+ * Returns:
+ * 0: Success.
+ * -EINVAL: Conversion failed
+ * Requires:
+ * uuid_obj & sz_uuid are non-NULL values.
+ * Ensures:
+ * Details:
+ * We assume the string representation of a UUID has the following format:
+ * "12345678_1234_1234_1234_123456789abc".
+ */
+static int dcd_uuid_from_string(char *sz_uuid, struct dsp_uuid *uuid_obj)
+{
+ char c;
+ u64 t;
+ struct dsp_uuid uuid_tmp;
+
+ /*
+ * sscanf implementation cannot deal with hh format modifier
+ * if the converted value doesn't fit in u32. So, convert the
+ * last six bytes to u64 and memcpy what is needed
+ */
+	if (sscanf(sz_uuid, "%8x%c%4hx%c%4hx%c%2hhx%2hhx%c%llx",
+ &uuid_tmp.data1, &c, &uuid_tmp.data2, &c,
+ &uuid_tmp.data3, &c, &uuid_tmp.data4,
+ &uuid_tmp.data5, &c, &t) != 10)
+ return -EINVAL;
+
+ t = cpu_to_be64(t);
+	memcpy(&uuid_tmp.data6[0], ((char *)&t) + 2, 6);
+ *uuid_obj = uuid_tmp;
+
+ return 0;
+}
+
+/*
* ======== dcd_auto_register ========
* Purpose:
* Parses the supplied image and registers with DCD.
@@ -253,14 +294,15 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
if (!status) {
/* Create UUID value using string retrieved from
* registry. */
- uuid_uuid_from_string(sz_value, &dsp_uuid_obj);
-
- *uuid_obj = dsp_uuid_obj;
+ status = dcd_uuid_from_string(sz_value, &dsp_uuid_obj);
- /* Increment enum_refs to update reference count. */
- enum_refs++;
+ if (!status) {
+ *uuid_obj = dsp_uuid_obj;
- status = 0;
+ /* Increment enum_refs to update reference
+ * count. */
+ enum_refs++;
+ }
} else if (status == -ENODATA) {
/* At the end of enumeration. Reset enum_refs. */
enum_refs = 0;
@@ -581,24 +623,28 @@ int dcd_get_objects(struct dcd_manager *hdcd_mgr,
psz_cur = psz_coff_buf;
while ((token = strsep(&psz_cur, seps)) && *token != '\0') {
/* Retrieve UUID string. */
- uuid_uuid_from_string(token, &dsp_uuid_obj);
-
- /* Retrieve object type */
- token = strsep(&psz_cur, seps);
+ status = dcd_uuid_from_string(token, &dsp_uuid_obj);
- /* Retrieve object type */
- object_type = atoi(token);
+ if (!status) {
+ /* Retrieve object type */
+ token = strsep(&psz_cur, seps);
- /*
- * Apply register_fxn to the found DCD object.
- * Possible actions include:
- *
- * 1) Register found DCD object.
- * 2) Unregister found DCD object (when handle == NULL)
- * 3) Add overlay node.
- */
- status =
- register_fxn(&dsp_uuid_obj, object_type, handle);
+ /* Retrieve object type */
+ object_type = atoi(token);
+
+ /*
+ * Apply register_fxn to the found DCD object.
+ * Possible actions include:
+ *
+ * 1) Register found DCD object.
+ * 2) Unregister found DCD object
+ * (when handle == NULL)
+ * 3) Add overlay node.
+ */
+ status =
+ register_fxn(&dsp_uuid_obj, object_type,
+ handle);
+ }
if (status) {
/* if error occurs, break from while loop. */
break;
@@ -1001,9 +1047,12 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
token = strsep(&psz_cur, seps);
/* dsp_uuid ui_node_id */
- uuid_uuid_from_string(token,
- &gen_obj->obj_data.node_obj.ndb_props.
- ui_node_id);
+ status = dcd_uuid_from_string(token,
+ &gen_obj->obj_data.node_obj.
+ ndb_props.ui_node_id);
+ if (status)
+ break;
+
token = strsep(&psz_cur, seps);
/* ac_name */
@@ -1400,9 +1449,12 @@ static int get_dep_lib_info(struct dcd_manager *hdcd_mgr,
break;
} else {
/* Retrieve UUID string. */
- uuid_uuid_from_string(token,
- &(dep_lib_uuids
- [dep_libs]));
+ status = dcd_uuid_from_string(token,
+ &(dep_lib_uuids
+ [dep_libs]));
+ if (status)
+ break;
+
/* Is this library persistent? */
token = strsep(&psz_cur, seps);
prstnt_dep_libs[dep_libs] = atoi(token);
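
For reference, a user-space sketch of the parsing that the new dcd_uuid_from_string() performs above, assuming the documented "12345678_1234_1234_1234_123456789abc" layout. The demo_* names are invented, and glibc's htobe64() stands in for the kernel's cpu_to_be64().

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_uuid {
	uint32_t data1;
	uint16_t data2, data3;
	uint8_t  data4, data5;
	uint8_t  data6[6];
};

/* Same approach as the patch: the last six bytes are scanned as one 64-bit
 * value, byte-swapped to big-endian, and the low six bytes copied out. */
static int demo_uuid_from_string(const char *s, struct demo_uuid *u)
{
	unsigned int d1;
	unsigned short d2, d3;
	unsigned char d4, d5;
	unsigned long long t;
	uint64_t be;
	char c;

	if (sscanf(s, "%8x%c%4hx%c%4hx%c%2hhx%2hhx%c%llx",
		   &d1, &c, &d2, &c, &d3, &c, &d4, &d5, &c, &t) != 10)
		return -1;

	u->data1 = d1;
	u->data2 = d2;
	u->data3 = d3;
	u->data4 = d4;
	u->data5 = d5;

	be = htobe64(t);			/* stand-in for cpu_to_be64() */
	memcpy(u->data6, (char *)&be + 2, 6);	/* keep only the low six bytes */
	return 0;
}

int main(void)
{
	struct demo_uuid u;

	if (demo_uuid_from_string("12345678_1234_1234_1234_123456789abc", &u))
		return 1;
	printf("%08x-%04hx-%04hx-%02hhx%02hhx\n",
	       u.data1, u.data2, u.data3, u.data4, u.data5);
	return 0;
}
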
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 56e355b..74d31da 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -23,7 +23,6 @@
#include <linux/pm.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
@@ -258,6 +257,8 @@ err:
/* This function maps kernel space memory to user space memory. */
static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
{
+ unsigned long base_pgoff;
+ int status;
struct omap_dsp_platform_data *pdata =
omap_dspbridge_dev->dev.platform_data;
@@ -269,9 +270,31 @@ static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_start, vma->vm_end, vma->vm_page_prot,
vma->vm_flags);
- return vm_iomap_memory(vma,
- pdata->phys_mempool_base,
- pdata->phys_mempool_size);
+ /*
+ * vm_iomap_memory() expects vma->vm_pgoff to be expressed as an offset
+ * from the start of the physical memory pool, but we're called with
+ * a pfn (physical page number) stored there instead.
+ *
+ * To avoid duplicating lots of tricky overflow checking logic,
+ * temporarily convert vma->vm_pgoff to the offset vm_iomap_memory()
+ * expects, but restore the original value once the mapping has been
+ * created.
+ */
+ base_pgoff = pdata->phys_mempool_base >> PAGE_SHIFT;
+
+ if (vma->vm_pgoff < base_pgoff)
+ return -EINVAL;
+
+ vma->vm_pgoff -= base_pgoff;
+
+ status = vm_iomap_memory(vma,
+ pdata->phys_mempool_base,
+ pdata->phys_mempool_size);
+
+ /* Restore the original value of vma->vm_pgoff */
+ vma->vm_pgoff += base_pgoff;
+
+ return status;
}
static const struct file_operations bridge_fops = {
@@ -566,7 +589,7 @@ func_cont:
class_destroy(bridge_class);
}
- return 0;
+ return status;
}
#ifdef CONFIG_PM
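
The arithmetic behind the bridge_mmap() change above, as a tiny user-space sketch: the caller supplies a pfn in vm_pgoff, vm_iomap_memory() wants an offset into the memory pool, so the pool's base pfn is subtracted before the call and added back afterwards. The DEMO_* constants below are made-up values, not the real OMAP pool layout.

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12			/* 4 KiB pages */

/* Hypothetical pool base and caller-supplied pfn, for illustration only. */
#define DEMO_POOL_BASE  0x87000000UL		/* phys_mempool_base */
#define DEMO_VM_PGOFF   (0x87042000UL >> DEMO_PAGE_SHIFT)

int main(void)
{
	unsigned long base_pgoff = DEMO_POOL_BASE >> DEMO_PAGE_SHIFT;
	unsigned long vm_pgoff = DEMO_VM_PGOFF;

	if (vm_pgoff < base_pgoff)		/* reject pfns below the pool */
		return 1;

	vm_pgoff -= base_pgoff;			/* now an offset into the pool */
	printf("offset into pool: 0x%lx bytes\n", vm_pgoff << DEMO_PAGE_SHIFT);

	vm_pgoff += base_pgoff;			/* restore the original value */
	return 0;
}

Restoring vm_pgoff at the end mirrors the patch: the temporary conversion exists only so vm_iomap_memory() can do its own range checking.
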
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index db48a78..5d1d4a1 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -102,11 +102,13 @@ static int tweak_clear_halt_cmd(struct urb *urb)
ret = usb_clear_halt(urb->dev, target_pipe);
if (ret < 0)
- dev_err(&urb->dev->dev, "usb_clear_halt error: devnum %d endp "
- "%d ret %d\n", urb->dev->devnum, target_endp, ret);
+ dev_err(&urb->dev->dev,
+ "usb_clear_halt error: devnum %d endp %d ret %d\n",
+ urb->dev->devnum, target_endp, ret);
else
- dev_info(&urb->dev->dev, "usb_clear_halt done: devnum %d endp "
- "%d\n", urb->dev->devnum, target_endp);
+ dev_info(&urb->dev->dev,
+ "usb_clear_halt done: devnum %d endp %d\n",
+ urb->dev->devnum, target_endp);
return ret;
}
@@ -127,11 +129,13 @@ static int tweak_set_interface_cmd(struct urb *urb)
ret = usb_set_interface(urb->dev, interface, alternate);
if (ret < 0)
- dev_err(&urb->dev->dev, "usb_set_interface error: inf %u alt "
- "%u ret %d\n", interface, alternate, ret);
+ dev_err(&urb->dev->dev,
+ "usb_set_interface error: inf %u alt %u ret %d\n",
+ interface, alternate, ret);
else
- dev_info(&urb->dev->dev, "usb_set_interface done: inf %u alt "
- "%u\n", interface, alternate);
+ dev_info(&urb->dev->dev,
+ "usb_set_interface done: inf %u alt %u\n",
+ interface, alternate);
return ret;
}
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index e3fc749..4470cd3 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -155,8 +155,9 @@ static void usbip_dump_usb_device(struct usb_device *udev)
dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus);
- dev_dbg(dev, "descriptor %p, config %p, actconfig %p, "
- "rawdescriptors %p\n", &udev->descriptor, udev->config,
+ dev_dbg(dev,
+ "descriptor %p, config %p, actconfig %p, rawdescriptors %p\n",
+ &udev->descriptor, udev->config,
udev->actconfig, udev->rawdescriptors);
dev_dbg(dev, "have_langid %d, string_langid %d\n",
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_common.c b/drivers/staging/usbip/userspace/libsrc/usbip_common.c
index 17e08e0..66f03cc 100644
--- a/drivers/staging/usbip/userspace/libsrc/usbip_common.c
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_common.c
@@ -165,7 +165,7 @@ int read_attr_speed(struct sysfs_device *dev)
goto err;
}
- ret = sscanf(attr->value, "%s\n", speed);
+ ret = sscanf(attr->value, "%99s\n", speed);
if (ret < 1) {
dbg("sscanf failed");
goto err;
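
The "%99s" width added above keeps sscanf() from writing past the destination buffer. A quick stand-alone illustration, assuming the 100-byte buffer that the %99s width implies:

#include <stdio.h>

int main(void)
{
	char speed[100];
	const char *attr = "480";	/* hypothetical sysfs attribute value */

	/* %99s leaves room for the terminating NUL in a 100-byte buffer;
	 * a bare %s would overrun it on oversized input. */
	if (sscanf(attr, "%99s", speed) != 1) {
		fprintf(stderr, "sscanf failed\n");
		return 1;
	}
	printf("speed: %s\n", speed);
	return 0;
}
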
diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
index 1091bb2..209df9b 100644
--- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
+++ b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
@@ -4,6 +4,8 @@
#include "usbip_common.h"
#include "vhci_driver.h"
+#include <limits.h>
+#include <netdb.h>
#undef PROGNAME
#define PROGNAME "libusbip"
@@ -72,7 +74,7 @@ static int parse_status(char *value)
unsigned long socket;
char lbusid[SYSFS_BUS_ID_SIZE];
- ret = sscanf(c, "%d %d %d %x %lx %s\n",
+ ret = sscanf(c, "%d %d %d %x %lx %31s\n",
&port, &status, &speed,
&devid, &socket, lbusid);
@@ -337,6 +339,29 @@ err:
return -1;
}
+static int read_record(int rhport, char *host, char *port, char *busid)
+{
+ FILE *file;
+ char path[PATH_MAX+1];
+
+ snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", rhport);
+
+ file = fopen(path, "r");
+ if (!file) {
+ err("fopen");
+ return -1;
+ }
+
+ if (fscanf(file, "%s %s %s\n", host, port, busid) != 3) {
+ err("fscanf");
+ fclose(file);
+ return -1;
+ }
+
+ fclose(file);
+
+ return 0;
+}
/* ---------------------------------------------------------------------- */
@@ -535,3 +560,45 @@ int usbip_vhci_detach_device(uint8_t port)
return 0;
}
+
+int usbip_vhci_imported_device_dump(struct usbip_imported_device *idev)
+{
+ char product_name[100];
+ char host[NI_MAXHOST] = "unknown host";
+ char serv[NI_MAXSERV] = "unknown port";
+ char remote_busid[SYSFS_BUS_ID_SIZE];
+ int ret;
+ int read_record_error = 0;
+
+ if (idev->status == VDEV_ST_NULL || idev->status == VDEV_ST_NOTASSIGNED)
+ return 0;
+
+ ret = read_record(idev->port, host, serv, remote_busid);
+ if (ret) {
+ err("read_record");
+ read_record_error = 1;
+ }
+
+ printf("Port %02d: <%s> at %s\n", idev->port,
+ usbip_status_string(idev->status),
+ usbip_speed_string(idev->udev.speed));
+
+ usbip_names_get_product(product_name, sizeof(product_name),
+ idev->udev.idVendor, idev->udev.idProduct);
+
+ printf(" %s\n", product_name);
+
+ if (!read_record_error) {
+ printf("%10s -> usbip://%s:%s/%s\n", idev->udev.busid,
+ host, serv, remote_busid);
+ printf("%10s -> remote bus/dev %03d/%03d\n", " ",
+ idev->busnum, idev->devnum);
+ } else {
+ printf("%10s -> unknown host, remote port and remote busid\n",
+ idev->udev.busid);
+ printf("%10s -> remote bus/dev %03d/%03d\n", " ",
+ idev->busnum, idev->devnum);
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.h b/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
index 89949aa..e071f80 100644
--- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
+++ b/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
@@ -64,4 +64,6 @@ int usbip_vhci_attach_device(uint8_t port, int sockfd, uint8_t busnum,
int usbip_vhci_detach_device(uint8_t port);
+int usbip_vhci_imported_device_dump(struct usbip_imported_device *idev);
+
#endif /* __VHCI_DRIVER_H */
diff --git a/drivers/staging/usbip/userspace/src/Makefile.am b/drivers/staging/usbip/userspace/src/Makefile.am
index a113003..b4f8c4b0 100644
--- a/drivers/staging/usbip/userspace/src/Makefile.am
+++ b/drivers/staging/usbip/userspace/src/Makefile.am
@@ -6,7 +6,7 @@ sbin_PROGRAMS := usbip usbipd
usbip_SOURCES := usbip.h utils.h usbip.c utils.c usbip_network.c \
usbip_attach.c usbip_detach.c usbip_list.c \
- usbip_bind.c usbip_unbind.c
+ usbip_bind.c usbip_unbind.c usbip_port.c
usbipd_SOURCES := usbip_network.h usbipd.c usbip_network.c
diff --git a/drivers/staging/usbip/userspace/src/usbip.c b/drivers/staging/usbip/userspace/src/usbip.c
index 04a5f20..d7599d9 100644
--- a/drivers/staging/usbip/userspace/src/usbip.c
+++ b/drivers/staging/usbip/userspace/src/usbip.c
@@ -93,6 +93,12 @@ static const struct command cmds[] = {
.help = "Unbind device from " USBIP_HOST_DRV_NAME ".ko",
.usage = usbip_unbind_usage
},
+ {
+ .name = "port",
+ .fn = usbip_port_show,
+ .help = "Show imported USB devices",
+ .usage = NULL
+ },
{ NULL, NULL, NULL, NULL }
};
diff --git a/drivers/staging/usbip/userspace/src/usbip.h b/drivers/staging/usbip/userspace/src/usbip.h
index 14d4a47..84fe66a 100644
--- a/drivers/staging/usbip/userspace/src/usbip.h
+++ b/drivers/staging/usbip/userspace/src/usbip.h
@@ -29,6 +29,7 @@ int usbip_detach(int argc, char *argv[]);
int usbip_list(int argc, char *argv[]);
int usbip_bind(int argc, char *argv[]);
int usbip_unbind(int argc, char *argv[]);
+int usbip_port_show(int argc, char *argv[]);
void usbip_attach_usage(void);
void usbip_detach_usage(void);
diff --git a/drivers/staging/usbip/userspace/src/usbip_port.c b/drivers/staging/usbip/userspace/src/usbip_port.c
new file mode 100644
index 0000000..52aa168
--- /dev/null
+++ b/drivers/staging/usbip/userspace/src/usbip_port.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "vhci_driver.h"
+#include "usbip_common.h"
+
+static int list_imported_devices()
+{
+ int i;
+ struct usbip_imported_device *idev;
+ int ret;
+
+ ret = usbip_vhci_driver_open();
+ if (ret < 0) {
+ err("open vhci_driver");
+ return -1;
+ }
+
+ printf("Imported USB devices\n");
+ printf("====================\n");
+
+ for (i = 0; i < vhci_driver->nports; i++) {
+ idev = &vhci_driver->idev[i];
+
+ if (usbip_vhci_imported_device_dump(idev) < 0)
+ ret = -1;
+ }
+
+ usbip_vhci_driver_close();
+
+ return ret;
+
+}
+
+int usbip_port_show(__attribute__((unused)) int argc,
+ __attribute__((unused)) char *argv[])
+{
+ int ret;
+
+ ret = list_imported_devices();
+ if (ret < 0)
+ err("list imported devices");
+
+ return ret;
+}
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index e810ad5..72391ef 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -220,8 +220,7 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc)
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = 0x29;
desc->bDescLength = 9;
- desc->wHubCharacteristics = (__force __u16)
- (__constant_cpu_to_le16(0x0001));
+ desc->wHubCharacteristics = (__constant_cpu_to_le16(0x0001));
desc->bNbrPorts = VHCI_NPORTS;
desc->u.hs.DeviceRemovable[0] = 0xff;
desc->u.hs.DeviceRemovable[1] = 0xff;
@@ -348,8 +347,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
USB_PORT_STAT_ENABLE;
}
}
- ((u16 *) buf)[0] = cpu_to_le16(dum->port_status[rhport]);
- ((u16 *) buf)[1] = cpu_to_le16(dum->port_status[rhport] >> 16);
+ ((__le16 *) buf)[0] = cpu_to_le16(dum->port_status[rhport]);
+ ((__le16 *) buf)[1] = cpu_to_le16(dum->port_status[rhport] >> 16);
usbip_dbg_vhci_rh(" GetPortStatus bye %x %x\n", ((u16 *)buf)[0],
((u16 *)buf)[1]);
@@ -537,7 +536,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
goto no_need_xmit;
case USB_REQ_GET_DESCRIPTOR:
- if (ctrlreq->wValue == (USB_DT_DEVICE << 8))
+ if (ctrlreq->wValue == cpu_to_le16(USB_DT_DEVICE << 8))
usbip_dbg_vhci_hc("Not yet?: "
"Get_Descriptor to device 0 "
"(get max pipe size)\n");
@@ -918,7 +917,7 @@ static void vhci_stop(struct usb_hcd *hcd)
sysfs_remove_group(&vhci_dev(vhci)->kobj, &dev_attr_group);
/* 2. shutdown all the ports of vhci_hcd */
- for (rhport = 0 ; rhport < VHCI_NPORTS; rhport++) {
+ for (rhport = 0; rhport < VHCI_NPORTS; rhport++) {
struct vhci_device *vdev = &vhci->vdev[rhport];
usbip_event_add(&vdev->ud, VDEV_EVENT_REMOVED);
@@ -1108,7 +1107,7 @@ static struct platform_driver vhci_driver = {
.suspend = vhci_hcd_suspend,
.resume = vhci_hcd_resume,
.driver = {
- .name = (char *) driver_name,
+ .name = driver_name,
.owner = THIS_MODULE,
},
};
@@ -1125,7 +1124,7 @@ static void the_pdev_release(struct device *dev)
static struct platform_device the_pdev = {
/* should be the same name as driver_name */
- .name = (char *) driver_name,
+ .name = driver_name,
.id = -1,
.dev = {
.release = the_pdev_release,
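
The GetPortStatus hunk above only tightens the sparse annotation (cpu_to_le16() returns __le16, so the buffer is cast to __le16 *); the byte layout itself is unchanged. A user-space sketch of that layout, with htole16() standing in for cpu_to_le16() and a made-up status value:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t port_status = 0x01000103;	/* made-up status/change bits */
	uint16_t buf[2];
	const uint8_t *p = (const uint8_t *)buf;

	/* Low 16 bits first, high 16 bits second, both little-endian, the
	 * same ordering vhci_hub_control() uses for GetPortStatus. */
	buf[0] = htole16(port_status & 0xffff);
	buf[1] = htole16(port_status >> 16);

	printf("wire bytes: %02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}
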
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index daec155..7927927 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -147,6 +147,7 @@ static const struct file_operations vme_user_fops = {
.write = vme_user_write,
.llseek = vme_user_llseek,
.unlocked_ioctl = vme_user_unlocked_ioctl,
+ .compat_ioctl = vme_user_unlocked_ioctl,
};
@@ -663,9 +664,16 @@ err_nocard:
static int vme_user_match(struct vme_dev *vdev)
{
- if (vdev->num >= VME_USER_BUS_MAX)
- return 0;
- return 1;
+ int i;
+
+ int cur_bus = vme_bus_num(vdev);
+ int cur_slot = vme_slot_num(vdev);
+
+ for (i = 0; i < bus_num; i++)
+ if ((cur_bus == bus[i]) && (cur_slot == vdev->num))
+ return 1;
+
+ return 0;
}
/*
diff --git a/drivers/staging/vme/devices/vme_user.h b/drivers/staging/vme/devices/vme_user.h
index 280ccc7..b8cc7bc 100644
--- a/drivers/staging/vme/devices/vme_user.h
+++ b/drivers/staging/vme/devices/vme_user.h
@@ -7,18 +7,18 @@
* VMEbus Master Window Configuration Structure
*/
struct vme_master {
- int enable; /* State of Window */
- unsigned long long vme_addr; /* Starting Address on the VMEbus */
- unsigned long long size; /* Window Size */
- u32 aspace; /* Address Space */
- u32 cycle; /* Cycle properties */
- u32 dwidth; /* Maximum Data Width */
+ __u32 enable; /* State of Window */
+ __u64 vme_addr; /* Starting Address on the VMEbus */
+ __u64 size; /* Window Size */
+ __u32 aspace; /* Address Space */
+ __u32 cycle; /* Cycle properties */
+ __u32 dwidth; /* Maximum Data Width */
#if 0
char prefetchenable; /* Prefetch Read Enable State */
int prefetchsize; /* Prefetch Read Size (Cache Lines) */
char wrpostenable; /* Write Post State */
#endif
-};
+} __packed;
/*
@@ -31,17 +31,17 @@ struct vme_master {
/* VMEbus Slave Window Configuration Structure */
struct vme_slave {
- int enable; /* State of Window */
- unsigned long long vme_addr; /* Starting Address on the VMEbus */
- unsigned long long size; /* Window Size */
- u32 aspace; /* Address Space */
- u32 cycle; /* Cycle properties */
+ __u32 enable; /* State of Window */
+ __u64 vme_addr; /* Starting Address on the VMEbus */
+ __u64 size; /* Window Size */
+ __u32 aspace; /* Address Space */
+ __u32 cycle; /* Cycle properties */
#if 0
char wrpostenable; /* Write Post State */
char rmwlock; /* Lock PCI during RMW Cycles */
char data64bitcapable; /* non-VMEbus capable of 64-bit Data */
#endif
-};
+} __packed;
struct vme_irq_id {
__u8 level;
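
The vme_user.h rework above switches the ioctl structures to fixed-width __u32/__u64 members and packs them, so 32-bit and 64-bit user space see one layout, which is presumably also what lets vme_user.c reuse the native handler for .compat_ioctl. A small user-space sketch of the idea; struct demo_master is an illustration, not the real ABI, and the GCC-style __attribute__((packed)) stands in for the kernel's __packed.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative window descriptor with only fixed-width members, packed so
 * that no padding differs between 32-bit and 64-bit builds. */
struct demo_master {
	uint32_t enable;
	uint64_t vme_addr;
	uint64_t size;
	uint32_t aspace;
	uint32_t cycle;
	uint32_t dwidth;
} __attribute__((packed));

/* 4 + 8 + 8 + 4 + 4 + 4 = 32 bytes on every ABI once packed. */
static_assert(sizeof(struct demo_master) == 32, "unexpected layout");

int main(void)
{
	printf("sizeof(struct demo_master) = %zu\n", sizeof(struct demo_master));
	return 0;
}

Without the packing, an unpacked 64-bit build would insert 4 bytes of padding before vme_addr and the structure sizes would no longer match across ABIs.
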
diff --git a/drivers/staging/vt6655/80211hdr.h b/drivers/staging/vt6655/80211hdr.h
index ba53340..ba155cd 100644
--- a/drivers/staging/vt6655/80211hdr.h
+++ b/drivers/staging/vt6655/80211hdr.h
@@ -155,7 +155,7 @@
#ifdef __BIG_ENDIAN
/* GET & SET Frame Control bit */
-#define WLAN_GET_FC_PRVER(n) ((((unsigned short)(n) >> 8) & (BIT0 | BIT1))
+#define WLAN_GET_FC_PRVER(n) (((unsigned short)(n) >> 8) & (BIT0 | BIT1))
#define WLAN_GET_FC_FTYPE(n) ((((unsigned short)(n) >> 8) & (BIT2 | BIT3)) >> 2)
#define WLAN_GET_FC_FSTYPE(n) ((((unsigned short)(n) >> 8) & (BIT4|BIT5|BIT6|BIT7)) >> 4)
#define WLAN_GET_FC_TODS(n) ((((unsigned short)(n) << 8) & (BIT8)) >> 8)
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 959568a..fa14659 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -1865,7 +1865,7 @@ BBvCalculateParameter(
break;
case RATE_5M:
- if (bCCK == false)
+ if (!bCCK)
cbBitCount++;
cbUsCount = (cbBitCount * 10) / 55;
cbTmp = (cbUsCount * 55) / 10;
@@ -1879,7 +1879,7 @@ BBvCalculateParameter(
case RATE_11M:
- if (bCCK == false)
+ if (!bCCK)
cbBitCount++;
cbUsCount = cbBitCount / 11;
cbTmp = cbUsCount * 11;
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
index a23b591..d7efd017 100644
--- a/drivers/staging/vt6655/bssdb.c
+++ b/drivers/staging/vt6655/bssdb.c
@@ -64,7 +64,6 @@
/*--------------------- Static Variables --------------------------*/
static int msglevel = MSG_LEVEL_INFO;
-//static int msglevel =MSG_LEVEL_DEBUG;
const unsigned short awHWRetry0[5][5] = {
{RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
@@ -131,27 +130,26 @@ BSSpSearchBSSList(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
"BSSpSearchBSSList BSSID[%pM]\n", pbyDesireBSSID);
if ((!is_broadcast_ether_addr(pbyDesireBSSID)) &&
- (memcmp(pbyDesireBSSID, ZeroBSSID, 6) != 0)) {
+ (memcmp(pbyDesireBSSID, ZeroBSSID, 6) != 0))
pbyBSSID = pbyDesireBSSID;
- }
}
if (pbyDesireSSID != NULL) {
- if (((PWLAN_IE_SSID)pbyDesireSSID)->len != 0) {
+ if (((PWLAN_IE_SSID)pbyDesireSSID)->len != 0)
pSSID = (PWLAN_IE_SSID) pbyDesireSSID;
- }
}
if (pbyBSSID != NULL) {
- // match BSSID first
+ /* match BSSID first */
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
pCurrBSS = &(pMgmt->sBSSList[ii]);
- if (pDevice->bLinkPass == false) pCurrBSS->bSelected = false;
+ if (!pDevice->bLinkPass)
+ pCurrBSS->bSelected = false;
if ((pCurrBSS->bActive) &&
- (pCurrBSS->bSelected == false)) {
+ (!pCurrBSS->bSelected)) {
if (ether_addr_equal(pCurrBSS->abyBSSID,
pbyBSSID)) {
if (pSSID != NULL) {
- // compare ssid
+ /* compare ssid */
if (!memcmp(pSSID->abySSID,
((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID,
pSSID->len)) {
@@ -176,26 +174,26 @@ BSSpSearchBSSList(
}
}
} else {
- // ignore BSSID
+ /* ignore BSSID */
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
pCurrBSS = &(pMgmt->sBSSList[ii]);
- //2007-0721-01<Add>by MikeLiu
+ /* 2007-0721-01<Add>by MikeLiu */
pCurrBSS->bSelected = false;
if (pCurrBSS->bActive) {
if (pSSID != NULL) {
- // matched SSID
+ /* matched SSID */
if (!!memcmp(pSSID->abySSID,
((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID,
pSSID->len) ||
(pSSID->len != ((PWLAN_IE_SSID)pCurrBSS->abySSID)->len)) {
- // SSID not match skip this BSS
+ /* SSID not match skip this BSS */
continue;
}
}
if (((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo)) ||
((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo))
) {
- // Type not match skip this BSS
+ /* Type not match skip this BSS */
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BSS type mismatch.... Config[%d] BSS[0x%04x]\n", pMgmt->eConfigMode, pCurrBSS->wCapInfo);
continue;
}
@@ -203,50 +201,23 @@ BSSpSearchBSSList(
if (ePhyType != PHY_TYPE_AUTO) {
if (((ePhyType == PHY_TYPE_11A) && (PHY_TYPE_11A != pCurrBSS->eNetworkTypeInUse)) ||
((ePhyType != PHY_TYPE_11A) && (PHY_TYPE_11A == pCurrBSS->eNetworkTypeInUse))) {
- // PhyType not match skip this BSS
+ /* PhyType not match skip this BSS */
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Physical type mismatch.... ePhyType[%d] BSS[%d]\n", ePhyType, pCurrBSS->eNetworkTypeInUse);
continue;
}
}
-/*
- if (pMgmt->eAuthenMode < WMAC_AUTH_WPA) {
- if (pCurrBSS->bWPAValid == true) {
- // WPA AP will reject connection of station without WPA enable.
- continue;
- }
- } else if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)) {
- if (pCurrBSS->bWPAValid == false) {
- // station with WPA enable can't join NonWPA AP.
- continue;
- }
- } else if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
- if (pCurrBSS->bWPA2Valid == false) {
- // station with WPA2 enable can't join NonWPA2 AP.
- continue;
- }
- }
-*/
+
if (pSelect == NULL) {
pSelect = pCurrBSS;
} else {
- // compare RSSI, select signal strong one
- if (pCurrBSS->uRSSI < pSelect->uRSSI) {
+ /* compare RSSI, select signal strong one */
+ if (pCurrBSS->uRSSI < pSelect->uRSSI)
pSelect = pCurrBSS;
- }
}
}
}
if (pSelect != NULL) {
pSelect->bSelected = true;
-/*
- if (pDevice->bRoaming == false) {
- // Einsn Add @20070907
- memset(pbyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(pbyDesireSSID,pCurrBSS->abySSID,WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- }*/
-
return pSelect;
}
}
@@ -278,7 +249,6 @@ BSSvClearBSSList(
if (pMgmt->sBSSList[ii].bActive &&
ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
pMgmt->abyCurrBSSID)) {
- // bKeepCurrBSSID = false;
continue;
}
}
@@ -385,7 +355,7 @@ BSSbInsertToBSSList(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get free KnowBSS node failed.\n");
return false;
}
- // save the BSS info
+ /* save the BSS info */
pBSSList->bActive = true;
memcpy(pBSSList->abyBSSID, abyBSSIDAddr, WLAN_BSSID_LEN);
HIDWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(HIDWORD(qwTimestamp));
@@ -416,15 +386,14 @@ BSSbInsertToBSSList(
pBSSList->sERP.byERP = psERP->byERP;
pBSSList->sERP.bERPExist = psERP->bERPExist;
- // Check if BSS is 802.11a/b/g
+ /* check if BSS is 802.11a/b/g */
if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
} else {
- if (pBSSList->sERP.bERPExist == true) {
+ if (pBSSList->sERP.bERPExist)
pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
- } else {
+ else
pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
- }
}
pBSSList->byRxRate = pRxPacket->byRxRate;
@@ -434,10 +403,9 @@ BSSbInsertToBSSList(
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
(pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- // assoc with BSS
- if (pBSSList == pMgmt->pCurrBSS) {
+ /* assoc with BSS */
+ if (pBSSList == pMgmt->pCurrBSS)
bParsingQuiet = true;
- }
}
WPA_ClearRSN(pBSSList);
@@ -463,7 +431,7 @@ BSSbInsertToBSSList(
}
}
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pBSSList->bWPA2Valid == true)) {
+ if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || pBSSList->bWPA2Valid) {
PSKeyItem pTransmitKey = NULL;
bool bIs802_1x = false;
@@ -473,13 +441,13 @@ BSSbInsertToBSSList(
break;
}
}
- if ((bIs802_1x == true) && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
+ if (bIs802_1x && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
(!memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID, pSSID->len))) {
bAdd_PMKID_Candidate((void *)pDevice, pBSSList->abyBSSID, &pBSSList->sRSNCapObj);
- if ((pDevice->bLinkPass == true) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- if ((KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) == true) ||
- (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, GROUP_KEY, &pTransmitKey) == true)) {
+ if (pDevice->bLinkPass && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
+ if (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) ||
+ KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, GROUP_KEY, &pTransmitKey)) {
pDevice->gsPMKIDCandidate.StatusType = Ndis802_11StatusType_PMKID_CandidateList;
pDevice->gsPMKIDCandidate.Version = 1;
@@ -490,7 +458,7 @@ BSSbInsertToBSSList(
}
if (pDevice->bUpdateBBVGA) {
- // Moniter if RSSI is too strong.
+ /* monitor if RSSI is too strong */
pBSSList->byRSSIStatCnt = 0;
RFvRSSITodBm(pDevice, (unsigned char)(pRxPacket->uRSSI), &pBSSList->ldBmMAX);
pBSSList->ldBmAverage[0] = pBSSList->ldBmMAX;
@@ -498,16 +466,15 @@ BSSbInsertToBSSList(
pBSSList->ldBmAverage[ii] = 0;
}
- if ((pIE_Country != NULL) &&
- (pMgmt->b11hEnable == true)) {
+ if ((pIE_Country != NULL) && pMgmt->b11hEnable) {
set_country_info(pMgmt->pAdapter, pBSSList->eNetworkTypeInUse,
pIE_Country);
}
- if ((bParsingQuiet == true) && (pIE_Quiet != NULL)) {
+ if (bParsingQuiet && (pIE_Quiet != NULL)) {
if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
(((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
- // valid EID
+ /* valid EID */
if (pQuiet == NULL) {
pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
CARDbSetQuiet(pMgmt->pAdapter,
@@ -530,8 +497,7 @@ BSSbInsertToBSSList(
}
}
- if ((bParsingQuiet == true) &&
- (pQuiet != NULL)) {
+ if (bParsingQuiet && (pQuiet != NULL)) {
CARDbStartQuiet(pMgmt->pAdapter);
}
@@ -552,7 +518,7 @@ BSSbInsertToBSSList(
* true if success.
*
-*/
-// TODO: input structure modify
+/* TODO: input structure modify */
bool
BSSbUpdateToBSSList(
@@ -593,7 +559,6 @@ BSSbUpdateToBSSList(
pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
pBSSList->uClearCount = 0;
pBSSList->uChannel = byCurrChannel;
-// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BSSbUpdateToBSSList: pBSSList->uChannel: %d\n", pBSSList->uChannel);
if (pSSID->len > WLAN_SSID_MAXLEN)
pSSID->len = WLAN_SSID_MAXLEN;
@@ -602,23 +567,21 @@ BSSbUpdateToBSSList(
memcpy(pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
memcpy(pBSSList->abySuppRates, pSuppRates, pSuppRates->len + WLAN_IEHDR_LEN);
- if (pExtSuppRates != NULL) {
+ if (pExtSuppRates != NULL)
memcpy(pBSSList->abyExtSuppRates, pExtSuppRates, pExtSuppRates->len + WLAN_IEHDR_LEN);
- } else {
+ else
memset(pBSSList->abyExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- }
pBSSList->sERP.byERP = psERP->byERP;
pBSSList->sERP.bERPExist = psERP->bERPExist;
- // Check if BSS is 802.11a/b/g
+ /* check if BSS is 802.11a/b/g */
if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
} else {
- if (pBSSList->sERP.bERPExist == true) {
+ if (pBSSList->sERP.bERPExist)
pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
- } else {
+ else
pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
- }
}
pBSSList->byRxRate = pRxPacket->byRxRate;
@@ -629,13 +592,12 @@ BSSbUpdateToBSSList(
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
(pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- // assoc with BSS
- if (pBSSList == pMgmt->pCurrBSS) {
+ /* assoc with BSS */
+ if (pBSSList == pMgmt->pCurrBSS)
bParsingQuiet = true;
- }
}
- WPA_ClearRSN(pBSSList); //mike update
+ WPA_ClearRSN(pBSSList); /* mike update */
if (pRSNWPA != NULL) {
unsigned int uLen = pRSNWPA->len + 2;
@@ -646,7 +608,7 @@ BSSbUpdateToBSSList(
}
}
- WPA2_ClearRSN(pBSSList); //mike update
+ WPA2_ClearRSN(pBSSList); /* mike update */
if (pRSN != NULL) {
unsigned int uLen = pRSN->len + 2;
@@ -659,27 +621,25 @@ BSSbUpdateToBSSList(
if (pRxPacket->uRSSI != 0) {
RFvRSSITodBm(pDevice, (unsigned char)(pRxPacket->uRSSI), &ldBm);
- // Moniter if RSSI is too strong.
+ /* monitor if RSSI is too strong */
pBSSList->byRSSIStatCnt++;
pBSSList->byRSSIStatCnt %= RSSI_STAT_COUNT;
pBSSList->ldBmAverage[pBSSList->byRSSIStatCnt] = ldBm;
for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
- if (pBSSList->ldBmAverage[ii] != 0) {
+ if (pBSSList->ldBmAverage[ii] != 0)
pBSSList->ldBmMAX = max(pBSSList->ldBmAverage[ii], ldBm);
- }
}
}
- if ((pIE_Country != NULL) &&
- (pMgmt->b11hEnable == true)) {
+ if ((pIE_Country != NULL) && pMgmt->b11hEnable) {
set_country_info(pMgmt->pAdapter, pBSSList->eNetworkTypeInUse,
pIE_Country);
}
- if ((bParsingQuiet == true) && (pIE_Quiet != NULL)) {
+ if (bParsingQuiet && (pIE_Quiet != NULL)) {
if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
(((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
- // valid EID
+ /* valid EID */
if (pQuiet == NULL) {
pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
CARDbSetQuiet(pMgmt->pAdapter,
@@ -702,8 +662,7 @@ BSSbUpdateToBSSList(
}
}
- if ((bParsingQuiet == true) &&
- (pQuiet != NULL)) {
+ if (bParsingQuiet && (pQuiet != NULL)) {
CARDbStartQuiet(pMgmt->pAdapter);
}
@@ -732,7 +691,7 @@ BSSDBbIsSTAInNodeDB(void *pMgmtObject, unsigned char *abyDstAddr,
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
unsigned int ii;
- // Index = 0 reserved for AP Node
+ /* Index = 0 reserved for AP Node */
for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
if (ether_addr_equal(abyDstAddr,
@@ -765,8 +724,10 @@ BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
unsigned int BigestCount = 0;
unsigned int SelectIndex;
struct sk_buff *skb;
- // Index = 0 reserved for AP Node (In STA mode)
- // Index = 0 reserved for Broadcast/MultiCast (In AP mode)
+ /*
+ * Index = 0 reserved for AP Node (In STA mode)
+ * Index = 0 reserved for Broadcast/MultiCast (In AP mode)
+ */
SelectIndex = 1;
for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
@@ -779,11 +740,11 @@ BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
}
}
- // if not found replace uInActiveCount is largest one.
+ /* if not found replace uInActiveCount is largest one */
if (ii == (MAX_NODE_NUM + 1)) {
*puNodeIndex = SelectIndex;
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Replace inactive node = %d\n", SelectIndex);
- // clear ps buffer
+ /* clear ps buffer */
if (pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue.next != NULL) {
while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue)) != NULL)
dev_kfree_skb(skb);
@@ -795,7 +756,7 @@ BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
memset(&pMgmt->sNodeDBTable[*puNodeIndex], 0, sizeof(KnownNodeDB));
pMgmt->sNodeDBTable[*puNodeIndex].bActive = true;
pMgmt->sNodeDBTable[*puNodeIndex].uRatePollTimeout = FALLBACK_POLL_SECOND;
- // for AP mode PS queue
+ /* for AP mode PS queue */
skb_queue_head_init(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue);
pMgmt->sNodeDBTable[*puNodeIndex].byAuthSequence = 0;
pMgmt->sNodeDBTable[*puNodeIndex].wEnQueueCnt = 0;
@@ -826,9 +787,9 @@ BSSvRemoveOneNode(
while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue)) != NULL)
dev_kfree_skb(skb);
- // clear context
+ /* clear context */
memset(&pMgmt->sNodeDBTable[uNodeIndex], 0, sizeof(KnownNodeDB));
- // clear tx bit map
+ /* clear tx bit map */
pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[uNodeIndex].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[uNodeIndex].wAID & 7];
return;
@@ -859,9 +820,8 @@ BSSvUpdateAPNode(
memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
pMgmt->sNodeDBTable[0].bActive = true;
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
+ if (pDevice->eCurrentPHYType == PHY_TYPE_11B)
uRateLen = WLAN_RATES_MAXLEN_11B;
- }
pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
uRateLen);
@@ -882,12 +842,10 @@ BSSvUpdateAPNode(
pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxSuppRate;
pMgmt->sNodeDBTable[0].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(*pwCapInfo);
pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
-#ifdef PLICE_DEBUG
- printk("BSSvUpdateAPNode:MaxSuppRate is %d\n", pMgmt->sNodeDBTable[0].wMaxSuppRate);
-#endif
- // Auto rate fallback function initiation.
- // RATEbInit(pDevice);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->sNodeDBTable[0].wTxDataRate = %d \n", pMgmt->sNodeDBTable[0].wTxDataRate);
+ netdev_dbg(pDevice->dev, "BSSvUpdateAPNode:MaxSuppRate is %d\n",
+ pMgmt->sNodeDBTable[0].wMaxSuppRate);
+ /* auto rate fallback function initiation */
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->sNodeDBTable[0].wTxDataRate = %d\n", pMgmt->sNodeDBTable[0].wTxDataRate);
};
/*+
@@ -926,9 +884,9 @@ BSSvAddMulticastNode(
&(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate)
);
pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxBasicRate;
-#ifdef PLICE_DEBUG
- printk("BSSvAddMultiCastNode:pMgmt->sNodeDBTable[0].wTxDataRate is %d\n", pMgmt->sNodeDBTable[0].wTxDataRate);
-#endif
+ netdev_dbg(pDevice->dev,
+ "BSSvAddMultiCastNode:pMgmt->sNodeDBTable[0].wTxDataRate is %d\n",
+ pMgmt->sNodeDBTable[0].wTxDataRate);
pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
};
@@ -944,7 +902,7 @@ BSSvAddMulticastNode(
* none.
*
-*/
-//2008-4-14 <add> by chester for led issue
+/* 2008-4-14 <add> by chester for led issue */
#ifdef FOR_LED_ON_NOTEBOOK
bool cc = false;
unsigned int status;
@@ -961,7 +919,7 @@ BSSvSecondCallBack(
unsigned int uSleepySTACnt = 0;
unsigned int uNonShortSlotSTACnt = 0;
unsigned int uLongPreambleSTACnt = 0;
- viawget_wpa_header *wpahdr; //DavidWang
+ viawget_wpa_header *wpahdr; /* DavidWang */
spin_lock_irq(&pDevice->lock);
@@ -969,51 +927,47 @@ BSSvSecondCallBack(
pDevice->byERPFlag &=
~(WLAN_SET_ERP_BARKER_MODE(1) | WLAN_SET_ERP_NONERP_PRESENT(1));
- //2008-4-14 <add> by chester for led issue
+ /* 2008-4-14 <add> by chester for led issue */
#ifdef FOR_LED_ON_NOTEBOOK
MACvGPIOIn(pDevice->PortOffset, &pDevice->byGPIO);
- if (((!(pDevice->byGPIO & GPIO0_DATA) && (pDevice->bHWRadioOff == false)) || ((pDevice->byGPIO & GPIO0_DATA) && (pDevice->bHWRadioOff == true))) && (cc == false)) {
+ if (((!(pDevice->byGPIO & GPIO0_DATA) && (!pDevice->bHWRadioOff)) ||
+ ((pDevice->byGPIO & GPIO0_DATA) && pDevice->bHWRadioOff)) &&
+ (!cc)) {
cc = true;
- } else if (cc == true) {
- if (pDevice->bHWRadioOff == true) {
- if (!(pDevice->byGPIO & GPIO0_DATA))
-//||(!(pDevice->byGPIO & GPIO0_DATA) && (pDevice->byRadioCtl & EEP_RADIOCTL_INV)))
- {
- if (status == 1) goto start;
+ } else if (cc) {
+ if (pDevice->bHWRadioOff) {
+ if (!(pDevice->byGPIO & GPIO0_DATA)) {
+ if (status == 1)
+ goto start;
status = 1;
CARDbRadioPowerOff(pDevice);
pMgmt->sNodeDBTable[0].bActive = false;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pMgmt->eCurrState = WMAC_STATE_IDLE;
- //netif_stop_queue(pDevice->dev);
pDevice->bLinkPass = false;
}
- if (pDevice->byGPIO & GPIO0_DATA)
-//||(!(pDevice->byGPIO & GPIO0_DATA) && (pDevice->byRadioCtl & EEP_RADIOCTL_INV)))
- {
- if (status == 2) goto start;
+ if (pDevice->byGPIO & GPIO0_DATA) {
+ if (status == 2)
+ goto start;
status = 2;
CARDbRadioPowerOn(pDevice);
}
} else {
- if (pDevice->byGPIO & GPIO0_DATA)
-//||(!(pDevice->byGPIO & GPIO0_DATA) && (pDevice->byRadioCtl & EEP_RADIOCTL_INV)))
- {
- if (status == 3) goto start;
+ if (pDevice->byGPIO & GPIO0_DATA) {
+ if (status == 3)
+ goto start;
status = 3;
CARDbRadioPowerOff(pDevice);
pMgmt->sNodeDBTable[0].bActive = false;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pMgmt->eCurrState = WMAC_STATE_IDLE;
- //netif_stop_queue(pDevice->dev);
pDevice->bLinkPass = false;
}
- if (!(pDevice->byGPIO & GPIO0_DATA))
-//||(!(pDevice->byGPIO & GPIO0_DATA) && (pDevice->byRadioCtl & EEP_RADIOCTL_INV)))
- {
- if (status == 4) goto start;
+ if (!(pDevice->byGPIO & GPIO0_DATA)) {
+ if (status == 4)
+ goto start;
status = 4;
CARDbRadioPowerOn(pDevice);
}
@@ -1025,14 +979,15 @@ start:
if (pDevice->wUseProtectCntDown > 0) {
pDevice->wUseProtectCntDown--;
} else {
- // disable protect mode
+ /* disable protect mode */
pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
}
{
pDevice->byReAssocCount++;
- if ((pDevice->byReAssocCount > 10) && (pDevice->bLinkPass != true)) { //10 sec timeout
- printk("Re-association timeout!!!\n");
+ /* 10 sec timeout */
+ if ((pDevice->byReAssocCount > 10) && (!pDevice->bLinkPass)) {
+ netdev_info(pDevice->dev, "Re-association timeout!!!\n");
pDevice->byReAssocCount = 0;
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
{
@@ -1043,7 +998,7 @@ start:
wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
}
#endif
- } else if (pDevice->bLinkPass == true)
+ } else if (pDevice->bLinkPass)
pDevice->byReAssocCount = 0;
}
@@ -1053,7 +1008,7 @@ start:
for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
- // Increase in-activity counter
+ /* increase in-activity counter */
pMgmt->sNodeDBTable[ii].uInActiveCount++;
if (ii > 0) {
@@ -1067,7 +1022,7 @@ start:
if (pMgmt->sNodeDBTable[ii].eNodeState >= NODE_ASSOC) {
pDevice->uAssocCount++;
- // check if Non ERP exist
+ /* check if Non ERP exist */
if (pMgmt->sNodeDBTable[ii].uInActiveCount < ERP_RECOVER_COUNT) {
if (!pMgmt->sNodeDBTable[ii].bShortPreamble) {
pDevice->byERPFlag |= WLAN_SET_ERP_BARKER_MODE(1);
@@ -1082,43 +1037,39 @@ start:
}
}
- // check if any STA in PS mode
+ /* check if any STA in PS mode */
if (pMgmt->sNodeDBTable[ii].bPSEnable)
uSleepySTACnt++;
}
- // Rate fallback check
+ /* rate fallback check */
if (!pDevice->bFixRate) {
-/*
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (ii == 0))
- RATEvTxRateFallBack(pDevice, &(pMgmt->sNodeDBTable[ii]));
-*/
if (ii > 0) {
- // ii = 0 for multicast node (AP & Adhoc)
+ /* ii = 0 for multicast node (AP & Adhoc) */
RATEvTxRateFallBack((void *)pDevice, &(pMgmt->sNodeDBTable[ii]));
} else {
- // ii = 0 reserved for unicast AP node (Infra STA)
+ /* ii = 0 reserved for unicast AP node (Infra STA) */
if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)
-#ifdef PLICE_DEBUG
- printk("SecondCallback:Before:TxDataRate is %d\n", pMgmt->sNodeDBTable[0].wTxDataRate);
-#endif
+ netdev_dbg(pDevice->dev,
+ "SecondCallback:Before:TxDataRate is %d\n",
+ pMgmt->sNodeDBTable[0].wTxDataRate);
RATEvTxRateFallBack((void *)pDevice, &(pMgmt->sNodeDBTable[ii]));
-#ifdef PLICE_DEBUG
- printk("SecondCallback:After:TxDataRate is %d\n", pMgmt->sNodeDBTable[0].wTxDataRate);
-#endif
+ netdev_dbg(pDevice->dev,
+ "SecondCallback:After:TxDataRate is %d\n",
+ pMgmt->sNodeDBTable[0].wTxDataRate);
}
}
- // check if pending PS queue
+ /* check if pending PS queue */
if (pMgmt->sNodeDBTable[ii].wEnQueueCnt != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index= %d, Queue = %d pending \n",
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index= %d, Queue = %d pending\n",
ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt);
if ((ii > 0) && (pMgmt->sNodeDBTable[ii].wEnQueueCnt > 15)) {
BSSvRemoveOneNode(pDevice, ii);
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Pending many queues PS STA Index = %d remove \n", ii);
+ DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Pending many queues PS STA Index = %d remove\n", ii);
continue;
}
}
@@ -1127,7 +1078,7 @@ start:
}
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->eCurrentPHYType == PHY_TYPE_11G)) {
- // on/off protect mode
+ /* on/off protect mode */
if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)) {
if (!pDevice->bProtectMode) {
MACvEnableProtectMD(pDevice->PortOffset);
@@ -1139,7 +1090,7 @@ start:
pDevice->bProtectMode = false;
}
}
- // on/off short slot time
+ /* on/off short slot time */
if (uNonShortSlotSTACnt > 0) {
if (pDevice->bShortSlotTime) {
@@ -1155,7 +1106,7 @@ start:
}
}
- // on/off barker long preamble mode
+ /* on/off barker long preamble mode */
if (uLongPreambleSTACnt > 0) {
if (!pDevice->bBarkerPreambleMd) {
@@ -1171,7 +1122,7 @@ start:
}
- // Check if any STA in PS mode, enable DTIM multicast deliver
+ /* check if any STA in PS mode, enable DTIM multicast deliver */
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
if (uSleepySTACnt > 0)
pMgmt->sNodeDBTable[0].bPSEnable = true;
@@ -1184,11 +1135,10 @@ start:
if ((pMgmt->eCurrMode == WMAC_MODE_STANDBY) ||
(pMgmt->eCurrMode == WMAC_MODE_ESS_STA)) {
- if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS
- if (pDevice->bUpdateBBVGA) {
- // s_vCheckSensitivity((void *) pDevice);
+ /* assoc with BSS */
+ if (pMgmt->sNodeDBTable[0].bActive) {
+ if (pDevice->bUpdateBBVGA)
s_vCheckPreEDThreshold((void *)pDevice);
- }
if ((pMgmt->sNodeDBTable[0].uInActiveCount >= (LOST_BEACON_COUNT/2)) &&
(pDevice->byBBVGACurrent != pDevice->abyBBVGA[0])) {
@@ -1232,12 +1182,18 @@ start:
if (pDevice->uAutoReConnectTime < 10) {
pDevice->uAutoReConnectTime++;
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- //network manager support need not do Roaming scan???
- if (pDevice->bWPASuppWextEnabled == true)
+ /*
+ * network manager support need not do
+ * Roaming scan???
+ */
+ if (pDevice->bWPASuppWextEnabled)
pDevice->uAutoReConnectTime = 0;
#endif
} else {
- //mike use old encryption status for wpa reauthen
+ /*
+ * mike use old encryption status
+ * for wpa reauthentication
+ */
if (pDevice->bWPADEVUp)
pDevice->eEncryptionStatus = pDevice->eOldEncryptionStatus;
@@ -1252,7 +1208,7 @@ start:
}
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- // if adhoc started which essid is NULL string, rescanning.
+ /* if adhoc started which essid is NULL string, rescanning */
if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) {
if (pDevice->uAutoReConnectTime < 10) {
pDevice->uAutoReConnectTime++;
@@ -1262,13 +1218,11 @@ start:
bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
pDevice->uAutoReConnectTime = 0;
- };
+ }
}
if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
- if (pDevice->bUpdateBBVGA) {
- //s_vCheckSensitivity((void *) pDevice);
+ if (pDevice->bUpdateBBVGA)
s_vCheckPreEDThreshold((void *)pDevice);
- }
if (pMgmt->sNodeDBTable[0].uInActiveCount >= ADHOC_LOST_BEACON_COUNT) {
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost other STA beacon [%d] sec, started !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
pMgmt->sNodeDBTable[0].uInActiveCount = 0;
@@ -1318,40 +1272,31 @@ BSSvUpdateNodeTxCounter(
unsigned short wFallBackRate = RATE_1M;
unsigned char byFallBack;
unsigned int ii;
-// unsigned int txRetryTemp;
-//PLICE_DEBUG->
- //txRetryTemp = byTxRetry;
-//PLICE_DEBUG <-
pTxBufHead = (PSTxBufHead) pbyBuffer;
- if (pTxBufHead->wFIFOCtl & FIFOCTL_AUTO_FB_0) {
+ if (pTxBufHead->wFIFOCtl & FIFOCTL_AUTO_FB_0)
byFallBack = AUTO_FB_0;
- } else if (pTxBufHead->wFIFOCtl & FIFOCTL_AUTO_FB_1) {
+ else if (pTxBufHead->wFIFOCtl & FIFOCTL_AUTO_FB_1)
byFallBack = AUTO_FB_1;
- } else {
+ else
byFallBack = AUTO_FB_NONE;
- }
- wRate = pTxBufHead->wReserved; //?wRate
+ wRate = pTxBufHead->wReserved;
- // Only Unicast using support rates
+ /* Only Unicast using support rates */
if (pTxBufHead->wFIFOCtl & FIFOCTL_NEEDACK) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wRate %04X, byTsr0 %02X, byTsr1 %02X\n", wRate, byTsr0, byTsr1);
if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
pMgmt->sNodeDBTable[0].uTxAttempts += 1;
if ((byTsr1 & TSR1_TERR) == 0) {
- // transmit success, TxAttempts at least plus one
+ /* transmit success, TxAttempts at least plus one */
pMgmt->sNodeDBTable[0].uTxOk[MAX_RATE]++;
if ((byFallBack == AUTO_FB_NONE) ||
(wRate < RATE_18M)) {
wFallBackRate = wRate;
} else if (byFallBack == AUTO_FB_0) {
-//PLICE_DEBUG
if (byTxRetry < 5)
wFallBackRate = awHWRetry0[wRate-RATE_18M][byTxRetry];
- //wFallBackRate = awHWRetry0[wRate-RATE_12M][byTxRetry];
- //wFallBackRate = awHWRetry0[wRate-RATE_18M][txRetryTemp] +1;
else
wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- //wFallBackRate = awHWRetry0[wRate-RATE_12M][4];
} else if (byFallBack == AUTO_FB_1) {
if (byTxRetry < 5)
wFallBackRate = awHWRetry1[wRate-RATE_18M][byTxRetry];
@@ -1369,18 +1314,11 @@ BSSvUpdateNodeTxCounter(
(wRate < RATE_18M)) {
pMgmt->sNodeDBTable[0].uTxFail[wRate] += byTxRetry;
} else if (byFallBack == AUTO_FB_0) {
-//PLICE_DEBUG
- for (ii = 0; ii < byTxRetry; ii++)
- //for (ii=0;ii<txRetryTemp;ii++)
- {
- if (ii < 5) {
-//PLICE_DEBUG
+ for (ii = 0; ii < byTxRetry; ii++) {
+ if (ii < 5)
wFallBackRate = awHWRetry0[wRate-RATE_18M][ii];
- //wFallBackRate = awHWRetry0[wRate-RATE_12M][ii];
- } else {
+ else
wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- //wFallBackRate = awHWRetry0[wRate-RATE_12M][4];
- }
pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
}
} else if (byFallBack == AUTO_FB_1) {
@@ -1402,7 +1340,7 @@ BSSvUpdateNodeTxCounter(
if (BSSDBbIsSTAInNodeDB((void *)pMgmt, &(pMACHeader->abyAddr1[0]), &uNodeIndex)) {
pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts += 1;
if ((byTsr1 & TSR1_TERR) == 0) {
- // transmit success, TxAttempts at least plus one
+ /* transmit success, TxAttempts at least plus one */
pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
if ((byFallBack == AUTO_FB_NONE) ||
(wRate < RATE_18M)) {
@@ -1485,7 +1423,7 @@ BSSvClearNodeDBTable(
for (ii = uStartIndex; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
- // check if sTxPSQueue has been initial
+ /* check if sTxPSQueue has been initial */
if (pMgmt->sNodeDBTable[ii].sTxPSQueue.next != NULL) {
while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "PS skb != NULL %d\n", ii);
@@ -1517,7 +1455,7 @@ void s_vCheckSensitivity(
((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
if (pBSSList != NULL) {
- // Updata BB Reg if RSSI is too strong.
+ /* Update BB Reg if RSSI is too strong */
long LocalldBmAverage = 0;
long uNumofdBm = 0;
for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
@@ -1556,9 +1494,8 @@ BSSvClearAnyBSSJoinRecord(
PSMgmtObject pMgmt = pDevice->pMgmt;
unsigned int ii;
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ for (ii = 0; ii < MAX_BSS_NUM; ii++)
pMgmt->sBSSList[ii].bSelected = false;
- }
return;
}
@@ -1580,19 +1517,18 @@ void s_uCalculateLinkQual(
pDevice->scStatistic.RxOkCnt;
TxOkRatio = (TxCnt < 6) ? 4000 : ((pDevice->scStatistic.TxNoRetryOkCount * 4000) / TxCnt);
RxOkRatio = (RxCnt < 6) ? 2000 : ((pDevice->scStatistic.RxOkCnt * 2000) / RxCnt);
-//decide link quality
- if (pDevice->bLinkPass != true) {
+ /* decide link quality */
+ if (!pDevice->bLinkPass) {
pDevice->scStatistic.LinkQuality = 0;
pDevice->scStatistic.SignalStren = 0;
} else {
RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm);
- if (-ldBm < 50) {
+ if (-ldBm < 50)
RssiRatio = 4000;
- } else if (-ldBm > 90) {
+ else if (-ldBm > 90)
RssiRatio = 0;
- } else {
+ else
RssiRatio = (40-(-ldBm-50))*4000/40;
- }
pDevice->scStatistic.SignalStren = RssiRatio/40;
pDevice->scStatistic.LinkQuality = (RssiRatio+TxOkRatio+RxOkRatio)/100;
}
@@ -1616,10 +1552,8 @@ void s_vCheckPreEDThreshold(
if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
- if (pBSSList != NULL) {
+ if (pBSSList != NULL)
pDevice->byBBPreEDRSSI = (unsigned char) (~(pBSSList->ldBmAverRange) + 1);
- //BBvUpdatePreEDThreshold(pDevice, false);
- }
}
return;
}
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index fbf18e2..db38ca0 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -1053,7 +1053,7 @@ CARDbAdd_PMKID_Candidate(
for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) {
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
if (!memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) {
- if ((bRSNCapExist == true) && (wRSNCap & BIT0)) {
+ if (bRSNCapExist && (wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
@@ -1064,7 +1064,7 @@ CARDbAdd_PMKID_Candidate(
// New Candidate
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[pDevice->gsPMKIDCandidate.NumCandidates];
- if ((bRSNCapExist == true) && (wRSNCap & BIT0)) {
+ if (bRSNCapExist && (wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
@@ -1190,7 +1190,7 @@ CARDbStartMeasure(
}
} while (pDevice->uNumOfMeasureEIDs != 0);
- if (bExpired == false) {
+ if (!bExpired) {
MACvSelectPage1(pDevice->PortOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MSRSTART, LODWORD(qwStartTSF));
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MSRSTART + 4, HIDWORD(qwStartTSF));
@@ -1280,7 +1280,7 @@ CARDbSetQuiet(
PSDevice pDevice = (PSDevice) pDeviceHandler;
unsigned int ii = 0;
- if (bResetQuiet == true) {
+ if (bResetQuiet) {
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
for (ii = 0; ii < MAX_QUIET_COUNT; ii++) {
pDevice->sQuiet[ii].bEnable = false;
@@ -2013,7 +2013,7 @@ QWORD CARDqGetTSFOffset(unsigned char byRxRate, QWORD qwTSF1, QWORD qwTSF2)
HIDWORD(qwTSFOffset) = HIDWORD(qwTSF1) - HIDWORD(qwTSF2) - 1;
} else {
HIDWORD(qwTSFOffset) = HIDWORD(qwTSF1) - HIDWORD(qwTSF2);
- };
+ }
return qwTSFOffset;
}
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index ba9481f..3198a31 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -441,8 +441,8 @@ void init_channel_table(void *pDeviceHandler)
break;
}
- if ((pDevice->dwDiagRefCount != 0) || (pDevice->b11hEnable == true)) {
- if (bMultiBand == true) {
+ if ((pDevice->dwDiagRefCount != 0) || pDevice->b11hEnable) {
+ if (bMultiBand) {
for (ii = 0; ii < CARD_MAX_CHANNEL_TBL; ii++) {
sChannelTbl[ii + 1].bValid = true;
pDevice->abyRegPwr[ii + 1] = pDevice->abyOFDMDefaultPwr[ii + 1];
@@ -463,7 +463,7 @@ void init_channel_table(void *pDeviceHandler)
}
}
} else if (pDevice->byZoneType <= CCODE_MAX) {
- if (bMultiBand == true) {
+ if (bMultiBand) {
for (ii = 0; ii < CARD_MAX_CHANNEL_TBL; ii++) {
if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
sChannelTbl[ii + 1].bValid = true;
@@ -531,7 +531,7 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
return bResult;
}
- if (sChannelTbl[uConnectionChannel].bValid == false) {
+ if (!sChannelTbl[uConnectionChannel].bValid) {
return false;
}
@@ -557,7 +557,7 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
bResult &= RFbSelectChannel(pDevice->PortOffset, pDevice->byRFType, (unsigned char)uConnectionChannel);
// Init Synthesizer Table
- if (pDevice->bEnablePSMode == true)
+ if (pDevice->bEnablePSMode)
RFvWriteWakeProgSyn(pDevice->PortOffset, pDevice->byRFType, uConnectionChannel);
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "CARDbSetMediaChannel: %d\n", (unsigned char)uConnectionChannel);
@@ -766,7 +766,7 @@ unsigned char auto_channel_select(void *pDeviceHandler, CARD_PHY_TYPE ePHYType)
if (ePHYType == PHY_TYPE_11A) {
for (ii = CB_MAX_CHANNEL_24G + 1; ii <= CB_MAX_CHANNEL; ii++) {
- if (sChannelTbl[ii].bValid == true) {
+ if (sChannelTbl[ii].bValid) {
if (byOptionChannel == 0) {
byOptionChannel = (unsigned char) ii;
}
@@ -780,7 +780,7 @@ unsigned char auto_channel_select(void *pDeviceHandler, CARD_PHY_TYPE ePHYType)
} else {
byOptionChannel = 0;
for (ii = 1; ii <= CB_MAX_CHANNEL_24G; ii++) {
- if (sChannelTbl[ii].bValid == true) {
+ if (sChannelTbl[ii].bValid) {
if (sChannelTbl[ii].byMAP == 0) {
aiWeight[ii] += 100;
} else if (sChannelTbl[ii].byMAP & 0x01) {
@@ -807,7 +807,7 @@ unsigned char auto_channel_select(void *pDeviceHandler, CARD_PHY_TYPE ePHYType)
}
}
for (ii = 1; ii <= CB_MAX_CHANNEL_24G; ii++) {
- if ((sChannelTbl[ii].bValid == true) &&
+ if (sChannelTbl[ii].bValid &&
(aiWeight[ii] > aiWeight[byOptionChannel])) {
byOptionChannel = (unsigned char) ii;
}
diff --git a/drivers/staging/vt6655/datarate.c b/drivers/staging/vt6655/datarate.c
index e7b6bc7..c9a89cd 100644
--- a/drivers/staging/vt6655/datarate.c
+++ b/drivers/staging/vt6655/datarate.c
@@ -218,8 +218,7 @@ RATEvParseMaxRate(
for (ii = 0; ii < uRateLen; ii++) {
byRate = (unsigned char)(pItemRates->abyRates[ii]);
- if (WLAN_MGMT_IS_BASICRATE(byRate) &&
- (bUpdateBasicRate == true)) {
+ if (WLAN_MGMT_IS_BASICRATE(byRate) && bUpdateBasicRate) {
// Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate
CARDbAddBasicRate((void *)pDevice, wGetRateIdx(byRate));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ParseMaxRate AddBasicRate: %d\n", wGetRateIdx(byRate));
@@ -329,7 +328,7 @@ RATEvTxRateFallBack(
for (ii = 0; ii < MAX_RATE; ii++) {
if (psNodeDBTable->wSuppRate & (0x0001<<ii)) {
- if (bAutoRate[ii] == true) {
+ if (bAutoRate[ii]) {
wIdxUpRate = (unsigned short) ii;
}
} else {
@@ -354,8 +353,7 @@ RATEvTxRateFallBack(
wIdxDownRate = psNodeDBTable->wTxDataRate;
for (ii = psNodeDBTable->wTxDataRate; ii > 0;) {
ii--;
- if ((dwThroughputTbl[ii] > dwThroughput) &&
- (bAutoRate[ii] == true)) {
+ if ((dwThroughputTbl[ii] > dwThroughput) && bAutoRate[ii]) {
dwThroughput = dwThroughputTbl[ii];
wIdxDownRate = (unsigned short) ii;
}
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index ca1b857..062c3a3 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -31,7 +31,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index e93fdc8..a952df1 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -267,7 +267,7 @@ static CHIP_INFO chip_info_table[] = {
{0, NULL}
};
-DEFINE_PCI_DEVICE_TABLE(vt6655_pci_id_table) = {
+const struct pci_device_id vt6655_pci_id_table[] = {
{ PCI_VDEVICE(VIA, 0x3253), (kernel_ulong_t)chip_info_table},
{ 0, }
};
@@ -561,7 +561,7 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pDevice->byTxAntennaMode = ANT_B;
pDevice->dwTxAntennaSel = 1;
pDevice->dwRxAntennaSel = 1;
- if (pDevice->bTxRxAntInv == true)
+ if (pDevice->bTxRxAntInv)
pDevice->byRxAntennaMode = ANT_A;
else
pDevice->byRxAntennaMode = ANT_B;
@@ -578,13 +578,13 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pDevice->dwRxAntennaSel = 0;
if (byValue & EEP_ANTENNA_AUX) {
pDevice->byTxAntennaMode = ANT_A;
- if (pDevice->bTxRxAntInv == true)
+ if (pDevice->bTxRxAntInv)
pDevice->byRxAntennaMode = ANT_B;
else
pDevice->byRxAntennaMode = ANT_A;
} else {
pDevice->byTxAntennaMode = ANT_B;
- if (pDevice->bTxRxAntInv == true)
+ if (pDevice->bTxRxAntInv)
pDevice->byRxAntennaMode = ANT_A;
else
pDevice->byRxAntennaMode = ANT_B;
@@ -635,7 +635,7 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pDevice->byRFType &= RF_MASK;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRFType = %x\n", pDevice->byRFType);
- if (pDevice->bZoneRegExist == false) {
+ if (!pDevice->bZoneRegExist) {
pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byZoneType = %x\n", pDevice->byZoneType);
@@ -742,7 +742,7 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
if (!(pDevice->byGPIO & GPIO0_DATA)) { pDevice->bHWRadioOff = false; }
}
- if ((pDevice->bRadioControlOff == true)) {
+ if (pDevice->bRadioControlOff) {
CARDbRadioPowerOff(pDevice);
} else CARDbRadioPowerOn(pDevice);
#else
@@ -751,7 +751,7 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pDevice->bHWRadioOff = true;
}
}
- if ((pDevice->bHWRadioOff == true) || (pDevice->bRadioControlOff == true)) {
+ if (pDevice->bHWRadioOff || pDevice->bRadioControlOff) {
CARDbRadioPowerOff(pDevice);
}
@@ -809,7 +809,7 @@ static bool device_release_WPADEV(PSDevice pDevice)
int ii = 0;
// wait_queue_head_t Set_wait;
//send device close to wpa_supplicnat layer
- if (pDevice->bWPADEVUp == true) {
+ if (pDevice->bWPADEVUp) {
wpahdr = (viawget_wpa_header *)pDevice->skb->data;
wpahdr->type = VIAWGET_DEVICECLOSE_MSG;
wpahdr->resp_ie_len = 0;
@@ -826,7 +826,7 @@ static bool device_release_WPADEV(PSDevice pDevice)
//wait release WPADEV
// init_waitqueue_head(&Set_wait);
// wait_event_timeout(Set_wait, ((pDevice->wpadev==NULL)&&(pDevice->skb == NULL)),5*HZ); //1s wait
- while ((pDevice->bWPADEVUp == true)) {
+ while (pDevice->bWPADEVUp) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ / 20); //wait 50ms
ii++;
@@ -892,7 +892,7 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
#ifdef DEBUG
printk("Before get pci_info memaddr is %x\n", pDevice->memaddr);
#endif
- if (device_get_pci_info(pDevice, pcid) == false) {
+ if (!device_get_pci_info(pDevice, pcid)) {
printk(KERN_ERR DEVICE_NAME ": Failed to find PCI device.\n");
device_free_info(pDevice);
return -ENODEV;
@@ -1633,7 +1633,7 @@ static int device_tx_srv(PSDevice pDevice, unsigned int uIdx) {
bFull = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " AC0DMA is Full = %d\n", pDevice->iTDUsed[uIdx]);
}
- if (netif_queue_stopped(pDevice->dev) && (bFull == false)) {
+ if (netif_queue_stopped(pDevice->dev) && !bFull) {
netif_wake_queue(pDevice->dev);
}
}
@@ -1798,7 +1798,7 @@ static int device_open(struct net_device *dev) {
pDevice->byReAssocCount = 0;
pDevice->bWPADEVUp = false;
// Patch: if WEP key already set by iwconfig but device not yet open
- if ((pDevice->bEncryptionEnable == true) && (pDevice->bTransmitKey == true)) {
+ if (pDevice->bEncryptionEnable && pDevice->bTransmitKey) {
KeybSetDefaultKey(&(pDevice->sKey),
(unsigned long)(pDevice->byKeyIndex | (1 << 31)),
pDevice->uKeyLength,
@@ -1895,7 +1895,7 @@ static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev) {
return 0;
}
- if (pDevice->bStopTx0Pkt == true) {
+ if (pDevice->bStopTx0Pkt) {
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
@@ -1924,7 +1924,7 @@ bool device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, unsigned int uNodeI
SKeyItem STempKey;
// unsigned char byKeyIndex = 0;
- if (pDevice->bStopTx0Pkt == true) {
+ if (pDevice->bStopTx0Pkt) {
dev_kfree_skb_irq(skb);
return false;
}
@@ -1993,14 +1993,14 @@ bool device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, unsigned int uNodeI
} else if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
byPktType = PK_TYPE_11A;
} else {
- if (pDevice->bProtectMode == true) {
+ if (pDevice->bProtectMode) {
byPktType = PK_TYPE_11GB;
} else {
byPktType = PK_TYPE_11GA;
}
}
- if (pDevice->bEncryptionEnable == true)
+ if (pDevice->bEncryptionEnable)
bNeedEncryption = true;
if (pDevice->bEnableHostWEP) {
@@ -2076,7 +2076,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
bool bNodeExist = false;
spin_lock_irq(&pDevice->lock);
- if (pDevice->bLinkPass == false) {
+ if (!pDevice->bLinkPass) {
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
@@ -2130,7 +2130,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
}
}
- if (bNodeExist == false) {
+ if (!bNodeExist) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "Unknown STA not found in node DB \n");
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
@@ -2149,7 +2149,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
cbFrameBodySize += 8;
}
- if (pDevice->bEncryptionEnable == true) {
+ if (pDevice->bEncryptionEnable) {
bNeedEncryption = true;
// get Transmit key
do {
@@ -2196,7 +2196,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
if (pDevice->bEnableHostWEP) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "acdma0: STA index %d\n", uNodeIndex);
- if (pDevice->bEncryptionEnable == true) {
+ if (pDevice->bEncryptionEnable) {
pTransmitKey = &STempKey;
pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
@@ -2286,7 +2286,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
} else if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
byPktType = PK_TYPE_11A;
} else {
- if (pDevice->bProtectMode == true) {
+ if (pDevice->bProtectMode) {
byPktType = PK_TYPE_11GB;
} else {
byPktType = PK_TYPE_11GA;
@@ -2297,7 +2297,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
// printk("FIX RATE:CurrentRate is %d");
//#endif
- if (bNeedEncryption == true) {
+ if (bNeedEncryption) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType));
if ((pDevice->sTxEthHeader.wType) == TYPE_PKT_802_1x) {
bNeedEncryption = false;
@@ -2306,7 +2306,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
if (pTransmitKey == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Don't Find TX KEY\n");
} else {
- if (bTKIP_UseGTK == true) {
+ if (bTKIP_UseGTK) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "error: KEY is GTK!!~~\n");
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
@@ -2493,7 +2493,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
MACvSelectPage0(pDevice->PortOffset);
//xxxx
// WCMDbFlushCommandQueue(pDevice->pMgmt, true);
- if (set_channel(pDevice, pDevice->pCurrMeasureEID->sReq.byChannel) == true) {
+ if (set_channel(pDevice, pDevice->pCurrMeasureEID->sReq.byChannel)) {
pDevice->bMeasureInProgress = true;
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_READY);
@@ -2544,12 +2544,12 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
if (pDevice->dwIsr & ISR_QUIETSTART) {
do {
;
- } while (CARDbStartQuiet(pDevice) == false);
+ } while (!CARDbStartQuiet(pDevice));
}
}
if (pDevice->dwIsr & ISR_TBTT) {
- if (pDevice->bEnableFirstQuiet == true) {
+ if (pDevice->bEnableFirstQuiet) {
pDevice->byQuietStartCount--;
if (pDevice->byQuietStartCount == 0) {
pDevice->bEnableFirstQuiet = false;
@@ -2558,7 +2558,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
MACvSelectPage0(pDevice->PortOffset);
}
}
- if ((pDevice->bChannelSwitch == true) &&
+ if (pDevice->bChannelSwitch &&
(pDevice->eOPMode == OP_MODE_INFRASTRUCTURE)) {
pDevice->byChannelSwitchCount--;
if (pDevice->byChannelSwitchCount == 0) {
@@ -2575,7 +2575,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
if (pDevice->eOPMode == OP_MODE_ADHOC) {
//pDevice->bBeaconSent = false;
} else {
- if ((pDevice->bUpdateBBVGA) && (pDevice->bLinkPass == true) && (pDevice->uCurrRSSI != 0)) {
+ if ((pDevice->bUpdateBBVGA) && pDevice->bLinkPass && (pDevice->uCurrRSSI != 0)) {
long ldBm;
RFvRSSITodBm(pDevice, (unsigned char) pDevice->uCurrRSSI, &ldBm);
@@ -2642,7 +2642,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
}
pDevice->bBeaconSent = true;
- if (pDevice->bChannelSwitch == true) {
+ if (pDevice->bChannelSwitch) {
pDevice->byChannelSwitchCount--;
if (pDevice->byChannelSwitchCount == 0) {
pDevice->bChannelSwitch = false;
@@ -3237,7 +3237,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
netif_stop_queue(pDevice->dev);
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- if (pDevice->bWPASuppWextEnabled != true)
+ if (!pDevice->bWPASuppWextEnabled)
#endif
bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
@@ -3373,7 +3373,7 @@ viawget_resume(struct pci_dev *pcid)
spin_lock_irq(&pDevice->lock);
MACvRestoreContext(pDevice->PortOffset, pDevice->abyMacContext);
device_init_registers(pDevice, DEVICE_INIT_DXPL);
- if (pMgmt->sNodeDBTable[0].bActive == true) { // Assoc with BSS
+ if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS
pMgmt->sNodeDBTable[0].bActive = false;
pDevice->bLinkPass = false;
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 0ff51cb..0a29c90 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -169,7 +169,7 @@ s_vProcessRxMACHeader(PSDevice pDevice, unsigned char *pbyRxBufferAddr,
}
} else {
cbHeaderSize += WLAN_HDR_ADDR3_LEN;
- };
+ }
pbyRxBuffer = (unsigned char *)(pbyRxBufferAddr + cbHeaderSize);
if (ether_addr_equal(pbyRxBuffer, pDevice->abySNAP_Bridgetunnel)) {
@@ -263,7 +263,7 @@ s_vGetDASA(unsigned char *pbyRxBufferAddr, unsigned int *pcbHeaderSize,
psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr2[ii];
}
}
- };
+ }
*pcbHeaderSize = cbHeaderSize;
}
@@ -379,7 +379,7 @@ device_receive_frame(
pMACHeader = (PS802_11Header)((unsigned char *)(skb->data) + 8);
//PLICE_DEBUG<-
- if (pDevice->bMeasureInProgress == true) {
+ if (pDevice->bMeasureInProgress) {
if ((*pbyRsr & RSR_CRCOK) != 0) {
pDevice->byBasicMap |= 0x01;
}
@@ -436,7 +436,7 @@ device_receive_frame(
}
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (s_bAPModeRxCtl(pDevice, pbyFrame, iSANodeIndex) == true) {
+ if (s_bAPModeRxCtl(pDevice, pbyFrame, iSANodeIndex)) {
return false;
}
}
@@ -592,7 +592,7 @@ device_receive_frame(
}
} else {
// Control Frame
- };
+ }
return false;
} else {
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
@@ -608,8 +608,7 @@ device_receive_frame(
}
} else {
// discard DATA packet while not associate || BSSID error
- if ((pDevice->bLinkPass == false) ||
- !(*pbyRsr & RSR_BSSIDOK)) {
+ if (!pDevice->bLinkPass || !(*pbyRsr & RSR_BSSIDOK)) {
if (bDeFragRx) {
if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: can not alloc more frag bufs\n",
@@ -658,7 +657,7 @@ device_receive_frame(
// Now it only supports 802.11g Infrastructure Mode, and support rate must up to 54 Mbps
if (pDevice->bDiversityEnable && (FrameSize > 50) &&
(pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) &&
- (pDevice->bLinkPass == true)) {
+ pDevice->bLinkPass) {
BBvAntennaDiversity(pDevice, s_byGetRateIdx(*pbyRxRate), 0);
}
@@ -683,7 +682,7 @@ device_receive_frame(
// -----------------------------------------------
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnable8021x == true)) {
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && pDevice->bEnable8021x) {
unsigned char abyMacHdr[24];
// Only 802.1x packet incoming allowed
@@ -698,7 +697,7 @@ device_receive_frame(
if (wEtherType == ETH_P_PAE) {
skb->dev = pDevice->apdev;
- if (bIsWEP == true) {
+ if (bIsWEP) {
// strip IV header(8)
memcpy(&abyMacHdr[0], (skb->data + 4), 24);
memcpy((skb->data + 4 + cbIVOffset), &abyMacHdr[0], 24);
@@ -770,8 +769,9 @@ device_receive_frame(
//DBG_PRN_GRP12(("LocalL: %lx, LocalR: %lx\n", dwLocalMIC_L, dwLocalMIC_R));
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dwMICKey0= %lx,dwMICKey1= %lx \n", dwMICKey0, dwMICKey1);
- if ((cpu_to_le32(*pdwMIC_L) != dwLocalMIC_L) || (cpu_to_le32(*pdwMIC_R) != dwLocalMIC_R) ||
- (pDevice->bRxMICFail == true)) {
+ if ((cpu_to_le32(*pdwMIC_L) != dwLocalMIC_L) ||
+ (cpu_to_le32(*pdwMIC_R) != dwLocalMIC_R) ||
+ pDevice->bRxMICFail) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC comparison is fail!\n");
pDevice->bRxMICFail = false;
//pDevice->s802_11Counter.TKIPLocalMICFailures.QuadPart++;
@@ -894,13 +894,13 @@ device_receive_frame(
return false;
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (s_bAPModeRxData(pDevice,
+ if (!s_bAPModeRxData(pDevice,
skb,
FrameSize,
cbHeaderOffset,
iSANodeIndex,
iDANodeIndex
-) == false) {
+)) {
if (bDeFragRx) {
if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: can not alloc more frag bufs\n",
@@ -1123,7 +1123,7 @@ static bool s_bHandleRxEncryption(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pKey == NULL\n");
if (byDecMode == KEY_CTL_WEP) {
// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == true) {
+ } else if (pDevice->bLinkPass) {
// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
}
return false;
@@ -1131,7 +1131,7 @@ static bool s_bHandleRxEncryption(
if (byDecMode != pKey->byCipherSuite) {
if (byDecMode == KEY_CTL_WEP) {
// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == true) {
+ } else if (pDevice->bLinkPass) {
// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
}
*pKeyOut = NULL;
@@ -1234,7 +1234,7 @@ static bool s_bHostWepRxEncryption(
if (byDecMode != pKey->byCipherSuite) {
if (byDecMode == KEY_CTL_WEP) {
// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == true) {
+ } else if (pDevice->bLinkPass) {
// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
}
return false;
@@ -1245,7 +1245,7 @@ static bool s_bHostWepRxEncryption(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "byDecMode == KEY_CTL_WEP \n");
if ((pDevice->byLocalID <= REV_ID_VT3253_A1) ||
(((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == true) ||
- (bOnFly == false)) {
+ !bOnFly) {
// Software WEP
// 1. 3253A
// 2. WEP 256
@@ -1277,7 +1277,7 @@ static bool s_bHostWepRxEncryption(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "TSC0_15: %x\n", *pwRxTSC15_0);
if (byDecMode == KEY_CTL_TKIP) {
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1) || (bOnFly == false)) {
+ if ((pDevice->byLocalID <= REV_ID_VT3253_A1) || !bOnFly) {
// Software TKIP
// 1. 3253 A
// 2. NotOnFly
@@ -1297,7 +1297,7 @@ static bool s_bHostWepRxEncryption(
}
if (byDecMode == KEY_CTL_CCMP) {
- if (bOnFly == false) {
+ if (!bOnFly) {
// Software CCMP
// NotOnFly
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "soft KEY_CTL_CCMP\n");
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
index ab8b2ba..6eecd53 100644
--- a/drivers/staging/vt6655/hostap.c
+++ b/drivers/staging/vt6655/hostap.c
@@ -454,7 +454,7 @@ static int hostap_set_encryption(PSDevice pDevice,
unsigned long dwKeyIndex = 0;
unsigned char abyKey[MAX_KEY_LEN];
unsigned char abySeq[MAX_KEY_LEN];
- NDIS_802_11_KEY_RSC KeyRSC;
+ unsigned long long KeyRSC;
unsigned char byKeyDecMode = KEY_CTL_WEP;
int ret = 0;
int iNodeIndex = -1;
@@ -495,11 +495,11 @@ static int hostap_set_encryption(PSDevice pDevice,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " hostap_set_encryption: alg %d \n", param->u.crypt.alg);
if (param->u.crypt.alg == WPA_ALG_NONE) {
- if (pMgmt->sNodeDBTable[iNodeIndex].bOnFly == true) {
- if (KeybRemoveKey(&(pDevice->sKey),
+ if (pMgmt->sNodeDBTable[iNodeIndex].bOnFly) {
+ if (!KeybRemoveKey(&(pDevice->sKey),
param->sta_addr,
pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex,
- pDevice->PortOffset) == false) {
+ pDevice->PortOffset)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "KeybRemoveKey fail \n");
}
pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
@@ -557,7 +557,7 @@ static int hostap_set_encryption(PSDevice pDevice,
(unsigned char *)abyKey,
KEY_CTL_WEP,
pDevice->PortOffset,
- pDevice->byLocalID) == true) {
+ pDevice->byLocalID)) {
pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
} else {
@@ -624,7 +624,7 @@ static int hostap_set_encryption(PSDevice pDevice,
(unsigned char *)abyKey,
byKeyDecMode,
pDevice->PortOffset,
- pDevice->byLocalID) == true) {
+ pDevice->byLocalID)) {
pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
} else {
@@ -636,7 +636,7 @@ static int hostap_set_encryption(PSDevice pDevice,
}
- if (bKeyTableFull == true) {
+ if (bKeyTableFull) {
wKeyCtl &= 0x7F00; // clear all key control filed
wKeyCtl |= (byKeyDecMode << 4);
wKeyCtl |= (byKeyDecMode);
diff --git a/drivers/staging/vt6655/iwctl.c b/drivers/staging/vt6655/iwctl.c
index 4bff8aa..ac3fc16 100644
--- a/drivers/staging/vt6655/iwctl.c
+++ b/drivers/staging/vt6655/iwctl.c
@@ -1632,7 +1632,7 @@ int iwctl_giwsens(struct net_device *dev,
wrq->value = ldBm;
} else {
wrq->value = 0;
- };
+ }
wrq->disabled = (wrq->value == 0);
wrq->fixed = 1;
@@ -1827,7 +1827,7 @@ int iwctl_siwencodeext(struct net_device *dev,
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
struct viawget_wpa_param *param = NULL;
//original member
- wpa_alg alg_name;
+ enum wpa_alg alg_name;
u8 addr[6];
int key_idx, set_tx = 0;
u8 seq[IW_ENCODE_SEQ_MAX_SIZE];
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index 04c1304..eab3b41 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -64,13 +64,12 @@ s_vCheckKeyTableValid(PSKeyManagement pTable, unsigned long dwIoBase)
int i;
for (i = 0; i < MAX_KEY_TABLE; i++) {
- if ((pTable->KeyTable[i].bInUse == true) &&
- (pTable->KeyTable[i].PairwiseKey.bKeyValid == false) &&
- (pTable->KeyTable[i].GroupKey[0].bKeyValid == false) &&
- (pTable->KeyTable[i].GroupKey[1].bKeyValid == false) &&
- (pTable->KeyTable[i].GroupKey[2].bKeyValid == false) &&
- (pTable->KeyTable[i].GroupKey[3].bKeyValid == false)
-) {
+ if (pTable->KeyTable[i].bInUse &&
+ !pTable->KeyTable[i].PairwiseKey.bKeyValid &&
+ !pTable->KeyTable[i].GroupKey[0].bKeyValid &&
+ !pTable->KeyTable[i].GroupKey[1].bKeyValid &&
+ !pTable->KeyTable[i].GroupKey[2].bKeyValid &&
+ !pTable->KeyTable[i].GroupKey[3].bKeyValid) {
pTable->KeyTable[i].bInUse = false;
pTable->KeyTable[i].wKeyCtl = 0;
pTable->KeyTable[i].bSoftWEP = false;
@@ -140,17 +139,17 @@ bool KeybGetKey(
*pKey = NULL;
for (i = 0; i < MAX_KEY_TABLE; i++) {
- if ((pTable->KeyTable[i].bInUse == true) &&
+ if (pTable->KeyTable[i].bInUse &&
ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if (dwKeyIndex == 0xFFFFFFFF) {
- if (pTable->KeyTable[i].PairwiseKey.bKeyValid == true) {
+ if (pTable->KeyTable[i].PairwiseKey.bKeyValid) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
return true;
} else {
return false;
}
} else if (dwKeyIndex < MAX_GROUP_KEY) {
- if (pTable->KeyTable[i].GroupKey[dwKeyIndex].bKeyValid == true) {
+ if (pTable->KeyTable[i].GroupKey[dwKeyIndex].bKeyValid) {
*pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex]);
return true;
} else {
@@ -202,12 +201,11 @@ bool KeybSetKey(
j = (MAX_KEY_TABLE-1);
for (i = 0; i < (MAX_KEY_TABLE - 1); i++) {
- if ((pTable->KeyTable[i].bInUse == false) &&
- (j == (MAX_KEY_TABLE-1))) {
+ if (!pTable->KeyTable[i].bInUse && (j == (MAX_KEY_TABLE-1))) {
// found empty table
j = i;
}
- if ((pTable->KeyTable[i].bInUse == true) &&
+ if (pTable->KeyTable[i].bInUse &&
ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
// found table already exist
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
@@ -384,7 +382,7 @@ bool KeybRemoveKey(
}
for (i = 0; i < MAX_KEY_TABLE; i++) {
- if ((pTable->KeyTable[i].bInUse == true) &&
+ if (pTable->KeyTable[i].bInUse &&
ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
@@ -428,7 +426,7 @@ bool KeybRemoveAllKey(
int i, u;
for (i = 0; i < MAX_KEY_TABLE; i++) {
- if ((pTable->KeyTable[i].bInUse == true) &&
+ if (pTable->KeyTable[i].bInUse &&
ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
for (u = 0; u < MAX_GROUP_KEY; u++) {
@@ -461,7 +459,7 @@ void KeyvRemoveWEPKey(
)
{
if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
- if (pTable->KeyTable[MAX_KEY_TABLE-1].bInUse == true) {
+ if (pTable->KeyTable[MAX_KEY_TABLE-1].bInUse) {
if (pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].byCipherSuite == KEY_CTL_WEP) {
pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = false;
if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex & 0x7FFFFFFF)) {
@@ -511,10 +509,10 @@ bool KeybGetTransmitKey(
*pKey = NULL;
for (i = 0; i < MAX_KEY_TABLE; i++) {
- if ((pTable->KeyTable[i].bInUse == true) &&
+ if (pTable->KeyTable[i].bInUse &&
ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if (dwKeyType == PAIRWISE_KEY) {
- if (pTable->KeyTable[i].PairwiseKey.bKeyValid == true) {
+ if (pTable->KeyTable[i].PairwiseKey.bKeyValid) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "KeybGetTransmitKey:");
@@ -535,7 +533,7 @@ bool KeybGetTransmitKey(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ERROR: dwGTKeyIndex == 0 !!!\n");
return false;
}
- if (pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)].bKeyValid == true) {
+ if (pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)].bKeyValid) {
*pKey = &(pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)]);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "KeybGetTransmitKey:");
@@ -583,8 +581,8 @@ bool KeybCheckPairewiseKey(
*pKey = NULL;
for (i = 0; i < MAX_KEY_TABLE; i++) {
- if ((pTable->KeyTable[i].bInUse == true) &&
- (pTable->KeyTable[i].PairwiseKey.bKeyValid == true)) {
+ if (pTable->KeyTable[i].bInUse &&
+ pTable->KeyTable[i].PairwiseKey.bKeyValid) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
return true;
}
@@ -657,7 +655,7 @@ bool KeybSetDefaultKey(
pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0x4000; // disable on-fly disable address match
pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP = true;
} else {
- if (pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP == false)
+ if (!pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP)
pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0xC000; // enable on-fly disable address match
}
@@ -740,7 +738,7 @@ bool KeybSetAllGroupKey(
}
for (i = 0; i < MAX_KEY_TABLE - 1; i++) {
- if (pTable->KeyTable[i].bInUse == true) {
+ if (pTable->KeyTable[i].bInUse) {
// found table already exist
// Group key
pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF]);
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 001d15c..21bd8a1 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -957,13 +957,13 @@ bool MACbSafeStop(unsigned long dwIoBase)
{
MACvRegBitsOff(dwIoBase, MAC_REG_TCR, TCR_AUTOBCNTX);
- if (MACbSafeRxOff(dwIoBase) == false) {
+ if (!MACbSafeRxOff(dwIoBase)) {
DBG_PORT80(0xA1);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " MACbSafeRxOff == false)\n");
MACbSafeSoftwareReset(dwIoBase);
return false;
}
- if (MACbSafeTxOff(dwIoBase) == false) {
+ if (!MACbSafeTxOff(dwIoBase)) {
DBG_PORT80(0xA2);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " MACbSafeTxOff == false)\n");
MACbSafeSoftwareReset(dwIoBase);
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 2340d2f..4bd1ccb 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -260,7 +260,7 @@ PSvSendPSPOLL(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send PS-Poll packet failed..\n");
} else {
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send PS-Poll packet success..\n");
- };
+ }
return;
}
@@ -284,16 +284,15 @@ PSbSendNullPacket(
PSMgmtObject pMgmt = pDevice->pMgmt;
unsigned int uIdx;
- if (pDevice->bLinkPass == false) {
+ if (!pDevice->bLinkPass) {
return false;
}
#ifdef TxInSleep
- if ((pDevice->bEnablePSMode == false) &&
- (pDevice->fTxDataInSleep == false)) {
+ if (!pDevice->bEnablePSMode && !pDevice->fTxDataInSleep) {
return false;
}
#else
- if (pDevice->bEnablePSMode == false) {
+ if (!pDevice->bEnablePSMode) {
return false;
}
#endif
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index ce173cc..edb1b27 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -976,7 +976,7 @@ bool RFbSetPower(
}
bResult = RFbRawSetPower(pDevice, byPwr, uRATE);
- if (bResult == true) {
+ if (bResult) {
pDevice->byCurPwr = byPwr;
}
return bResult;
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 3a2661e..6affd6e 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -435,7 +435,7 @@ s_uGetDataDuration(
switch (byDurType) {
case DATADUR_B: //DATADUR_B
- if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
+ if (((uMACfragNum == 1)) || bLastFrag) {//Non Frag or Last Frag
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
return pDevice->uSIFS + uAckTime;
@@ -458,7 +458,7 @@ s_uGetDataDuration(
break;
case DATADUR_A: //DATADUR_A
- if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
+ if (((uMACfragNum == 1)) || bLastFrag) {//Non Frag or Last Frag
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return pDevice->uSIFS + uAckTime;
@@ -481,7 +481,7 @@ s_uGetDataDuration(
break;
case DATADUR_A_F0: //DATADUR_A_F0
- if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
+ if (((uMACfragNum == 1)) || bLastFrag) {//Non Frag or Last Frag
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return pDevice->uSIFS + uAckTime;
@@ -523,7 +523,7 @@ s_uGetDataDuration(
break;
case DATADUR_A_F1: //DATADUR_A_F1
- if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
+ if (((uMACfragNum == 1)) || bLastFrag) {//Non Frag or Last Frag
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return pDevice->uSIFS + uAckTime;
@@ -2212,7 +2212,7 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
else {
bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
- };
+ }
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
(pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
@@ -2686,7 +2686,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, unsigned char *pbMPDU, un
}
bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
- };
+ }
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
(pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
diff --git a/drivers/staging/vt6655/vntwifi.c b/drivers/staging/vt6655/vntwifi.c
index d2bdb71..e78aedf 100644
--- a/drivers/staging/vt6655/vntwifi.c
+++ b/drivers/staging/vt6655/vntwifi.c
@@ -494,7 +494,7 @@ VNTWIFIvUpdateNodeTxCounter(
}
}
pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts++;
- if (bTxOk == true) {
+ if (bTxOk) {
// transmit success, TxAttempts at least plus one
pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wRate]++;
@@ -584,7 +584,7 @@ VNTWIFIbyGetKeyCypher(
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
- if (bGroupKey == true) {
+ if (bGroupKey) {
return pMgmt->byCSSGK;
} else {
return pMgmt->byCSSPK;
@@ -731,7 +731,7 @@ VNTWIFIbMeasureReport(
pMgmt->uLengthOfRepEIDs += (2 + pMgmt->pCurrMeasureEIDRep->len);
pMgmt->pCurrMeasureEIDRep = (PWLAN_IE_MEASURE_REP) pbyCurrentEID;
}
- if (bEndOfReport == true) {
+ if (bEndOfReport) {
IEEE11hbMSRRepTx(pMgmt);
}
//spin_unlock_irq(&pDevice->lock);
diff --git a/drivers/staging/vt6655/wcmd.c b/drivers/staging/vt6655/wcmd.c
index 9c57eef..72caaa2 100644
--- a/drivers/staging/vt6655/wcmd.c
+++ b/drivers/staging/vt6655/wcmd.c
@@ -317,7 +317,7 @@ vCommandTimer(
if (pDevice->dwDiagRefCount != 0)
return;
- if (pDevice->bCmdRunning != true)
+ if (!pDevice->bCmdRunning)
return;
spin_lock_irq(&pDevice->lock);
@@ -326,7 +326,7 @@ vCommandTimer(
case WLAN_CMD_SCAN_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == true) {
+ if (pDevice->bRadioOff) {
s_bCommandComplete(pDevice);
spin_unlock_irq(&pDevice->lock);
return;
@@ -393,7 +393,7 @@ vCommandTimer(
vAdHocBeaconStop(pDevice);
- if (set_channel(pMgmt->pAdapter, pMgmt->uScanChannel) == true) {
+ if (set_channel(pMgmt->pAdapter, pMgmt->uScanChannel)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "SCAN Channel: %d\n", pMgmt->uScanChannel);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "SET SCAN Channel Fail: %d\n", pMgmt->uScanChannel);
@@ -408,7 +408,7 @@ vCommandTimer(
}
- if ((pMgmt->b11hEnable == false) ||
+ if (!pMgmt->b11hEnable ||
(pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
s_vProbeChannel(pDevice);
spin_unlock_irq(&pDevice->lock);
@@ -498,7 +498,7 @@ vCommandTimer(
case WLAN_CMD_SSID_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == true) {
+ if (pDevice->bRadioOff) {
s_bCommandComplete(pDevice);
spin_unlock_irq(&pDevice->lock);
return;
@@ -659,7 +659,7 @@ vCommandTimer(
netif_wake_queue(pDevice->dev);
}
#ifdef TxInSleep
- if (pDevice->IsTxDataTrigger != false) { //TxDataTimer is not triggered at the first time
+ if (pDevice->IsTxDataTrigger) { //TxDataTimer is not triggered at the first time
del_timer(&pDevice->sTimerTxData);
init_timer(&pDevice->sTimerTxData);
pDevice->sTimerTxData.data = (unsigned long) pDevice;
@@ -694,7 +694,7 @@ vCommandTimer(
pMgmt->eCurrState = WMAC_STATE_IDLE;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pDevice->bLinkPass = false;
- if (pDevice->bEnableHostWEP == true)
+ if (pDevice->bEnableHostWEP)
BSSvClearNodeDBTable(pDevice, 1);
else
BSSvClearNodeDBTable(pDevice, 0);
@@ -776,7 +776,7 @@ vCommandTimer(
case WLAN_CMD_RADIO_START:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "eCommandState == WLAN_CMD_RADIO_START\n");
- if (pDevice->bRadioCmd == true)
+ if (pDevice->bRadioCmd)
CARDbRadioPowerOn(pDevice);
else
CARDbRadioPowerOff(pDevice);
@@ -948,7 +948,7 @@ bool bScheduleCommand(
ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdEnqueueIdx, CMD_Q_SIZE);
pDevice->cbFreeCmdQueue--;
- if (pDevice->bCmdRunning == false) {
+ if (!pDevice->bCmdRunning) {
s_bCommandComplete(pDevice);
} else {
}
@@ -1031,8 +1031,8 @@ BSSvSecondTxData(
spin_lock_irq(&pDevice->lock);
#if 1
- if (((pDevice->bLinkPass == true) && (pMgmt->eAuthenMode < WMAC_AUTH_WPA)) || //open && sharekey linking
- (pDevice->fWPA_Authened == true)) { //wpa linking
+ if ((pDevice->bLinkPass && (pMgmt->eAuthenMode < WMAC_AUTH_WPA)) || //open && sharekey linking
+ pDevice->fWPA_Authened) { //wpa linking
#else
if (pDevice->bLinkPass == true) {
#endif
diff --git a/drivers/staging/vt6655/wctl.c b/drivers/staging/vt6655/wctl.c
index f05f9f5..950039f 100644
--- a/drivers/staging/vt6655/wctl.c
+++ b/drivers/staging/vt6655/wctl.c
@@ -110,7 +110,7 @@ unsigned int WCTLuSearchDFCB(PSDevice pDevice, PS802_11Header pMACHeader)
unsigned int ii;
for (ii = 0; ii < pDevice->cbDFCB; ii++) {
- if ((pDevice->sRxDFCB[ii].bInUse == true) &&
+ if (pDevice->sRxDFCB[ii].bInUse &&
ether_addr_equal(pDevice->sRxDFCB[ii].abyAddr2,
pMACHeader->abyAddr2)) {
//
@@ -141,7 +141,7 @@ unsigned int WCTLuInsertDFCB(PSDevice pDevice, PS802_11Header pMACHeader)
if (pDevice->cbFreeDFCB == 0)
return pDevice->cbDFCB;
for (ii = 0; ii < pDevice->cbDFCB; ii++) {
- if (pDevice->sRxDFCB[ii].bInUse == false) {
+ if (!pDevice->sRxDFCB[ii].bInUse) {
pDevice->cbFreeDFCB--;
pDevice->sRxDFCB[ii].uLifetime = pDevice->dwMaxReceiveLifetime;
pDevice->sRxDFCB[ii].bInUse = true;
@@ -174,7 +174,7 @@ bool WCTLbHandleFragment(PSDevice pDevice, PS802_11Header pMACHeader, unsigned i
{
unsigned int uHeaderSize;
- if (bWEP == true) {
+ if (bWEP) {
uHeaderSize = 28;
if (bExtIV)
// ExtIV
diff --git a/drivers/staging/vt6655/wmgr.c b/drivers/staging/vt6655/wmgr.c
index ed4b32b..5200a2a 100644
--- a/drivers/staging/vt6655/wmgr.c
+++ b/drivers/staging/vt6655/wmgr.c
@@ -468,15 +468,15 @@ vMgrAssocBeginSta(
// ERP Phy (802.11g) should support short preamble.
if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- if (CARDbIsShorSlotTime(pMgmt->pAdapter) == true) {
+ if (CARDbIsShorSlotTime(pMgmt->pAdapter)) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
}
} else if (pMgmt->eCurrentPHYMode == PHY_TYPE_11B) {
- if (CARDbIsShortPreamble(pMgmt->pAdapter) == true) {
+ if (CARDbIsShortPreamble(pMgmt->pAdapter)) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
}
}
- if (pMgmt->b11hEnable == true)
+ if (pMgmt->b11hEnable)
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
/* build an assocreq frame and send it */
@@ -539,15 +539,15 @@ vMgrReAssocBeginSta(
// ERP Phy (802.11g) should support short preamble.
if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- if (CARDbIsShorSlotTime(pMgmt->pAdapter) == true) {
+ if (CARDbIsShorSlotTime(pMgmt->pAdapter)) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
}
} else if (pMgmt->eCurrentPHYMode == PHY_TYPE_11B) {
- if (CARDbIsShortPreamble(pMgmt->pAdapter) == true) {
+ if (CARDbIsShortPreamble(pMgmt->pAdapter)) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
}
}
- if (pMgmt->b11hEnable == true)
+ if (pMgmt->b11hEnable)
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
pTxPacket = s_MgrMakeReAssocRequest
@@ -736,7 +736,7 @@ s_vMgrRxAssocRequest(
pDevice->bProtectMode = true;
pDevice->bNonERPPresent = true;
}
- if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble == false) {
+ if (!pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble) {
pDevice->bBarkerPreambleMd = true;
}
@@ -891,7 +891,7 @@ s_vMgrRxReAssocRequest(
pDevice->bProtectMode = true;
pDevice->bNonERPPresent = true;
}
- if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble == false) {
+ if (!pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble) {
pDevice->bBarkerPreambleMd = true;
}
@@ -1719,7 +1719,7 @@ s_vMgrRxDeauthentication(
}
/* else, ignore it. TODO: IBSS authentication service
would be implemented here */
- };
+ }
return;
}
@@ -1837,7 +1837,7 @@ s_vMgrRxBeacon(
bChannelHit = true;
}
//2008-0730-01<Add>by MikeLiu
- if (ChannelExceedZoneType(pDevice, byCurrChannel) == true)
+ if (ChannelExceedZoneType(pDevice, byCurrChannel))
return;
if (sFrame.pERP != NULL) {
@@ -1957,9 +1957,9 @@ s_vMgrRxBeacon(
}
}
- if ((WLAN_GET_CAP_INFO_ESS(*sFrame.pwCapInfo) == true) &&
- (bIsBSSIDEqual == true) &&
- (bIsSSIDEqual == true) &&
+ if (WLAN_GET_CAP_INFO_ESS(*sFrame.pwCapInfo) &&
+ bIsBSSIDEqual &&
+ bIsSSIDEqual &&
(pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
(pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
// add state check to prevent reconnect fail since we'll receive Beacon
@@ -2001,7 +2001,7 @@ s_vMgrRxBeacon(
&(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
&(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate)
);
- if (bUpdatePhyParameter == true) {
+ if (bUpdatePhyParameter) {
CARDbSetPhyParameter(pMgmt->pAdapter,
pMgmt->eCurrentPHYMode,
pMgmt->wCurrCapInfo,
@@ -2023,7 +2023,7 @@ s_vMgrRxBeacon(
sFrame.pIE_CHSW->byCount
);
- } else if (bIsChannelEqual == false) {
+ } else if (!bIsChannelEqual) {
set_channel(pMgmt->pAdapter, pBSSList->uChannel);
}
}
@@ -2067,12 +2067,12 @@ s_vMgrRxBeacon(
}
// if infra mode
- if (bIsAPBeacon == true) {
+ if (bIsAPBeacon) {
// Infra mode: Local TSF always follow AP's TSF if Difference huge.
if (bTSFLargeDiff)
bUpdateTSF = true;
- if ((pDevice->bEnablePSMode == true) && (sFrame.pTIM != 0)) {
+ if (pDevice->bEnablePSMode && (sFrame.pTIM != 0)) {
// deal with DTIM, analysis TIM
pMgmt->bMulticastTIM = WLAN_MGMT_IS_MULTICAST_TIM(sFrame.pTIM->byBitMapCtl) ? true : false;
pMgmt->byDTIMCount = sFrame.pTIM->byDTIMCount;
@@ -2092,10 +2092,10 @@ s_vMgrRxBeacon(
pMgmt->bInTIM = sFrame.pTIM->byVirtBitMap[uLocateByteIndex] & byTIMBitOn ? true : false;
} else {
pMgmt->bInTIM = false;
- };
+ }
} else {
pMgmt->bInTIM = false;
- };
+ }
if (pMgmt->bInTIM ||
(pMgmt->bMulticastTIM && (pMgmt->byDTIMCount == 0))) {
@@ -2110,7 +2110,7 @@ s_vMgrRxBeacon(
} else {
pMgmt->bInTIMWake = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN: Not In TIM..\n");
- if (pDevice->bPWBitOn == false) {
+ if (!pDevice->bPWBitOn) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN: Send Null Packet\n");
if (PSbSendNullPacket(pDevice))
pDevice->bPWBitOn = true;
@@ -2332,10 +2332,10 @@ vMgrCreateOwnIBSS(
}
// Disable Protect Mode
- pDevice->bProtectMode = 0;
+ pDevice->bProtectMode = false;
MACvDisableProtectMD(pDevice->PortOffset);
- pDevice->bBarkerPreambleMd = 0;
+ pDevice->bBarkerPreambleMd = false;
MACvDisableBarkerPreambleMd(pDevice->PortOffset);
// Kyle Test 2003.11.04
@@ -2480,7 +2480,7 @@ vMgrCreateOwnIBSS(
pMgmt->wCurrCapInfo &= (~WLAN_SET_CAP_INFO_SHORTPREAMBLE(1));
}
- if ((pMgmt->b11hEnable == true) &&
+ if (pMgmt->b11hEnable &&
(pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
} else {
@@ -2530,7 +2530,7 @@ vMgrJoinBSSBegin(
unsigned char byTopOFDMBasicRate = RATE_1M;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (pMgmt->sBSSList[ii].bActive == true)
+ if (pMgmt->sBSSList[ii].bActive)
break;
}
@@ -2656,7 +2656,7 @@ vMgrJoinBSSBegin(
if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) {
bool bResult = bAdd_PMKID_Candidate((void *)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bAdd_PMKID_Candidate: 1(%d)\n", bResult);
- if (bResult == false) {
+ if (!bResult) {
vFlush_PMKID_Candidate((void *)pDevice);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "vFlush_PMKID_Candidate: 4\n");
bAdd_PMKID_Candidate((void *)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
@@ -2671,19 +2671,19 @@ vMgrJoinBSSBegin(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "End of Join AP -- A/B/G Action\n");
} else {
pMgmt->eCurrState = WMAC_STATE_IDLE;
- };
+ }
} else {
// ad-hoc mode BSS
if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- if (WPA_SearchRSN(0, WPA_TKIP, pCurr) == false) {
+ if (!WPA_SearchRSN(0, WPA_TKIP, pCurr)) {
// encryption mode error
pMgmt->eCurrState = WMAC_STATE_IDLE;
return;
}
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- if (WPA_SearchRSN(0, WPA_AESCCMP, pCurr) == false) {
+ if (!WPA_SearchRSN(0, WPA_AESCCMP, pCurr)) {
// encryption mode error
pMgmt->eCurrState = WMAC_STATE_IDLE;
return;
@@ -2740,8 +2740,8 @@ vMgrJoinBSSBegin(
bMgrPrepareBeaconToSend((void *)pDevice, pMgmt);
} else {
pMgmt->eCurrState = WMAC_STATE_IDLE;
- };
- };
+ }
+ }
return;
}
@@ -2776,10 +2776,10 @@ s_vMgrSynchBSS(
*pStatus = CMD_STATUS_FAILURE;
- if (s_bCipherMatch(pCurr,
+ if (!s_bCipherMatch(pCurr,
pDevice->eEncryptionStatus,
&(pMgmt->byCSSPK),
- &(pMgmt->byCSSGK)) == false) {
+ &(pMgmt->byCSSGK))) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "s_bCipherMatch Fail .......\n");
return;
}
@@ -2869,18 +2869,17 @@ s_vMgrSynchBSS(
CARDbSetBSSID(pMgmt->pAdapter, pCurr->abyBSSID, OP_MODE_ADHOC);
}
- if (CARDbSetPhyParameter(pMgmt->pAdapter,
+ if (!CARDbSetPhyParameter(pMgmt->pAdapter,
ePhyType,
pCurr->wCapInfo,
pCurr->sERP.byERP,
pMgmt->abyCurrSuppRates,
- pMgmt->abyCurrExtSuppRates
- ) != true) {
+ pMgmt->abyCurrExtSuppRates)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "<----s_bSynchBSS Set Phy Mode Fail [%d]\n", ePhyType);
return;
}
// set channel and clear NAV
- if (set_channel(pMgmt->pAdapter, pCurr->uChannel) == false) {
+ if (!set_channel(pMgmt->pAdapter, pCurr->uChannel)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "<----s_bSynchBSS Set Channel [%d]\n", pCurr->uChannel);
return;
}
@@ -2924,7 +2923,7 @@ static void Encyption_Rebuild(
if ((pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) || //networkmanager 0.7.0 does not give the pairwise-key selection,
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) { // so we need re-select it according to real pairwise-key info.
- if (pCurr->bWPAValid == true) { //WPA-PSK
+ if (pCurr->bWPAValid) { //WPA-PSK
pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
if (pCurr->abyPKType[0] == WPA_TKIP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; //TKIP
@@ -2933,7 +2932,7 @@ static void Encyption_Rebuild(
pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; //AES
PRINT_K("Encyption_Rebuild--->ssid reset config to [WPAPSK-AES]\n");
}
- } else if (pCurr->bWPA2Valid == true) { //WPA2-PSK
+ } else if (pCurr->bWPA2Valid) { //WPA2-PSK
pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
if (pCurr->abyCSSPK[0] == WLAN_11i_CSS_TKIP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; //TKIP
@@ -3150,8 +3149,7 @@ s_MgrMakeBeacon(
}
}
- if ((pMgmt->b11hEnable == true) &&
- (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
+ if (pMgmt->b11hEnable && (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
// Country IE
pbyBuffer = (unsigned char *)(sFrame.pBuf + sFrame.len);
set_country_IE(pMgmt->pAdapter, pbyBuffer);
@@ -3164,7 +3162,7 @@ s_MgrMakeBeacon(
((PWLAN_IE_PW_CONST) pbyBuffer)->byPower = 0;
pbyBuffer += (1) + WLAN_IEHDR_LEN;
uLength += (1) + WLAN_IEHDR_LEN;
- if (pMgmt->bSwitchChannel == true) {
+ if (pMgmt->bSwitchChannel) {
// Channel Switch IE
((PWLAN_IE_CH_SW) pbyBuffer)->byElementID = WLAN_EID_CH_SWITCH;
((PWLAN_IE_CH_SW) pbyBuffer)->len = 3;
@@ -3193,7 +3191,7 @@ s_MgrMakeBeacon(
pbyBuffer += (7) + WLAN_IEHDR_LEN;
uLength += (7) + WLAN_IEHDR_LEN;
for (ii = CB_MAX_CHANNEL_24G+1; ii <= CB_MAX_CHANNEL; ii++) {
- if (get_channel_map_info(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1) == true) {
+ if (get_channel_map_info(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1)) {
pbyBuffer += 2;
uLength += 2;
pIBSSDFS->len += 2;
@@ -3209,11 +3207,11 @@ s_MgrMakeBeacon(
sFrame.pERP->byElementID = WLAN_EID_ERP;
sFrame.pERP->len = 1;
sFrame.pERP->byContext = 0;
- if (pDevice->bProtectMode == true)
+ if (pDevice->bProtectMode)
sFrame.pERP->byContext |= WLAN_EID_ERP_USE_PROTECTION;
- if (pDevice->bNonERPPresent == true)
+ if (pDevice->bNonERPPresent)
sFrame.pERP->byContext |= WLAN_EID_ERP_NONERP_PRESENT;
- if (pDevice->bBarkerPreambleMd == true)
+ if (pDevice->bBarkerPreambleMd)
sFrame.pERP->byContext |= WLAN_EID_ERP_BARKER_MODE;
}
if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) {
@@ -3225,7 +3223,7 @@ s_MgrMakeBeacon(
);
}
// hostapd wpa/wpa2 IE
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnableHostapd == true)) {
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && pDevice->bEnableHostapd) {
if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
if (pMgmt->wWPAIELen != 0) {
sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
@@ -3338,16 +3336,15 @@ s_MgrMakeProbeResponse(
sFrame.pERP->byElementID = WLAN_EID_ERP;
sFrame.pERP->len = 1;
sFrame.pERP->byContext = 0;
- if (pDevice->bProtectMode == true)
+ if (pDevice->bProtectMode)
sFrame.pERP->byContext |= WLAN_EID_ERP_USE_PROTECTION;
- if (pDevice->bNonERPPresent == true)
+ if (pDevice->bNonERPPresent)
sFrame.pERP->byContext |= WLAN_EID_ERP_NONERP_PRESENT;
- if (pDevice->bBarkerPreambleMd == true)
+ if (pDevice->bBarkerPreambleMd)
sFrame.pERP->byContext |= WLAN_EID_ERP_BARKER_MODE;
}
- if ((pMgmt->b11hEnable == true) &&
- (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
+ if (pMgmt->b11hEnable && (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
// Country IE
pbyBuffer = (unsigned char *)(sFrame.pBuf + sFrame.len);
set_country_IE(pMgmt->pAdapter, pbyBuffer);
@@ -3360,7 +3357,7 @@ s_MgrMakeProbeResponse(
((PWLAN_IE_PW_CONST) pbyBuffer)->byPower = 0;
pbyBuffer += (1) + WLAN_IEHDR_LEN;
uLength += (1) + WLAN_IEHDR_LEN;
- if (pMgmt->bSwitchChannel == true) {
+ if (pMgmt->bSwitchChannel) {
// Channel Switch IE
((PWLAN_IE_CH_SW) pbyBuffer)->byElementID = WLAN_EID_CH_SWITCH;
((PWLAN_IE_CH_SW) pbyBuffer)->len = 3;
@@ -3389,7 +3386,7 @@ s_MgrMakeProbeResponse(
pbyBuffer += (7) + WLAN_IEHDR_LEN;
uLength += (7) + WLAN_IEHDR_LEN;
for (ii = CB_MAX_CHANNEL_24G + 1; ii <= CB_MAX_CHANNEL; ii++) {
- if (get_channel_map_info(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1) == true) {
+ if (get_channel_map_info(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1)) {
pbyBuffer += 2;
uLength += 2;
pIBSSDFS->len += 2;
@@ -3409,7 +3406,7 @@ s_MgrMakeProbeResponse(
}
// hostapd wpa/wpa2 IE
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnableHostapd == true)) {
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && pDevice->bEnableHostapd) {
if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
if (pMgmt->wWPAIELen != 0) {
sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
@@ -3507,7 +3504,7 @@ s_MgrMakeAssocRequest(
pbyIEs += pCurrRates->len + WLAN_IEHDR_LEN;
// for 802.11h
- if (pMgmt->b11hEnable == true) {
+ if (pMgmt->b11hEnable) {
if (sFrame.pCurrPowerCap == NULL) {
sFrame.pCurrPowerCap = (PWLAN_IE_PW_CAP)(sFrame.pBuf + sFrame.len);
sFrame.len += (2 + WLAN_IEHDR_LEN);
@@ -3650,7 +3647,7 @@ s_MgrMakeAssocRequest(
sFrame.pRSN->len += 6;
// RSN Capabilities
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == true) {
+ if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist) {
memcpy(&sFrame.pRSN->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
} else {
sFrame.pRSN->abyRSN[16] = 0;
@@ -3658,7 +3655,7 @@ s_MgrMakeAssocRequest(
}
sFrame.pRSN->len += 2;
- if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && (pDevice->bRoaming == true) && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
+ if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && pDevice->bRoaming && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
// RSN PMKID
pbyRSN = &sFrame.pRSN->abyRSN[18];
pwPMKID = (unsigned short *)pbyRSN; // Point to PMKID count
@@ -3896,7 +3893,7 @@ s_MgrMakeReAssocRequest(
sFrame.pRSN->len += 6;
// RSN Capabilities
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == true) {
+ if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist) {
memcpy(&sFrame.pRSN->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
} else {
sFrame.pRSN->abyRSN[16] = 0;
@@ -3904,7 +3901,7 @@ s_MgrMakeReAssocRequest(
}
sFrame.pRSN->len += 2;
- if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && (pDevice->bRoaming == true) && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
+ if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && pDevice->bRoaming && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
// RSN PMKID
pbyRSN = &sFrame.pRSN->abyRSN[18];
pwPMKID = (unsigned short *)pbyRSN; // Point to PMKID count
@@ -4141,7 +4138,7 @@ s_vMgrRxProbeResponse(
}
//2008-0730-01<Add>by MikeLiu
- if (ChannelExceedZoneType(pDevice, byCurrChannel) == true)
+ if (ChannelExceedZoneType(pDevice, byCurrChannel))
return;
if (sFrame.pERP != NULL) {
@@ -4578,7 +4575,7 @@ bAdd_PMKID_Candidate(
for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) {
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
if (!memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) {
- if ((psRSNCapObj->bRSNCapExist == true) && (psRSNCapObj->wRSNCap & BIT0)) {
+ if (psRSNCapObj->bRSNCapExist && (psRSNCapObj->wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
@@ -4589,7 +4586,7 @@ bAdd_PMKID_Candidate(
// New Candidate
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[pDevice->gsPMKIDCandidate.NumCandidates];
- if ((psRSNCapObj->bRSNCapExist == true) && (psRSNCapObj->wRSNCap & BIT0)) {
+ if (psRSNCapObj->bRSNCapExist && (psRSNCapObj->wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
@@ -4650,7 +4647,7 @@ s_bCipherMatch(
}
if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
- (pBSSNode->bWPA2Valid == true) &&
+ pBSSNode->bWPA2Valid &&
//20080123-01,<Add> by Einsn Liu
((EncStatus == Ndis802_11Encryption3Enabled) || (EncStatus == Ndis802_11Encryption2Enabled))) {
//WPA2
@@ -4684,7 +4681,7 @@ s_bCipherMatch(
}
} else if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
- (pBSSNode->bWPAValid == true) &&
+ pBSSNode->bWPAValid &&
((EncStatus == Ndis802_11Encryption3Enabled) || (EncStatus == Ndis802_11Encryption2Enabled))) {
//WPA
// check Group Key Cipher
diff --git a/drivers/staging/vt6655/wpa.c b/drivers/staging/vt6655/wpa.c
index b697fa6..990ea0f 100644
--- a/drivers/staging/vt6655/wpa.c
+++ b/drivers/staging/vt6655/wpa.c
@@ -241,7 +241,7 @@ WPA_SearchRSN(
int ii;
unsigned char byPKType = WPA_NONE;
- if (pBSSList->bWPAValid == false)
+ if (!pBSSList->bWPAValid)
return false;
switch (byCmd) {
diff --git a/drivers/staging/vt6655/wpa2.c b/drivers/staging/vt6655/wpa2.c
index 089788d..2013122 100644
--- a/drivers/staging/vt6655/wpa2.c
+++ b/drivers/staging/vt6655/wpa2.c
@@ -42,14 +42,14 @@ static int msglevel = MSG_LEVEL_INFO;
/*--------------------- Static Variables --------------------------*/
-const unsigned char abyOUIGK[4] = { 0x00, 0x0F, 0xAC, 0x00 };
-const unsigned char abyOUIWEP40[4] = { 0x00, 0x0F, 0xAC, 0x01 };
-const unsigned char abyOUIWEP104[4] = { 0x00, 0x0F, 0xAC, 0x05 };
-const unsigned char abyOUITKIP[4] = { 0x00, 0x0F, 0xAC, 0x02 };
-const unsigned char abyOUICCMP[4] = { 0x00, 0x0F, 0xAC, 0x04 };
+static const unsigned char abyOUIGK[4] = { 0x00, 0x0F, 0xAC, 0x00 };
+static const unsigned char abyOUIWEP40[4] = { 0x00, 0x0F, 0xAC, 0x01 };
+static const unsigned char abyOUIWEP104[4] = { 0x00, 0x0F, 0xAC, 0x05 };
+static const unsigned char abyOUITKIP[4] = { 0x00, 0x0F, 0xAC, 0x02 };
+static const unsigned char abyOUICCMP[4] = { 0x00, 0x0F, 0xAC, 0x04 };
-const unsigned char abyOUI8021X[4] = { 0x00, 0x0F, 0xAC, 0x01 };
-const unsigned char abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
+static const unsigned char abyOUI8021X[4] = { 0x00, 0x0F, 0xAC, 0x01 };
+static const unsigned char abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
/*--------------------- Static Functions --------------------------*/
@@ -192,7 +192,7 @@ WPA2vParseRSN(
break;
} //for
- if (bUseGK == true) {
+ if (bUseGK) {
if (j != 1) {
// invalid CSS, This should be only PK CSS.
return;
@@ -335,7 +335,7 @@ WPA2uSetIEs(
pRSNIEs->len += 2;
if ((pMgmt->gsPMKIDCache.BSSIDInfoCount > 0) &&
- (pMgmt->bRoaming == true) &&
+ pMgmt->bRoaming &&
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
// RSN PMKID
pwPMKID = (unsigned short *)(&pRSNIEs->abyRSN[18]); // Point to PMKID count
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index 044368a..d17224f 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -202,11 +202,11 @@ int wpa_set_keys(PSDevice pDevice, void *ctx, bool fcpfkernel)
int uu, ii;
if (param->u.wpa_key.alg_name > WPA_ALG_CCMP ||
- param->u.wpa_key.key_len >= MAX_KEY_LEN ||
- param->u.wpa_key.seq_len >= MAX_KEY_LEN)
+ param->u.wpa_key.key_len > MAX_KEY_LEN ||
+ param->u.wpa_key.seq_len > MAX_KEY_LEN)
return -EINVAL;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n", param->u.wpa_key.alg_name);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d\n", param->u.wpa_key.alg_name);
if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
pDevice->bEncryptionEnable = false;
@@ -341,22 +341,22 @@ int wpa_set_keys(PSDevice pDevice, void *ctx, bool fcpfkernel)
// If is_broadcast_ether_addr, set the key as every key entry's group key.
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Groupe Key Assign.\n");
- if ((KeybSetAllGroupKey(&(pDevice->sKey),
+ if (KeybSetAllGroupKey(&(pDevice->sKey),
dwKeyIndex,
param->u.wpa_key.key_len,
(PQWORD) &(KeyRSC),
(unsigned char *)abyKey,
byKeyDecMode,
pDevice->PortOffset,
- pDevice->byLocalID) == true) &&
- (KeybSetDefaultKey(&(pDevice->sKey),
+ pDevice->byLocalID) &&
+ KeybSetDefaultKey(&(pDevice->sKey),
dwKeyIndex,
param->u.wpa_key.key_len,
(PQWORD) &(KeyRSC),
(unsigned char *)abyKey,
byKeyDecMode,
pDevice->PortOffset,
- pDevice->byLocalID) == true)) {
+ pDevice->byLocalID)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
} else {
@@ -389,7 +389,7 @@ int wpa_set_keys(PSDevice pDevice, void *ctx, bool fcpfkernel)
(unsigned char *)abyKey,
byKeyDecMode,
pDevice->PortOffset,
- pDevice->byLocalID) == true) {
+ pDevice->byLocalID)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
} else {
@@ -415,7 +415,7 @@ int wpa_set_keys(PSDevice pDevice, void *ctx, bool fcpfkernel)
//spin_unlock_irq(&pDevice->lock);
/*
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " key=%x-%x-%x-%x-%x-xxxxx \n",
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " key=%x-%x-%x-%x-%x-xxxxx\n",
pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][0],
pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][1],
pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][2],
@@ -596,7 +596,7 @@ static int wpa_get_scan(PSDevice pDevice,
ptempBSS = kmalloc(sizeof(KnownBSS), (int)GFP_ATOMIC);
if (ptempBSS == NULL) {
- printk("bubble sort kmalloc memory fail@@@\n");
+ printk(KERN_ERR "bubble sort kmalloc memory fail@@@\n");
ret = -ENOMEM;
@@ -804,7 +804,7 @@ static int wpa_set_associate(PSDevice pDevice,
else
pDevice->bEncryptionEnable = false;
if (!((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
- ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bWepEnabled == true)))) //DavidWang //20080717-06,<Modify> by chester//Not to initial WEP
+	      ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && bWepEnabled))) //DavidWang //20080717-06,<Modify> by chester //Do not initialize WEP
KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
spin_lock_irq(&pDevice->lock);
pDevice->bLinkPass = false;
@@ -869,18 +869,18 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
switch (param->cmd) {
case VIAWGET_SET_WPA:
ret = wpa_set_wpa(pDevice, param);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA\n");
break;
case VIAWGET_SET_KEY:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY\n");
spin_lock_irq(&pDevice->lock);
ret = wpa_set_keys(pDevice, param, false);
spin_unlock_irq(&pDevice->lock);
break;
case VIAWGET_SET_SCAN:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN\n");
ret = wpa_set_scan(pDevice, param);
break;
@@ -891,40 +891,40 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
break;
case VIAWGET_GET_SSID:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID\n");
ret = wpa_get_ssid(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_GET_BSSID:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID\n");
ret = wpa_get_bssid(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_SET_ASSOCIATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE\n");
ret = wpa_set_associate(pDevice, param);
break;
case VIAWGET_SET_DISASSOCIATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE\n");
ret = wpa_set_disassociate(pDevice, param);
break;
case VIAWGET_SET_DROP_UNENCRYPT:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT\n");
break;
case VIAWGET_SET_DEAUTHENTICATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE\n");
break;
default:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ioctl: unknown cmd=%d\n",
param->cmd);
- return -EOPNOTSUPP;
- break;
+ ret = -EOPNOTSUPP;
+ goto out;
}
if ((ret == 0) && wpa_ioctl) {
diff --git a/drivers/staging/vt6655/wpactl.h b/drivers/staging/vt6655/wpactl.h
index b9e2ab2..f7638ba 100644
--- a/drivers/staging/vt6655/wpactl.h
+++ b/drivers/staging/vt6655/wpactl.h
@@ -38,11 +38,11 @@
//WPA related
-typedef enum { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP } wpa_alg;
-typedef enum { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP,
- CIPHER_WEP104 } wpa_cipher;
-typedef enum { KEY_MGMT_802_1X, KEY_MGMT_CCKM, KEY_MGMT_PSK, KEY_MGMT_NONE,
- KEY_MGMT_802_1X_NO_WPA, KEY_MGMT_WPA_NONE } wpa_key_mgmt;
+enum wpa_alg { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP };
+enum wpa_cipher { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP,
+ CIPHER_WEP104 };
+enum wpa_key_mgmt { KEY_MGMT_802_1X, KEY_MGMT_CCKM, KEY_MGMT_PSK, KEY_MGMT_NONE,
+ KEY_MGMT_802_1X_NO_WPA, KEY_MGMT_WPA_NONE };
#define AUTH_ALG_OPEN_SYSTEM 0x01
#define AUTH_ALG_SHARED_KEY 0x02
@@ -51,8 +51,6 @@ typedef enum { KEY_MGMT_802_1X, KEY_MGMT_CCKM, KEY_MGMT_PSK, KEY_MGMT_NONE,
#define GENERIC_INFO_ELEM 0xdd
#define RSN_INFO_ELEM 0x30
-typedef unsigned long long NDIS_802_11_KEY_RSC;
-
/*--------------------- Export Classes ----------------------------*/
/*--------------------- Export Variables --------------------------*/
diff --git a/drivers/staging/vt6655/wroute.c b/drivers/staging/vt6655/wroute.c
index 85302c5..c39d5ed 100644
--- a/drivers/staging/vt6655/wroute.c
+++ b/drivers/staging/vt6655/wroute.c
@@ -63,7 +63,8 @@ static int msglevel = MSG_LEVEL_INFO;
* Return Value: true if packet duplicate; otherwise false
*
*/
-bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uDataLen, unsigned int uNodeIndex)
+bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData,
+ unsigned int uDataLen, unsigned int uNodeIndex)
{
PSMgmtObject pMgmt = pDevice->pMgmt;
PSTxDesc pHeadTD, pLastTD;
@@ -78,7 +79,8 @@ bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uData
unsigned char *pbyBSSID;
if (AVAIL_TD(pDevice, TYPE_AC0DMA) <= 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Relay can't allocate TD1..\n");
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Relay can't allocate TD1..\n");
return false;
}
@@ -86,22 +88,24 @@ bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uData
pHeadTD->m_td1TD1.byTCR = (TCR_EDP | TCR_STP);
- memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)pbySkbData, ETH_HLEN);
+ memcpy(pDevice->sTxEthHeader.abyDstAddr, pbySkbData, ETH_HLEN);
cbFrameBodySize = uDataLen - ETH_HLEN;
- if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN) {
+ if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN)
cbFrameBodySize += 8;
- }
if (pDevice->bEncryptionEnable == true) {
bNeedEncryption = true;
// get group key
pbyBSSID = pDevice->abyBroadcastAddr;
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
+ if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID,
+ GROUP_KEY, &pTransmitKey) == false) {
pTransmitKey = NULL;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "KEY is NULL. [%d]\n", pDevice->pMgmt->eCurrMode);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_DEBUG "KEY is NULL. [%d]\n",
+ pDevice->pMgmt->eCurrMode);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "Get GTK.\n");
}
@@ -117,25 +121,24 @@ bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uData
pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
memcpy(pTransmitKey->abyKey,
&pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
- pTransmitKey->uKeyLength
-);
+ pTransmitKey->uKeyLength);
}
}
- uMACfragNum = cbGetFragCount(pDevice, pTransmitKey, cbFrameBodySize, &pDevice->sTxEthHeader);
+ uMACfragNum = cbGetFragCount(pDevice, pTransmitKey,
+ cbFrameBodySize, &pDevice->sTxEthHeader);
- if (uMACfragNum > AVAIL_TD(pDevice, TYPE_AC0DMA)) {
+ if (uMACfragNum > AVAIL_TD(pDevice, TYPE_AC0DMA))
return false;
- }
- byPktType = (unsigned char)pDevice->byPacketType;
+
+ byPktType = pDevice->byPacketType;
if (pDevice->bFixRate) {
if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
- if (pDevice->uConnectionRate >= RATE_11M) {
+ if (pDevice->uConnectionRate >= RATE_11M)
pDevice->wCurrentRate = RATE_11M;
- } else {
- pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
- }
+ else
+ pDevice->wCurrentRate = pDevice->uConnectionRate;
} else {
if ((pDevice->eCurrentPHYType == PHY_TYPE_11A) &&
(pDevice->uConnectionRate <= RATE_6M)) {
@@ -144,7 +147,7 @@ bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uData
if (pDevice->uConnectionRate >= RATE_54M)
pDevice->wCurrentRate = RATE_54M;
else
- pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
+ pDevice->wCurrentRate = pDevice->uConnectionRate;
}
}
} else {
@@ -154,12 +157,11 @@ bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uData
if (pDevice->wCurrentRate <= RATE_11M)
byPktType = PK_TYPE_11B;
- vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff, bNeedEncryption,
- cbFrameBodySize, TYPE_AC0DMA, pHeadTD,
- &pDevice->sTxEthHeader, pbySkbData, pTransmitKey, uNodeIndex,
- &uMACfragNum,
- &cbHeaderSize
-);
+ vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff,
+ bNeedEncryption, cbFrameBodySize, TYPE_AC0DMA,
+ pHeadTD, &pDevice->sTxEthHeader, pbySkbData,
+ pTransmitKey, uNodeIndex, &uMACfragNum,
+ &cbHeaderSize);
if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
// Disable PS
diff --git a/drivers/staging/vt6656/Makefile b/drivers/staging/vt6656/Makefile
index c998547..1d829b4 100644
--- a/drivers/staging/vt6656/Makefile
+++ b/drivers/staging/vt6656/Makefile
@@ -16,7 +16,6 @@ vt6656_stage-y += main_usb.o \
dpc.o \
power.o \
datarate.o \
- mib.o \
rc4.o \
tether.o \
tcrc.o \
diff --git a/drivers/staging/vt6656/aes_ccmp.c b/drivers/staging/vt6656/aes_ccmp.c
index 6c76939..61b9f7b 100644
--- a/drivers/staging/vt6656/aes_ccmp.c
+++ b/drivers/staging/vt6656/aes_ccmp.c
@@ -37,7 +37,7 @@
* SBOX Table
*/
-u8 sbox_table[256] = {
+static u8 sbox_table[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
@@ -56,7 +56,7 @@ u8 sbox_table[256] = {
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
-u8 dot2_table[256] = {
+static u8 dot2_table[256] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
@@ -75,7 +75,7 @@ u8 dot2_table[256] = {
0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5
};
-u8 dot3_table[256] = {
+static u8 dot3_table[256] = {
0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
@@ -115,7 +115,7 @@ static void xor_32(u8 *a, u8 *b, u8 *out)
(*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
}
-void AddRoundKey(u8 *key, int round)
+static void AddRoundKey(u8 *key, int round)
{
u8 sbox_key[4];
u8 rcon_table[10] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36};
@@ -133,7 +133,7 @@ void AddRoundKey(u8 *key, int round)
xor_32(&key[12], &key[8], &key[12]);
}
-void SubBytes(u8 *in, u8 *out)
+static void SubBytes(u8 *in, u8 *out)
{
int i;
@@ -141,7 +141,7 @@ void SubBytes(u8 *in, u8 *out)
out[i] = sbox_table[in[i]];
}
-void ShiftRows(u8 *in, u8 *out)
+static void ShiftRows(u8 *in, u8 *out)
{
out[0] = in[0];
out[1] = in[5];
@@ -161,7 +161,7 @@ void ShiftRows(u8 *in, u8 *out)
out[15] = in[11];
}
-void MixColumns(u8 *in, u8 *out)
+static void MixColumns(u8 *in, u8 *out)
{
out[0] = dot2_table[in[0]] ^ dot3_table[in[1]] ^ in[2] ^ in[3];
@@ -170,7 +170,7 @@ void MixColumns(u8 *in, u8 *out)
out[3] = dot3_table[in[0]] ^ in[1] ^ in[2] ^ dot2_table[in[3]];
}
-void AESv128(u8 *key, u8 *data, u8 *ciphertext)
+static void AESv128(u8 *key, u8 *data, u8 *ciphertext)
{
int i;
int round;
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 4aa5ef5..3d4610e 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -48,7 +48,7 @@
static int msglevel =MSG_LEVEL_INFO;
//static int msglevel =MSG_LEVEL_DEBUG;
-u8 abyVT3184_AGC[] = {
+static u8 abyVT3184_AGC[] = {
0x00, //0
0x00, //1
0x02, //2
@@ -115,7 +115,7 @@ u8 abyVT3184_AGC[] = {
0x3E //3F
};
-u8 abyVT3184_AL2230[] = {
+static u8 abyVT3184_AL2230[] = {
0x31,//00
0x00,
0x00,
@@ -375,7 +375,7 @@ u8 abyVT3184_AL2230[] = {
};
//{{RobertYu:20060515, new BB setting for VT3226D0
-u8 abyVT3184_VT3226D0[] = {
+static u8 abyVT3184_VT3226D0[] = {
0x31,//00
0x00,
0x00,
@@ -634,7 +634,7 @@ u8 abyVT3184_VT3226D0[] = {
0x00,
};
-const u16 awcFrameTime[MAX_RATE] =
+static const u16 awcFrameTime[MAX_RATE] =
{10, 20, 55, 110, 24, 36, 48, 72, 96, 144, 192, 216};
/*
@@ -931,191 +931,177 @@ void BBvSetAntennaMode(struct vnt_private *pDevice, u8 byAntennaMode)
*
*/
-int BBbVT3184Init(struct vnt_private *pDevice)
+int BBbVT3184Init(struct vnt_private *priv)
{
- int ntStatus;
- u16 wLength;
- u8 * pbyAddr;
- u8 * pbyAgc;
- u16 wLengthAgc;
- u8 abyArray[256];
+ int status;
+	u16 length;
+	u8 *addr;
+	u8 *agc;
+	u16 length_agc;
+ u8 array[256];
u8 data;
- ntStatus = CONTROLnsRequestIn(pDevice,
- MESSAGE_TYPE_READ,
- 0,
- MESSAGE_REQUEST_EEPROM,
- EEP_MAX_CONTEXT_SIZE,
- pDevice->abyEEPROM);
- if (ntStatus != STATUS_SUCCESS) {
- return false;
- }
+ status = CONTROLnsRequestIn(priv, MESSAGE_TYPE_READ, 0,
+ MESSAGE_REQUEST_EEPROM, EEP_MAX_CONTEXT_SIZE,
+ priv->abyEEPROM);
+ if (status != STATUS_SUCCESS)
+ return false;
+
+	/* zonetype initialization */
+ priv->byOriginalZonetype = priv->abyEEPROM[EEP_OFS_ZONETYPE];
+
+ if (priv->config_file.ZoneType >= 0) {
+ if ((priv->config_file.ZoneType == 0) &&
+ (priv->abyEEPROM[EEP_OFS_ZONETYPE] != 0x00)) {
+ priv->abyEEPROM[EEP_OFS_ZONETYPE] = 0;
+ priv->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0B;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Init Zone Type :USA\n");
+ } else if ((priv->config_file.ZoneType == 1) &&
+ (priv->abyEEPROM[EEP_OFS_ZONETYPE] != 0x01)) {
+ priv->abyEEPROM[EEP_OFS_ZONETYPE] = 0x01;
+ priv->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Init Zone Type :Japan\n");
+ } else if ((priv->config_file.ZoneType == 2) &&
+ (priv->abyEEPROM[EEP_OFS_ZONETYPE] != 0x02)) {
+ priv->abyEEPROM[EEP_OFS_ZONETYPE] = 0x02;
+ priv->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Init Zone Type :Europe\n");
+ } else {
+ if (priv->config_file.ZoneType !=
+ priv->abyEEPROM[EEP_OFS_ZONETYPE])
+				printk(KERN_INFO
+				       "zonetype in file[%02x] mismatch with EEPROM[%02x]\n",
+				       priv->config_file.ZoneType,
+				       priv->abyEEPROM[EEP_OFS_ZONETYPE]);
+ else
+				printk(KERN_INFO
+				       "Read Zonetype file success, use default zonetype setting[%02x]\n",
+				       priv->config_file.ZoneType);
+ }
+ }
-// if ((pDevice->abyEEPROM[EEP_OFS_RADIOCTL]&0x06)==0x04)
-// return false;
-
-//zonetype initial
- pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
- if(pDevice->config_file.ZoneType >= 0) { //read zonetype file ok!
- if ((pDevice->config_file.ZoneType == 0)&&
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] !=0x00)){ //for USA
- pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0;
- pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0B;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Init Zone Type :USA\n");
- }
- else if((pDevice->config_file.ZoneType == 1)&&
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE]!=0x01)){ //for Japan
- pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0x01;
- pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Init Zone Type :Japan\n");
- }
- else if((pDevice->config_file.ZoneType == 2)&&
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE]!=0x02)){ //for Europe
- pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0x02;
- pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Init Zone Type :Europe\n");
- }
-else {
- if(pDevice->config_file.ZoneType !=pDevice->abyEEPROM[EEP_OFS_ZONETYPE])
- printk("zonetype in file[%02x] mismatch with in EEPROM[%02x]\n",pDevice->config_file.ZoneType,pDevice->abyEEPROM[EEP_OFS_ZONETYPE]);
- else
- printk("Read Zonetype file success,use default zonetype setting[%02x]\n",pDevice->config_file.ZoneType);
- }
-}
+ if (!priv->bZoneRegExist)
+ priv->byZoneType = priv->abyEEPROM[EEP_OFS_ZONETYPE];
+
+ priv->byRFType = priv->abyEEPROM[EEP_OFS_RFTYPE];
+
+	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Zone Type %x\n",
+		priv->byZoneType);
+
+	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "RF Type %d\n", priv->byRFType);
+
+ if ((priv->byRFType == RF_AL2230) ||
+ (priv->byRFType == RF_AL2230S)) {
+ priv->byBBRxConf = abyVT3184_AL2230[10];
+		length = sizeof(abyVT3184_AL2230);
+		addr = abyVT3184_AL2230;
+		agc = abyVT3184_AGC;
+		length_agc = sizeof(abyVT3184_AGC);
+
+ priv->abyBBVGA[0] = 0x1C;
+ priv->abyBBVGA[1] = 0x10;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -48;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
+ } else if (priv->byRFType == RF_AIROHA7230) {
+ priv->byBBRxConf = abyVT3184_AL2230[10];
+		length = sizeof(abyVT3184_AL2230);
+		addr = abyVT3184_AL2230;
+		agc = abyVT3184_AGC;
+		length_agc = sizeof(abyVT3184_AGC);
+
+ addr[0xd7] = 0x06;
+
+ priv->abyBBVGA[0] = 0x1c;
+ priv->abyBBVGA[1] = 0x10;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -48;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
+ } else if ((priv->byRFType == RF_VT3226) ||
+ (priv->byRFType == RF_VT3226D0)) {
+ priv->byBBRxConf = abyVT3184_VT3226D0[10];
+		length = sizeof(abyVT3184_VT3226D0);
+		addr = abyVT3184_VT3226D0;
+		agc = abyVT3184_AGC;
+		length_agc = sizeof(abyVT3184_AGC);
+
+ priv->abyBBVGA[0] = 0x20;
+ priv->abyBBVGA[1] = 0x10;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -48;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
+ /* Fix VT3226 DFC system timing issue */
+ MACvRegBitsOn(priv, MAC_REG_SOFTPWRCTL2, SOFTPWRCTL_RFLEOPT);
+	} else if (priv->byRFType == RF_VT3342A0) {
+ priv->byBBRxConf = abyVT3184_VT3226D0[10];
+		length = sizeof(abyVT3184_VT3226D0);
+		addr = abyVT3184_VT3226D0;
+		agc = abyVT3184_AGC;
+		length_agc = sizeof(abyVT3184_AGC);
+
+ priv->abyBBVGA[0] = 0x20;
+ priv->abyBBVGA[1] = 0x10;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -48;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
+ /* Fix VT3226 DFC system timing issue */
+ MACvRegBitsOn(priv, MAC_REG_SOFTPWRCTL2, SOFTPWRCTL_RFLEOPT);
+ } else {
+ return true;
+ }
- if ( !pDevice->bZoneRegExist ) {
- pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
- }
- pDevice->byRFType = pDevice->abyEEPROM[EEP_OFS_RFTYPE];
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Zone Type %x\n", pDevice->byZoneType);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RF Type %d\n", pDevice->byRFType);
-
- if ((pDevice->byRFType == RF_AL2230) || (pDevice->byRFType == RF_AL2230S)) {
- pDevice->byBBRxConf = abyVT3184_AL2230[10];
- wLength = sizeof(abyVT3184_AL2230);
- pbyAddr = abyVT3184_AL2230;
- pbyAgc = abyVT3184_AGC;
- wLengthAgc = sizeof(abyVT3184_AGC);
-
- pDevice->abyBBVGA[0] = 0x1C;
- pDevice->abyBBVGA[1] = 0x10;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -48;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
- }
- else if (pDevice->byRFType == RF_AIROHA7230) {
- pDevice->byBBRxConf = abyVT3184_AL2230[10];
- wLength = sizeof(abyVT3184_AL2230);
- pbyAddr = abyVT3184_AL2230;
- pbyAgc = abyVT3184_AGC;
- wLengthAgc = sizeof(abyVT3184_AGC);
-
- // Init ANT B select,TX Config CR09 = 0x61->0x45, 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
- //pbyAddr[0x09] = 0x41;
- // Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted)
- //pbyAddr[0x0a] = 0x28;
- // Select VC1/VC2, CR215 = 0x02->0x06
- pbyAddr[0xd7] = 0x06;
-
- pDevice->abyBBVGA[0] = 0x1C;
- pDevice->abyBBVGA[1] = 0x10;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -48;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
- }
- else if ( (pDevice->byRFType == RF_VT3226) || (pDevice->byRFType == RF_VT3226D0) ) {
- pDevice->byBBRxConf = abyVT3184_VT3226D0[10]; //RobertYu:20060515
- wLength = sizeof(abyVT3184_VT3226D0); //RobertYu:20060515
- pbyAddr = abyVT3184_VT3226D0; //RobertYu:20060515
- pbyAgc = abyVT3184_AGC;
- wLengthAgc = sizeof(abyVT3184_AGC);
-
- pDevice->abyBBVGA[0] = 0x20; //RobertYu:20060104, reguest by Jack
- pDevice->abyBBVGA[1] = 0x10;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -48;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
- // Fix VT3226 DFC system timing issue
- MACvRegBitsOn(pDevice, MAC_REG_SOFTPWRCTL2, SOFTPWRCTL_RFLEOPT);
- //}}
- //{{RobertYu:20060609
- } else if ( (pDevice->byRFType == RF_VT3342A0) ) {
- pDevice->byBBRxConf = abyVT3184_VT3226D0[10];
- wLength = sizeof(abyVT3184_VT3226D0);
- pbyAddr = abyVT3184_VT3226D0;
- pbyAgc = abyVT3184_AGC;
- wLengthAgc = sizeof(abyVT3184_AGC);
-
- pDevice->abyBBVGA[0] = 0x20;
- pDevice->abyBBVGA[1] = 0x10;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -48;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
- // Fix VT3226 DFC system timing issue
- MACvRegBitsOn(pDevice, MAC_REG_SOFTPWRCTL2, SOFTPWRCTL_RFLEOPT);
- //}}
- } else {
- return true;
- }
+	memcpy(array, addr, length);
- memcpy(abyArray, pbyAddr, wLength);
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- 0,
- MESSAGE_REQUEST_BBREG,
- wLength,
- abyArray
- );
-
- memcpy(abyArray, pbyAgc, wLengthAgc);
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- 0,
- MESSAGE_REQUEST_BBAGC,
- wLengthAgc,
- abyArray
- );
-
- if ((pDevice->byRFType == RF_VT3226) || //RobertYu:20051116, 20060111 remove VT3226D0
- (pDevice->byRFType == RF_VT3342A0) //RobertYu:20060609
- ) {
- ControlvWriteByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_ITRTMSET,0x23);
- MACvRegBitsOn(pDevice,MAC_REG_PAPEDELAY,0x01);
- }
- else if (pDevice->byRFType == RF_VT3226D0)
- {
- ControlvWriteByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_ITRTMSET,0x11);
- MACvRegBitsOn(pDevice,MAC_REG_PAPEDELAY,0x01);
- }
+ CONTROLnsRequestOut(priv, MESSAGE_TYPE_WRITE, 0,
+			MESSAGE_REQUEST_BBREG, length, array);
- ControlvWriteByte(pDevice,MESSAGE_REQUEST_BBREG,0x04,0x7F);
- ControlvWriteByte(pDevice,MESSAGE_REQUEST_BBREG,0x0D,0x01);
+	memcpy(array, agc, length_agc);
+
+ CONTROLnsRequestOut(priv, MESSAGE_TYPE_WRITE, 0,
+		MESSAGE_REQUEST_BBAGC, length_agc, array);
+
+ if ((priv->byRFType == RF_VT3226) ||
+ (priv->byRFType == RF_VT3342A0)) {
+ ControlvWriteByte(priv, MESSAGE_REQUEST_MACREG,
+ MAC_REG_ITRTMSET, 0x23);
+ MACvRegBitsOn(priv, MAC_REG_PAPEDELAY, 0x01);
+ } else if (priv->byRFType == RF_VT3226D0) {
+ ControlvWriteByte(priv, MESSAGE_REQUEST_MACREG,
+ MAC_REG_ITRTMSET, 0x11);
+ MACvRegBitsOn(priv, MAC_REG_PAPEDELAY, 0x01);
+ }
+
+ ControlvWriteByte(priv, MESSAGE_REQUEST_BBREG, 0x04, 0x7f);
+ ControlvWriteByte(priv, MESSAGE_REQUEST_BBREG, 0x0d, 0x01);
+
+ RFbRFTableDownload(priv);
- RFbRFTableDownload(pDevice);
/* Fix for TX USB resets from vendors driver */
- CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, USB_REG4,
+ CONTROLnsRequestIn(priv, MESSAGE_TYPE_READ, USB_REG4,
MESSAGE_REQUEST_MEM, sizeof(data), &data);
data |= 0x2;
- CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, USB_REG4,
+ CONTROLnsRequestOut(priv, MESSAGE_TYPE_WRITE, USB_REG4,
MESSAGE_REQUEST_MEM, sizeof(data), &data);
- return true;//ntStatus;
+ return true;
}
/*
@@ -1464,7 +1450,6 @@ void BBvUpdatePreEDThreshold(struct vnt_private *pDevice, int bScanning)
if( bScanning )
{ // need Max sensitivity //RSSI -69, -70,....
- if(pDevice->byBBPreEDIndex == 0) break;
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x30); //CR206(0xCE)
@@ -1607,7 +1592,6 @@ void BBvUpdatePreEDThreshold(struct vnt_private *pDevice, int bScanning)
if( bScanning )
{ // need Max sensitivity //RSSI -69, -70, ...
- if(pDevice->byBBPreEDIndex == 0) break;
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x24); //CR206(0xCE)
@@ -1759,7 +1743,6 @@ void BBvUpdatePreEDThreshold(struct vnt_private *pDevice, int bScanning)
case RF_VT3342A0: //RobertYu:20060627, testing table
if( bScanning )
{ // need Max sensitivity //RSSI -67, -68, ...
- if(pDevice->byBBPreEDIndex == 0) break;
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x38); //CR206(0xCE)
diff --git a/drivers/staging/vt6656/bssdb.c b/drivers/staging/vt6656/bssdb.c
index dad3f8c..9c78dab 100644
--- a/drivers/staging/vt6656/bssdb.c
+++ b/drivers/staging/vt6656/bssdb.c
@@ -21,22 +21,21 @@
* Purpose: Handles the Basic Service Set & Node Database functions
*
* Functions:
- * BSSpSearchBSSList - Search known BSS list for Desire SSID or BSSID
- * BSSvClearBSSList - Clear BSS List
- * BSSbInsertToBSSList - Insert a BSS set into known BSS list
- * BSSbUpdateToBSSList - Update BSS set in known BSS list
- * BSSbIsSTAInNodeDB - Search Node DB table to find the index of matched DstAddr
- * BSSvCreateOneNode - Allocate an Node for Node DB
- * BSSvUpdateAPNode - Update AP Node content in Index 0 of KnownNodeDB
- * BSSvSecondCallBack - One second timer callback function to update Node DB info & AP link status
- * BSSvUpdateNodeTxCounter - Update Tx attemps, Tx failure counter in Node DB for auto-fall back rate control
+ *      BSSpSearchBSSList - Search known BSS list for Desired SSID or BSSID
+ * BSSvClearBSSList - Clear BSS List
+ * BSSbInsertToBSSList - Insert a BSS set into known BSS list
+ * BSSbUpdateToBSSList - Update BSS set in known BSS list
+ * BSSbIsSTAInNodeDB - Search Node DB table to find the index of matched DstAddr
+ *      BSSvCreateOneNode - Allocate a Node for Node DB
+ * BSSvUpdateAPNode - Update AP Node content in Index 0 of KnownNodeDB
+ * BSSvSecondCallBack - One second timer callback function to update Node DB info & AP link status
+ *      BSSvUpdateNodeTxCounter - Update Tx attempts, Tx failure counter in Node DB for auto-fallback rate control
*
* Revision History:
*
* Author: Lyndon Chen
*
* Date: July 17, 2002
- *
*/
#include "tmacro.h"
@@ -59,41 +58,38 @@
#include "iowpa.h"
#include "power.h"
-static int msglevel =MSG_LEVEL_INFO;
-//static int msglevel =MSG_LEVEL_DEBUG;
-
-const u16 awHWRetry0[5][5] = {
- {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
- {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
- {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
- {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
- {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
- };
-const u16 awHWRetry1[5][5] = {
- {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
- {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
- {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
- {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
- {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
- };
+static int msglevel = MSG_LEVEL_INFO;
+/* static int msglevel = MSG_LEVEL_DEBUG; */
+
+static const u16 awHWRetry0[5][5] = {
+ {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
+ {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
+ {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
+ {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
+ {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
+ };
+static const u16 awHWRetry1[5][5] = {
+ {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
+ {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
+ {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
+ {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
+ {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
+ };
static void s_vCheckSensitivity(struct vnt_private *pDevice);
static void s_vCheckPreEDThreshold(struct vnt_private *pDevice);
static void s_uCalculateLinkQual(struct vnt_private *pDevice);
-/*+
- *
+/*
* Routine Description:
- * Search known BSS list for Desire SSID or BSSID.
+ *    Search known BSS list for Desired SSID or BSSID.
*
* Return Value:
- * PTR to KnownBSS or NULL
- *
--*/
-
+ * PTR to KnownBSS or NULL
+ */
PKnownBSS BSSpSearchBSSList(struct vnt_private *pDevice,
- u8 *pbyDesireBSSID, u8 *pbyDesireSSID,
- CARD_PHY_TYPE ePhyType)
+ u8 *pbyDesireBSSID, u8 *pbyDesireSSID,
+ CARD_PHY_TYPE ePhyType)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
u8 *pbyBSSID = NULL;
@@ -104,204 +100,202 @@ PKnownBSS BSSpSearchBSSList(struct vnt_private *pDevice,
int ii = 0;
int jj = 0;
- if (pbyDesireBSSID != NULL) {
+ if (pbyDesireBSSID) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
"BSSpSearchBSSList BSSID[%pM]\n", pbyDesireBSSID);
- if ((!is_broadcast_ether_addr(pbyDesireBSSID)) &&
- (memcmp(pbyDesireBSSID, ZeroBSSID, 6)!= 0)){
- pbyBSSID = pbyDesireBSSID;
- }
- }
- if (pbyDesireSSID != NULL) {
- if (((PWLAN_IE_SSID)pbyDesireSSID)->len != 0) {
- pSSID = (PWLAN_IE_SSID) pbyDesireSSID;
- }
- }
-
- if ((pbyBSSID != NULL)&&(pDevice->bRoaming == false)) {
- // match BSSID first
- for (ii = 0; ii <MAX_BSS_NUM; ii++) {
- pCurrBSS = &(pMgmt->sBSSList[ii]);
-
- pCurrBSS->bSelected = false;
-
- if ((pCurrBSS->bActive) &&
- (pCurrBSS->bSelected == false)) {
- if (ether_addr_equal(pCurrBSS->abyBSSID, pbyBSSID)) {
- if (pSSID != NULL) {
- // compare ssid
- if ( !memcmp(pSSID->abySSID,
- ((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID,
- pSSID->len)) {
- if ((pMgmt->eConfigMode == WMAC_CONFIG_AUTO) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))
- ) {
- pCurrBSS->bSelected = true;
- return(pCurrBSS);
- }
- }
- } else {
- if ((pMgmt->eConfigMode == WMAC_CONFIG_AUTO) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))
- ) {
- pCurrBSS->bSelected = true;
- return(pCurrBSS);
- }
- }
- }
- }
- }
- } else {
- // ignore BSSID
- for (ii = 0; ii <MAX_BSS_NUM; ii++) {
- pCurrBSS = &(pMgmt->sBSSList[ii]);
-
- //2007-0721-01<Mark>by MikeLiu
- // if ((pCurrBSS->bActive) &&
- // (pCurrBSS->bSelected == false)) {
-
- pCurrBSS->bSelected = false;
- if (pCurrBSS->bActive) {
-
- if (pSSID != NULL) {
- // matched SSID
- if (memcmp(pSSID->abySSID,
- ((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID,
- pSSID->len) ||
- (pSSID->len != ((PWLAN_IE_SSID)pCurrBSS->abySSID)->len)) {
- // SSID not match skip this BSS
- continue;
- }
- }
- if (((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo)) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo))
- ){
- // Type not match skip this BSS
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSS type mismatch.... Config[%d] BSS[0x%04x]\n", pMgmt->eConfigMode, pCurrBSS->wCapInfo);
- continue;
- }
-
- if (ePhyType != PHY_TYPE_AUTO) {
- if (((ePhyType == PHY_TYPE_11A) && (PHY_TYPE_11A != pCurrBSS->eNetworkTypeInUse)) ||
- ((ePhyType != PHY_TYPE_11A) && (PHY_TYPE_11A == pCurrBSS->eNetworkTypeInUse))) {
- // PhyType not match skip this BSS
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Physical type mismatch.... ePhyType[%d] BSS[%d]\n", ePhyType, pCurrBSS->eNetworkTypeInUse);
- continue;
- }
- }
-
- pMgmt->pSameBSS[jj].uChannel = pCurrBSS->uChannel;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
- "BSSpSearchBSSList pSelect1[%pM]\n",
- pCurrBSS->abyBSSID);
- jj++;
-
- if (pSelect == NULL) {
- pSelect = pCurrBSS;
- } else {
- // compare RSSI, select the strongest signal
- if (pCurrBSS->uRSSI < pSelect->uRSSI) {
- pSelect = pCurrBSS;
- }
- }
- }
- }
-
-pDevice->bSameBSSMaxNum = jj;
-
- if (pSelect != NULL) {
- pSelect->bSelected = true;
- if (pDevice->bRoaming == false) {
- // Einsn Add @20070907
- memcpy(pbyDesireSSID,pCurrBSS->abySSID,WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1) ;
- }
-
- return(pSelect);
- }
- }
- return(NULL);
+ if (!is_broadcast_ether_addr(pbyDesireBSSID) &&
+ memcmp(pbyDesireBSSID, ZeroBSSID, 6) != 0)
+ pbyBSSID = pbyDesireBSSID;
+ }
+ if (pbyDesireSSID &&
+ ((PWLAN_IE_SSID) pbyDesireSSID)->len != 0)
+ pSSID = (PWLAN_IE_SSID) pbyDesireSSID;
+
+ if (pbyBSSID && pDevice->bRoaming == false) {
+ /* match BSSID first */
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ pCurrBSS = &(pMgmt->sBSSList[ii]);
+
+ pCurrBSS->bSelected = false;
+
+ if (pCurrBSS->bActive &&
+ pCurrBSS->bSelected == false &&
+ ether_addr_equal(pCurrBSS->abyBSSID, pbyBSSID)) {
+ if (pSSID) {
+ /* compare ssid */
+ if (!memcmp(pSSID->abySSID,
+ ((PWLAN_IE_SSID) pCurrBSS->abySSID)->abySSID,
+ pSSID->len) &&
+ (pMgmt->eConfigMode == WMAC_CONFIG_AUTO ||
+ (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA &&
+ WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
+ (pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA &&
+ WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo)))) {
+
+ pCurrBSS->bSelected = true;
+ return pCurrBSS;
+ }
+ } else if (pMgmt->eConfigMode == WMAC_CONFIG_AUTO ||
+ (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA &&
+ WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
+ (pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA &&
+ WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))) {
+ pCurrBSS->bSelected = true;
+ return pCurrBSS;
+ }
+ }
+ }
+ } else {
+ /* ignore BSSID */
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ pCurrBSS = &(pMgmt->sBSSList[ii]);
+
+ /* 2007-0721-01<Mark>by MikeLiu
+ * if ((pCurrBSS->bActive) &&
+ * (pCurrBSS->bSelected == false)) { */
+
+ pCurrBSS->bSelected = false;
+ if (pCurrBSS->bActive) {
+
+ if (pSSID &&
+ /* matched SSID */
+ (memcmp(pSSID->abySSID,
+ ((PWLAN_IE_SSID) pCurrBSS->abySSID)->abySSID,
+ pSSID->len) ||
+ pSSID->len !=
+ ((PWLAN_IE_SSID) pCurrBSS->abySSID)->len)) {
+					/* SSID does not match, skip this BSS */
+ continue;
+ }
+
+ if ((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA &&
+ WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo)) ||
+ (pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA &&
+ WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo))) {
+					/* Type does not match, skip this BSS */
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "BSS type mismatch.... Config[%d] BSS[0x%04x]\n",
+ pMgmt->eConfigMode,
+ pCurrBSS->wCapInfo);
+ continue;
+ }
+
+ if (ePhyType != PHY_TYPE_AUTO &&
+ ((ePhyType == PHY_TYPE_11A &&
+ PHY_TYPE_11A != pCurrBSS->eNetworkTypeInUse) ||
+ (ePhyType != PHY_TYPE_11A &&
+ PHY_TYPE_11A == pCurrBSS->eNetworkTypeInUse))) {
+					/* PhyType does not match, skip this BSS */
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Physical type mismatch.... ePhyType[%d] BSS[%d]\n",
+ ePhyType,
+ pCurrBSS->eNetworkTypeInUse);
+ continue;
+ }
+
+ pMgmt->pSameBSS[jj].uChannel = pCurrBSS->uChannel;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "BSSpSearchBSSList pSelect1[%pM]\n",
+ pCurrBSS->abyBSSID);
+ jj++;
+
+ if (!pSelect)
+ pSelect = pCurrBSS;
+ /* compare RSSI, select the strongest signal */
+ else if (pCurrBSS->uRSSI < pSelect->uRSSI)
+ pSelect = pCurrBSS;
+ }
+ }
+
+ pDevice->bSameBSSMaxNum = jj;
+
+ if (pSelect) {
+ pSelect->bSelected = true;
+ if (pDevice->bRoaming == false) {
+ /* Einsn Add @20070907 */
+ memcpy(pbyDesireSSID,
+ pCurrBSS->abySSID,
+ WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ }
+
+ return pSelect;
+ }
+ }
+ return NULL;
}
-/*+
- *
+/*
* Routine Description:
- * Clear BSS List
+ * Clear BSS List
*
* Return Value:
- * None.
- *
--*/
-
+ * None.
+ */
void BSSvClearBSSList(struct vnt_private *pDevice, int bKeepCurrBSSID)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
int ii;
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (bKeepCurrBSSID) {
- if (pMgmt->sBSSList[ii].bActive &&
- ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
- pMgmt->abyCurrBSSID)) {
- //mike mark: there are two BSSID's in list. If that AP is in hidden ssid mode, one SSID is null,
- // but other's might not be obvious, so if it associate's with your STA,
- // you must keep the two of them!!
- // bKeepCurrBSSID = false;
- continue;
- }
- }
-
- pMgmt->sBSSList[ii].bActive = false;
- memset(&pMgmt->sBSSList[ii], 0, sizeof(KnownBSS));
- }
- BSSvClearAnyBSSJoinRecord(pDevice);
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ if (bKeepCurrBSSID &&
+ pMgmt->sBSSList[ii].bActive &&
+ ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
+ pMgmt->abyCurrBSSID)) {
+
+			/* mike mark:
+			 * there are two BSSIDs in the list. If that AP is
+			 * in hidden ssid mode, one SSID is null, but the
+			 * other might not be obvious, so if it
+			 * associates with your STA, you must keep the
+			 * two of them!! bKeepCurrBSSID = false;
+			 */
+
+ continue;
+ }
+
+ pMgmt->sBSSList[ii].bActive = false;
+ memset(&pMgmt->sBSSList[ii], 0, sizeof(KnownBSS));
+ }
+ BSSvClearAnyBSSJoinRecord(pDevice);
}
-/*+
- *
+/*
* Routine Description:
- * search BSS list by BSSID & SSID if matched
+ * search BSS list by BSSID & SSID if matched
*
* Return Value:
- * true if found.
- *
--*/
+ * true if found.
+ */
PKnownBSS BSSpAddrIsInBSSList(struct vnt_private *pDevice,
- u8 *abyBSSID, PWLAN_IE_SSID pSSID)
+ u8 *abyBSSID,
+ PWLAN_IE_SSID pSSID)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
PKnownBSS pBSSList = NULL;
int ii;
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSSList = &(pMgmt->sBSSList[ii]);
- if (pBSSList->bActive) {
- if (ether_addr_equal(pBSSList->abyBSSID, abyBSSID)) {
- if (pSSID->len == ((PWLAN_IE_SSID)pBSSList->abySSID)->len){
- if (memcmp(pSSID->abySSID,
- ((PWLAN_IE_SSID)pBSSList->abySSID)->abySSID,
- pSSID->len) == 0)
- return pBSSList;
- }
- }
- }
- }
-
- return NULL;
-};
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ pBSSList = &(pMgmt->sBSSList[ii]);
+ if (pBSSList->bActive &&
+ ether_addr_equal(pBSSList->abyBSSID, abyBSSID) &&
+ pSSID->len == ((PWLAN_IE_SSID) pBSSList->abySSID)->len &&
+ memcmp(pSSID->abySSID,
+ ((PWLAN_IE_SSID) pBSSList->abySSID)->abySSID,
+ pSSID->len) == 0)
+ return pBSSList;
+ }
-/*+
- *
+ return NULL;
+}
+
+/*
* Routine Description:
- * Insert a BSS set into known BSS list
+ * Insert a BSS set into known BSS list
*
* Return Value:
- * true if success.
- *
--*/
-
+ * true if success.
+ */
int BSSbInsertToBSSList(struct vnt_private *pDevice,
u8 *abyBSSIDAddr,
u64 qwTimestamp,
@@ -322,162 +316,173 @@ int BSSbInsertToBSSList(struct vnt_private *pDevice,
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct vnt_rx_mgmt *pRxPacket =
- (struct vnt_rx_mgmt *)pRxPacketContext;
+ (struct vnt_rx_mgmt *) pRxPacketContext;
PKnownBSS pBSSList = NULL;
unsigned int ii;
bool bParsingQuiet = false;
- pBSSList = (PKnownBSS)&(pMgmt->sBSSList[0]);
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSSList = (PKnownBSS)&(pMgmt->sBSSList[ii]);
- if (!pBSSList->bActive)
- break;
- }
-
- if (ii == MAX_BSS_NUM){
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get free KnowBSS node failed.\n");
- return false;
- }
- // save the BSS info
- pBSSList->bActive = true;
- memcpy( pBSSList->abyBSSID, abyBSSIDAddr, WLAN_BSSID_LEN);
+ pBSSList = (PKnownBSS) &(pMgmt->sBSSList[0]);
+
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ pBSSList = (PKnownBSS) &(pMgmt->sBSSList[ii]);
+ if (!pBSSList->bActive)
+ break;
+ }
+
+ if (ii == MAX_BSS_NUM) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+			KERN_INFO "Get free KnownBSS node failed.\n");
+ return false;
+ }
+ /* save the BSS info */
+ pBSSList->bActive = true;
+ memcpy(pBSSList->abyBSSID, abyBSSIDAddr, WLAN_BSSID_LEN);
pBSSList->qwBSSTimestamp = cpu_to_le64(qwTimestamp);
- pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
- pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
- pBSSList->uClearCount = 0;
-
- if (pSSID->len > WLAN_SSID_MAXLEN)
- pSSID->len = WLAN_SSID_MAXLEN;
- memcpy( pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
-
- pBSSList->uChannel = byCurrChannel;
-
- if (pSuppRates->len > WLAN_RATES_MAXLEN)
- pSuppRates->len = WLAN_RATES_MAXLEN;
- memcpy( pBSSList->abySuppRates, pSuppRates, pSuppRates->len + WLAN_IEHDR_LEN);
-
- if (pExtSuppRates != NULL) {
- if (pExtSuppRates->len > WLAN_RATES_MAXLEN)
- pExtSuppRates->len = WLAN_RATES_MAXLEN;
- memcpy(pBSSList->abyExtSuppRates, pExtSuppRates, pExtSuppRates->len + WLAN_IEHDR_LEN);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSbInsertToBSSList: pExtSuppRates->len = %d\n", pExtSuppRates->len);
-
- } else {
- memset(pBSSList->abyExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- }
- pBSSList->sERP.byERP = psERP->byERP;
- pBSSList->sERP.bERPExist = psERP->bERPExist;
-
- // Check if BSS is 802.11a/b/g
- if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
- } else {
- if (pBSSList->sERP.bERPExist == true) {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
- } else {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
- }
- }
-
- pBSSList->byRxRate = pRxPacket->byRxRate;
- pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF;
- pBSSList->uRSSI = pRxPacket->uRSSI;
- pBSSList->bySQ = pRxPacket->bySQ;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- // assoc with BSS
- if (pBSSList == pMgmt->pCurrBSS) {
- bParsingQuiet = true;
- }
- }
-
- WPA_ClearRSN(pBSSList);
-
- if (pRSNWPA != NULL) {
- unsigned int uLen = pRSNWPA->len + 2;
-
- if (uLen <= (uIELength -
- (unsigned int) (u32) ((u8 *) pRSNWPA - pbyIEs))) {
- pBSSList->wWPALen = uLen;
- memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
- WPA_ParseRSN(pBSSList, pRSNWPA);
+ pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
+ pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
+ pBSSList->uClearCount = 0;
+
+ if (pSSID->len > WLAN_SSID_MAXLEN)
+ pSSID->len = WLAN_SSID_MAXLEN;
+ memcpy(pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
+
+ pBSSList->uChannel = byCurrChannel;
+
+ if (pSuppRates->len > WLAN_RATES_MAXLEN)
+ pSuppRates->len = WLAN_RATES_MAXLEN;
+ memcpy(pBSSList->abySuppRates, pSuppRates,
+ pSuppRates->len + WLAN_IEHDR_LEN);
+
+ if (pExtSuppRates) {
+ if (pExtSuppRates->len > WLAN_RATES_MAXLEN)
+ pExtSuppRates->len = WLAN_RATES_MAXLEN;
+ memcpy(pBSSList->abyExtSuppRates, pExtSuppRates,
+ pExtSuppRates->len + WLAN_IEHDR_LEN);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "BSSbInsertToBSSList: pExtSuppRates->len = %d\n",
+ pExtSuppRates->len);
+
+ } else {
+ memset(pBSSList->abyExtSuppRates, 0,
+ WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
}
- }
+ pBSSList->sERP.byERP = psERP->byERP;
+ pBSSList->sERP.bERPExist = psERP->bERPExist;
+
+ /* Check if BSS is 802.11a/b/g */
+ if (pBSSList->uChannel > CB_MAX_CHANNEL_24G)
+ pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
+ else if (pBSSList->sERP.bERPExist == true)
+ pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
+ else
+ pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
+
+ pBSSList->byRxRate = pRxPacket->byRxRate;
+ pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF;
+ pBSSList->uRSSI = pRxPacket->uRSSI;
+ pBSSList->bySQ = pRxPacket->bySQ;
+
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA &&
+ pMgmt->eCurrState == WMAC_STATE_ASSOC &&
+ /* assoc with BSS */
+ pBSSList == pMgmt->pCurrBSS)
+ bParsingQuiet = true;
+
+ WPA_ClearRSN(pBSSList);
+
+ if (pRSNWPA) {
+ unsigned int uLen = pRSNWPA->len + 2;
+
+ if (uLen <= (uIELength -
+ (unsigned int) (u32) ((u8 *) pRSNWPA - pbyIEs))) {
+ pBSSList->wWPALen = uLen;
+ memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
+ WPA_ParseRSN(pBSSList, pRSNWPA);
+ }
+ }
+
+ WPA2_ClearRSN(pBSSList);
+
+ if (pRSN) {
+ unsigned int uLen = pRSN->len + 2;
+
+ if (uLen <= (uIELength -
+ (unsigned int) (u32) ((u8 *) pRSN - pbyIEs))) {
+ pBSSList->wRSNLen = uLen;
+ memcpy(pBSSList->byRSNIE, pRSN, uLen);
+ WPA2vParseRSN(pBSSList, pRSN);
+ }
+ }
+
+ if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2 ||
+ pBSSList->bWPA2Valid == true) {
+
+ PSKeyItem pTransmitKey = NULL;
+ bool bIs802_1x = false;
+
+ for (ii = 0; ii < pBSSList->wAKMSSAuthCount; ii++) {
+ if (pBSSList->abyAKMSSAuthType[ii] ==
+ WLAN_11i_AKMSS_802_1X) {
+ bIs802_1x = true;
+ break;
+ }
+ }
+ if (bIs802_1x == true &&
+ pSSID->len == ((PWLAN_IE_SSID) pMgmt->abyDesireSSID)->len &&
+ !memcmp(pSSID->abySSID,
+ ((PWLAN_IE_SSID) pMgmt->abyDesireSSID)->abySSID,
+ pSSID->len)) {
+
+ bAdd_PMKID_Candidate((void *) pDevice,
+ pBSSList->abyBSSID,
+ &pBSSList->sRSNCapObj);
+
+ if (pDevice->bLinkPass == true &&
+ pMgmt->eCurrState == WMAC_STATE_ASSOC &&
+ (KeybGetTransmitKey(&(pDevice->sKey),
+ pDevice->abyBSSID,
+ PAIRWISE_KEY,
+ &pTransmitKey) == true ||
+ KeybGetTransmitKey(&(pDevice->sKey),
+ pDevice->abyBSSID,
+ GROUP_KEY,
+ &pTransmitKey) == true)) {
+ pDevice->gsPMKIDCandidate.StatusType =
+ Ndis802_11StatusType_PMKID_CandidateList;
+ pDevice->gsPMKIDCandidate.Version = 1;
- WPA2_ClearRSN(pBSSList);
- if (pRSN != NULL) {
- unsigned int uLen = pRSN->len + 2;
+ }
+ }
+ }
- if (uLen <= (uIELength -
- (unsigned int) (u32) ((u8 *) pRSN - pbyIEs))) {
- pBSSList->wRSNLen = uLen;
- memcpy(pBSSList->byRSNIE, pRSN, uLen);
- WPA2vParseRSN(pBSSList, pRSN);
+ if (pDevice->bUpdateBBVGA) {
+ /* Monitor if RSSI is too strong. */
+ pBSSList->byRSSIStatCnt = 0;
+ RFvRSSITodBm(pDevice, (u8) (pRxPacket->uRSSI),
+ &pBSSList->ldBmMAX);
+ pBSSList->ldBmAverage[0] = pBSSList->ldBmMAX;
+ pBSSList->ldBmAverRange = pBSSList->ldBmMAX;
+ for (ii = 1; ii < RSSI_STAT_COUNT; ii++)
+ pBSSList->ldBmAverage[ii] = 0;
}
- }
-
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pBSSList->bWPA2Valid == true)) {
-
- PSKeyItem pTransmitKey = NULL;
- bool bIs802_1x = false;
-
- for (ii = 0; ii < pBSSList->wAKMSSAuthCount; ii ++) {
- if (pBSSList->abyAKMSSAuthType[ii] == WLAN_11i_AKMSS_802_1X) {
- bIs802_1x = true;
- break;
- }
- }
- if ((bIs802_1x == true) && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
- ( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID, pSSID->len))) {
-
- bAdd_PMKID_Candidate((void *) pDevice,
- pBSSList->abyBSSID,
- &pBSSList->sRSNCapObj);
-
- if ((pDevice->bLinkPass == true) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- if ((KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) == true) ||
- (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, GROUP_KEY, &pTransmitKey) == true)) {
- pDevice->gsPMKIDCandidate.StatusType = Ndis802_11StatusType_PMKID_CandidateList;
- pDevice->gsPMKIDCandidate.Version = 1;
-
- }
-
- }
- }
- }
-
- if (pDevice->bUpdateBBVGA) {
- // Monitor if RSSI is too strong.
- pBSSList->byRSSIStatCnt = 0;
- RFvRSSITodBm(pDevice, (u8)(pRxPacket->uRSSI), &pBSSList->ldBmMAX);
- pBSSList->ldBmAverage[0] = pBSSList->ldBmMAX;
- pBSSList->ldBmAverRange = pBSSList->ldBmMAX;
- for (ii = 1; ii < RSSI_STAT_COUNT; ii++)
- pBSSList->ldBmAverage[ii] = 0;
- }
-
- pBSSList->uIELength = uIELength;
- if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
- pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
- memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
-
- return true;
+
+ pBSSList->uIELength = uIELength;
+ if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
+ pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
+ memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
+
+ return true;
}
-/*+
- *
+/*
* Routine Description:
- * Update BSS set in known BSS list
+ * Update BSS set in known BSS list
*
* Return Value:
- * true if success.
- *
--*/
-// TODO: input structure modify
-
+ * true if success.
+ */
+/* TODO: input structure modify */
int BSSbUpdateToBSSList(struct vnt_private *pDevice,
u64 qwTimestamp,
u16 wBeaconInterval,
@@ -499,321 +504,306 @@ int BSSbUpdateToBSSList(struct vnt_private *pDevice,
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct vnt_rx_mgmt *pRxPacket =
- (struct vnt_rx_mgmt *)pRxPacketContext;
+ (struct vnt_rx_mgmt *) pRxPacketContext;
int ii, jj;
signed long ldBm, ldBmSum;
bool bParsingQuiet = false;
- if (pBSSList == NULL)
- return false;
+ if (!pBSSList)
+ return false;
pBSSList->qwBSSTimestamp = cpu_to_le64(qwTimestamp);
- pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
- pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
- pBSSList->uClearCount = 0;
- pBSSList->uChannel = byCurrChannel;
-
- if (pSSID->len > WLAN_SSID_MAXLEN)
- pSSID->len = WLAN_SSID_MAXLEN;
-
- if ((pSSID->len != 0) && (pSSID->abySSID[0] != 0))
- memcpy(pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
- memcpy(pBSSList->abySuppRates, pSuppRates,pSuppRates->len + WLAN_IEHDR_LEN);
-
- if (pExtSuppRates != NULL) {
- memcpy(pBSSList->abyExtSuppRates, pExtSuppRates,pExtSuppRates->len + WLAN_IEHDR_LEN);
- } else {
- memset(pBSSList->abyExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- }
- pBSSList->sERP.byERP = psERP->byERP;
- pBSSList->sERP.bERPExist = psERP->bERPExist;
-
- // Check if BSS is 802.11a/b/g
- if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
- } else {
- if (pBSSList->sERP.bERPExist == true) {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
- } else {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
- }
- }
-
- pBSSList->byRxRate = pRxPacket->byRxRate;
- pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF;
- if(bChannelHit)
- pBSSList->uRSSI = pRxPacket->uRSSI;
- pBSSList->bySQ = pRxPacket->bySQ;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- // assoc with BSS
- if (pBSSList == pMgmt->pCurrBSS) {
- bParsingQuiet = true;
- }
- }
-
- WPA_ClearRSN(pBSSList); //mike update
-
- if (pRSNWPA != NULL) {
- unsigned int uLen = pRSNWPA->len + 2;
- if (uLen <= (uIELength -
- (unsigned int) (u32) ((u8 *) pRSNWPA - pbyIEs))) {
- pBSSList->wWPALen = uLen;
- memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
- WPA_ParseRSN(pBSSList, pRSNWPA);
+ pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
+ pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
+ pBSSList->uClearCount = 0;
+ pBSSList->uChannel = byCurrChannel;
+
+ if (pSSID->len > WLAN_SSID_MAXLEN)
+ pSSID->len = WLAN_SSID_MAXLEN;
+
+ if (pSSID->len != 0 && pSSID->abySSID[0] != 0)
+ memcpy(pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
+ memcpy(pBSSList->abySuppRates, pSuppRates,
+ pSuppRates->len + WLAN_IEHDR_LEN);
+
+ if (pExtSuppRates)
+ memcpy(pBSSList->abyExtSuppRates, pExtSuppRates,
+ pExtSuppRates->len + WLAN_IEHDR_LEN);
+ else
+ memset(pBSSList->abyExtSuppRates, 0,
+ WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
+ pBSSList->sERP.byERP = psERP->byERP;
+ pBSSList->sERP.bERPExist = psERP->bERPExist;
+
+ /* Check if BSS is 802.11a/b/g */
+ if (pBSSList->uChannel > CB_MAX_CHANNEL_24G)
+ pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
+ else if (pBSSList->sERP.bERPExist == true)
+ pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
+ else
+ pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
+
+ pBSSList->byRxRate = pRxPacket->byRxRate;
+ pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF;
+ if (bChannelHit)
+ pBSSList->uRSSI = pRxPacket->uRSSI;
+ pBSSList->bySQ = pRxPacket->bySQ;
+
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA &&
+ pMgmt->eCurrState == WMAC_STATE_ASSOC &&
+ /* assoc with BSS */
+ pBSSList == pMgmt->pCurrBSS)
+ bParsingQuiet = true;
+
+ WPA_ClearRSN(pBSSList); /* mike update */
+
+ if (pRSNWPA) {
+ unsigned int uLen = pRSNWPA->len + 2;
+ if (uLen <= (uIELength -
+ (unsigned int) (u32) ((u8 *) pRSNWPA - pbyIEs))) {
+ pBSSList->wWPALen = uLen;
+ memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
+ WPA_ParseRSN(pBSSList, pRSNWPA);
+ }
}
- }
- WPA2_ClearRSN(pBSSList); //mike update
+ WPA2_ClearRSN(pBSSList); /* mike update */
- if (pRSN != NULL) {
- unsigned int uLen = pRSN->len + 2;
- if (uLen <= (uIELength -
- (unsigned int) (u32) ((u8 *) pRSN - pbyIEs))) {
- pBSSList->wRSNLen = uLen;
- memcpy(pBSSList->byRSNIE, pRSN, uLen);
- WPA2vParseRSN(pBSSList, pRSN);
+ if (pRSN) {
+ unsigned int uLen = pRSN->len + 2;
+ if (uLen <= (uIELength -
+ (unsigned int) (u32) ((u8 *) pRSN - pbyIEs))) {
+ pBSSList->wRSNLen = uLen;
+ memcpy(pBSSList->byRSNIE, pRSN, uLen);
+ WPA2vParseRSN(pBSSList, pRSN);
+ }
}
- }
-
- if (pRxPacket->uRSSI != 0) {
- RFvRSSITodBm(pDevice, (u8)(pRxPacket->uRSSI), &ldBm);
- // Monitor if RSSI is too strong.
- pBSSList->byRSSIStatCnt++;
- pBSSList->byRSSIStatCnt %= RSSI_STAT_COUNT;
- pBSSList->ldBmAverage[pBSSList->byRSSIStatCnt] = ldBm;
- ldBmSum = 0;
- for (ii = 0, jj = 0; ii < RSSI_STAT_COUNT; ii++) {
- if (pBSSList->ldBmAverage[ii] != 0) {
- pBSSList->ldBmMAX =
- max(pBSSList->ldBmAverage[ii], ldBm);
- ldBmSum +=
- pBSSList->ldBmAverage[ii];
- jj++;
+
+ if (pRxPacket->uRSSI != 0) {
+ RFvRSSITodBm(pDevice, (u8) (pRxPacket->uRSSI), &ldBm);
+ /* Monitor if RSSI is too strong. */
+ pBSSList->byRSSIStatCnt++;
+ pBSSList->byRSSIStatCnt %= RSSI_STAT_COUNT;
+ pBSSList->ldBmAverage[pBSSList->byRSSIStatCnt] = ldBm;
+ ldBmSum = 0;
+ for (ii = 0, jj = 0; ii < RSSI_STAT_COUNT; ii++) {
+ if (pBSSList->ldBmAverage[ii] != 0) {
+ pBSSList->ldBmMAX =
+ max(pBSSList->ldBmAverage[ii], ldBm);
+ ldBmSum +=
+ pBSSList->ldBmAverage[ii];
+ jj++;
+ }
}
- }
- pBSSList->ldBmAverRange = ldBmSum /jj;
- }
+ pBSSList->ldBmAverRange = ldBmSum / jj;
+ }
- pBSSList->uIELength = uIELength;
- if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
- pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
- memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
+ pBSSList->uIELength = uIELength;
+ if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
+ pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
+ memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
- return true;
+ return true;
}
-/*+
- *
+/*
* Routine Description:
- * Search Node DB table to find the index of matched DstAddr
+ * Search Node DB table to find the index of matched DstAddr
*
* Return Value:
- * None
- *
--*/
-
+ * None
+ */
int BSSbIsSTAInNodeDB(struct vnt_private *pDevice,
- u8 *abyDstAddr, u32 *puNodeIndex)
+ u8 *abyDstAddr,
+ u32 *puNodeIndex)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
unsigned int ii;
- // Index = 0 reserved for AP Node
- for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive) {
- if (ether_addr_equal(abyDstAddr,
+ /* Index = 0 reserved for AP Node */
+ for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
+ if (pMgmt->sNodeDBTable[ii].bActive &&
+ ether_addr_equal(abyDstAddr,
pMgmt->sNodeDBTable[ii].abyMACAddr)) {
- *puNodeIndex = ii;
- return true;
- }
- }
- }
+ *puNodeIndex = ii;
+ return true;
+ }
+ }
- return false;
+ return false;
};
-/*+
- *
+/*
* Routine Description:
- * Find an empty node and allocate it; if no empty node
- * is found, then use the most inactive one.
+ * Find an empty node and allocate it; if no empty node
+ * is found, then use the most inactive one.
*
* Return Value:
- * None
- *
--*/
+ * None
+ */
void BSSvCreateOneNode(struct vnt_private *pDevice, u32 *puNodeIndex)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
- int ii;
+ int ii;
u32 BigestCount = 0;
u32 SelectIndex;
- struct sk_buff *skb;
-
- // Index = 0 reserved for AP Node (In STA mode)
- // Index = 0 reserved for Broadcast/MultiCast (In AP mode)
- SelectIndex = 1;
- for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive) {
- if (pMgmt->sNodeDBTable[ii].uInActiveCount > BigestCount) {
- BigestCount = pMgmt->sNodeDBTable[ii].uInActiveCount;
- SelectIndex = ii;
- }
- }
- else {
- break;
- }
- }
-
- // if not found replace uInActiveCount with the largest one.
- if ( ii == (MAX_NODE_NUM + 1)) {
- *puNodeIndex = SelectIndex;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Replace inactive node = %d\n", SelectIndex);
- // clear ps buffer
- if (pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue.next != NULL) {
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue)) != NULL)
- dev_kfree_skb(skb);
- }
- }
- else {
- *puNodeIndex = ii;
- }
-
- memset(&pMgmt->sNodeDBTable[*puNodeIndex], 0, sizeof(KnownNodeDB));
- pMgmt->sNodeDBTable[*puNodeIndex].bActive = true;
- pMgmt->sNodeDBTable[*puNodeIndex].uRatePollTimeout = FALLBACK_POLL_SECOND;
- // for AP mode PS queue
- skb_queue_head_init(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue);
- pMgmt->sNodeDBTable[*puNodeIndex].byAuthSequence = 0;
- pMgmt->sNodeDBTable[*puNodeIndex].wEnQueueCnt = 0;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Create node index = %d\n", ii);
-};
+ struct sk_buff *skb;
+
+	/* Index = 0 reserved for AP Node (In STA mode);
+	 * Index = 0 reserved for Broadcast/MultiCast (In AP mode) */
+ SelectIndex = 1;
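+	/* Look for the first unused slot, remembering the active node that
+	 * has been idle the longest as a fallback. */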
+ for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
+ if (pMgmt->sNodeDBTable[ii].bActive) {
+ if (pMgmt->sNodeDBTable[ii].uInActiveCount > BigestCount) {
+ BigestCount =
+ pMgmt->sNodeDBTable[ii].uInActiveCount;
+ SelectIndex = ii;
+ }
+ } else {
+ break;
+ }
+ }
-/*+
- *
+	/* if no free node was found, replace the one with the largest uInActiveCount */
+ if (ii == (MAX_NODE_NUM + 1)) {
+ *puNodeIndex = SelectIndex;
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Replace inactive node = %d\n", SelectIndex);
+ /* clear ps buffer */
+ if (pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue.next) {
+ while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue)))
+ dev_kfree_skb(skb);
+ }
+ } else {
+ *puNodeIndex = ii;
+ }
+
+ memset(&pMgmt->sNodeDBTable[*puNodeIndex], 0, sizeof(KnownNodeDB));
+ pMgmt->sNodeDBTable[*puNodeIndex].bActive = true;
+ pMgmt->sNodeDBTable[*puNodeIndex].uRatePollTimeout = FALLBACK_POLL_SECOND;
+ /* for AP mode PS queue */
+ skb_queue_head_init(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue);
+ pMgmt->sNodeDBTable[*puNodeIndex].byAuthSequence = 0;
+ pMgmt->sNodeDBTable[*puNodeIndex].wEnQueueCnt = 0;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Create node index = %d\n", ii);
+}
+
+/*
* Routine Description:
- * Remove Node by NodeIndex
+ * Remove Node by NodeIndex
*
*
* Return Value:
- * None
- *
--*/
-
+ * None
+ */
void BSSvRemoveOneNode(struct vnt_private *pDevice, u32 uNodeIndex)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
u8 byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue)) != NULL)
- dev_kfree_skb(skb);
- // clear context
- memset(&pMgmt->sNodeDBTable[uNodeIndex], 0, sizeof(KnownNodeDB));
- // clear tx bit map
- pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[uNodeIndex].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[uNodeIndex].wAID & 7];
-};
-/*+
- *
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue)))
+ dev_kfree_skb(skb);
+ /* clear context */
+ memset(&pMgmt->sNodeDBTable[uNodeIndex], 0, sizeof(KnownNodeDB));
+ /* clear tx bit map */
+ pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[uNodeIndex].wAID >> 3] &=
+ ~byMask[pMgmt->sNodeDBTable[uNodeIndex].wAID & 7];
+}
+
+/*
* Routine Description:
- * Update AP Node content in Index 0 of KnownNodeDB
+ * Update AP Node content in Index 0 of KnownNodeDB
*
*
* Return Value:
- * None
- *
--*/
-
-void BSSvUpdateAPNode(struct vnt_private *pDevice, u16 *pwCapInfo,
- PWLAN_IE_SUPP_RATES pSuppRates, PWLAN_IE_SUPP_RATES pExtSuppRates)
+ * None
+ */
+void BSSvUpdateAPNode(struct vnt_private *pDevice,
+ u16 *pwCapInfo,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
u32 uRateLen = WLAN_RATES_MAXLEN;
- memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
-
- pMgmt->sNodeDBTable[0].bActive = true;
- if (pDevice->byBBType == BB_TYPE_11B) {
- uRateLen = WLAN_RATES_MAXLEN_11B;
- }
- pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- uRateLen);
- pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pExtSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- uRateLen);
- RATEvParseMaxRate((void *) pDevice,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- true,
- &(pMgmt->sNodeDBTable[0].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[0].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[0].wSuppRate),
- &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate)
- );
- memcpy(pMgmt->sNodeDBTable[0].abyMACAddr, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
- pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxSuppRate;
- pMgmt->sNodeDBTable[0].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(*pwCapInfo);
- pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
- // Auto rate fallback function initiation.
- // RATEbInit(pDevice);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pMgmt->sNodeDBTable[0].wTxDataRate = %d \n", pMgmt->sNodeDBTable[0].wTxDataRate);
+ memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
+
+ pMgmt->sNodeDBTable[0].bActive = true;
+ if (pDevice->byBBType == BB_TYPE_11B)
+ uRateLen = WLAN_RATES_MAXLEN_11B;
+ pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES) pSuppRates,
+ (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates,
+ uRateLen);
+ pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES) pExtSuppRates,
+ (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates,
+ uRateLen);
+ RATEvParseMaxRate((void *) pDevice,
+ (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates,
+ (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates,
+ true,
+ &(pMgmt->sNodeDBTable[0].wMaxBasicRate),
+ &(pMgmt->sNodeDBTable[0].wMaxSuppRate),
+ &(pMgmt->sNodeDBTable[0].wSuppRate),
+ &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
+ &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate));
+ memcpy(pMgmt->sNodeDBTable[0].abyMACAddr, pMgmt->abyCurrBSSID,
+ WLAN_ADDR_LEN);
+ pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxSuppRate;
+ pMgmt->sNodeDBTable[0].bShortPreamble =
+ WLAN_GET_CAP_INFO_SHORTPREAMBLE(*pwCapInfo);
+ pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
+ /* Auto rate fallback function initiation.
+ * RATEbInit(pDevice); */
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO"pMgmt->sNodeDBTable[0].wTxDataRate = %d\n",
+ pMgmt->sNodeDBTable[0].wTxDataRate);
-};
+}
-/*+
- *
+/*
* Routine Description:
- * Add Multicast Node content in Index 0 of KnownNodeDB
+ * Add Multicast Node content in Index 0 of KnownNodeDB
*
*
* Return Value:
- * None
- *
--*/
-
+ * None
+ */
void BSSvAddMulticastNode(struct vnt_private *pDevice)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
- if (!pDevice->bEnableHostWEP)
- memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
- memset(pMgmt->sNodeDBTable[0].abyMACAddr, 0xff, WLAN_ADDR_LEN);
- pMgmt->sNodeDBTable[0].bActive = true;
- pMgmt->sNodeDBTable[0].bPSEnable = false;
- skb_queue_head_init(&pMgmt->sNodeDBTable[0].sTxPSQueue);
- RATEvParseMaxRate((void *) pDevice,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- true,
- &(pMgmt->sNodeDBTable[0].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[0].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[0].wSuppRate),
- &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate)
- );
- pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxBasicRate;
- pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
+ if (!pDevice->bEnableHostWEP)
+ memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
+ memset(pMgmt->sNodeDBTable[0].abyMACAddr, 0xff, WLAN_ADDR_LEN);
+ pMgmt->sNodeDBTable[0].bActive = true;
+ pMgmt->sNodeDBTable[0].bPSEnable = false;
+ skb_queue_head_init(&pMgmt->sNodeDBTable[0].sTxPSQueue);
+ RATEvParseMaxRate((void *) pDevice,
+ (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates,
+ (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates,
+ true,
+ &(pMgmt->sNodeDBTable[0].wMaxBasicRate),
+ &(pMgmt->sNodeDBTable[0].wMaxSuppRate),
+ &(pMgmt->sNodeDBTable[0].wSuppRate),
+ &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
+ &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate));
+ pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxBasicRate;
+ pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
-};
+}
-/*+
- *
+/*
* Routine Description:
*
*
- * Second call back function to update Node DB info & AP link status
+ * One-second callback function to update Node DB info & AP link status
*
*
* Return Value:
- * none.
- *
--*/
-
+ * none.
+ */
void BSSvSecondCallBack(struct work_struct *work)
{
struct vnt_private *pDevice = container_of(work,
@@ -828,342 +818,365 @@ void BSSvSecondCallBack(struct work_struct *work)
if (pDevice->Flags & fMP_DISCONNECTED)
return;
- spin_lock_irq(&pDevice->lock);
-
- pDevice->uAssocCount = 0;
-
- //Power Saving Mode Tx Burst
- if ( pDevice->bEnablePSMode == true ) {
- pDevice->ulPSModeWaitTx++;
- if ( pDevice->ulPSModeWaitTx >= 2 ) {
- pDevice->ulPSModeWaitTx = 0;
- pDevice->bPSModeTxBurst = false;
- }
- }
-
- pDevice->byERPFlag &=
- ~(WLAN_SET_ERP_BARKER_MODE(1) | WLAN_SET_ERP_NONERP_PRESENT(1));
-
- if (pDevice->wUseProtectCntDown > 0) {
- pDevice->wUseProtectCntDown --;
- }
- else {
- // disable protect mode
- pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
- }
-
-if(pDevice->byReAssocCount > 0) {
- pDevice->byReAssocCount++;
- if((pDevice->byReAssocCount > 10) && (pDevice->bLinkPass != true)) { //10 sec timeout
- printk("Re-association timeout!!!\n");
- pDevice->byReAssocCount = 0;
- // if(pDevice->bWPASuppWextEnabled == true)
- {
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof (wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
- }
- else if(pDevice->bLinkPass == true)
- pDevice->byReAssocCount = 0;
-}
+ spin_lock_irq(&pDevice->lock);
- pMgmt->eLastState = pMgmt->eCurrState ;
+ pDevice->uAssocCount = 0;
+
+ /* Power Saving Mode Tx Burst */
+ if (pDevice->bEnablePSMode == true) {
+ pDevice->ulPSModeWaitTx++;
+ if (pDevice->ulPSModeWaitTx >= 2) {
+ pDevice->ulPSModeWaitTx = 0;
+ pDevice->bPSModeTxBurst = false;
+ }
+ }
+
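+	/* Clear the Barker/non-ERP flags here; they are re-derived below from
+	 * the per-node scan. */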
+ pDevice->byERPFlag &=
+ ~(WLAN_SET_ERP_BARKER_MODE(1) | WLAN_SET_ERP_NONERP_PRESENT(1));
+
+ if (pDevice->wUseProtectCntDown > 0) {
+ pDevice->wUseProtectCntDown--;
+ } else {
+ /* disable protect mode */
+ pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
+ }
+
+ if (pDevice->byReAssocCount > 0) {
+ pDevice->byReAssocCount++;
+ if (pDevice->byReAssocCount > 10 &&
+ pDevice->bLinkPass != true) { /* 10 sec timeout */
+ printk("Re-association timeout!!!\n");
+ pDevice->byReAssocCount = 0;
+ /* if (pDevice->bWPASuppWextEnabled == true) */
+ {
+ union iwreq_data wrqu;
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
+ wireless_send_event(pDevice->dev, SIOCGIWAP,
+ &wrqu, NULL);
+ }
+ } else if (pDevice->bLinkPass == true) {
+ pDevice->byReAssocCount = 0;
+ }
+ }
+
+ pMgmt->eLastState = pMgmt->eCurrState;
s_uCalculateLinkQual(pDevice);
- for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
-
- if (pMgmt->sNodeDBTable[ii].bActive) {
- // Increase in-activity counter
- pMgmt->sNodeDBTable[ii].uInActiveCount++;
-
- if (ii > 0) {
- if (pMgmt->sNodeDBTable[ii].uInActiveCount > MAX_INACTIVE_COUNT) {
- BSSvRemoveOneNode(pDevice, ii);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
- "Inactive timeout [%d] sec, STA index = [%d] remove\n", MAX_INACTIVE_COUNT, ii);
- continue;
- }
-
- if (pMgmt->sNodeDBTable[ii].eNodeState >= NODE_ASSOC) {
-
- pDevice->uAssocCount++;
-
- // check if Non ERP exist
- if (pMgmt->sNodeDBTable[ii].uInActiveCount < ERP_RECOVER_COUNT) {
- if (!pMgmt->sNodeDBTable[ii].bShortPreamble) {
- pDevice->byERPFlag |= WLAN_SET_ERP_BARKER_MODE(1);
- uLongPreambleSTACnt ++;
- }
- if (!pMgmt->sNodeDBTable[ii].bERPExist) {
- pDevice->byERPFlag |= WLAN_SET_ERP_NONERP_PRESENT(1);
- pDevice->byERPFlag |= WLAN_SET_ERP_USE_PROTECTION(1);
- }
- if (!pMgmt->sNodeDBTable[ii].bShortSlotTime)
- uNonShortSlotSTACnt++;
- }
- }
-
- // check if any STA in PS mode
- if (pMgmt->sNodeDBTable[ii].bPSEnable)
- uSleepySTACnt++;
-
- }
-
- // Rate fallback check
- if (!pDevice->bFixRate) {
- if (ii > 0) {
- // ii = 0 for multicast node (AP & Adhoc)
- RATEvTxRateFallBack((void *)pDevice,
- &(pMgmt->sNodeDBTable[ii]));
- }
- else {
- // ii = 0 reserved for unicast AP node (Infra STA)
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)
- RATEvTxRateFallBack((void *)pDevice,
- &(pMgmt->sNodeDBTable[ii]));
- }
-
- }
-
- // check if pending PS queue
- if (pMgmt->sNodeDBTable[ii].wEnQueueCnt != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index= %d, Queue = %d pending \n",
- ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt);
- if ((ii >0) && (pMgmt->sNodeDBTable[ii].wEnQueueCnt > 15)) {
- BSSvRemoveOneNode(pDevice, ii);
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Pending many queues PS STA Index = %d remove \n", ii);
- continue;
- }
- }
- }
-
- }
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->byBBType == BB_TYPE_11G)) {
-
- // on/off protect mode
- if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)) {
- if (!pDevice->bProtectMode) {
- MACvEnableProtectMD(pDevice);
- pDevice->bProtectMode = true;
- }
- }
- else {
- if (pDevice->bProtectMode) {
- MACvDisableProtectMD(pDevice);
- pDevice->bProtectMode = false;
- }
- }
- // on/off short slot time
-
- if (uNonShortSlotSTACnt > 0) {
- if (pDevice->bShortSlotTime) {
- pDevice->bShortSlotTime = false;
- BBvSetShortSlotTime(pDevice);
- vUpdateIFS((void *)pDevice);
- }
- }
- else {
- if (!pDevice->bShortSlotTime) {
- pDevice->bShortSlotTime = true;
- BBvSetShortSlotTime(pDevice);
- vUpdateIFS((void *)pDevice);
- }
- }
-
- // on/off barker long preamble mode
-
- if (uLongPreambleSTACnt > 0) {
- if (!pDevice->bBarkerPreambleMd) {
- MACvEnableBarkerPreambleMd(pDevice);
- pDevice->bBarkerPreambleMd = true;
- }
- }
- else {
- if (pDevice->bBarkerPreambleMd) {
- MACvDisableBarkerPreambleMd(pDevice);
- pDevice->bBarkerPreambleMd = false;
- }
- }
-
- }
-
- // Check if any STA in PS mode, enable DTIM multicast deliver
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (uSleepySTACnt > 0)
- pMgmt->sNodeDBTable[0].bPSEnable = true;
- else
- pMgmt->sNodeDBTable[0].bPSEnable = false;
- }
-
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pCurrSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_STANDBY) ||
- (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)) {
-
- if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS
-
- if (pDevice->bUpdateBBVGA) {
- s_vCheckSensitivity(pDevice);
- s_vCheckPreEDThreshold(pDevice);
- }
-
- if ((pMgmt->sNodeDBTable[0].uInActiveCount >= (LOST_BEACON_COUNT/2)) &&
- (pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) ) {
- pDevice->byBBVGANew = pDevice->abyBBVGA[0];
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_CHANGE_BBSENSITIVITY,
- NULL);
- }
-
- if (pMgmt->sNodeDBTable[0].uInActiveCount >= LOST_BEACON_COUNT) {
- pMgmt->sNodeDBTable[0].bActive = false;
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = false;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- pDevice->bRoaming = true;
- pDevice->bIsRoaming = false;
-
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost AP beacon [%d] sec, disconnected !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
- /* let wpa supplicant know AP may disconnect */
- {
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof (wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
- }
- }
- else if (pItemSSID->len != 0) {
-//Davidwang
- if ((pDevice->bEnableRoaming == true)&&(!(pMgmt->Cisco_cckm))) {
-DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bRoaming %d, !\n", pDevice->bRoaming );
-DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bIsRoaming %d, !\n", pDevice->bIsRoaming );
- if ((pDevice->bRoaming == true)&&(pDevice->bIsRoaming == true)){
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Fast Roaming ...\n");
- BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_BSSID_SCAN,
- pMgmt->abyDesireSSID);
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_SSID,
- pMgmt->abyDesireSSID);
- pDevice->uAutoReConnectTime = 0;
- pDevice->uIsroamingTime = 0;
- pDevice->bRoaming = false;
- }
- else if ((pDevice->bRoaming == false)&&(pDevice->bIsRoaming == true)) {
- pDevice->uIsroamingTime++;
- if (pDevice->uIsroamingTime >= 20)
- pDevice->bIsRoaming = false;
- }
-
- }
-else {
- if (pDevice->uAutoReConnectTime < 10) {
- pDevice->uAutoReConnectTime++;
- //network manager support need not do Roaming scan???
- if(pDevice->bWPASuppWextEnabled ==true)
- pDevice->uAutoReConnectTime = 0;
- }
- else {
- //mike use old encryption status for wpa reauthen
- if(pDevice->bWPADEVUp)
- pDevice->eEncryptionStatus = pDevice->eOldEncryptionStatus;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming ...\n");
- BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_BSSID_SCAN,
- pMgmt->abyDesireSSID);
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_SSID,
- pMgmt->abyDesireSSID);
- pDevice->uAutoReConnectTime = 0;
- }
- }
- }
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- // if adhoc started which essid is NULL string, rescanning.
- if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) {
- if (pDevice->uAutoReConnectTime < 10) {
- pDevice->uAutoReConnectTime++;
- }
- else {
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scanning ...\n");
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
- pDevice->uAutoReConnectTime = 0;
- };
- }
- if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
-
- if (pDevice->bUpdateBBVGA) {
- s_vCheckSensitivity(pDevice);
- s_vCheckPreEDThreshold(pDevice);
+ for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
+
+ if (pMgmt->sNodeDBTable[ii].bActive) {
+ /* Increase in-activity counter */
+ pMgmt->sNodeDBTable[ii].uInActiveCount++;
+
+ if (ii > 0) {
+ if (pMgmt->sNodeDBTable[ii].uInActiveCount >
+ MAX_INACTIVE_COUNT) {
+ BSSvRemoveOneNode(pDevice, ii);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+					"Inactive timeout [%d] sec, STA index = [%d] removed\n",
+ MAX_INACTIVE_COUNT, ii);
+ continue;
+ }
+
+ if (pMgmt->sNodeDBTable[ii].eNodeState >=
+ NODE_ASSOC) {
+
+ pDevice->uAssocCount++;
+
+				/* check if a non-ERP STA exists */
+ if (pMgmt->sNodeDBTable[ii].uInActiveCount <
+ ERP_RECOVER_COUNT) {
+ if (!pMgmt->sNodeDBTable[ii].bShortPreamble) {
+ pDevice->byERPFlag |=
+ WLAN_SET_ERP_BARKER_MODE(1);
+ uLongPreambleSTACnt++;
+ }
+ if (!pMgmt->sNodeDBTable[ii].bERPExist) {
+ pDevice->byERPFlag |=
+ WLAN_SET_ERP_NONERP_PRESENT(1);
+ pDevice->byERPFlag |=
+ WLAN_SET_ERP_USE_PROTECTION(1);
+ }
+ if (!pMgmt->sNodeDBTable[ii].bShortSlotTime)
+ uNonShortSlotSTACnt++;
+ }
+ }
+
+ /* check if any STA in PS mode */
+ if (pMgmt->sNodeDBTable[ii].bPSEnable)
+ uSleepySTACnt++;
+
+ }
+
+ /* Rate fallback check */
+ if (!pDevice->bFixRate) {
+ if (ii > 0) {
+ /* ii = 0 for multicast node (AP & Adhoc) */
+ RATEvTxRateFallBack((void *) pDevice,
+ &(pMgmt->sNodeDBTable[ii]));
+ } else if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
+ /* ii = 0 reserved for unicast AP node (Infra STA) */
+ RATEvTxRateFallBack((void *) pDevice,
+ &(pMgmt->sNodeDBTable[ii]));
+ }
+
+ }
+
+ /* check if pending PS queue */
+ if (pMgmt->sNodeDBTable[ii].wEnQueueCnt != 0) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+				KERN_INFO "Index = %d, Queue = %d pending\n",
+ ii,
+ pMgmt->sNodeDBTable[ii].wEnQueueCnt);
+ if (ii > 0 &&
+ pMgmt->sNodeDBTable[ii].wEnQueueCnt > 15) {
+ BSSvRemoveOneNode(pDevice, ii);
+ DBG_PRT(MSG_LEVEL_NOTICE,
+					KERN_INFO "Too many pending PS queues, STA index = %d removed\n",
+ ii);
+ continue;
+ }
+ }
}
- if (pMgmt->sNodeDBTable[0].uInActiveCount >=ADHOC_LOST_BEACON_COUNT) {
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost other STA beacon [%d] sec, started !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
- pMgmt->sNodeDBTable[0].uInActiveCount = 0;
- pMgmt->eCurrState = WMAC_STATE_STARTED;
- netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = false;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- }
- }
- }
- if (pDevice->bLinkPass == true) {
- if (pMgmt->eAuthenMode < WMAC_AUTH_WPA ||
- pDevice->fWPA_Authened == true) {
- if (++pDevice->tx_data_time_out > 40) {
- pDevice->tx_trigger = true;
+ }
+
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP &&
+ pDevice->byBBType == BB_TYPE_11G) {
+
+ /* on/off protect mode */
+ if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)) {
+ if (!pDevice->bProtectMode) {
+ MACvEnableProtectMD(pDevice);
+ pDevice->bProtectMode = true;
+ }
+ } else if (pDevice->bProtectMode) {
+ MACvDisableProtectMD(pDevice);
+ pDevice->bProtectMode = false;
+ }
+ /* on/off short slot time */
+
+ if (uNonShortSlotSTACnt > 0) {
+ if (pDevice->bShortSlotTime) {
+ pDevice->bShortSlotTime = false;
+ BBvSetShortSlotTime(pDevice);
+ vUpdateIFS((void *) pDevice);
+ }
+ } else if (!pDevice->bShortSlotTime) {
+ pDevice->bShortSlotTime = true;
+ BBvSetShortSlotTime(pDevice);
+ vUpdateIFS((void *) pDevice);
+ }
+
+ /* on/off barker long preamble mode */
+
+ if (uLongPreambleSTACnt > 0) {
+ if (!pDevice->bBarkerPreambleMd) {
+ MACvEnableBarkerPreambleMd(pDevice);
+ pDevice->bBarkerPreambleMd = true;
+ }
+ } else if (pDevice->bBarkerPreambleMd) {
+ MACvDisableBarkerPreambleMd(pDevice);
+ pDevice->bBarkerPreambleMd = false;
+ }
+
+ }
+
+ /* Check if any STA in PS mode, enable DTIM multicast deliver */
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
+ if (uSleepySTACnt > 0)
+ pMgmt->sNodeDBTable[0].bPSEnable = true;
+ else
+ pMgmt->sNodeDBTable[0].bPSEnable = false;
+ }
+
+ pItemSSID = (PWLAN_IE_SSID) pMgmt->abyDesireSSID;
+ pCurrSSID = (PWLAN_IE_SSID) pMgmt->abyCurrSSID;
+
+ if (pMgmt->eCurrMode == WMAC_MODE_STANDBY ||
+ pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
+
+ if (pMgmt->sNodeDBTable[0].bActive) { /* Assoc with BSS */
+
+ if (pDevice->bUpdateBBVGA) {
+ s_vCheckSensitivity(pDevice);
+ s_vCheckPreEDThreshold(pDevice);
+ }
- PSbSendNullPacket(pDevice);
+ if (pMgmt->sNodeDBTable[0].uInActiveCount >=
+ (LOST_BEACON_COUNT/2) &&
+ pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) {
+ pDevice->byBBVGANew = pDevice->abyBBVGA[0];
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_CHANGE_BBSENSITIVITY,
+ NULL);
+ }
+
+ if (pMgmt->sNodeDBTable[0].uInActiveCount >=
+ LOST_BEACON_COUNT) {
+ pMgmt->sNodeDBTable[0].bActive = false;
+ pMgmt->eCurrMode = WMAC_MODE_STANDBY;
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ netif_stop_queue(pDevice->dev);
+ pDevice->bLinkPass = false;
+ ControlvMaskByte(pDevice,
+ MESSAGE_REQUEST_MACREG,
+ MAC_REG_PAPEDELAY, LEDSTS_STS,
+ LEDSTS_SLOW);
+ pDevice->bRoaming = true;
+ pDevice->bIsRoaming = false;
+
+ DBG_PRT(MSG_LEVEL_NOTICE,
+ KERN_INFO "Lost AP beacon [%d] sec, disconnected !\n",
+ pMgmt->sNodeDBTable[0].uInActiveCount);
+ /* let wpa supplicant know AP may disconnect */
+ {
+ union iwreq_data wrqu;
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
+ wireless_send_event(pDevice->dev,
+ SIOCGIWAP,
+ &wrqu,
+ NULL);
+ }
+ }
+ } else if (pItemSSID->len != 0) {
+ /* Davidwang */
+ if ((pDevice->bEnableRoaming == true) &&
+ (!(pMgmt->Cisco_cckm))) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "bRoaming %d, !\n",
+ pDevice->bRoaming);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "bIsRoaming %d, !\n",
+ pDevice->bIsRoaming);
+ if ((pDevice->bRoaming == true) &&
+ (pDevice->bIsRoaming == true)) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Fast Roaming ...\n");
+ BSSvClearBSSList((void *) pDevice,
+ pDevice->bLinkPass);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_SSID,
+ pMgmt->abyDesireSSID);
+ pDevice->uAutoReConnectTime = 0;
+ pDevice->uIsroamingTime = 0;
+ pDevice->bRoaming = false;
+ } else if (pDevice->bRoaming == false &&
+ pDevice->bIsRoaming == true) {
+ pDevice->uIsroamingTime++;
+ if (pDevice->uIsroamingTime >= 20)
+ pDevice->bIsRoaming = false;
+ }
+ } else if (pDevice->uAutoReConnectTime < 10) {
+ pDevice->uAutoReConnectTime++;
+				/* with NetworkManager support a roaming scan may not be needed */
+ if (pDevice->bWPASuppWextEnabled == true)
+ pDevice->uAutoReConnectTime = 0;
+ } else {
+				/* reuse the old encryption status for WPA re-authentication */
+ if (pDevice->bWPADEVUp)
+ pDevice->eEncryptionStatus =
+ pDevice->eOldEncryptionStatus;
+
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Roaming ...\n");
+ BSSvClearBSSList((void *) pDevice,
+ pDevice->bLinkPass);
+ pMgmt->eScanType = WMAC_SCAN_ACTIVE;
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_SSID,
+ pMgmt->abyDesireSSID);
+ pDevice->uAutoReConnectTime = 0;
+ }
+ }
+ }
+
+ if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
+		/* if adhoc has started with an empty ESSID, keep rescanning */
+ if (pMgmt->eCurrState == WMAC_STATE_STARTED &&
+ pCurrSSID->len == 0) {
+ if (pDevice->uAutoReConnectTime < 10) {
+ pDevice->uAutoReConnectTime++;
+ } else {
+ DBG_PRT(MSG_LEVEL_NOTICE,
+ KERN_INFO "Adhoc re-scanning ...\n");
+ pMgmt->eScanType = WMAC_SCAN_ACTIVE;
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN, NULL);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_SSID, NULL);
+ pDevice->uAutoReConnectTime = 0;
+ }
+ }
+ if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
- pDevice->tx_trigger = false;
- pDevice->tx_data_time_out = 0;
+ if (pDevice->bUpdateBBVGA) {
+ s_vCheckSensitivity(pDevice);
+ s_vCheckPreEDThreshold(pDevice);
+ }
+ if (pMgmt->sNodeDBTable[0].uInActiveCount >=
+ ADHOC_LOST_BEACON_COUNT) {
+ DBG_PRT(MSG_LEVEL_NOTICE,
+ KERN_INFO "Lost other STA beacon [%d] sec, started !\n",
+ pMgmt->sNodeDBTable[0].uInActiveCount);
+ pMgmt->sNodeDBTable[0].uInActiveCount = 0;
+ pMgmt->eCurrState = WMAC_STATE_STARTED;
+ netif_stop_queue(pDevice->dev);
+ pDevice->bLinkPass = false;
+ ControlvMaskByte(pDevice,
+ MESSAGE_REQUEST_MACREG,
+ MAC_REG_PAPEDELAY, LEDSTS_STS,
+ LEDSTS_SLOW);
}
}
+ }
+
+ if (pDevice->bLinkPass == true) {
+ if ((pMgmt->eAuthenMode < WMAC_AUTH_WPA ||
+ pDevice->fWPA_Authened == true) &&
+ (++pDevice->tx_data_time_out > 40)) {
+ pDevice->tx_trigger = true;
+
+ PSbSendNullPacket(pDevice);
+
+ pDevice->tx_trigger = false;
+ pDevice->tx_data_time_out = 0;
+ }
if (netif_queue_stopped(pDevice->dev))
netif_wake_queue(pDevice->dev);
}
- spin_unlock_irq(&pDevice->lock);
+ spin_unlock_irq(&pDevice->lock);
schedule_delayed_work(&pDevice->second_callback_work, HZ);
}
-/*+
- *
+/*
* Routine Description:
*
*
- * Update Tx attemps, Tx failure counter in Node DB
+ * Update Tx attempts, Tx failure counter in Node DB
*
*
* Return Value:
- * none.
- *
--*/
-
-void BSSvUpdateNodeTxCounter(struct vnt_private *pDevice,
- PSStatCounter pStatistic, u8 byTSR, u8 byPktNO)
+ * none.
+ */
+void BSSvUpdateNodeTxCounter(struct vnt_private *pDevice, u8 byTSR, u8 byPktNO)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_tx_pkt_info *pkt_info = pDevice->pkt_info;
u32 uNodeIndex = 0;
u8 byTxRetry;
u16 wRate;
@@ -1174,171 +1187,174 @@ void BSSvUpdateNodeTxCounter(struct vnt_private *pDevice,
u8 byPktNum;
u16 wFIFOCtl;
- byPktNum = (byPktNO & 0x0F) >> 4;
- byTxRetry = (byTSR & 0xF0) >> 4;
- wRate = (u16) (byPktNO & 0xF0) >> 4;
- wFIFOCtl = pStatistic->abyTxPktInfo[byPktNum].wFIFOCtl;
- pbyDestAddr = (u8 *) &( pStatistic->abyTxPktInfo[byPktNum].abyDestAddr[0]);
-
- if (wFIFOCtl & FIFOCTL_AUTO_FB_0) {
- byFallBack = AUTO_FB_0;
- } else if (wFIFOCtl & FIFOCTL_AUTO_FB_1) {
- byFallBack = AUTO_FB_1;
- } else {
- byFallBack = AUTO_FB_NONE;
- }
-
- // Only Unicast using support rates
- if (wFIFOCtl & FIFOCTL_NEEDACK) {
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
- pMgmt->sNodeDBTable[0].uTxAttempts += 1;
- if ( !(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
- // transmit success, TxAttempts at least plus one
- pMgmt->sNodeDBTable[0].uTxOk[MAX_RATE]++;
- if ( (byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M) ) {
- wFallBackRate = wRate;
- } else if (byFallBack == AUTO_FB_0) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry0[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- } else if (byFallBack == AUTO_FB_1) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- }
- pMgmt->sNodeDBTable[0].uTxOk[wFallBackRate]++;
- } else {
- pMgmt->sNodeDBTable[0].uTxFailures ++;
- }
- pMgmt->sNodeDBTable[0].uTxRetry += byTxRetry;
- if (byTxRetry != 0) {
- pMgmt->sNodeDBTable[0].uTxFail[MAX_RATE]+=byTxRetry;
- if ( (byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M) ) {
- pMgmt->sNodeDBTable[0].uTxFail[wRate]+=byTxRetry;
- } else if (byFallBack == AUTO_FB_0) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate =
- awHWRetry0[wRate-RATE_18M][ii];
- else
- wFallBackRate =
- awHWRetry0[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
+ byPktNum = (byPktNO & 0x0F) >> 4;
+ byTxRetry = (byTSR & 0xF0) >> 4;
+ wRate = (u16) (byPktNO & 0xF0) >> 4;
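+	/* pkt_info[] replaces the old abyTxPktInfo[] statistics: one entry per
+	 * queued packet with the FIFO control word and destination address,
+	 * presumably filled in by the TX path. */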
+ wFIFOCtl = pkt_info[byPktNum].fifo_ctl;
+ pbyDestAddr = pkt_info[byPktNum].dest_addr;
+
+ if (wFIFOCtl & FIFOCTL_AUTO_FB_0)
+ byFallBack = AUTO_FB_0;
+ else if (wFIFOCtl & FIFOCTL_AUTO_FB_1)
+ byFallBack = AUTO_FB_1;
+ else
+ byFallBack = AUTO_FB_NONE;
+
+	/* Only unicast frames (which need an ACK) update the rate statistics */
+ if (wFIFOCtl & FIFOCTL_NEEDACK) {
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
+ pMgmt->sNodeDBTable[0].uTxAttempts += 1;
+ if (!(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
+				/* transmit succeeded; uTxAttempts was already incremented above */
+ pMgmt->sNodeDBTable[0].uTxOk[MAX_RATE]++;
+ if ((byFallBack == AUTO_FB_NONE) ||
+ (wRate < RATE_18M)) {
+ wFallBackRate = wRate;
+ } else if (byFallBack == AUTO_FB_0) {
+ if (byTxRetry < 5)
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][byTxRetry];
+ else
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][4];
+ } else if (byFallBack == AUTO_FB_1) {
+ if (byTxRetry < 5)
+ wFallBackRate =
+ awHWRetry1[wRate-RATE_18M][byTxRetry];
+ else
+ wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
+ }
+ pMgmt->sNodeDBTable[0].uTxOk[wFallBackRate]++;
+ } else {
+ pMgmt->sNodeDBTable[0].uTxFailures++;
}
- } else if (byFallBack == AUTO_FB_1) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate =
- awHWRetry1[wRate-RATE_18M][ii];
- else
- wFallBackRate =
- awHWRetry1[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
+ pMgmt->sNodeDBTable[0].uTxRetry += byTxRetry;
+ if (byTxRetry != 0) {
+ pMgmt->sNodeDBTable[0].uTxFail[MAX_RATE] += byTxRetry;
+ if (byFallBack == AUTO_FB_NONE ||
+ wRate < RATE_18M) {
+ pMgmt->sNodeDBTable[0].uTxFail[wRate] += byTxRetry;
+ } else if (byFallBack == AUTO_FB_0) {
+ for (ii = 0; ii < byTxRetry; ii++) {
+ if (ii < 5)
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][ii];
+ else
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][4];
+ pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
+ }
+ } else if (byFallBack == AUTO_FB_1) {
+ for (ii = 0; ii < byTxRetry; ii++) {
+ if (ii < 5)
+ wFallBackRate =
+ awHWRetry1[wRate-RATE_18M][ii];
+ else
+ wFallBackRate =
+ awHWRetry1[wRate-RATE_18M][4];
+ pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
+ }
+ }
}
- }
- }
- }
-
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
- (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
+ }
- if (BSSbIsSTAInNodeDB((void *) pDevice,
- pbyDestAddr,
- &uNodeIndex)) {
+ if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA ||
+ pMgmt->eCurrMode == WMAC_MODE_ESS_AP) &&
+ BSSbIsSTAInNodeDB((void *) pDevice,
+ pbyDestAddr,
+ &uNodeIndex)) {
pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts += 1;
- if ( !(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
- // transmit success, TxAttempts at least plus one
- pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
- if ( (byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M) ) {
- wFallBackRate = wRate;
- } else if (byFallBack == AUTO_FB_0) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry0[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- } else if (byFallBack == AUTO_FB_1) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- }
- pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wFallBackRate]++;
- } else {
- pMgmt->sNodeDBTable[uNodeIndex].uTxFailures ++;
- }
- pMgmt->sNodeDBTable[uNodeIndex].uTxRetry += byTxRetry;
- if (byTxRetry != 0) {
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[MAX_RATE]+=byTxRetry;
- if ( (byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M) ) {
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wRate]+=byTxRetry;
- } else if (byFallBack == AUTO_FB_0) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate =
- awHWRetry0[wRate-RATE_18M][ii];
- else
- wFallBackRate =
- awHWRetry0[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
- }
- } else if (byFallBack == AUTO_FB_1) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][ii];
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
- }
- }
- }
- }
- }
- }
+ if (!(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
+			/* transmit succeeded; uTxAttempts was already incremented above */
+ pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
+ if ((byFallBack == AUTO_FB_NONE) ||
+ (wRate < RATE_18M)) {
+ wFallBackRate = wRate;
+ } else if (byFallBack == AUTO_FB_0) {
+ if (byTxRetry < 5)
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][byTxRetry];
+ else
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][4];
+ } else if (byFallBack == AUTO_FB_1) {
+ if (byTxRetry < 5)
+ wFallBackRate =
+ awHWRetry1[wRate-RATE_18M][byTxRetry];
+ else
+ wFallBackRate =
+ awHWRetry1[wRate-RATE_18M][4];
+ }
+ pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wFallBackRate]++;
+ } else {
+ pMgmt->sNodeDBTable[uNodeIndex].uTxFailures++;
+ }
+ pMgmt->sNodeDBTable[uNodeIndex].uTxRetry += byTxRetry;
+ if (byTxRetry != 0) {
+ pMgmt->sNodeDBTable[uNodeIndex].uTxFail[MAX_RATE] += byTxRetry;
+ if ((byFallBack == AUTO_FB_NONE) ||
+ (wRate < RATE_18M)) {
+ pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wRate] += byTxRetry;
+ } else if (byFallBack == AUTO_FB_0) {
+ for (ii = 0; ii < byTxRetry; ii++) {
+ if (ii < 5)
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][ii];
+ else
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][4];
+ pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
+ }
+ } else if (byFallBack == AUTO_FB_1) {
+ for (ii = 0; ii < byTxRetry; ii++) {
+ if (ii < 5)
+ wFallBackRate = awHWRetry1[wRate-RATE_18M][ii];
+ else
+ wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
+ pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
+ }
+ }
+ }
+ }
+ }
}
-/*+
- *
+/*
* Routine Description:
- * Clear Nodes & skb in DB Table
+ * Clear Nodes & skb in DB Table
*
*
* Parameters:
- * In:
- * hDeviceContext - The adapter context.
- * uStartIndex - starting index
- * Out:
- * none
+ * In:
+ * hDeviceContext - The adapter context.
+ * uStartIndex - starting index
+ * Out:
+ * none
*
* Return Value:
- * None.
- *
--*/
-
+ * None.
+ */
void BSSvClearNodeDBTable(struct vnt_private *pDevice, u32 uStartIndex)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
- struct sk_buff *skb;
+ struct sk_buff *skb;
int ii;
- for (ii = uStartIndex; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive) {
- // check if sTxPSQueue has been initial
- if (pMgmt->sNodeDBTable[ii].sTxPSQueue.next != NULL) {
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL){
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "PS skb != NULL %d\n", ii);
- dev_kfree_skb(skb);
- }
- }
- memset(&pMgmt->sNodeDBTable[ii], 0, sizeof(KnownNodeDB));
- }
- }
-};
+ for (ii = uStartIndex; ii < (MAX_NODE_NUM + 1); ii++) {
+ if (pMgmt->sNodeDBTable[ii].bActive) {
+			/* check if sTxPSQueue has been initialized */
+ if (pMgmt->sNodeDBTable[ii].sTxPSQueue.next) {
+ while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue))) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "PS skb != NULL %d\n",
+ ii);
+ dev_kfree_skb(skb);
+ }
+ }
+ memset(&pMgmt->sNodeDBTable[ii], 0, sizeof(KnownNodeDB));
+ }
+ }
+}
static void s_vCheckSensitivity(struct vnt_private *pDevice)
{
@@ -1346,82 +1362,87 @@ static void s_vCheckSensitivity(struct vnt_private *pDevice)
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
int ii;
- if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
- ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
- pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
- if (pBSSList != NULL) {
- /* Update BB register if RSSI is too strong */
- signed long LocalldBmAverage = 0;
- signed long uNumofdBm = 0;
- for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
- if (pBSSList->ldBmAverage[ii] != 0) {
- uNumofdBm ++;
- LocalldBmAverage += pBSSList->ldBmAverage[ii];
- }
- }
- if (uNumofdBm > 0) {
- LocalldBmAverage = LocalldBmAverage/uNumofdBm;
- for (ii=0;ii<BB_VGA_LEVEL;ii++) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"LocalldBmAverage:%ld, %ld %02x\n", LocalldBmAverage, pDevice->ldBmThreshold[ii], pDevice->abyBBVGA[ii]);
- if (LocalldBmAverage < pDevice->ldBmThreshold[ii]) {
- pDevice->byBBVGANew = pDevice->abyBBVGA[ii];
- break;
- }
- }
- if (pDevice->byBBVGANew != pDevice->byBBVGACurrent) {
- pDevice->uBBVGADiffCount++;
- if (pDevice->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD)
- bScheduleCommand(pDevice,
- WLAN_CMD_CHANGE_BBSENSITIVITY,
- NULL);
- } else {
- pDevice->uBBVGADiffCount = 0;
- }
- }
- }
- }
+ if (pMgmt->eCurrState == WMAC_STATE_ASSOC ||
+ (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA &&
+ pMgmt->eCurrState == WMAC_STATE_JOINTED)) {
+ pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID,
+ (PWLAN_IE_SSID) pMgmt->abyCurrSSID);
+ if (pBSSList) {
+ /* Update BB register if RSSI is too strong */
+ signed long LocalldBmAverage = 0;
+ signed long uNumofdBm = 0;
+ for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
+ if (pBSSList->ldBmAverage[ii] != 0) {
+ uNumofdBm++;
+ LocalldBmAverage += pBSSList->ldBmAverage[ii];
+ }
+ }
+ if (uNumofdBm > 0) {
+ LocalldBmAverage = LocalldBmAverage/uNumofdBm;
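+				/* Pick the first VGA table entry whose threshold
+				 * the averaged RSSI falls below. */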
+ for (ii = 0; ii < BB_VGA_LEVEL; ii++) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO"LocalldBmAverage:%ld, %ld %02x\n",
+ LocalldBmAverage,
+ pDevice->ldBmThreshold[ii],
+ pDevice->abyBBVGA[ii]);
+ if (LocalldBmAverage < pDevice->ldBmThreshold[ii]) {
+ pDevice->byBBVGANew =
+ pDevice->abyBBVGA[ii];
+ break;
+ }
+ }
+ if (pDevice->byBBVGANew !=
+ pDevice->byBBVGACurrent) {
+ pDevice->uBBVGADiffCount++;
+ if (pDevice->uBBVGADiffCount >=
+ BB_VGA_CHANGE_THRESHOLD)
+ bScheduleCommand(pDevice,
+ WLAN_CMD_CHANGE_BBSENSITIVITY,
+ NULL);
+ } else {
+ pDevice->uBBVGADiffCount = 0;
+ }
+ }
+ }
+ }
}
static void s_uCalculateLinkQual(struct vnt_private *pDevice)
{
+ struct net_device_stats *stats = &pDevice->stats;
unsigned long TxOkRatio, TxCnt;
unsigned long RxOkRatio, RxCnt;
unsigned long RssiRatio;
+ unsigned long qual;
long ldBm;
-TxCnt = pDevice->scStatistic.TxNoRetryOkCount +
- pDevice->scStatistic.TxRetryOkCount +
- pDevice->scStatistic.TxFailCount;
-RxCnt = pDevice->scStatistic.RxFcsErrCnt +
- pDevice->scStatistic.RxOkCnt;
-TxOkRatio = (TxCnt < 6) ? 4000:((pDevice->scStatistic.TxNoRetryOkCount * 4000) / TxCnt);
-RxOkRatio = (RxCnt < 6) ? 2000:((pDevice->scStatistic.RxOkCnt * 2000) / RxCnt);
-//decide link quality
-if(pDevice->bLinkPass !=true)
-{
- pDevice->scStatistic.LinkQuality = 0;
- pDevice->scStatistic.SignalStren = 0;
-}
-else
-{
- RFvRSSITodBm(pDevice, (u8)(pDevice->uCurrRSSI), &ldBm);
- if(-ldBm < 50) {
- RssiRatio = 4000;
- }
- else if(-ldBm > 90) {
- RssiRatio = 0;
- }
- else {
- RssiRatio = (40-(-ldBm-50))*4000/40;
- }
- pDevice->scStatistic.SignalStren = RssiRatio/40;
- pDevice->scStatistic.LinkQuality = (RssiRatio+TxOkRatio+RxOkRatio)/100;
-}
- pDevice->scStatistic.RxFcsErrCnt = 0;
- pDevice->scStatistic.RxOkCnt = 0;
- pDevice->scStatistic.TxFailCount = 0;
- pDevice->scStatistic.TxNoRetryOkCount = 0;
- pDevice->scStatistic.TxRetryOkCount = 0;
+ TxCnt = stats->tx_packets + pDevice->wstats.discard.retries;
+
+ RxCnt = stats->rx_packets + stats->rx_frame_errors;
+
+	TxOkRatio = (TxCnt < 6) ? 4000 : ((stats->tx_packets * 4000) / TxCnt);
+
+ RxOkRatio = (RxCnt < 6) ? 2000 :
+ ((stats->rx_packets * 2000) / RxCnt);
+
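+	/* The three ratios are scaled so their sum is at most 10000, giving a
+	 * 0-100 link quality after the final division by 100. */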
+ /* decide link quality */
+ if (pDevice->bLinkPass != true) {
+ pDevice->wstats.qual.qual = 0;
+ } else {
+ RFvRSSITodBm(pDevice, (u8) (pDevice->uCurrRSSI), &ldBm);
+ if (-ldBm < 50)
+ RssiRatio = 4000;
+ else if (-ldBm > 90)
+ RssiRatio = 0;
+ else
+ RssiRatio = (40-(-ldBm-50)) * 4000 / 40;
+
+ qual = (RssiRatio + TxOkRatio + RxOkRatio) / 100;
+ if (qual < 100)
+ pDevice->wstats.qual.qual = (u8) qual;
+ else
+ pDevice->wstats.qual.qual = 100;
+ }
}
void BSSvClearAnyBSSJoinRecord(struct vnt_private *pDevice)
@@ -1440,13 +1461,17 @@ static void s_vCheckPreEDThreshold(struct vnt_private *pDevice)
PKnownBSS pBSSList = NULL;
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
- if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
- ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
- pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
- if (pBSSList != NULL) {
- pDevice->byBBPreEDRSSI = (u8) (~(pBSSList->ldBmAverRange) + 1);
- BBvUpdatePreEDThreshold(pDevice, false);
- }
- }
+ if (pMgmt->eCurrState == WMAC_STATE_ASSOC ||
+ (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA &&
+ pMgmt->eCurrState == WMAC_STATE_JOINTED)) {
+ pBSSList = BSSpAddrIsInBSSList(pDevice,
+ pMgmt->abyCurrBSSID,
+ (PWLAN_IE_SSID) pMgmt->abyCurrSSID);
+ if (pBSSList) {
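+			/* ~x + 1 negates ldBmAverRange, storing the average RSSI
+			 * of the current BSS as a positive dBm magnitude. */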
+ pDevice->byBBPreEDRSSI =
+ (u8) (~(pBSSList->ldBmAverRange) + 1);
+ BBvUpdatePreEDThreshold(pDevice, false);
+ }
+ }
}
diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h
index fc41855..8df3fb2 100644
--- a/drivers/staging/vt6656/bssdb.h
+++ b/drivers/staging/vt6656/bssdb.h
@@ -34,7 +34,6 @@
#include "80211hdr.h"
#include "80211mgr.h"
#include "card.h"
-#include "mib.h"
#define MAX_NODE_NUM 64
#define MAX_BSS_NUM 42
@@ -264,8 +263,7 @@ void BSSvUpdateAPNode(struct vnt_private *, u16 *pwCapInfo,
void BSSvSecondCallBack(struct work_struct *work);
-void BSSvUpdateNodeTxCounter(struct vnt_private *, PSStatCounter pStatistic,
- u8 byTSR, u8 byPktNO);
+void BSSvUpdateNodeTxCounter(struct vnt_private *, u8 byTSR, u8 byPktNO);
void BSSvRemoveOneNode(struct vnt_private *, u32 uNodeIndex);
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 19d3cf4..0d87728 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -66,7 +66,7 @@ static int msglevel =MSG_LEVEL_INFO;
//const u16 cwRXBCNTSFOff[MAX_RATE] =
//{17, 34, 96, 192, 34, 23, 17, 11, 8, 5, 4, 3};
-const u16 cwRXBCNTSFOff[MAX_RATE] =
+static const u16 cwRXBCNTSFOff[MAX_RATE] =
{192, 96, 34, 17, 34, 23, 17, 11, 8, 5, 4, 3};
/*
@@ -75,52 +75,48 @@ const u16 cwRXBCNTSFOff[MAX_RATE] =
* Parameters:
* In:
* pDevice - The adapter to be set
- * uConnectionChannel - Channel to be set
+ * connection_channel - Channel to be set
* Out:
* none
*/
-void CARDbSetMediaChannel(struct vnt_private *pDevice, u32 uConnectionChannel)
+void CARDbSetMediaChannel(struct vnt_private *priv, u32 connection_channel)
{
- if (pDevice->byBBType == BB_TYPE_11A) { // 15 ~ 38
- if ((uConnectionChannel < (CB_MAX_CHANNEL_24G+1)) || (uConnectionChannel > CB_MAX_CHANNEL))
- uConnectionChannel = (CB_MAX_CHANNEL_24G+1);
- } else {
- if ((uConnectionChannel > CB_MAX_CHANNEL_24G) || (uConnectionChannel == 0)) // 1 ~ 14
- uConnectionChannel = 1;
- }
-
- // clear NAV
- MACvRegBitsOn(pDevice, MAC_REG_MACCR, MACCR_CLRNAV);
-
- // Set Channel[7] = 0 to tell H/W channel is changing now.
- MACvRegBitsOff(pDevice, MAC_REG_CHANNEL, 0x80);
-
- //if (pMgmt->uCurrChannel == uConnectionChannel)
- // return bResult;
-
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_SELECT_CHANNLE,
- (u16) uConnectionChannel,
- 0,
- 0,
- NULL
- );
+ if (priv->byBBType == BB_TYPE_11A) {
+ if ((connection_channel < (CB_MAX_CHANNEL_24G + 1)) ||
+ (connection_channel > CB_MAX_CHANNEL))
+ connection_channel = (CB_MAX_CHANNEL_24G + 1);
+ } else {
+ if ((connection_channel > CB_MAX_CHANNEL_24G) ||
+ (connection_channel == 0))
+ connection_channel = 1;
+ }
- //{{ RobertYu: 20041202
- //// TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput
+ /* clear NAV */
+ MACvRegBitsOn(priv, MAC_REG_MACCR, MACCR_CLRNAV);
+
+ /* Set Channel[7] = 0 to tell H/W channel is changing now. */
+ MACvRegBitsOff(priv, MAC_REG_CHANNEL, 0xb0);
+
+ CONTROLnsRequestOut(priv, MESSAGE_TYPE_SELECT_CHANNLE,
+ connection_channel, 0, 0, NULL);
+
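+	/* Reprogram TX power for the new channel; byCurPwr is reset to 0xff
+	 * first, presumably so the cached level never matches and
+	 * RFbRawSetPower() always writes the new value. */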
+ if (priv->byBBType == BB_TYPE_11A) {
+ priv->byCurPwr = 0xff;
+ RFbRawSetPower(priv,
+ priv->abyOFDMAPwrTbl[connection_channel-15], RATE_54M);
+ } else if (priv->byBBType == BB_TYPE_11G) {
+ priv->byCurPwr = 0xff;
+ RFbRawSetPower(priv,
+ priv->abyOFDMPwrTbl[connection_channel-1], RATE_54M);
+ } else {
+ priv->byCurPwr = 0xff;
+ RFbRawSetPower(priv,
+ priv->abyCCKPwrTbl[connection_channel-1], RATE_1M);
+ }
- if (pDevice->byBBType == BB_TYPE_11A) {
- pDevice->byCurPwr = 0xFF;
- RFbRawSetPower(pDevice, pDevice->abyOFDMAPwrTbl[uConnectionChannel-15], RATE_54M);
- } else if (pDevice->byBBType == BB_TYPE_11G) {
- pDevice->byCurPwr = 0xFF;
- RFbRawSetPower(pDevice, pDevice->abyOFDMPwrTbl[uConnectionChannel-1], RATE_54M);
- } else {
- pDevice->byCurPwr = 0xFF;
- RFbRawSetPower(pDevice, pDevice->abyCCKPwrTbl[uConnectionChannel-1], RATE_1M);
- }
- ControlvWriteByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_CHANNEL,(u8)(uConnectionChannel|0x80));
+ ControlvWriteByte(priv, MESSAGE_REQUEST_MACREG, MAC_REG_CHANNEL,
+ (u8)(connection_channel|0x80));
}
/*
@@ -205,7 +201,7 @@ static u16 swGetOFDMControlRate(struct vnt_private *pDevice, u16 wRateIdx)
* Return Value: none
*
*/
-void
+static void
CARDvCalculateOFDMRParameter (
u16 wRate,
u8 byBBType,
@@ -724,28 +720,20 @@ bool CARDbClearCurrentTSF(struct vnt_private *pDevice)
*/
u64 CARDqGetNextTBTT(u64 qwTSF, u16 wBeaconInterval)
{
+ u32 uBeaconInterval;
- unsigned int uLowNextTBTT;
- unsigned int uHighRemain, uLowRemain;
- unsigned int uBeaconInterval;
-
- uBeaconInterval = wBeaconInterval * 1024;
- // Next TBTT = ((local_current_TSF / beacon_interval) + 1 ) * beacon_interval
- uLowNextTBTT = ((qwTSF & 0xffffffffU) >> 10) << 10;
- uLowRemain = (uLowNextTBTT) % uBeaconInterval;
- uHighRemain = ((0x80000000 % uBeaconInterval) * 2 * (u32)(qwTSF >> 32))
- % uBeaconInterval;
- uLowRemain = (uHighRemain + uLowRemain) % uBeaconInterval;
- uLowRemain = uBeaconInterval - uLowRemain;
+ uBeaconInterval = wBeaconInterval * 1024;
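+	/* The beacon interval is given in TUs (1024 us each); convert it to
+	 * microseconds to match the TSF resolution. */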
- // check if carry when add one beacon interval
- if ((~uLowNextTBTT) < uLowRemain)
- qwTSF = ((qwTSF >> 32) + 1) << 32;
-
- qwTSF = (qwTSF & 0xffffffff00000000ULL) |
- (u64)(uLowNextTBTT + uLowRemain);
+ /* Next TBTT =
+ * ((local_current_TSF / beacon_interval) + 1) * beacon_interval
+ */
+ if (uBeaconInterval) {
+ do_div(qwTSF, uBeaconInterval);
+ qwTSF += 1;
+ qwTSF *= uBeaconInterval;
+ }
- return (qwTSF);
+ return qwTSF;
}
/*
diff --git a/drivers/staging/vt6656/channel.c b/drivers/staging/vt6656/channel.c
index e430b35..5a4fa0e 100644
--- a/drivers/staging/vt6656/channel.c
+++ b/drivers/staging/vt6656/channel.c
@@ -423,8 +423,7 @@ void CHvInitChannelTable(struct vnt_private *pDevice)
break;
}
- if ((pDevice->dwDiagRefCount != 0) ||
- (pDevice->b11hEable == true)) {
+ if (pDevice->b11hEable == true) {
if (bMultiBand == true) {
for (ii = 0; ii < CB_MAX_CHANNEL; ii++) {
sChannelTbl[ii+1].bValid = true;
diff --git a/drivers/staging/vt6656/datarate.c b/drivers/staging/vt6656/datarate.c
index af9eab0..547db6f 100644
--- a/drivers/staging/vt6656/datarate.c
+++ b/drivers/staging/vt6656/datarate.c
@@ -45,7 +45,7 @@
/* static int msglevel = MSG_LEVEL_DEBUG; */
static int msglevel = MSG_LEVEL_INFO;
-const u8 acbyIERate[MAX_RATE] = {0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18,
+static const u8 acbyIERate[MAX_RATE] = {0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18,
0x24, 0x30, 0x48, 0x60, 0x6C};
#define AUTORATE_TXOK_CNT 0x0400
diff --git a/drivers/staging/vt6656/datarate.h b/drivers/staging/vt6656/datarate.h
index 43cb778..96252ad 100644
--- a/drivers/staging/vt6656/datarate.h
+++ b/drivers/staging/vt6656/datarate.h
@@ -52,7 +52,6 @@
#define RATE_48M 10
#define RATE_54M 11
#define RATE_AUTO 12
-#define MAX_RATE 12
void RATEvParseMaxRate(struct vnt_private *, PWLAN_IE_SUPP_RATES pItemRates,
PWLAN_IE_SUPP_RATES pItemExtRates, int bUpdateBasicRate,
diff --git a/drivers/staging/vt6656/desc.h b/drivers/staging/vt6656/desc.h
index afe7074..7c6dd5f 100644
--- a/drivers/staging/vt6656/desc.h
+++ b/drivers/staging/vt6656/desc.h
@@ -146,14 +146,6 @@
/*
* TX FIFO header
*/
-
-typedef struct tagSTxShortBufHead {
- u16 wFIFOCtl;
- u16 wTimeStamp;
-} __attribute__ ((__packed__))
-STxShortBufHead, *PSTxShortBufHead;
-typedef const STxShortBufHead *PCSTxShortBufHead;
-
typedef struct tagSBEACONCtl {
u32 BufReady:1;
u32 TSF:15;
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 62b7de1..1f42257 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -32,7 +32,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
@@ -44,6 +43,7 @@
#include <net/cfg80211.h>
#include <linux/timer.h>
#include <linux/usb.h>
+#include <linux/crc32.h>
#ifdef SIOCETHTOOL
#define DEVICE_ETHTOOL_IOCTL_SUPPORT
@@ -69,12 +69,12 @@
#include "tether.h"
#include "wmgr.h"
#include "wcmd.h"
-#include "mib.h"
#include "srom.h"
#include "rc4.h"
#include "desc.h"
#include "key.h"
#include "card.h"
+#include "rndis.h"
#define VNT_USB_VENDOR_ID 0x160a
#define VNT_USB_PRODUCT_ID 0x3184
@@ -149,11 +149,9 @@ typedef enum __device_msg_level {
MSG_LEVEL_DEBUG = 4 /* Only for debug purpose. */
} DEVICE_MSG_LEVEL, *PDEVICE_MSG_LEVEL;
-typedef enum __device_init_type {
- DEVICE_INIT_COLD = 0, /* cold init */
- DEVICE_INIT_RESET, /* reset init or Dx to D0 power remain */
- DEVICE_INIT_DXPL /* Dx to D0 power lost init */
-} DEVICE_INIT_TYPE, *PDEVICE_INIT_TYPE;
+#define DEVICE_INIT_COLD 0x0 /* cold init */
+#define DEVICE_INIT_RESET 0x1 /* reset init or Dx to D0 power remain */
+#define DEVICE_INIT_DXPL 0x2 /* Dx to D0 power lost init */
/* USB */
@@ -189,6 +187,12 @@ struct vnt_usb_send_context {
unsigned char Data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
};
+/* tx packet info for rxtx */
+struct vnt_tx_pkt_info {
+ u16 fifo_ctl;
+ u8 dest_addr[ETH_ALEN];
+};
+
/* structure got from configuration file as user-desired default settings */
typedef struct _DEFAULT_CONFIG {
signed int ZoneType;
@@ -430,6 +434,7 @@ struct vnt_private {
/* Variables to track resources for the BULK Out Pipe */
struct vnt_usb_send_context *apTD[CB_MAX_TX_DESC];
u32 cbTD;
+ struct vnt_tx_pkt_info pkt_info[16];
/* Variables to track resources for the Interrupt In Pipe */
INT_BUFFER intBuf;
@@ -467,16 +472,13 @@ struct vnt_private {
u8 byOriginalZonetype;
int bLinkPass; /* link status: OK or fail */
+ struct vnt_cmd_card_init init_command;
+ struct vnt_rsp_card_init init_response;
u8 abyCurrentNetAddr[ETH_ALEN];
u8 abyPermanentNetAddr[ETH_ALEN];
int bExistSWNetAddr;
- /* Adapter statistics */
- SStatCounter scStatistic;
- /* 802.11 counter */
- SDot11Counters s802_11Counter;
-
/* Maintain statistical debug info. */
unsigned long packetsReceived;
unsigned long packetsReceivedDropped;
@@ -596,7 +598,6 @@ struct vnt_private {
int bCCK;
int bEncryptionEnable;
- int bLongHeader;
int bShortSlotTime;
int bProtectMode;
int bNonERPPresent;
@@ -666,8 +667,6 @@ struct vnt_private {
u8 abyPRNG[WLAN_WEPMAX_KEYLEN+3];
u8 byKeyIndex;
- int bAES;
-
u32 uKeyLength;
u8 abyKey[WLAN_WEP232_KEYLEN];
@@ -695,7 +694,6 @@ struct vnt_private {
u8 byBBPreEDIndex;
int bRadioCmd;
- u32 dwDiagRefCount;
/* For FOE Tuning */
u8 byFOETuning;
diff --git a/drivers/staging/vt6656/device_cfg.h b/drivers/staging/vt6656/device_cfg.h
index a97f7bb..0b9d834 100644
--- a/drivers/staging/vt6656/device_cfg.h
+++ b/drivers/staging/vt6656/device_cfg.h
@@ -65,6 +65,8 @@ struct _version {
#define DEVICE_VERSION "1.19_12"
#endif
+#define MAX_RATE 12
+
/* config file */
#include <linux/fs.h>
#include <linux/fcntl.h>
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 75dc92d..eca04c0 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -38,6 +38,7 @@
*
*/
+#include "dpc.h"
#include "device.h"
#include "rxtx.h"
#include "tether.h"
@@ -59,7 +60,7 @@
//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel =MSG_LEVEL_INFO;
-const u8 acbyRxRate[MAX_RATE] =
+static const u8 acbyRxRate[MAX_RATE] =
{2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108};
static u8 s_byGetRateIdx(u8 byRate);
@@ -291,12 +292,14 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
if (BytesToIndicate != FrameSize) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"------- WRONG Length 1\n");
+ pStats->rx_frame_errors++;
return false;
}
if ((BytesToIndicate > 2372) || (BytesToIndicate <= 40)) {
// Frame Size error drop this packet.
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---------- WRONG Length 2\n");
+ pStats->rx_frame_errors++;
return false;
}
@@ -314,6 +317,7 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
(BytesToIndicate < (*pwPLCP_Length)) ) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Wrong PLCP Length %x\n", (int) *pwPLCP_Length);
+ pStats->rx_frame_errors++;
return false;
}
for ( ii=RATE_1M;ii<MAX_RATE;ii++) {
@@ -344,16 +348,6 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
FrameSize = *pwPLCP_Length;
pbyFrame = pbyDAddress + 8;
- // update receive statistic counter
-
- STAvUpdateRDStatCounter(&pDevice->scStatistic,
- *pbyRsr,
- *pbyNewRsr,
- *pbyRxSts,
- *pbyRxRate,
- pbyFrame,
- FrameSize
- );
pMACHeader = (struct ieee80211_hdr *) pbyFrame;
@@ -370,7 +364,6 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
if (!is_multicast_ether_addr(pMACHeader->addr1)) {
if (WCTLbIsDuplicate(&(pDevice->sDupRxCache), (struct ieee80211_hdr *) pbyFrame)) {
- pDevice->s802_11Counter.FrameDuplicateCount++;
return false;
}
@@ -450,14 +443,6 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
(pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) ||
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
-
- if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP)) {
- pDevice->s802_11Counter.TKIPICVErrors++;
- } else if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP)) {
- pDevice->s802_11Counter.CCMPDecryptErrors++;
- } else if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_WEP)) {
-// pDevice->s802_11Counter.WEPICVErrorCount.QuadPart++;
- }
}
return false;
}
@@ -482,7 +467,6 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
) {
// defragment
bDeFragRx = WCTLbHandleFragment(pDevice, (struct ieee80211_hdr *) (pbyFrame), FrameSize, bIsWEP, bExtIV);
- pDevice->s802_11Counter.ReceivedFragmentCount++;
if (bDeFragRx) {
// defrag complete
// TODO skb, pbyFrame
@@ -760,8 +744,6 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
(pDevice->bRxMICFail == true)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC comparison is fail!\n");
pDevice->bRxMICFail = false;
- //pDevice->s802_11Counter.TKIPLocalMICFailures.QuadPart++;
- pDevice->s802_11Counter.TKIPLocalMICFailures++;
if (bDeFragRx) {
if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc more frag bufs\n",
@@ -824,12 +806,6 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
(dwRxTSC47_16 <= dwLocalTSC47_16) &&
!((dwRxTSC47_16 == 0) && (dwLocalTSC47_16 == 0xFFFFFFFF))) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"TSC is illegal~~!\n ");
- if (pKey->byCipherSuite == KEY_CTL_TKIP)
- //pDevice->s802_11Counter.TKIPReplays.QuadPart++;
- pDevice->s802_11Counter.TKIPReplays++;
- else
- //pDevice->s802_11Counter.CCMPReplays.QuadPart++;
- pDevice->s802_11Counter.CCMPReplays++;
if (bDeFragRx) {
if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
@@ -1061,19 +1037,9 @@ static int s_bHandleRxEncryption(struct vnt_private *pDevice, u8 *pbyFrame,
if (pKey == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey == NULL\n");
- if (byDecMode == KEY_CTL_WEP) {
-// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == true) {
-// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
- }
return false;
}
if (byDecMode != pKey->byCipherSuite) {
- if (byDecMode == KEY_CTL_WEP) {
-// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == true) {
-// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
- }
*pKeyOut = NULL;
return false;
}
@@ -1164,11 +1130,6 @@ static int s_bHostWepRxEncryption(struct vnt_private *pDevice, u8 *pbyFrame,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"AES:%d %d %d\n", pMgmt->byCSSPK, pMgmt->byCSSGK, byDecMode);
if (byDecMode != pKey->byCipherSuite) {
- if (byDecMode == KEY_CTL_WEP) {
-// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == true) {
-// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
- }
return false;
}
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index a2b4ba6..e0e9386 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -33,7 +33,6 @@
*/
#include "int.h"
-#include "mib.h"
#include "tmacro.h"
#include "mac.h"
#include "power.h"
@@ -86,45 +85,46 @@ void INTnsProcessData(struct vnt_private *pDevice)
pINTData = (PSINTData) pDevice->intBuf.pDataBuf;
if (pINTData->byTSR0 & TSR_VALID) {
- STAvUpdateTDStatCounter(&(pDevice->scStatistic),
- (u8)(pINTData->byPkt0 & 0x0F),
- (u8)(pINTData->byPkt0>>4),
- pINTData->byTSR0);
+ if (pINTData->byTSR0 & (TSR_TMO | TSR_RETRYTMO))
+ pDevice->wstats.discard.retries++;
+ else
+ pStats->tx_packets++;
+
BSSvUpdateNodeTxCounter(pDevice,
- &(pDevice->scStatistic),
pINTData->byTSR0,
pINTData->byPkt0);
/*DBG_PRN_GRP01(("TSR0 %02x\n", pINTData->byTSR0));*/
}
if (pINTData->byTSR1 & TSR_VALID) {
- STAvUpdateTDStatCounter(&(pDevice->scStatistic),
- (u8)(pINTData->byPkt1 & 0x0F),
- (u8)(pINTData->byPkt1>>4),
- pINTData->byTSR1);
+ if (pINTData->byTSR1 & (TSR_TMO | TSR_RETRYTMO))
+ pDevice->wstats.discard.retries++;
+ else
+ pStats->tx_packets++;
+
+
BSSvUpdateNodeTxCounter(pDevice,
- &(pDevice->scStatistic),
pINTData->byTSR1,
pINTData->byPkt1);
/*DBG_PRN_GRP01(("TSR1 %02x\n", pINTData->byTSR1));*/
}
if (pINTData->byTSR2 & TSR_VALID) {
- STAvUpdateTDStatCounter(&(pDevice->scStatistic),
- (u8)(pINTData->byPkt2 & 0x0F),
- (u8)(pINTData->byPkt2>>4),
- pINTData->byTSR2);
+ if (pINTData->byTSR2 & (TSR_TMO | TSR_RETRYTMO))
+ pDevice->wstats.discard.retries++;
+ else
+ pStats->tx_packets++;
+
BSSvUpdateNodeTxCounter(pDevice,
- &(pDevice->scStatistic),
pINTData->byTSR2,
pINTData->byPkt2);
/*DBG_PRN_GRP01(("TSR2 %02x\n", pINTData->byTSR2));*/
}
if (pINTData->byTSR3 & TSR_VALID) {
- STAvUpdateTDStatCounter(&(pDevice->scStatistic),
- (u8)(pINTData->byPkt3 & 0x0F),
- (u8)(pINTData->byPkt3>>4),
- pINTData->byTSR3);
+ if (pINTData->byTSR3 & (TSR_TMO | TSR_RETRYTMO))
+ pDevice->wstats.discard.retries++;
+ else
+ pStats->tx_packets++;
+
BSSvUpdateNodeTxCounter(pDevice,
- &(pDevice->scStatistic),
pINTData->byTSR3,
pINTData->byPkt3);
/*DBG_PRN_GRP01(("TSR3 %02x\n", pINTData->byTSR3));*/
@@ -174,16 +174,6 @@ void INTnsProcessData(struct vnt_private *pDevice)
pINTData->byISR0,
pINTData->dwLoTSF,
pINTData->dwHiTSF)); */
-
- STAvUpdate802_11Counter(&pDevice->s802_11Counter,
- &pDevice->scStatistic,
- pINTData->byRTSSuccess,
- pINTData->byRTSFail,
- pINTData->byACKFail,
- pINTData->byFCSErr);
- STAvUpdateIsrStatCounter(&pDevice->scStatistic,
- pINTData->byISR0,
- pINTData->byISR1);
}
if (pINTData->byISR1 != 0)
if (pINTData->byISR1 & ISR_GPIO3)
@@ -193,10 +183,6 @@ void INTnsProcessData(struct vnt_private *pDevice)
pDevice->intBuf.uDataLen = 0;
pDevice->intBuf.bInUse = false;
- pStats->tx_packets = pDevice->scStatistic.ullTsrOK;
- pStats->tx_bytes = pDevice->scStatistic.ullTxDirectedBytes +
- pDevice->scStatistic.ullTxMulticastBytes +
- pDevice->scStatistic.ullTxBroadcastBytes;
- pStats->tx_errors = pDevice->scStatistic.dwTsrErr;
- pStats->tx_dropped = pDevice->scStatistic.dwTsrErr;
+ pStats->tx_errors = pDevice->wstats.discard.retries;
+ pStats->tx_dropped = pDevice->wstats.discard.retries;
}
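
The four TSR blocks added above repeat the same timeout test against TSR_TMO | TSR_RETRYTMO before deciding between a discarded retry and a successful transmission. A minimal standalone sketch of how that test could be folded into one helper is shown below; the helper name, the struct, and the bit values are assumptions for illustration and are not part of this patch.

#include <stdint.h>

/* Assumed bit values; the real TSR_TMO / TSR_RETRYTMO macros live in the
 * driver headers and are not shown in this diff. */
#define TSR_TMO      0x08
#define TSR_RETRYTMO 0x10

struct vnt_tx_counts {
	unsigned long retries;    /* maps to wstats.discard.retries */
	unsigned long tx_packets; /* maps to net_device_stats.tx_packets */
};

/* Mirror of the per-TSR branch repeated for TSR0..TSR3 in the hunk above. */
static void vnt_count_tsr(struct vnt_tx_counts *c, uint8_t tsr)
{
	if (tsr & (TSR_TMO | TSR_RETRYTMO))
		c->retries++;     /* timed out or exhausted retries */
	else
		c->tx_packets++;  /* frame transmitted successfully */
}

In the patch itself the test stays open-coded once per TSR slot, which keeps the diff small and leaves the surrounding BSSvUpdateNodeTxCounter() calls untouched.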
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index 63917ab..3a68dfa 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -58,9 +58,6 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
long ldBm;
pDevice->wstats.status = pDevice->eOPMode;
- if (pDevice->scStatistic.LinkQuality > 100)
- pDevice->scStatistic.LinkQuality = 100;
- pDevice->wstats.qual.qual = (u8)pDevice->scStatistic.LinkQuality;
RFvRSSITodBm(pDevice, (u8)(pDevice->uCurrRSSI), &ldBm);
pDevice->wstats.qual.level = ldBm;
pDevice->wstats.qual.noise = 0;
@@ -68,7 +65,6 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
pDevice->wstats.discard.nwid = 0;
pDevice->wstats.discard.code = 0;
pDevice->wstats.discard.fragment = 0;
- pDevice->wstats.discard.retries = pDevice->scStatistic.dwTsrErr;
pDevice->wstats.discard.misc = 0;
pDevice->wstats.miss.beacon = 0;
return &pDevice->wstats;
@@ -1568,10 +1564,8 @@ int iwctl_siwgenie(struct net_device *dev, struct iw_request_info *info,
goto out;
}
memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN);
- if (copy_from_user(pMgmt->abyWPAIE, extra, wrq->length)) {
- ret = -EFAULT;
- goto out;
- }
+
+ memcpy(pMgmt->abyWPAIE, extra, wrq->length);
pMgmt->wWPAIELen = wrq->length;
} else {
memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN);
@@ -1597,13 +1591,11 @@ int iwctl_giwgenie(struct net_device *dev, struct iw_request_info *info,
wrq->length = 0;
if (pMgmt->wWPAIELen > 0) {
wrq->length = pMgmt->wWPAIELen;
- if (pMgmt->wWPAIELen <= space) {
- if (copy_to_user(extra, pMgmt->abyWPAIE, pMgmt->wWPAIELen)) {
- ret = -EFAULT;
- }
- } else {
+
+ if (pMgmt->wWPAIELen <= space)
+ memcpy(extra, pMgmt->abyWPAIE, pMgmt->wWPAIELen);
+ else
ret = -E2BIG;
- }
}
return ret;
}
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index aae228c..58edcae 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -67,7 +67,6 @@
#include "datarate.h"
#include "rf.h"
#include "firmware.h"
-#include "rndis.h"
#include "control.h"
#include "channel.h"
#include "int.h"
@@ -215,13 +214,12 @@ static void device_set_multi(struct net_device *dev);
static int device_close(struct net_device *dev);
static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static int device_init_registers(struct vnt_private *pDevice,
- DEVICE_INIT_TYPE InitType);
+static int device_init_registers(struct vnt_private *pDevice);
static bool device_init_defrag_cb(struct vnt_private *pDevice);
static void device_init_diversity_timer(struct vnt_private *pDevice);
static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev);
-static int ethtool_ioctl(struct net_device *dev, void *useraddr);
+static int ethtool_ioctl(struct net_device *dev, struct ifreq *);
static void device_free_tx_bufs(struct vnt_private *pDevice);
static void device_free_rx_bufs(struct vnt_private *pDevice);
static void device_free_int_bufs(struct vnt_private *pDevice);
@@ -296,343 +294,352 @@ static void device_init_diversity_timer(struct vnt_private *pDevice)
/*
* initialization of MAC & BBP registers
*/
-
-static int device_init_registers(struct vnt_private *pDevice,
- DEVICE_INIT_TYPE InitType)
+static int device_init_registers(struct vnt_private *pDevice)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_cmd_card_init *init_cmd = &pDevice->init_command;
+ struct vnt_rsp_card_init *init_rsp = &pDevice->init_response;
u8 abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
u8 abySNAP_RFC1042[ETH_ALEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
u8 abySNAP_Bridgetunnel[ETH_ALEN]
= {0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8};
u8 byAntenna;
int ii;
- CMD_CARD_INIT sInitCmd;
int ntStatus = STATUS_SUCCESS;
- RSP_CARD_INIT sInitRsp;
u8 byTmp;
u8 byCalibTXIQ = 0, byCalibTXDC = 0, byCalibRXIQ = 0;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---->INIbInitAdapter. [%d][%d]\n", InitType, pDevice->byPacketType);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---->INIbInitAdapter. [%d][%d]\n",
+ DEVICE_INIT_COLD, pDevice->byPacketType);
+
spin_lock_irq(&pDevice->lock);
- if (InitType == DEVICE_INIT_COLD) {
- memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN);
- memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN);
- memcpy(pDevice->abySNAP_Bridgetunnel,
- abySNAP_Bridgetunnel,
- ETH_ALEN);
-
- if ( !FIRMWAREbCheckVersion(pDevice) ) {
- if (FIRMWAREbDownload(pDevice) == true) {
- if (FIRMWAREbBrach2Sram(pDevice) == false) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" FIRMWAREbBrach2Sram fail \n");
- spin_unlock_irq(&pDevice->lock);
- return false;
- }
- } else {
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" FIRMWAREbDownload fail \n");
- spin_unlock_irq(&pDevice->lock);
- return false;
- }
- }
- if ( !BBbVT3184Init(pDevice) ) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" BBbVT3184Init fail \n");
- spin_unlock_irq(&pDevice->lock);
- return false;
- }
- }
+ memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN);
+ memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN);
+ memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, ETH_ALEN);
+
+ if (!FIRMWAREbCheckVersion(pDevice)) {
+ if (FIRMWAREbDownload(pDevice) == true) {
+ if (FIRMWAREbBrach2Sram(pDevice) == false) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ " FIRMWAREbBrach2Sram fail\n");
+ spin_unlock_irq(&pDevice->lock);
+ return false;
+ }
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ " FIRMWAREbDownload fail\n");
+ spin_unlock_irq(&pDevice->lock);
+ return false;
+ }
+ }
- sInitCmd.byInitClass = (u8)InitType;
- sInitCmd.bExistSWNetAddr = (u8) pDevice->bExistSWNetAddr;
- for (ii = 0; ii < 6; ii++)
- sInitCmd.bySWNetAddr[ii] = pDevice->abyCurrentNetAddr[ii];
- sInitCmd.byShortRetryLimit = pDevice->byShortRetryLimit;
- sInitCmd.byLongRetryLimit = pDevice->byLongRetryLimit;
-
- /* issue card_init command to device */
- ntStatus = CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_CARDINIT,
- 0,
- 0,
- sizeof(CMD_CARD_INIT),
- (u8 *) &(sInitCmd));
-
- if ( ntStatus != STATUS_SUCCESS ) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Issue Card init fail \n");
- spin_unlock_irq(&pDevice->lock);
- return false;
- }
- if (InitType == DEVICE_INIT_COLD) {
+ if (!BBbVT3184Init(pDevice)) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" BBbVT3184Init fail\n");
+ spin_unlock_irq(&pDevice->lock);
+ return false;
+ }
- ntStatus = CONTROLnsRequestIn(pDevice,MESSAGE_TYPE_INIT_RSP,0,0,sizeof(RSP_CARD_INIT), (u8 *) &(sInitRsp));
+ init_cmd->init_class = DEVICE_INIT_COLD;
+ init_cmd->exist_sw_net_addr = (u8) pDevice->bExistSWNetAddr;
+ for (ii = 0; ii < 6; ii++)
+ init_cmd->sw_net_addr[ii] = pDevice->abyCurrentNetAddr[ii];
+ init_cmd->short_retry_limit = pDevice->byShortRetryLimit;
+ init_cmd->long_retry_limit = pDevice->byLongRetryLimit;
+
+ /* issue card_init command to device */
+ ntStatus = CONTROLnsRequestOut(pDevice,
+ MESSAGE_TYPE_CARDINIT, 0, 0,
+ sizeof(struct vnt_cmd_card_init), (u8 *)init_cmd);
+ if (ntStatus != STATUS_SUCCESS) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Issue Card init fail\n");
+ spin_unlock_irq(&pDevice->lock);
+ return false;
+ }
- if (ntStatus != STATUS_SUCCESS) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Cardinit request in status fail!\n");
- spin_unlock_irq(&pDevice->lock);
- return false;
- }
+ ntStatus = CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_INIT_RSP, 0, 0,
+ sizeof(struct vnt_rsp_card_init), (u8 *)init_rsp);
+ if (ntStatus != STATUS_SUCCESS) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Cardinit request in status fail!\n");
+ spin_unlock_irq(&pDevice->lock);
+ return false;
+ }
/* local ID for AES functions */
- ntStatus = CONTROLnsRequestIn(pDevice,
- MESSAGE_TYPE_READ,
- MAC_REG_LOCALID,
- MESSAGE_REQUEST_MACREG,
- 1,
- &pDevice->byLocalID);
-
- if ( ntStatus != STATUS_SUCCESS ) {
- spin_unlock_irq(&pDevice->lock);
- return false;
- }
+ ntStatus = CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ,
+ MAC_REG_LOCALID, MESSAGE_REQUEST_MACREG, 1,
+ &pDevice->byLocalID);
+ if (ntStatus != STATUS_SUCCESS) {
+ spin_unlock_irq(&pDevice->lock);
+ return false;
+ }
/* do MACbSoftwareReset in MACvInitialize */
/* force CCK */
- pDevice->bCCK = true;
+ pDevice->bCCK = true;
pDevice->bProtectMode = false;
/* only used in 11g type, sync with ERP IE */
- pDevice->bNonERPPresent = false;
- pDevice->bBarkerPreambleMd = false;
- if ( pDevice->bFixRate ) {
- pDevice->wCurrentRate = (u16) pDevice->uConnectionRate;
- } else {
- if ( pDevice->byBBType == BB_TYPE_11B )
- pDevice->wCurrentRate = RATE_11M;
- else
- pDevice->wCurrentRate = RATE_54M;
- }
+ pDevice->bNonERPPresent = false;
+ pDevice->bBarkerPreambleMd = false;
+ if (pDevice->bFixRate) {
+ pDevice->wCurrentRate = (u16)pDevice->uConnectionRate;
+ } else {
+ if (pDevice->byBBType == BB_TYPE_11B)
+ pDevice->wCurrentRate = RATE_11M;
+ else
+ pDevice->wCurrentRate = RATE_54M;
+ }
- CHvInitChannelTable(pDevice);
+ CHvInitChannelTable(pDevice);
- pDevice->byTopOFDMBasicRate = RATE_24M;
- pDevice->byTopCCKBasicRate = RATE_1M;
+ pDevice->byTopOFDMBasicRate = RATE_24M;
+ pDevice->byTopCCKBasicRate = RATE_1M;
pDevice->byRevId = 0;
/* target to IF pin while programming to RF chip */
- pDevice->byCurPwr = 0xFF;
+ pDevice->byCurPwr = 0xFF;
- pDevice->byCCKPwr = pDevice->abyEEPROM[EEP_OFS_PWR_CCK];
- pDevice->byOFDMPwrG = pDevice->abyEEPROM[EEP_OFS_PWR_OFDMG];
+ pDevice->byCCKPwr = pDevice->abyEEPROM[EEP_OFS_PWR_CCK];
+ pDevice->byOFDMPwrG = pDevice->abyEEPROM[EEP_OFS_PWR_OFDMG];
/* load power table */
for (ii = 0; ii < 14; ii++) {
- pDevice->abyCCKPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_CCK_PWR_TBL];
- if (pDevice->abyCCKPwrTbl[ii] == 0)
- pDevice->abyCCKPwrTbl[ii] = pDevice->byCCKPwr;
- pDevice->abyOFDMPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_OFDM_PWR_TBL];
- if (pDevice->abyOFDMPwrTbl[ii] == 0)
- pDevice->abyOFDMPwrTbl[ii] = pDevice->byOFDMPwrG;
- }
+ pDevice->abyCCKPwrTbl[ii] =
+ pDevice->abyEEPROM[ii + EEP_OFS_CCK_PWR_TBL];
+
+ if (pDevice->abyCCKPwrTbl[ii] == 0)
+ pDevice->abyCCKPwrTbl[ii] = pDevice->byCCKPwr;
+ pDevice->abyOFDMPwrTbl[ii] =
+ pDevice->abyEEPROM[ii + EEP_OFS_OFDM_PWR_TBL];
+ if (pDevice->abyOFDMPwrTbl[ii] == 0)
+ pDevice->abyOFDMPwrTbl[ii] = pDevice->byOFDMPwrG;
+ }
/*
* original zonetype is USA, but custom zonetype is Europe,
* then need to recover 12, 13, 14 channels with 11 channel
*/
- if(((pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Japan) ||
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Europe))&&
- (pDevice->byOriginalZonetype == ZoneType_USA)) {
+ if (((pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Japan) ||
+ (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Europe)) &&
+ (pDevice->byOriginalZonetype == ZoneType_USA)) {
for (ii = 11; ii < 14; ii++) {
pDevice->abyCCKPwrTbl[ii] = pDevice->abyCCKPwrTbl[10];
pDevice->abyOFDMPwrTbl[ii] = pDevice->abyOFDMPwrTbl[10];
}
- }
+ }
- pDevice->byOFDMPwrA = 0x34; /* same as RFbMA2829SelectChannel */
+ pDevice->byOFDMPwrA = 0x34; /* same as RFbMA2829SelectChannel */
- /* load OFDM A power table */
- for (ii = 0; ii < CB_MAX_CHANNEL_5G; ii++) {
- pDevice->abyOFDMAPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_OFDMA_PWR_TBL];
- if (pDevice->abyOFDMAPwrTbl[ii] == 0)
- pDevice->abyOFDMAPwrTbl[ii] = pDevice->byOFDMPwrA;
- }
+ /* load OFDM A power table */
+ for (ii = 0; ii < CB_MAX_CHANNEL_5G; ii++) {
+ pDevice->abyOFDMAPwrTbl[ii] =
+ pDevice->abyEEPROM[ii + EEP_OFS_OFDMA_PWR_TBL];
+
+ if (pDevice->abyOFDMAPwrTbl[ii] == 0)
+ pDevice->abyOFDMAPwrTbl[ii] = pDevice->byOFDMPwrA;
+ }
- byAntenna = pDevice->abyEEPROM[EEP_OFS_ANTENNA];
- if (byAntenna & EEP_ANTINV)
- pDevice->bTxRxAntInv = true;
- else
- pDevice->bTxRxAntInv = false;
+ byAntenna = pDevice->abyEEPROM[EEP_OFS_ANTENNA];
- byAntenna &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
+ if (byAntenna & EEP_ANTINV)
+ pDevice->bTxRxAntInv = true;
+ else
+ pDevice->bTxRxAntInv = false;
+
+ byAntenna &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
if (byAntenna == 0) /* if not set default is both */
- byAntenna = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
-
- if (byAntenna == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
- pDevice->byAntennaCount = 2;
- pDevice->byTxAntennaMode = ANT_B;
- pDevice->dwTxAntennaSel = 1;
- pDevice->dwRxAntennaSel = 1;
- if (pDevice->bTxRxAntInv == true)
- pDevice->byRxAntennaMode = ANT_A;
- else
- pDevice->byRxAntennaMode = ANT_B;
-
- if (pDevice->bDiversityRegCtlON)
- pDevice->bDiversityEnable = true;
- else
- pDevice->bDiversityEnable = false;
- } else {
- pDevice->bDiversityEnable = false;
- pDevice->byAntennaCount = 1;
- pDevice->dwTxAntennaSel = 0;
- pDevice->dwRxAntennaSel = 0;
- if (byAntenna & EEP_ANTENNA_AUX) {
- pDevice->byTxAntennaMode = ANT_A;
- if (pDevice->bTxRxAntInv == true)
- pDevice->byRxAntennaMode = ANT_B;
- else
- pDevice->byRxAntennaMode = ANT_A;
- } else {
- pDevice->byTxAntennaMode = ANT_B;
- if (pDevice->bTxRxAntInv == true)
- pDevice->byRxAntennaMode = ANT_A;
- else
- pDevice->byRxAntennaMode = ANT_B;
- }
- }
- pDevice->ulDiversityNValue = 100*255;
- pDevice->ulDiversityMValue = 100*16;
- pDevice->byTMax = 1;
- pDevice->byTMax2 = 4;
- pDevice->ulSQ3TH = 0;
- pDevice->byTMax3 = 64;
+ byAntenna = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
+
+ if (byAntenna == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
+ pDevice->byAntennaCount = 2;
+ pDevice->byTxAntennaMode = ANT_B;
+ pDevice->dwTxAntennaSel = 1;
+ pDevice->dwRxAntennaSel = 1;
+
+ if (pDevice->bTxRxAntInv == true)
+ pDevice->byRxAntennaMode = ANT_A;
+ else
+ pDevice->byRxAntennaMode = ANT_B;
+
+ if (pDevice->bDiversityRegCtlON)
+ pDevice->bDiversityEnable = true;
+ else
+ pDevice->bDiversityEnable = false;
+ } else {
+ pDevice->bDiversityEnable = false;
+ pDevice->byAntennaCount = 1;
+ pDevice->dwTxAntennaSel = 0;
+ pDevice->dwRxAntennaSel = 0;
+
+ if (byAntenna & EEP_ANTENNA_AUX) {
+ pDevice->byTxAntennaMode = ANT_A;
+
+ if (pDevice->bTxRxAntInv == true)
+ pDevice->byRxAntennaMode = ANT_B;
+ else
+ pDevice->byRxAntennaMode = ANT_A;
+ } else {
+ pDevice->byTxAntennaMode = ANT_B;
+
+ if (pDevice->bTxRxAntInv == true)
+ pDevice->byRxAntennaMode = ANT_A;
+ else
+ pDevice->byRxAntennaMode = ANT_B;
+ }
+ }
+
+ pDevice->ulDiversityNValue = 100 * 255;
+ pDevice->ulDiversityMValue = 100 * 16;
+ pDevice->byTMax = 1;
+ pDevice->byTMax2 = 4;
+ pDevice->ulSQ3TH = 0;
+ pDevice->byTMax3 = 64;
/* get Auto Fall Back type */
- pDevice->byAutoFBCtrl = AUTO_FB_0;
+ pDevice->byAutoFBCtrl = AUTO_FB_0;
/* set SCAN Time */
- pDevice->uScanTime = WLAN_SCAN_MINITIME;
+ pDevice->uScanTime = WLAN_SCAN_MINITIME;
/* default Auto Mode */
/* pDevice->NetworkType = Ndis802_11Automode; */
- pDevice->eConfigPHYMode = PHY_TYPE_AUTO;
- pDevice->byBBType = BB_TYPE_11G;
+ pDevice->eConfigPHYMode = PHY_TYPE_AUTO;
+ pDevice->byBBType = BB_TYPE_11G;
/* initialize BBP registers */
- pDevice->ulTxPower = 25;
+ pDevice->ulTxPower = 25;
/* get channel range */
- pDevice->byMinChannel = 1;
- pDevice->byMaxChannel = CB_MAX_CHANNEL;
+ pDevice->byMinChannel = 1;
+ pDevice->byMaxChannel = CB_MAX_CHANNEL;
/* get RFType */
- pDevice->byRFType = sInitRsp.byRFType;
+ pDevice->byRFType = init_rsp->rf_type;
- if ((pDevice->byRFType & RF_EMU) != 0) {
+ if ((pDevice->byRFType & RF_EMU) != 0) {
/* force change RevID for VT3253 emu */
pDevice->byRevId = 0x80;
- }
+ }
/* load vt3266 calibration parameters in EEPROM */
- if (pDevice->byRFType == RF_VT3226D0) {
- if((pDevice->abyEEPROM[EEP_OFS_MAJOR_VER] == 0x1) &&
- (pDevice->abyEEPROM[EEP_OFS_MINOR_VER] >= 0x4)) {
- byCalibTXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_IQ];
- byCalibTXDC = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_DC];
- byCalibRXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_RX_IQ];
- if( (byCalibTXIQ || byCalibTXDC || byCalibRXIQ) ) {
+ if (pDevice->byRFType == RF_VT3226D0) {
+ if ((pDevice->abyEEPROM[EEP_OFS_MAJOR_VER] == 0x1) &&
+ (pDevice->abyEEPROM[EEP_OFS_MINOR_VER] >= 0x4)) {
+
+ byCalibTXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_IQ];
+ byCalibTXDC = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_DC];
+ byCalibRXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_RX_IQ];
+ if (byCalibTXIQ || byCalibTXDC || byCalibRXIQ) {
/* CR255, enable TX/RX IQ and DC compensation mode */
- ControlvWriteByte(pDevice,
- MESSAGE_REQUEST_BBREG,
- 0xFF,
- 0x03);
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xff,
+ 0x03);
/* CR251, TX I/Q Imbalance Calibration */
- ControlvWriteByte(pDevice,
- MESSAGE_REQUEST_BBREG,
- 0xFB,
- byCalibTXIQ);
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xfb,
+ byCalibTXIQ);
/* CR252, TX DC-Offset Calibration */
- ControlvWriteByte(pDevice,
- MESSAGE_REQUEST_BBREG,
- 0xFC,
- byCalibTXDC);
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xfC,
+ byCalibTXDC);
/* CR253, RX I/Q Imbalance Calibration */
- ControlvWriteByte(pDevice,
- MESSAGE_REQUEST_BBREG,
- 0xFD,
- byCalibRXIQ);
- } else {
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xfd,
+ byCalibRXIQ);
+ } else {
/* CR255, turn off BB Calibration compensation */
- ControlvWriteByte(pDevice,
- MESSAGE_REQUEST_BBREG,
- 0xFF,
- 0x0);
- }
- }
- }
- pMgmt->eScanType = WMAC_SCAN_PASSIVE;
- pMgmt->uCurrChannel = pDevice->uChannel;
- pMgmt->uIBSSChannel = pDevice->uChannel;
- CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel);
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xff,
+ 0x0);
+ }
+ }
+ }
+
+ pMgmt->eScanType = WMAC_SCAN_PASSIVE;
+ pMgmt->uCurrChannel = pDevice->uChannel;
+ pMgmt->uIBSSChannel = pDevice->uChannel;
+ CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel);
/* get permanent network address */
- memcpy(pDevice->abyPermanentNetAddr,&(sInitRsp.byNetAddr[0]),6);
+ memcpy(pDevice->abyPermanentNetAddr, init_rsp->net_addr, 6);
memcpy(pDevice->abyCurrentNetAddr,
- pDevice->abyPermanentNetAddr,
- ETH_ALEN);
+ pDevice->abyPermanentNetAddr, ETH_ALEN);
/* if exist SW network address, use it */
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Network address = %pM\n",
pDevice->abyCurrentNetAddr);
- }
- /*
- * set BB and packet type at the same time
- * set Short Slot Time, xIFS, and RSPINF
- */
- if (pDevice->byBBType == BB_TYPE_11A) {
- CARDbAddBasicRate(pDevice, RATE_6M);
- pDevice->bShortSlotTime = true;
- } else {
- CARDbAddBasicRate(pDevice, RATE_1M);
- pDevice->bShortSlotTime = false;
- }
- BBvSetShortSlotTime(pDevice);
- CARDvSetBSSMode(pDevice);
+ /*
+ * set BB and packet type at the same time
+ * set Short Slot Time, xIFS, and RSPINF
+ */
+ if (pDevice->byBBType == BB_TYPE_11A) {
+ CARDbAddBasicRate(pDevice, RATE_6M);
+ pDevice->bShortSlotTime = true;
+ } else {
+ CARDbAddBasicRate(pDevice, RATE_1M);
+ pDevice->bShortSlotTime = false;
+ }
- if (pDevice->bUpdateBBVGA) {
- pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
- pDevice->byBBVGANew = pDevice->byBBVGACurrent;
- BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
- }
+ BBvSetShortSlotTime(pDevice);
+ CARDvSetBSSMode(pDevice);
- pDevice->byRadioCtl = pDevice->abyEEPROM[EEP_OFS_RADIOCTL];
- pDevice->bHWRadioOff = false;
- if ( (pDevice->byRadioCtl & EEP_RADIOCTL_ENABLE) != 0 ) {
- ntStatus = CONTROLnsRequestIn(pDevice,
- MESSAGE_TYPE_READ,
- MAC_REG_GPIOCTL1,
- MESSAGE_REQUEST_MACREG,
- 1,
- &byTmp);
-
- if ( ntStatus != STATUS_SUCCESS ) {
- spin_unlock_irq(&pDevice->lock);
- return false;
- }
- if ( (byTmp & GPIO3_DATA) == 0 ) {
- pDevice->bHWRadioOff = true;
- MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD);
- } else {
- MACvRegBitsOff(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD);
- pDevice->bHWRadioOff = false;
- }
+ if (pDevice->bUpdateBBVGA) {
+ pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
+ pDevice->byBBVGANew = pDevice->byBBVGACurrent;
- }
+ BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
+ }
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_TMLEN,0x38);
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL0,0x01);
+ pDevice->byRadioCtl = pDevice->abyEEPROM[EEP_OFS_RADIOCTL];
+ pDevice->bHWRadioOff = false;
- if ((pDevice->bHWRadioOff == true) || (pDevice->bRadioControlOff == true)) {
- CARDbRadioPowerOff(pDevice);
- } else {
- CARDbRadioPowerOn(pDevice);
- }
+ if ((pDevice->byRadioCtl & EEP_RADIOCTL_ENABLE) != 0) {
+ ntStatus = CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ,
+ MAC_REG_GPIOCTL1, MESSAGE_REQUEST_MACREG, 1, &byTmp);
- spin_unlock_irq(&pDevice->lock);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----INIbInitAdapter Exit\n");
- return true;
+ if (ntStatus != STATUS_SUCCESS) {
+ spin_unlock_irq(&pDevice->lock);
+ return false;
+ }
+
+ if ((byTmp & GPIO3_DATA) == 0) {
+ pDevice->bHWRadioOff = true;
+ MACvRegBitsOn(pDevice, MAC_REG_GPIOCTL1, GPIO3_INTMD);
+ } else {
+ MACvRegBitsOff(pDevice, MAC_REG_GPIOCTL1, GPIO3_INTMD);
+ pDevice->bHWRadioOff = false;
+ }
+
+ }
+
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG,
+ MAC_REG_PAPEDELAY, LEDSTS_TMLEN, 0x38);
+
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG,
+ MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
+
+ MACvRegBitsOn(pDevice, MAC_REG_GPIOCTL0, 0x01);
+
+ if ((pDevice->bHWRadioOff == true) ||
+ (pDevice->bRadioControlOff == true)) {
+ CARDbRadioPowerOff(pDevice);
+ } else {
+ CARDbRadioPowerOn(pDevice);
+ }
+
+
+ spin_unlock_irq(&pDevice->lock);
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----INIbInitAdapter Exit\n");
+
+ return true;
}
#ifdef CONFIG_PM /* Minimal support for suspend and resume */
@@ -962,10 +969,10 @@ static int device_open(struct net_device *dev)
/* read config file */
Read_config_file(pDevice);
- if (device_init_registers(pDevice, DEVICE_INIT_COLD) == false) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " init register fail\n");
- goto free_all;
- }
+ if (device_init_registers(pDevice) == false) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " init register fail\n");
+ goto free_all;
+ }
device_set_multi(pDevice->dev);
@@ -1187,22 +1194,6 @@ out:
return NETDEV_TX_OK;
}
-static unsigned const ethernet_polynomial = 0x04c11db7U;
-static inline u32 ether_crc(int length, unsigned char *data)
-{
- int crc = -1;
-
- while(--length >= 0) {
- unsigned char current_octet = *data++;
- int bit;
- for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
- crc = (crc << 1) ^
- ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
- }
- }
- return crc;
-}
-
/* find out the start position of str2 from str1 */
static unsigned char *kstrstr(const unsigned char *str1,
const unsigned char *str2) {
@@ -1448,18 +1439,18 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
break;
case SIOCETHTOOL:
- return ethtool_ioctl(dev, (void *) rq->ifr_data);
+ return ethtool_ioctl(dev, rq);
}
return rc;
}
-static int ethtool_ioctl(struct net_device *dev, void *useraddr)
+static int ethtool_ioctl(struct net_device *dev, struct ifreq *rq)
{
u32 ethcmd;
- if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
+ if (copy_from_user(&ethcmd, rq->ifr_data, sizeof(ethcmd)))
return -EFAULT;
switch (ethcmd) {
@@ -1467,7 +1458,7 @@ static int ethtool_ioctl(struct net_device *dev, void *useraddr)
struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
strncpy(info.driver, DEVICE_NAME, sizeof(info.driver)-1);
strncpy(info.version, DEVICE_VERSION, sizeof(info.version)-1);
- if (copy_to_user(useraddr, &info, sizeof(info)))
+ if (copy_to_user(rq->ifr_data, &info, sizeof(info)))
return -EFAULT;
return 0;
}
diff --git a/drivers/staging/vt6656/mib.c b/drivers/staging/vt6656/mib.c
deleted file mode 100644
index 12333cd..0000000
--- a/drivers/staging/vt6656/mib.c
+++ /dev/null
@@ -1,489 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: mib.c
- *
- * Purpose: Implement MIB Data Structure
- *
- * Author: Tevin Chen
- *
- * Date: May 21, 1996
- *
- * Functions:
- * STAvUpdateIstStatCounter - Update ISR statistic counter
- * STAvUpdateRDStatCounter - Update Rx statistic counter
- * STAvUpdateTDStatCounter - Update Tx statistic counter
- * STAvUpdateTDStatCounterEx - Update Tx statistic counter and copy tx data
- * STAvUpdate802_11Counter - Update 802.11 mib counter
- *
- * Revision History:
- *
- */
-
-#include "mac.h"
-#include "tether.h"
-#include "mib.h"
-#include "wctl.h"
-#include "baseband.h"
-
-static int msglevel =MSG_LEVEL_INFO;
-
-/*
- * Description: Update Isr Statistic Counter
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * wisr - Interrupt status
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void STAvUpdateIsrStatCounter (PSStatCounter pStatistic, u8 byIsr0, u8 byIsr1)
-{
- /**********************/
- /* ABNORMAL interrupt */
- /**********************/
- // not any IMR bit invoke irq
- if (byIsr0 == 0) {
- pStatistic->ISRStat.dwIsrUnknown++;
- return;
- }
-
- if (byIsr0 & ISR_ACTX) // ISR, bit0
- pStatistic->ISRStat.dwIsrTx0OK++; // TXDMA0 successful
-
- if (byIsr0 & ISR_BNTX) // ISR, bit2
- pStatistic->ISRStat.dwIsrBeaconTxOK++; // BeaconTx successful
-
- if (byIsr0 & ISR_RXDMA0) // ISR, bit3
- pStatistic->ISRStat.dwIsrRx0OK++; // Rx0 successful
-
- if (byIsr0 & ISR_TBTT) // ISR, bit4
- pStatistic->ISRStat.dwIsrTBTTInt++; // TBTT successful
-
- if (byIsr0 & ISR_SOFTTIMER) // ISR, bit6
- pStatistic->ISRStat.dwIsrSTIMERInt++;
-
- if (byIsr0 & ISR_WATCHDOG) // ISR, bit7
- pStatistic->ISRStat.dwIsrWatchDog++;
-
- if (byIsr1 & ISR_FETALERR) // ISR, bit8
- pStatistic->ISRStat.dwIsrUnrecoverableError++;
-
- if (byIsr1 & ISR_SOFTINT) // ISR, bit9
- pStatistic->ISRStat.dwIsrSoftInterrupt++; // software interrupt
-
- if (byIsr1 & ISR_MIBNEARFULL) // ISR, bit10
- pStatistic->ISRStat.dwIsrMIBNearfull++;
-
- if (byIsr1 & ISR_RXNOBUF) // ISR, bit11
- pStatistic->ISRStat.dwIsrRxNoBuf++; // Rx No Buff
-
-}
-
-/*
- * Description: Update Rx Statistic Counter
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * byRSR - Rx Status
- * byNewRSR - Rx Status
- * pbyBuffer - Rx Buffer
- * cbFrameLength - Rx Length
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void STAvUpdateRDStatCounter(PSStatCounter pStatistic,
- u8 byRSR, u8 byNewRSR,
- u8 byRxSts, u8 byRxRate,
- u8 * pbyBuffer, unsigned int cbFrameLength)
-{
- /* need change */
- struct ieee80211_hdr *pHeader = (struct ieee80211_hdr *)pbyBuffer;
-
- if (byRSR & RSR_ADDROK)
- pStatistic->dwRsrADDROk++;
- if (byRSR & RSR_CRCOK) {
- pStatistic->dwRsrCRCOk++;
- pStatistic->ullRsrOK++;
-
- if (cbFrameLength >= ETH_ALEN) {
- /* update counters in case of successful transmission */
- if (byRSR & RSR_ADDRBROAD) {
- pStatistic->ullRxBroadcastFrames++;
- pStatistic->ullRxBroadcastBytes +=
- (unsigned long long) cbFrameLength;
- }
- else if (byRSR & RSR_ADDRMULTI) {
- pStatistic->ullRxMulticastFrames++;
- pStatistic->ullRxMulticastBytes +=
- (unsigned long long) cbFrameLength;
- }
- else {
- pStatistic->ullRxDirectedFrames++;
- pStatistic->ullRxDirectedBytes +=
- (unsigned long long) cbFrameLength;
- }
- }
- }
-
- if(byRxRate==22) {
- pStatistic->CustomStat.ullRsr11M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr11MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "11M: ALL[%d], OK[%d]:[%02x]\n",
- (signed int) pStatistic->CustomStat.ullRsr11M,
- (signed int) pStatistic->CustomStat.ullRsr11MCRCOk, byRSR);
- }
- else if(byRxRate==11) {
- pStatistic->CustomStat.ullRsr5M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr5MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 5M: ALL[%d], OK[%d]:[%02x]\n",
- (signed int) pStatistic->CustomStat.ullRsr5M,
- (signed int) pStatistic->CustomStat.ullRsr5MCRCOk, byRSR);
- }
- else if(byRxRate==4) {
- pStatistic->CustomStat.ullRsr2M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr2MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 2M: ALL[%d], OK[%d]:[%02x]\n",
- (signed int) pStatistic->CustomStat.ullRsr2M,
- (signed int) pStatistic->CustomStat.ullRsr2MCRCOk, byRSR);
- }
- else if(byRxRate==2){
- pStatistic->CustomStat.ullRsr1M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr1MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 1M: ALL[%d], OK[%d]:[%02x]\n",
- (signed int) pStatistic->CustomStat.ullRsr1M,
- (signed int) pStatistic->CustomStat.ullRsr1MCRCOk, byRSR);
- }
- else if(byRxRate==12){
- pStatistic->CustomStat.ullRsr6M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr6MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 6M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr6M,
- (signed int) pStatistic->CustomStat.ullRsr6MCRCOk);
- }
- else if(byRxRate==18){
- pStatistic->CustomStat.ullRsr9M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr9MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 9M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr9M,
- (signed int) pStatistic->CustomStat.ullRsr9MCRCOk);
- }
- else if(byRxRate==24){
- pStatistic->CustomStat.ullRsr12M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr12MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "12M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr12M,
- (signed int) pStatistic->CustomStat.ullRsr12MCRCOk);
- }
- else if(byRxRate==36){
- pStatistic->CustomStat.ullRsr18M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr18MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "18M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr18M,
- (signed int) pStatistic->CustomStat.ullRsr18MCRCOk);
- }
- else if(byRxRate==48){
- pStatistic->CustomStat.ullRsr24M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr24MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "24M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr24M,
- (signed int) pStatistic->CustomStat.ullRsr24MCRCOk);
- }
- else if(byRxRate==72){
- pStatistic->CustomStat.ullRsr36M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr36MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "36M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr36M,
- (signed int) pStatistic->CustomStat.ullRsr36MCRCOk);
- }
- else if(byRxRate==96){
- pStatistic->CustomStat.ullRsr48M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr48MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "48M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr48M,
- (signed int) pStatistic->CustomStat.ullRsr48MCRCOk);
- }
- else if(byRxRate==108){
- pStatistic->CustomStat.ullRsr54M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr54MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "54M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr54M,
- (signed int) pStatistic->CustomStat.ullRsr54MCRCOk);
- }
- else {
- DBG_PRT(MSG_LEVEL_DEBUG,
- KERN_INFO "Unknown: Total[%d], CRCOK[%d]\n",
- (signed int) pStatistic->dwRsrRxPacket+1,
- (signed int)pStatistic->dwRsrCRCOk);
- }
-
- if (byRSR & RSR_BSSIDOK)
- pStatistic->dwRsrBSSIDOk++;
-
- if (byRSR & RSR_BCNSSIDOK)
- pStatistic->dwRsrBCNSSIDOk++;
- if (byRSR & RSR_IVLDLEN) //invalid len (> 2312 byte)
- pStatistic->dwRsrLENErr++;
- if (byRSR & RSR_IVLDTYP) //invalid packet type
- pStatistic->dwRsrTYPErr++;
- if ((byRSR & (RSR_IVLDTYP | RSR_IVLDLEN)) || !(byRSR & RSR_CRCOK))
- pStatistic->dwRsrErr++;
-
- if (byNewRSR & NEWRSR_DECRYPTOK)
- pStatistic->dwNewRsrDECRYPTOK++;
- if (byNewRSR & NEWRSR_CFPIND)
- pStatistic->dwNewRsrCFP++;
- if (byNewRSR & NEWRSR_HWUTSF)
- pStatistic->dwNewRsrUTSF++;
- if (byNewRSR & NEWRSR_BCNHITAID)
- pStatistic->dwNewRsrHITAID++;
- if (byNewRSR & NEWRSR_BCNHITAID0)
- pStatistic->dwNewRsrHITAID0++;
-
- // increase rx packet count
- pStatistic->dwRsrRxPacket++;
- pStatistic->dwRsrRxOctet += cbFrameLength;
-
- if (IS_TYPE_DATA(pbyBuffer)) {
- pStatistic->dwRsrRxData++;
- } else if (IS_TYPE_MGMT(pbyBuffer)){
- pStatistic->dwRsrRxManage++;
- } else if (IS_TYPE_CONTROL(pbyBuffer)){
- pStatistic->dwRsrRxControl++;
- }
-
- if (byRSR & RSR_ADDRBROAD)
- pStatistic->dwRsrBroadcast++;
- else if (byRSR & RSR_ADDRMULTI)
- pStatistic->dwRsrMulticast++;
- else
- pStatistic->dwRsrDirected++;
-
- if (WLAN_GET_FC_MOREFRAG(pHeader->frame_control))
- pStatistic->dwRsrRxFragment++;
-
- if (cbFrameLength < ETH_ZLEN + 4) {
- pStatistic->dwRsrRunt++;
- } else if (cbFrameLength == ETH_ZLEN + 4) {
- pStatistic->dwRsrRxFrmLen64++;
- }
- else if ((65 <= cbFrameLength) && (cbFrameLength <= 127)) {
- pStatistic->dwRsrRxFrmLen65_127++;
- }
- else if ((128 <= cbFrameLength) && (cbFrameLength <= 255)) {
- pStatistic->dwRsrRxFrmLen128_255++;
- }
- else if ((256 <= cbFrameLength) && (cbFrameLength <= 511)) {
- pStatistic->dwRsrRxFrmLen256_511++;
- }
- else if ((512 <= cbFrameLength) && (cbFrameLength <= 1023)) {
- pStatistic->dwRsrRxFrmLen512_1023++;
- } else if ((1024 <= cbFrameLength) &&
- (cbFrameLength <= ETH_FRAME_LEN + 4)) {
- pStatistic->dwRsrRxFrmLen1024_1518++;
- } else if (cbFrameLength > ETH_FRAME_LEN + 4) {
- pStatistic->dwRsrLong++;
- }
-}
-
-/*
- * Description: Update Tx Statistic Counter
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * byTSR0 - Tx Status
- * byTSR1 - Tx Status
- * pbyBuffer - Tx Buffer
- * cbFrameLength - Tx Length
- * uIdx - Index of Tx DMA
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void
-STAvUpdateTDStatCounter (
- PSStatCounter pStatistic,
- u8 byPktNum,
- u8 byRate,
- u8 byTSR
- )
-{
- u8 byRetyCnt;
- // increase tx packet count
- pStatistic->dwTsrTxPacket++;
-
- byRetyCnt = (byTSR & 0xF0) >> 4;
- if (byRetyCnt != 0) {
- pStatistic->dwTsrRetry++;
- pStatistic->dwTsrTotalRetry += byRetyCnt;
- pStatistic->dwTxFail[byRate]+= byRetyCnt;
- pStatistic->dwTxFail[MAX_RATE] += byRetyCnt;
-
- if ( byRetyCnt == 0x1)
- pStatistic->dwTsrOnceRetry++;
- else
- pStatistic->dwTsrMoreThanOnceRetry++;
-
- if (byRetyCnt <= 8)
- pStatistic->dwTxRetryCount[byRetyCnt-1]++;
-
- }
- if ( !(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
-
- if (byRetyCnt < 2)
- pStatistic->TxNoRetryOkCount ++;
- else
- pStatistic->TxRetryOkCount ++;
-
- pStatistic->ullTsrOK++;
- pStatistic->CustomStat.ullTsrAllOK++;
- // update counters in case that successful transmit
- pStatistic->dwTxOk[byRate]++;
- pStatistic->dwTxOk[MAX_RATE]++;
-
- if ( pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni == TX_PKT_BROAD ) {
- pStatistic->ullTxBroadcastFrames++;
- pStatistic->ullTxBroadcastBytes += pStatistic->abyTxPktInfo[byPktNum].wLength;
- } else if ( pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni == TX_PKT_MULTI ) {
- pStatistic->ullTxMulticastFrames++;
- pStatistic->ullTxMulticastBytes += pStatistic->abyTxPktInfo[byPktNum].wLength;
- } else if ( pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni == TX_PKT_UNI ) {
- pStatistic->ullTxDirectedFrames++;
- pStatistic->ullTxDirectedBytes += pStatistic->abyTxPktInfo[byPktNum].wLength;
- }
- }
- else {
-
- pStatistic->TxFailCount ++;
-
- pStatistic->dwTsrErr++;
- if (byTSR & TSR_RETRYTMO)
- pStatistic->dwTsrRetryTimeout++;
- if (byTSR & TSR_TMO)
- pStatistic->dwTsrTransmitTimeout++;
- }
-
- if ( pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni == TX_PKT_BROAD ) {
- pStatistic->dwTsrBroadcast++;
- } else if ( pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni == TX_PKT_MULTI ) {
- pStatistic->dwTsrMulticast++;
- } else if ( pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni == TX_PKT_UNI ) {
- pStatistic->dwTsrDirected++;
- }
-}
-
-/*
- * Description: Update 802.11 mib counter
- *
- * Parameters:
- * In:
- * p802_11Counter - Pointer to 802.11 mib counter
- * pStatistic - Pointer to Statistic Counter Data Structure
- * dwCounter - hardware counter for 802.11 mib
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void
-STAvUpdate802_11Counter(
- PSDot11Counters p802_11Counter,
- PSStatCounter pStatistic,
- u8 byRTSSuccess,
- u8 byRTSFail,
- u8 byACKFail,
- u8 byFCSErr
- )
-{
- //p802_11Counter->TransmittedFragmentCount
- p802_11Counter->MulticastTransmittedFrameCount =
- (unsigned long long) (pStatistic->dwTsrBroadcast +
- pStatistic->dwTsrMulticast);
- p802_11Counter->FailedCount = (unsigned long long) (pStatistic->dwTsrErr);
- p802_11Counter->RetryCount = (unsigned long long) (pStatistic->dwTsrRetry);
- p802_11Counter->MultipleRetryCount =
- (unsigned long long) (pStatistic->dwTsrMoreThanOnceRetry);
- //p802_11Counter->FrameDuplicateCount
- p802_11Counter->RTSSuccessCount += (unsigned long long) byRTSSuccess;
- p802_11Counter->RTSFailureCount += (unsigned long long) byRTSFail;
- p802_11Counter->ACKFailureCount += (unsigned long long) byACKFail;
- p802_11Counter->FCSErrorCount += (unsigned long long) byFCSErr;
- //p802_11Counter->ReceivedFragmentCount
- p802_11Counter->MulticastReceivedFrameCount =
- (unsigned long long) (pStatistic->dwRsrBroadcast +
- pStatistic->dwRsrMulticast);
-}
-
-/*
- * Description: Clear 802.11 mib counter
- *
- * Parameters:
- * In:
- * pUsbCounter - Pointer to USB mib counter
- * ntStatus - URB status
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-
-void STAvUpdateUSBCounter(PSUSBCounter pUsbCounter, int ntStatus)
-{
-
-// if ( ntStatus == USBD_STATUS_CRC ) {
- pUsbCounter->dwCrc++;
-// }
-
-}
diff --git a/drivers/staging/vt6656/mib.h b/drivers/staging/vt6656/mib.h
deleted file mode 100644
index 3537532..0000000
--- a/drivers/staging/vt6656/mib.h
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: mib.h
- *
- * Purpose: Implement MIB Data Structure
- *
- * Author: Tevin Chen
- *
- * Date: May 21, 1996
- *
- */
-
-#ifndef __MIB_H__
-#define __MIB_H__
-
-#include "tether.h"
-#include "desc.h"
-
-//
-// USB counter
-//
-typedef struct tagSUSBCounter {
- u32 dwCrc;
-
-} SUSBCounter, *PSUSBCounter;
-
-//
-// 802.11 counter
-//
-
-typedef struct tagSDot11Counters {
- /* unsigned long Length; // Length of structure */
- unsigned long long TransmittedFragmentCount;
- unsigned long long MulticastTransmittedFrameCount;
- unsigned long long FailedCount;
- unsigned long long RetryCount;
- unsigned long long MultipleRetryCount;
- unsigned long long RTSSuccessCount;
- unsigned long long RTSFailureCount;
- unsigned long long ACKFailureCount;
- unsigned long long FrameDuplicateCount;
- unsigned long long ReceivedFragmentCount;
- unsigned long long MulticastReceivedFrameCount;
- unsigned long long FCSErrorCount;
- unsigned long long TKIPLocalMICFailures;
- unsigned long long TKIPRemoteMICFailures;
- unsigned long long TKIPICVErrors;
- unsigned long long TKIPReplays;
- unsigned long long CCMPFormatErrors;
- unsigned long long CCMPReplays;
- unsigned long long CCMPDecryptErrors;
- unsigned long long FourWayHandshakeFailures;
- /*
- * unsigned long long WEPUndecryptableCount;
- * unsigned long long WEPICVErrorCount;
- * unsigned long long DecryptSuccessCount;
- * unsigned long long DecryptFailureCount;
- */
-} SDot11Counters, *PSDot11Counters;
-
-//
-// MIB2 counter
-//
-typedef struct tagSMib2Counter {
- signed long ifIndex;
- char ifDescr[256]; // max size 255 plus zero ending
- // e.g. "interface 1"
- signed long ifType;
- signed long ifMtu;
- u32 ifSpeed;
- u8 ifPhysAddress[ETH_ALEN];
- signed long ifAdminStatus;
- signed long ifOperStatus;
- u32 ifLastChange;
- u32 ifInOctets;
- u32 ifInUcastPkts;
- u32 ifInNUcastPkts;
- u32 ifInDiscards;
- u32 ifInErrors;
- u32 ifInUnknownProtos;
- u32 ifOutOctets;
- u32 ifOutUcastPkts;
- u32 ifOutNUcastPkts;
- u32 ifOutDiscards;
- u32 ifOutErrors;
- u32 ifOutQLen;
- u32 ifSpecific;
-} SMib2Counter, *PSMib2Counter;
-
-// Value in the ifType entry
-#define WIRELESSLANIEEE80211b 6 //
-
-// Value in the ifAdminStatus/ifOperStatus entry
-#define UP 1 //
-#define DOWN 2 //
-#define TESTING 3 //
-
-//
-// RMON counter
-//
-typedef struct tagSRmonCounter {
- signed long etherStatsIndex;
- u32 etherStatsDataSource;
- u32 etherStatsDropEvents;
- u32 etherStatsOctets;
- u32 etherStatsPkts;
- u32 etherStatsBroadcastPkts;
- u32 etherStatsMulticastPkts;
- u32 etherStatsCRCAlignErrors;
- u32 etherStatsUndersizePkts;
- u32 etherStatsOversizePkts;
- u32 etherStatsFragments;
- u32 etherStatsJabbers;
- u32 etherStatsCollisions;
- u32 etherStatsPkt64Octets;
- u32 etherStatsPkt65to127Octets;
- u32 etherStatsPkt128to255Octets;
- u32 etherStatsPkt256to511Octets;
- u32 etherStatsPkt512to1023Octets;
- u32 etherStatsPkt1024to1518Octets;
- u32 etherStatsOwners;
- u32 etherStatsStatus;
-} SRmonCounter, *PSRmonCounter;
-
-//
-// Custom counter
-//
-typedef struct tagSCustomCounters {
- unsigned long Length;
-
- unsigned long long ullTsrAllOK;
-
- unsigned long long ullRsr11M;
- unsigned long long ullRsr5M;
- unsigned long long ullRsr2M;
- unsigned long long ullRsr1M;
-
- unsigned long long ullRsr11MCRCOk;
- unsigned long long ullRsr5MCRCOk;
- unsigned long long ullRsr2MCRCOk;
- unsigned long long ullRsr1MCRCOk;
-
- unsigned long long ullRsr54M;
- unsigned long long ullRsr48M;
- unsigned long long ullRsr36M;
- unsigned long long ullRsr24M;
- unsigned long long ullRsr18M;
- unsigned long long ullRsr12M;
- unsigned long long ullRsr9M;
- unsigned long long ullRsr6M;
-
- unsigned long long ullRsr54MCRCOk;
- unsigned long long ullRsr48MCRCOk;
- unsigned long long ullRsr36MCRCOk;
- unsigned long long ullRsr24MCRCOk;
- unsigned long long ullRsr18MCRCOk;
- unsigned long long ullRsr12MCRCOk;
- unsigned long long ullRsr9MCRCOk;
- unsigned long long ullRsr6MCRCOk;
-
-} SCustomCounters, *PSCustomCounters;
-
-//
-// Custom counter
-//
-typedef struct tagSISRCounters {
- unsigned long Length;
-
- u32 dwIsrTx0OK;
- u32 dwIsrAC0TxOK;
- u32 dwIsrBeaconTxOK;
- u32 dwIsrRx0OK;
- u32 dwIsrTBTTInt;
- u32 dwIsrSTIMERInt;
- u32 dwIsrWatchDog;
- u32 dwIsrUnrecoverableError;
- u32 dwIsrSoftInterrupt;
- u32 dwIsrMIBNearfull;
- u32 dwIsrRxNoBuf;
-
- u32 dwIsrUnknown; // unknown interrupt count
-
- u32 dwIsrRx1OK;
- u32 dwIsrATIMTxOK;
- u32 dwIsrSYNCTxOK;
- u32 dwIsrCFPEnd;
- u32 dwIsrATIMEnd;
- u32 dwIsrSYNCFlushOK;
- u32 dwIsrSTIMER1Int;
- /////////////////////////////////////
-} SISRCounters, *PSISRCounters;
-
-// Value in the etherStatsStatus entry
-#define VALID 1 //
-#define CREATE_REQUEST 2 //
-#define UNDER_CREATION 3 //
-#define INVALID 4 //
-
-//
-// Tx packet information
-//
-typedef struct tagSTxPktInfo {
- u8 byBroadMultiUni;
- u16 wLength;
- u16 wFIFOCtl;
- u8 abyDestAddr[ETH_ALEN];
-} STxPktInfo, *PSTxPktInfo;
-
-#define MAX_RATE 12
-//
-// statistic counter
-//
-typedef struct tagSStatCounter {
- //
- // ISR status count
- //
-
- SISRCounters ISRStat;
-
- // RSR status count
- //
- u32 dwRsrFrmAlgnErr;
- u32 dwRsrErr;
- u32 dwRsrCRCErr;
- u32 dwRsrCRCOk;
- u32 dwRsrBSSIDOk;
- u32 dwRsrADDROk;
- u32 dwRsrBCNSSIDOk;
- u32 dwRsrLENErr;
- u32 dwRsrTYPErr;
-
- u32 dwNewRsrDECRYPTOK;
- u32 dwNewRsrCFP;
- u32 dwNewRsrUTSF;
- u32 dwNewRsrHITAID;
- u32 dwNewRsrHITAID0;
-
- u32 dwRsrLong;
- u32 dwRsrRunt;
-
- u32 dwRsrRxControl;
- u32 dwRsrRxData;
- u32 dwRsrRxManage;
-
- u32 dwRsrRxPacket;
- u32 dwRsrRxOctet;
- u32 dwRsrBroadcast;
- u32 dwRsrMulticast;
- u32 dwRsrDirected;
- // 64-bit OID
- unsigned long long ullRsrOK;
-
- // for some optional OIDs (64 bits) and DMI support
- unsigned long long ullRxBroadcastBytes;
- unsigned long long ullRxMulticastBytes;
- unsigned long long ullRxDirectedBytes;
- unsigned long long ullRxBroadcastFrames;
- unsigned long long ullRxMulticastFrames;
- unsigned long long ullRxDirectedFrames;
-
- u32 dwRsrRxFragment;
- u32 dwRsrRxFrmLen64;
- u32 dwRsrRxFrmLen65_127;
- u32 dwRsrRxFrmLen128_255;
- u32 dwRsrRxFrmLen256_511;
- u32 dwRsrRxFrmLen512_1023;
- u32 dwRsrRxFrmLen1024_1518;
-
- // TSR status count
- //
- u32 dwTsrTotalRetry; // total collision retry count
- u32 dwTsrOnceRetry; // this packet only occur one collision
- u32 dwTsrMoreThanOnceRetry; // this packet occur more than one collision
- u32 dwTsrRetry; // this packet has ever occur collision,
- // that is (dwTsrOnceCollision0 + dwTsrMoreThanOnceCollision0)
- u32 dwTsrACKData;
- u32 dwTsrErr;
- u32 dwAllTsrOK;
- u32 dwTsrRetryTimeout;
- u32 dwTsrTransmitTimeout;
-
- u32 dwTsrTxPacket;
- u32 dwTsrTxOctet;
- u32 dwTsrBroadcast;
- u32 dwTsrMulticast;
- u32 dwTsrDirected;
-
- // RD/TD count
- u32 dwCntRxFrmLength;
- u32 dwCntTxBufLength;
-
- u8 abyCntRxPattern[16];
- u8 abyCntTxPattern[16];
-
- // Software check....
- u32 dwCntRxDataErr; // rx buffer data software compare CRC err count
- u32 dwCntDecryptErr; // rx buffer data software compare CRC err count
- u32 dwCntRxICVErr; // rx buffer data software compare CRC err count
-
- // 64-bit OID
- unsigned long long ullTsrOK;
-
- // for some optional OIDs (64 bits) and DMI support
- unsigned long long ullTxBroadcastFrames;
- unsigned long long ullTxMulticastFrames;
- unsigned long long ullTxDirectedFrames;
- unsigned long long ullTxBroadcastBytes;
- unsigned long long ullTxMulticastBytes;
- unsigned long long ullTxDirectedBytes;
-
- // for autorate
- u32 dwTxOk[MAX_RATE+1];
- u32 dwTxFail[MAX_RATE+1];
- u32 dwTxRetryCount[8];
-
- STxPktInfo abyTxPktInfo[16];
-
- SUSBCounter USB_EP0Stat;
- SUSBCounter USB_BulkInStat;
- SUSBCounter USB_BulkOutStat;
- SUSBCounter USB_InterruptStat;
-
- SCustomCounters CustomStat;
-
- //Tx count:
- unsigned long TxNoRetryOkCount; /* success tx no retry ! */
- unsigned long TxRetryOkCount; /* success tx but retry ! */
- unsigned long TxFailCount; /* fail tx ? */
- //Rx count:
- unsigned long RxOkCnt; /* success rx ! */
- unsigned long RxFcsErrCnt; /* fail rx ? */
- //statistic
- unsigned long SignalStren;
- unsigned long LinkQuality;
-
-} SStatCounter, *PSStatCounter;
-
-void STAvUpdateIsrStatCounter(PSStatCounter pStatistic,
- u8 byIsr0,
- u8 byIsr1);
-
-void STAvUpdateRDStatCounter(PSStatCounter pStatistic,
- u8 byRSR, u8 byNewRSR, u8 byRxSts,
- u8 byRxRate, u8 * pbyBuffer,
- unsigned int cbFrameLength);
-
-void STAvUpdateTDStatCounter(PSStatCounter pStatistic, u8 byPktNum,
- u8 byRate, u8 byTSR);
-
-void
-STAvUpdate802_11Counter(
- PSDot11Counters p802_11Counter,
- PSStatCounter pStatistic,
- u8 byRTSSuccess,
- u8 byRTSFail,
- u8 byACKFail,
- u8 byFCSErr
- );
-
-void STAvUpdateUSBCounter(PSUSBCounter pUsbCounter, int ntStatus);
-
-#endif /* __MIB_H__ */
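
With mib.c and mib.h removed, the only per-packet bookkeeping the driver still keeps is what the rxtx.c hunk further below stores via pDevice->pkt_info, while byte counts go straight into net_device_stats. The sketch below shows roughly what one such entry must carry; the actual struct vnt_tx_pkt_info definition is not part of this diff, so the field types here are assumptions inferred from s_vSaveTxPktInfo().

#include <stdint.h>

#define ETH_ALEN 6   /* as in <linux/if_ether.h> */

/* Assumed shape of one pkt_info entry: the rxtx.c hunk saves the FIFO
 * control word and the destination address, indexed by packet number. */
struct vnt_tx_pkt_info {
	uint16_t fifo_ctl;
	uint8_t dest_addr[ETH_ALEN];
};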
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index d27fa43..1e8f64b 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -419,7 +419,7 @@ static u8 vt3226_channel_table1[CB_MAX_CHANNEL_24G][3] = {
///}}RobertYu
//{{RobertYu:20060502, TWIF 1.14, LO Current for 11b mode
-const u32 vt3226d0_lo_current_table[CB_MAX_CHANNEL_24G] = {
+static const u32 vt3226d0_lo_current_table[CB_MAX_CHANNEL_24G] = {
0x0135C600+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
0x0135C600+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
0x0235C600+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
@@ -597,7 +597,7 @@ static u8 vt3342_channel_table1[CB_MAX_CHANNEL][3] = {
*
-*/
-const u32 al2230_power_table[AL2230_PWR_IDX_LEN] = {
+static const u32 al2230_power_table[AL2230_PWR_IDX_LEN] = {
0x04040900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x04041900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x04042900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
@@ -740,9 +740,6 @@ int RFbSetPower(struct vnt_private *priv, u32 rate, u32 channel)
int ret = true;
u8 power = priv->byCCKPwr;
- if (priv->dwDiagRefCount)
- return true;
-
if (channel == 0)
return -EINVAL;
diff --git a/drivers/staging/vt6656/rndis.h b/drivers/staging/vt6656/rndis.h
index 5cf5e73..3661f82 100644
--- a/drivers/staging/vt6656/rndis.h
+++ b/drivers/staging/vt6656/rndis.h
@@ -79,23 +79,23 @@ typedef struct _CMD_WRITE_MASK
u8 byMask;
} CMD_WRITE_MASK, *PCMD_WRITE_MASK;
-typedef struct _CMD_CARD_INIT
+struct vnt_cmd_card_init
{
- u8 byInitClass;
- u8 bExistSWNetAddr;
- u8 bySWNetAddr[6];
- u8 byShortRetryLimit;
- u8 byLongRetryLimit;
-} CMD_CARD_INIT, *PCMD_CARD_INIT;
-
-typedef struct _RSP_CARD_INIT
+ u8 init_class;
+ u8 exist_sw_net_addr;
+ u8 sw_net_addr[6];
+ u8 short_retry_limit;
+ u8 long_retry_limit;
+};
+
+struct vnt_rsp_card_init
{
- u8 byStatus;
- u8 byNetAddr[6];
- u8 byRFType;
- u8 byMinChannel;
- u8 byMaxChannel;
-} RSP_CARD_INIT, *PRSP_CARD_INIT;
+ u8 status;
+ u8 net_addr[6];
+ u8 rf_type;
+ u8 min_channel;
+ u8 max_channel;
+};
typedef struct _CMD_SET_KEY
{
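
The renamed command and response structs above are sent and received verbatim over the USB control pipe (the main_usb.c hunk passes their sizeof and a u8 pointer to CONTROLnsRequestOut/CONTROLnsRequestIn), so the conversion must preserve the old typedefs' byte layout. A standalone check such as the one below makes that explicit; it is illustrative only and not part of the patch. With members that are all single bytes, both structs are 10 bytes and contain no padding.

#include <stdint.h>

struct vnt_cmd_card_init {
	uint8_t init_class;
	uint8_t exist_sw_net_addr;
	uint8_t sw_net_addr[6];
	uint8_t short_retry_limit;
	uint8_t long_retry_limit;
};

struct vnt_rsp_card_init {
	uint8_t status;
	uint8_t net_addr[6];
	uint8_t rf_type;
	uint8_t min_channel;
	uint8_t max_channel;
};

/* Layout must match what the firmware expects for MESSAGE_TYPE_CARDINIT. */
_Static_assert(sizeof(struct vnt_cmd_card_init) == 10, "card_init cmd layout");
_Static_assert(sizeof(struct vnt_rsp_card_init) == 10, "card_init rsp layout");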
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 35a3ddb..51fff89 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -64,16 +64,16 @@
static int msglevel = MSG_LEVEL_INFO;
-const u16 wTimeStampOff[2][MAX_RATE] = {
+static const u16 wTimeStampOff[2][MAX_RATE] = {
{384, 288, 226, 209, 54, 43, 37, 31, 28, 25, 24, 23}, // Long Preamble
{384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23}, // Short Preamble
};
-const u16 wFB_Opt0[2][5] = {
+static const u16 wFB_Opt0[2][5] = {
{RATE_12M, RATE_18M, RATE_24M, RATE_36M, RATE_48M}, // fallback_rate0
{RATE_12M, RATE_12M, RATE_18M, RATE_24M, RATE_36M}, // fallback_rate1
};
-const u16 wFB_Opt1[2][5] = {
+static const u16 wFB_Opt1[2][5] = {
{RATE_12M, RATE_18M, RATE_24M, RATE_24M, RATE_36M}, // fallback_rate0
{RATE_6M , RATE_6M, RATE_12M, RATE_12M, RATE_18M}, // fallback_rate1
};
@@ -96,7 +96,7 @@ const u16 wFB_Opt1[2][5] = {
static void s_vSaveTxPktInfo(struct vnt_private *pDevice, u8 byPktNum,
u8 *pbyDestAddr, u16 wPktLength, u16 wFIFOCtl);
-static void *s_vGetFreeContext(struct vnt_private *pDevice);
+static struct vnt_usb_send_context *s_vGetFreeContext(struct vnt_private *);
static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
u8 byPktType, u16 wCurrentRate, struct vnt_tx_buffer *tx_buffer,
@@ -118,8 +118,8 @@ static void s_vSWencryption(struct vnt_private *pDevice,
static unsigned int s_uGetTxRsvTime(struct vnt_private *pDevice, u8 byPktType,
u32 cbFrameLength, u16 wRate, int bNeedAck);
-static u16 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice, u8 byRTSRsvType,
- u8 byPktType, u32 cbFrameLength, u16 wCurrentRate);
+static u16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
+ u8 rsv_type, u8 pkt_type, u32 frame_lenght, u16 current_rate);
static u16 s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
u8 byPktType, union vnt_tx_data_head *head, u32 cbFrameLength,
@@ -136,48 +136,43 @@ static u16 s_uGetRTSCTSDuration(struct vnt_private *pDevice,
u8 byDurType, u32 cbFrameLength, u8 byPktType, u16 wRate,
int bNeedAck, u8 byFBOption);
-static void *s_vGetFreeContext(struct vnt_private *pDevice)
+static struct vnt_usb_send_context
+ *s_vGetFreeContext(struct vnt_private *priv)
{
- struct vnt_usb_send_context *pContext = NULL;
- struct vnt_usb_send_context *pReturnContext = NULL;
+ struct vnt_usb_send_context *context = NULL;
int ii;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
-
- for (ii = 0; ii < pDevice->cbTD; ii++) {
- if (!pDevice->apTD[ii])
- return NULL;
- pContext = pDevice->apTD[ii];
- if (pContext->bBoolInUse == false) {
- pContext->bBoolInUse = true;
- memset(pContext->Data, 0, MAX_TOTAL_SIZE_WITH_ALL_HEADERS);
- pReturnContext = pContext;
- break;
- }
- }
- if ( ii == pDevice->cbTD ) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"No Free Tx Context\n");
- }
- return (void *) pReturnContext;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
+
+ for (ii = 0; ii < priv->cbTD; ii++) {
+ if (!priv->apTD[ii])
+ return NULL;
+
+ context = priv->apTD[ii];
+ if (context->bBoolInUse == false) {
+ context->bBoolInUse = true;
+ memset(context->Data, 0,
+ MAX_TOTAL_SIZE_WITH_ALL_HEADERS);
+ return context;
+ }
+ }
+
+ if (ii == priv->cbTD)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"No Free Tx Context\n");
+
+ return NULL;
}
static void s_vSaveTxPktInfo(struct vnt_private *pDevice, u8 byPktNum,
u8 *pbyDestAddr, u16 wPktLength, u16 wFIFOCtl)
{
- PSStatCounter pStatistic = &pDevice->scStatistic;
-
- if (is_broadcast_ether_addr(pbyDestAddr))
- pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni = TX_PKT_BROAD;
- else if (is_multicast_ether_addr(pbyDestAddr))
- pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni = TX_PKT_MULTI;
- else
- pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni = TX_PKT_UNI;
-
- pStatistic->abyTxPktInfo[byPktNum].wLength = wPktLength;
- pStatistic->abyTxPktInfo[byPktNum].wFIFOCtl = wFIFOCtl;
- memcpy(pStatistic->abyTxPktInfo[byPktNum].abyDestAddr,
- pbyDestAddr,
- ETH_ALEN);
+ struct net_device_stats *stats = &pDevice->stats;
+ struct vnt_tx_pkt_info *pkt_info = pDevice->pkt_info;
+
+ pkt_info[byPktNum].fifo_ctl = wFIFOCtl;
+ memcpy(pkt_info[byPktNum].dest_addr, pbyDestAddr, ETH_ALEN);
+
+ stats->tx_bytes += wPktLength;
}
static void s_vFillTxKey(struct vnt_private *pDevice,
@@ -278,7 +273,7 @@ static void s_vFillTxKey(struct vnt_private *pDevice,
mic_hdr->tsc_15_0 = cpu_to_be16(pTransmitKey->wTSC15_0);
/* MICHDR1 */
- if (pDevice->bLongHeader)
+ if (ieee80211_has_a4(pMACHeader->frame_control))
mic_hdr->hlen = cpu_to_be16(28);
else
mic_hdr->hlen = cpu_to_be16(22);
@@ -292,7 +287,7 @@ static void s_vFillTxKey(struct vnt_private *pDevice,
& 0xc78f);
mic_hdr->seq_ctrl = cpu_to_le16(pMACHeader->seq_ctrl & 0xf);
- if (pDevice->bLongHeader)
+ if (ieee80211_has_a4(pMACHeader->frame_control))
memcpy(mic_hdr->addr4, pMACHeader->addr4, ETH_ALEN);
}
}
@@ -343,24 +338,25 @@ static u16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
PK_TYPE_11GB 2
PK_TYPE_11GA 3
*/
-static u32 s_uGetTxRsvTime(struct vnt_private *pDevice, u8 byPktType,
- u32 cbFrameLength, u16 wRate, int bNeedAck)
+static u32 s_uGetTxRsvTime(struct vnt_private *priv, u8 pkt_type,
+ u32 frame_length, u16 rate, int need_ack)
{
- u32 uDataTime, uAckTime;
+ u32 data_time, ack_time;
- uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wRate);
- if (byPktType == PK_TYPE_11B) {//llb,CCK mode
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (u16)pDevice->byTopCCKBasicRate);
- } else {//11g 2.4G OFDM mode & 11a 5G OFDM mode
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (u16)pDevice->byTopOFDMBasicRate);
- }
+ data_time = BBuGetFrameTime(priv->byPreambleType, pkt_type,
+ frame_length, rate);
- if (bNeedAck) {
- return (uDataTime + pDevice->uSIFS + uAckTime);
- }
- else {
- return uDataTime;
- }
+ if (pkt_type == PK_TYPE_11B)
+ ack_time = BBuGetFrameTime(priv->byPreambleType, pkt_type, 14,
+ (u16)priv->byTopCCKBasicRate);
+ else
+ ack_time = BBuGetFrameTime(priv->byPreambleType, pkt_type, 14,
+ (u16)priv->byTopOFDMBasicRate);
+
+ if (need_ack)
+ return data_time + priv->uSIFS + ack_time;
+
+ return data_time;
}
static u16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
@@ -371,37 +367,47 @@ static u16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
}
//byFreqType: 0=>5GHZ 1=>2.4GHZ
-static u16 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice,
- u8 byRTSRsvType, u8 byPktType, u32 cbFrameLength, u16 wCurrentRate)
+static u16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
+	u8 rsv_type, u8 pkt_type, u32 frame_length, u16 current_rate)
{
- u32 uRrvTime, uRTSTime, uCTSTime, uAckTime, uDataTime;
+ u32 rrv_time, rts_time, cts_time, ack_time, data_time;
+
+ rrv_time = rts_time = cts_time = ack_time = data_time = 0;
+
+ data_time = BBuGetFrameTime(priv->byPreambleType, pkt_type,
+		frame_length, current_rate);
+
+ if (rsv_type == 0) {
+ rts_time = BBuGetFrameTime(priv->byPreambleType,
+ pkt_type, 20, priv->byTopCCKBasicRate);
+ cts_time = ack_time = BBuGetFrameTime(priv->byPreambleType,
+ pkt_type, 14, priv->byTopCCKBasicRate);
+ } else if (rsv_type == 1) {
+ rts_time = BBuGetFrameTime(priv->byPreambleType,
+ pkt_type, 20, priv->byTopCCKBasicRate);
+ cts_time = BBuGetFrameTime(priv->byPreambleType, pkt_type,
+ 14, priv->byTopCCKBasicRate);
+ ack_time = BBuGetFrameTime(priv->byPreambleType, pkt_type,
+ 14, priv->byTopOFDMBasicRate);
+ } else if (rsv_type == 2) {
+ rts_time = BBuGetFrameTime(priv->byPreambleType, pkt_type,
+ 20, priv->byTopOFDMBasicRate);
+ cts_time = ack_time = BBuGetFrameTime(priv->byPreambleType,
+ pkt_type, 14, priv->byTopOFDMBasicRate);
+ } else if (rsv_type == 3) {
+ cts_time = BBuGetFrameTime(priv->byPreambleType, pkt_type,
+ 14, priv->byTopCCKBasicRate);
+ ack_time = BBuGetFrameTime(priv->byPreambleType, pkt_type,
+ 14, priv->byTopOFDMBasicRate);
+
+ rrv_time = cts_time + ack_time + data_time + 2 * priv->uSIFS;
+
+ return rrv_time;
+ }
- uRrvTime = uRTSTime = uCTSTime = uAckTime = uDataTime = 0;
+ rrv_time = rts_time + cts_time + ack_time + data_time + 3 * priv->uSIFS;
- uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wCurrentRate);
- if (byRTSRsvType == 0) { //RTSTxRrvTime_bb
- uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
- uCTSTime = uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- }
- else if (byRTSRsvType == 1){ //RTSTxRrvTime_ba, only in 2.4GHZ
- uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- }
- else if (byRTSRsvType == 2) { //RTSTxRrvTime_aa
- uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopOFDMBasicRate);
- uCTSTime = uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- }
- else if (byRTSRsvType == 3) { //CTSTxRrvTime_ba, only in 2.4GHZ
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- uRrvTime = uCTSTime + uAckTime + uDataTime + 2*pDevice->uSIFS;
- return uRrvTime;
- }
-
- //RTSRrvTime
- uRrvTime = uRTSTime + uCTSTime + uAckTime + uDataTime + 3*pDevice->uSIFS;
- return cpu_to_le16((u16)uRrvTime);
+ return cpu_to_le16((u16)rrv_time);
}
//byFreqType 0: 5GHz, 1:2.4Ghz
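For reference, the reservation times computed above reduce to two small sums: the RTS/CTS cases reserve rts + cts + ack + data plus three SIFS gaps, while the CTS-only case (rsv_type 3) reserves cts + ack + data plus two SIFS gaps. The sketch below is an illustration only, with hypothetical names and the individual frame times passed in as microsecond values rather than computed via BBuGetFrameTime():

/*
 * Minimal sketch (not driver code) of the arithmetic in
 * s_uGetRTSCTSRsvTime(); all inputs and results are in microseconds.
 */
#include <stdint.h>

/* RTS/CTS protection: RTS + CTS + data + ACK, a SIFS gap between consecutive frames. */
static uint32_t demo_rsv_time_rts(uint32_t rts, uint32_t cts, uint32_t ack,
				  uint32_t data, uint32_t sifs)
{
	return rts + cts + ack + data + 3 * sifs;
}

/* CTS-to-self protection (rsv_type 3): no RTS frame, so one fewer SIFS gap. */
static uint32_t demo_rsv_time_cts_only(uint32_t cts, uint32_t ack,
				       uint32_t data, uint32_t sifs)
{
	return cts + ack + data + 2 * sifs;
}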
@@ -790,7 +796,6 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
{
struct vnt_tx_fifo_head *pFifoHead = &tx_buffer->fifo_head;
union vnt_tx_data_head *head = NULL;
- u32 cbMACHdLen = WLAN_HDR_ADDR3_LEN; /* 24 */
u16 wFifoCtl;
u8 byFBOption = AUTO_FB_NONE;
@@ -805,9 +810,6 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
if (!pFifoHead)
return 0;
- if (pDevice->bLongHeader)
- cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
-
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
if (need_rts) {
struct vnt_rrv_time_rts *pBuf =
@@ -978,28 +980,19 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
bSoftWEP = true; /* WEP 256 */
}
- // Get pkt type
- if (ntohs(psEthHeader->h_proto) > ETH_DATA_LEN) {
- if (pDevice->dwDiagRefCount == 0) {
- cb802_1_H_len = 8;
- } else {
- cb802_1_H_len = 2;
- }
- } else {
- cb802_1_H_len = 0;
- }
+ /* Get pkt type */
+ if (ntohs(psEthHeader->h_proto) > ETH_DATA_LEN)
+ cb802_1_H_len = 8;
+ else
+ cb802_1_H_len = 0;
cbFrameBodySize = uSkbPacketLen - ETH_HLEN + cb802_1_H_len;
//Set packet type
pTxBufHead->wFIFOCtl |= (u16)(byPktType<<8);
- if (pDevice->dwDiagRefCount != 0) {
- bNeedACK = false;
- pTxBufHead->wFIFOCtl = pTxBufHead->wFIFOCtl & (~FIFOCTL_NEEDACK);
- } else { //if (pDevice->dwDiagRefCount != 0) {
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
+ (pDevice->eOPMode == OP_MODE_AP)) {
if (is_multicast_ether_addr(psEthHeader->h_dest)) {
bNeedACK = false;
pTxBufHead->wFIFOCtl =
@@ -1008,26 +1001,17 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
}
- }
- else {
- // MSDUs in Infra mode always need ACK
- bNeedACK = true;
- pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
- }
- } //if (pDevice->dwDiagRefCount != 0) {
+ } else {
+ /* MSDUs in Infra mode always need ACK */
+ bNeedACK = true;
+ pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
+ }
pTxBufHead->wTimeStamp = DEFAULT_MSDU_LIFETIME_RES_64us;
- //Set FIFOCTL_LHEAD
- if (pDevice->bLongHeader)
- pTxBufHead->wFIFOCtl |= FIFOCTL_LHEAD;
-
//Set FRAGCTL_MACHDCNT
- if (pDevice->bLongHeader) {
- cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
- } else {
- cbMACHdLen = WLAN_HDR_ADDR3_LEN;
- }
+ cbMACHdLen = WLAN_HDR_ADDR3_LEN;
+
pTxBufHead->wFragCtl |= (u16)(cbMACHdLen << 10);
//Set FIFOCTL_GrpAckPolicy
@@ -1183,24 +1167,19 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
}
}
- // 802.1H
- if (ntohs(psEthHeader->h_proto) > ETH_DATA_LEN) {
- if (pDevice->dwDiagRefCount == 0) {
+ /* 802.1H */
+ if (ntohs(psEthHeader->h_proto) > ETH_DATA_LEN) {
if ((psEthHeader->h_proto == cpu_to_be16(ETH_P_IPX)) ||
- (psEthHeader->h_proto == cpu_to_le16(0xF380))) {
+ (psEthHeader->h_proto == cpu_to_le16(0xF380)))
memcpy((u8 *) (pbyPayloadHead),
- abySNAP_Bridgetunnel, 6);
- } else {
- memcpy((u8 *) (pbyPayloadHead), &abySNAP_RFC1042[0], 6);
- }
- pbyType = (u8 *) (pbyPayloadHead + 6);
- memcpy(pbyType, &(psEthHeader->h_proto), sizeof(u16));
- } else {
- memcpy((u8 *) (pbyPayloadHead), &(psEthHeader->h_proto), sizeof(u16));
+ abySNAP_Bridgetunnel, 6);
+ else
+ memcpy((u8 *) (pbyPayloadHead), &abySNAP_RFC1042[0], 6);
- }
+ pbyType = (u8 *) (pbyPayloadHead + 6);
- }
+ memcpy(pbyType, &(psEthHeader->h_proto), sizeof(u16));
+ }
if (pPacket != NULL) {
// Copy the Packet into a tx Buffer
@@ -1352,11 +1331,6 @@ static void s_vGenerateMACHeader(struct vnt_private *pDevice,
pMACHeader->duration_id = cpu_to_le16(wDuration);
- if (pDevice->bLongHeader) {
- PWLAN_80211HDR_A4 pMACA4Header = (PWLAN_80211HDR_A4) pbyBufferAddr;
- pMACHeader->frame_control |= (FC_TODS | FC_FROMDS);
- memcpy(pMACA4Header->abyAddr4, pDevice->abyBSSID, WLAN_ADDR_LEN);
- }
pMACHeader->seq_ctrl = cpu_to_le16(pDevice->wSeqCounter << 4);
//Set FragNumber in Sequence Control
@@ -1409,7 +1383,7 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
u32 cbMacHdLen;
u16 wCurrentRate = RATE_1M;
- pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
+ pContext = s_vGetFreeContext(pDevice);
if (NULL == pContext) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ManagementSend TX...NO CONTEXT!\n");
@@ -1494,7 +1468,6 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
// Notes:
// Although spec says MMPDU can be fragmented; In most case,
// no one will send a MMPDU under fragmentation. With RTS may occur.
- pDevice->bAES = false; //Set FRAGCTL_WEPTYP
if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) {
if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) {
@@ -1515,7 +1488,6 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
pTxBufHead->wFragCtl |= FRAGCTL_AES;
- pDevice->bAES = true;
}
//MAC Header should be padding 0 to DW alignment.
uPadding = 4 - (cbMacHdLen%4);
@@ -1659,20 +1631,17 @@ CMD_STATUS csBeacon_xmit(struct vnt_private *pDevice,
struct vnt_tx_mgmt *pPacket)
{
struct vnt_beacon_buffer *pTX_Buffer;
+ struct vnt_tx_short_buf_head *short_head;
u32 cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN;
u32 cbHeaderSize = 0;
- u16 wTxBufSize = sizeof(STxShortBufHead);
- PSTxShortBufHead pTxBufHead;
struct ieee80211_hdr *pMACHeader;
- struct vnt_tx_datahead_ab *pTxDataHead;
u16 wCurrentRate;
u32 cbFrameBodySize;
u32 cbReqCount;
- u8 *pbyTxBufferAddr;
struct vnt_usb_send_context *pContext;
CMD_STATUS status;
- pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
+ pContext = s_vGetFreeContext(pDevice);
if (NULL == pContext) {
status = CMD_STATUS_RESOURCES;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ManagementSend TX...NO CONTEXT!\n");
@@ -1680,49 +1649,50 @@ CMD_STATUS csBeacon_xmit(struct vnt_private *pDevice,
}
pTX_Buffer = (struct vnt_beacon_buffer *)&pContext->Data[0];
- pbyTxBufferAddr = (u8 *)&(pTX_Buffer->wFIFOCtl);
+ short_head = &pTX_Buffer->short_head;
cbFrameBodySize = pPacket->cbPayloadLen;
- pTxBufHead = (PSTxShortBufHead) pbyTxBufferAddr;
- wTxBufSize = sizeof(STxShortBufHead);
+ cbHeaderSize = sizeof(struct vnt_tx_short_buf_head);
- if (pDevice->byBBType == BB_TYPE_11A) {
- wCurrentRate = RATE_6M;
- pTxDataHead = (struct vnt_tx_datahead_ab *)
- (pbyTxBufferAddr + wTxBufSize);
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate, PK_TYPE_11A,
- &pTxDataHead->ab);
- //Get Duration and TimeStampOff
- pTxDataHead->wDuration = s_uGetDataDuration(pDevice,
- PK_TYPE_11A, false);
- pTxDataHead->wTimeStampOff = vnt_time_stamp_off(pDevice, wCurrentRate);
- cbHeaderSize = wTxBufSize + sizeof(struct vnt_tx_datahead_ab);
- } else {
- wCurrentRate = RATE_1M;
- pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
- pTxDataHead = (struct vnt_tx_datahead_ab *)
- (pbyTxBufferAddr + wTxBufSize);
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate, PK_TYPE_11B,
- &pTxDataHead->ab);
- //Get Duration and TimeStampOff
- pTxDataHead->wDuration = s_uGetDataDuration(pDevice,
+ if (pDevice->byBBType == BB_TYPE_11A) {
+ wCurrentRate = RATE_6M;
+
+ /* Get SignalField,ServiceField,Length */
+ BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate,
+ PK_TYPE_11A, &short_head->ab);
+
+ /* Get Duration and TimeStampOff */
+ short_head->duration = s_uGetDataDuration(pDevice,
+ PK_TYPE_11A, false);
+ short_head->time_stamp_off =
+ vnt_time_stamp_off(pDevice, wCurrentRate);
+ } else {
+ wCurrentRate = RATE_1M;
+ short_head->fifo_ctl |= FIFOCTL_11B;
+
+ /* Get SignalField,ServiceField,Length */
+ BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate,
+ PK_TYPE_11B, &short_head->ab);
+
+ /* Get Duration and TimeStampOff */
+ short_head->duration = s_uGetDataDuration(pDevice,
PK_TYPE_11B, false);
- pTxDataHead->wTimeStampOff = vnt_time_stamp_off(pDevice, wCurrentRate);
- cbHeaderSize = wTxBufSize + sizeof(struct vnt_tx_datahead_ab);
- }
+ short_head->time_stamp_off =
+ vnt_time_stamp_off(pDevice, wCurrentRate);
+ }
- //Generate Beacon Header
- pMACHeader = (struct ieee80211_hdr *)(pbyTxBufferAddr + cbHeaderSize);
- memcpy(pMACHeader, pPacket->p80211Header, pPacket->cbMPDULen);
- pMACHeader->duration_id = 0;
- pMACHeader->seq_ctrl = cpu_to_le16(pDevice->wSeqCounter << 4);
- pDevice->wSeqCounter++ ;
- if (pDevice->wSeqCounter > 0x0fff)
- pDevice->wSeqCounter = 0;
+ /* Generate Beacon Header */
+ pMACHeader = &pTX_Buffer->hdr;
+
+ memcpy(pMACHeader, pPacket->p80211Header, pPacket->cbMPDULen);
+
+ pMACHeader->duration_id = 0;
+ pMACHeader->seq_ctrl = cpu_to_le16(pDevice->wSeqCounter << 4);
+ pDevice->wSeqCounter++;
+ if (pDevice->wSeqCounter > 0x0fff)
+ pDevice->wSeqCounter = 0;
cbReqCount = cbHeaderSize + WLAN_HDR_ADDR3_LEN + cbFrameBodySize;
@@ -1781,7 +1751,7 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
}
p80211Header = (PUWLAN_80211HDR)skb->data;
- pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
+ pContext = s_vGetFreeContext(pDevice);
if (NULL == pContext) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0 TX...NO CONTEXT!\n");
@@ -1892,7 +1862,6 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
// Notes:
// Although spec says MMPDU can be fragmented; In most case,
// no one will send a MMPDU under fragmentation. With RTS may occur.
- pDevice->bAES = false; //Set FRAGCTL_WEPTYP
if (WLAN_GET_FC_ISWEP(p80211Header->sA4.wFrameCtl) != 0) {
if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) {
@@ -1914,7 +1883,6 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
cbICVlen = 8;//MIC
cbMICHDR = sizeof(struct vnt_mic_hdr);
pTxBufHead->wFragCtl |= FRAGCTL_AES;
- pDevice->bAES = true;
}
//MAC Header should be padding 0 to DW alignment.
uPadding = 4 - (cbMacHdLen%4);
@@ -2204,7 +2172,7 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
}
}
- pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
+ pContext = s_vGetFreeContext(pDevice);
if (pContext == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG" pContext == NULL\n");
@@ -2529,7 +2497,7 @@ int bRelayPacketSend(struct vnt_private *pDevice, u8 *pbySkbData, u32 uDataLen,
u32 status;
u16 wKeepRate = pDevice->wCurrentRate;
- pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
+ pContext = s_vGetFreeContext(pDevice);
if (NULL == pContext) {
return false;
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index eecbe89..b3ee6d0 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -230,12 +230,20 @@ struct vnt_tx_buffer {
union vnt_tx_head tx_head;
} __packed;
+struct vnt_tx_short_buf_head {
+ u16 fifo_ctl;
+ u16 time_stamp;
+ struct vnt_phy_field ab;
+ u16 duration;
+ u16 time_stamp_off;
+} __packed;
+
struct vnt_beacon_buffer {
u8 byType;
u8 byPKTNO;
u16 wTxByteCount;
- u16 wFIFOCtl;
- u16 wTimeStamp;
+ struct vnt_tx_short_buf_head short_head;
+ struct ieee80211_hdr hdr;
} __packed;
void vDMA0_tx_80211(struct vnt_private *, struct sk_buff *skb);
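The csBeacon_xmit() rewrite above leans on this layout: with __packed, the ieee80211_hdr member of vnt_beacon_buffer starts immediately after short_head, so the MAC header can be addressed as &pTX_Buffer->hdr and the header offset is simply sizeof(struct vnt_tx_short_buf_head). A minimal sketch of that invariant, using hypothetical stand-in structs (the real driver types are not reproduced here):

/*
 * Illustration only: stand-ins shaped like vnt_tx_short_buf_head and
 * vnt_beacon_buffer, showing that a packed layout places hdr directly
 * after short_head with no padding in between.
 */
#include <stddef.h>
#include <stdint.h>

struct demo_phy_field {		/* stand-in for struct vnt_phy_field */
	uint8_t signal;
	uint8_t service;
	uint16_t len;
} __attribute__((packed));

struct demo_short_buf_head {
	uint16_t fifo_ctl;
	uint16_t time_stamp;
	struct demo_phy_field ab;
	uint16_t duration;
	uint16_t time_stamp_off;
} __attribute__((packed));

struct demo_beacon_buffer {
	uint8_t type;
	uint8_t pkt_no;
	uint16_t tx_byte_count;
	struct demo_short_buf_head short_head;
	uint8_t hdr[24];	/* stand-in for struct ieee80211_hdr */
} __attribute__((packed));

_Static_assert(offsetof(struct demo_beacon_buffer, hdr) ==
	       offsetof(struct demo_beacon_buffer, short_head) +
	       sizeof(struct demo_short_buf_head),
	       "hdr must immediately follow short_head");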
diff --git a/drivers/staging/vt6656/tkip.c b/drivers/staging/vt6656/tkip.c
index 9d643e4..28282f3 100644
--- a/drivers/staging/vt6656/tkip.c
+++ b/drivers/staging/vt6656/tkip.c
@@ -39,7 +39,7 @@
/* The 2nd table is the same as the 1st but with the upper and lower */
/* bytes swapped. To allow an endian tolerant implementation, the byte */
/* halves have been expressed independently here. */
-const u8 TKIP_Sbox_Lower[256] = {
+static const u8 TKIP_Sbox_Lower[256] = {
0xA5,0x84,0x99,0x8D,0x0D,0xBD,0xB1,0x54,
0x50,0x03,0xA9,0x7D,0x19,0x62,0xE6,0x9A,
0x45,0x9D,0x40,0x87,0x15,0xEB,0xC9,0x0B,
@@ -74,7 +74,7 @@ const u8 TKIP_Sbox_Lower[256] = {
0xC3,0xB0,0x77,0x11,0xCB,0xFC,0xD6,0x3A
};
-const u8 TKIP_Sbox_Upper[256] = {
+static const u8 TKIP_Sbox_Upper[256] = {
0xC6,0xF8,0xEE,0xF6,0xFF,0xD6,0xDE,0x91,
0x60,0x02,0xCE,0x56,0xE7,0xB5,0x4D,0xEC,
0x8F,0x1F,0x89,0xFA,0xEF,0xB2,0x8E,0xFB,
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 5fc18ad..01cf099 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -390,8 +390,6 @@ static void s_nsInterruptUsbIoCompleteRead(struct urb *urb)
INTnsProcessData(pDevice);
}
- STAvUpdateUSBCounter(&pDevice->scStatistic.USB_InterruptStat, ntStatus);
-
if (pDevice->fKillEventPollingThread != true) {
usb_fill_bulk_urb(pDevice->pInterruptURB,
pDevice->usb,
@@ -499,8 +497,6 @@ static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
if (status) {
pDevice->ulBulkInError++;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK In failed %d\n", status);
-
- pDevice->scStatistic.RxFcsErrCnt ++;
//todo...xxxxxx
// if (status == USBD_STATUS_CRC) {
// pDevice->ulBulkInContCRCError++;
@@ -514,12 +510,8 @@ static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
bIndicateReceive = true;
pDevice->ulBulkInContCRCError = 0;
pDevice->ulBulkInBytesRead += bytesRead;
-
- pDevice->scStatistic.RxOkCnt ++;
}
- STAvUpdateUSBCounter(&pDevice->scStatistic.USB_BulkInStat, status);
-
if (bIndicateReceive) {
spin_lock(&pDevice->lock);
if (RXbBulkInProcessData(pDevice, pRCB, bytesRead) == true)
@@ -655,8 +647,6 @@ static void s_nsBulkOutIoCompleteWrite(struct urb *urb)
//
status = urb->status;
- //we should have failed, succeeded, or cancelled, but NOT be pending
- STAvUpdateUSBCounter(&pDevice->scStatistic.USB_BulkOutStat, status);
if(status == STATUS_SUCCESS) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Write %d bytes\n",(int)ulBufLen);
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 2f8e2a8..6b95229 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -55,8 +55,8 @@
#include "channel.h"
#include "iowpa.h"
-static int msglevel =MSG_LEVEL_INFO;
-//static int msglevel =MSG_LEVEL_DEBUG;
+static int msglevel = MSG_LEVEL_INFO;
+//static int msglevel = MSG_LEVEL_DEBUG;
static void s_vProbeChannel(struct vnt_private *);
@@ -87,38 +87,33 @@ static void vAdHocBeaconStop(struct vnt_private *pDevice)
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
int bStop;
- /*
- * temporarily stop Beacon packet for AdHoc Server
- * if all of the following coditions are met:
- * (1) STA is in AdHoc mode
- * (2) VT3253 is programmed as automatic Beacon Transmitting
- * (3) One of the following conditions is met
- * (3.1) AdHoc channel is in B/G band and the
- * current scan channel is in A band
- * or
- * (3.2) AdHoc channel is in A mode
- */
- bStop = false;
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
- (pMgmt->eCurrState >= WMAC_STATE_STARTED))
- {
- if ((pMgmt->uIBSSChannel <= CB_MAX_CHANNEL_24G) &&
- (pMgmt->uScanChannel > CB_MAX_CHANNEL_24G))
- {
- bStop = true;
- }
- if (pMgmt->uIBSSChannel > CB_MAX_CHANNEL_24G)
- {
- bStop = true;
- }
- }
-
- if (bStop)
- {
- //PMESG(("STOP_BEACON: IBSSChannel = %u, ScanChannel = %u\n",
- // pMgmt->uIBSSChannel, pMgmt->uScanChannel));
- MACvRegBitsOff(pDevice, MAC_REG_TCR, TCR_AUTOBCNTX);
- }
+ /*
+ * temporarily stop Beacon packet for AdHoc Server
+	 * if all of the following conditions are met:
+ * (1) STA is in AdHoc mode
+ * (2) VT3253 is programmed as automatic Beacon Transmitting
+ * (3) One of the following conditions is met
+ * (3.1) AdHoc channel is in B/G band and the
+ * current scan channel is in A band
+ * or
+ * (3.2) AdHoc channel is in A mode
+ */
+ bStop = false;
+ if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
+ (pMgmt->eCurrState >= WMAC_STATE_STARTED)) {
+ if ((pMgmt->uIBSSChannel <= CB_MAX_CHANNEL_24G) &&
+ (pMgmt->uScanChannel > CB_MAX_CHANNEL_24G)) {
+ bStop = true;
+ }
+ if (pMgmt->uIBSSChannel > CB_MAX_CHANNEL_24G)
+ bStop = true;
+ }
+
+ if (bStop) {
+ //PMESG(("STOP_BEACON: IBSSChannel = %u, ScanChannel = %u\n",
+ // pMgmt->uIBSSChannel, pMgmt->uScanChannel));
+ MACvRegBitsOff(pDevice, MAC_REG_TCR, TCR_AUTOBCNTX);
+ }
} /* vAdHocBeaconStop */
@@ -145,12 +140,11 @@ static void vAdHocBeaconRestart(struct vnt_private *pDevice)
* (1) STA is in AdHoc mode
* (2) VT3253 is programmed as automatic Beacon Transmitting
*/
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
- (pMgmt->eCurrState >= WMAC_STATE_STARTED))
- {
- //PMESG(("RESTART_BEACON\n"));
- MACvRegBitsOn(pDevice, MAC_REG_TCR, TCR_AUTOBCNTX);
- }
+ if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
+ (pMgmt->eCurrState >= WMAC_STATE_STARTED)) {
+ //PMESG(("RESTART_BEACON\n"));
+ MACvRegBitsOn(pDevice, MAC_REG_TCR, TCR_AUTOBCNTX);
+ }
}
@@ -182,34 +176,33 @@ static void s_vProbeChannel(struct vnt_private *pDevice)
u8 *pbyRate;
int ii;
- if (pDevice->byBBType == BB_TYPE_11A) {
- pbyRate = &abyCurrSuppRatesA[0];
- } else if (pDevice->byBBType == BB_TYPE_11B) {
- pbyRate = &abyCurrSuppRatesB[0];
- } else {
- pbyRate = &abyCurrSuppRatesG[0];
- }
- // build an assocreq frame and send it
- pTxPacket = s_MgrMakeProbeRequest
- (
- pDevice,
- pMgmt,
- pMgmt->abyScanBSSID,
- (PWLAN_IE_SSID)pMgmt->abyScanSSID,
- (PWLAN_IE_SUPP_RATES)pbyRate,
- (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRatesG
- );
-
- if (pTxPacket != NULL ){
- for (ii = 0; ii < 1 ; ii++) {
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe request sending fail.. \n");
- }
- else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe request is sending.. \n");
- }
- }
- }
+ if (pDevice->byBBType == BB_TYPE_11A)
+ pbyRate = &abyCurrSuppRatesA[0];
+ else if (pDevice->byBBType == BB_TYPE_11B)
+ pbyRate = &abyCurrSuppRatesB[0];
+ else
+ pbyRate = &abyCurrSuppRatesG[0];
+
+	// build a probe request frame and send it
+ pTxPacket = s_MgrMakeProbeRequest
+ (
+ pDevice,
+ pMgmt,
+ pMgmt->abyScanBSSID,
+ (PWLAN_IE_SSID)pMgmt->abyScanSSID,
+ (PWLAN_IE_SUPP_RATES)pbyRate,
+ (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRatesG
+ );
+
+ if (pTxPacket != NULL) {
+ for (ii = 0; ii < 1; ii++) {
+ if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe request sending fail..\n");
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe request is sending..\n");
+ }
+ }
+ }
}
@@ -224,7 +217,7 @@ static void s_vProbeChannel(struct vnt_private *pDevice)
*
-*/
-struct vnt_tx_mgmt *s_MgrMakeProbeRequest(struct vnt_private *pDevice,
+static struct vnt_tx_mgmt *s_MgrMakeProbeRequest(struct vnt_private *pDevice,
struct vnt_manager *pMgmt, u8 *pScanBSSID, PWLAN_IE_SSID pSSID,
PWLAN_IE_SUPP_RATES pCurrRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates)
{
@@ -236,37 +229,38 @@ struct vnt_tx_mgmt *s_MgrMakeProbeRequest(struct vnt_private *pDevice,
+ WLAN_PROBEREQ_FR_MAXLEN);
pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ sizeof(struct vnt_tx_mgmt));
- sFrame.pBuf = (u8 *)pTxPacket->p80211Header;
- sFrame.len = WLAN_PROBEREQ_FR_MAXLEN;
- vMgrEncodeProbeRequest(&sFrame);
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_PROBEREQ)
- ));
- memcpy( sFrame.pHdr->sA3.abyAddr1, pScanBSSID, WLAN_ADDR_LEN);
- memcpy( sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy( sFrame.pHdr->sA3.abyAddr3, pScanBSSID, WLAN_BSSID_LEN);
- // Copy the SSID, pSSID->len=0 indicate broadcast SSID
- sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len);
- sFrame.len += pSSID->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN);
- // Copy the extension rate set
- if (pDevice->byBBType == BB_TYPE_11G) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrExtSuppRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, pCurrExtSuppRates->len + WLAN_IEHDR_LEN);
- }
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- return pTxPacket;
+ sFrame.pBuf = (u8 *)pTxPacket->p80211Header;
+ sFrame.len = WLAN_PROBEREQ_FR_MAXLEN;
+ vMgrEncodeProbeRequest(&sFrame);
+ sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
+ (
+ WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
+ WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_PROBEREQ)
+ ));
+ memcpy(sFrame.pHdr->sA3.abyAddr1, pScanBSSID, WLAN_ADDR_LEN);
+ memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
+ memcpy(sFrame.pHdr->sA3.abyAddr3, pScanBSSID, WLAN_BSSID_LEN);
+ // Copy the SSID, pSSID->len=0 indicate broadcast SSID
+ sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len);
+ sFrame.len += pSSID->len + WLAN_IEHDR_LEN;
+ memcpy(sFrame.pSSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
+ sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
+ sFrame.len += pCurrRates->len + WLAN_IEHDR_LEN;
+ memcpy(sFrame.pSuppRates, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN);
+ // Copy the extension rate set
+ if (pDevice->byBBType == BB_TYPE_11G) {
+ sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
+ sFrame.len += pCurrExtSuppRates->len + WLAN_IEHDR_LEN;
+ memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, pCurrExtSuppRates->len + WLAN_IEHDR_LEN);
+ }
+ pTxPacket->cbMPDULen = sFrame.len;
+ pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
+
+ return pTxPacket;
}
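The probe-request builder above grows the frame by appending 802.11 information elements, each consisting of a one-byte element ID, a one-byte length, and the payload (WLAN_IEHDR_LEN covers the two header bytes). A minimal sketch of that append pattern, with hypothetical names and a caller-supplied buffer in place of the driver's sFrame bookkeeping:

/*
 * Illustration only: append one information element (ID, length,
 * payload) to a frame buffer and advance the running frame length,
 * mirroring the sFrame.len += ... / memcpy(...) steps above.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define DEMO_IEHDR_LEN 2	/* element ID byte + length byte */

struct demo_ie {
	uint8_t id;
	uint8_t len;
	uint8_t data[255];
};

static int demo_append_ie(uint8_t *buf, size_t buf_size, size_t *frame_len,
			  const struct demo_ie *ie)
{
	size_t need = DEMO_IEHDR_LEN + ie->len;

	if (*frame_len + need > buf_size)
		return -1;	/* would overflow the frame buffer */

	memcpy(buf + *frame_len, ie, need);
	*frame_len += need;
	return 0;
}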
-void vCommandTimerWait(struct vnt_private *pDevice, unsigned long MSecond)
+static void
+vCommandTimerWait(struct vnt_private *pDevice, unsigned long MSecond)
{
schedule_delayed_work(&pDevice->run_command_work,
msecs_to_jiffies(MSecond));
@@ -289,661 +283,639 @@ void vRunCommand(struct work_struct *work)
if (pDevice->Flags & fMP_DISCONNECTED)
return;
- if (pDevice->dwDiagRefCount != 0)
- return;
- if (pDevice->bCmdRunning != true)
- return;
+ if (pDevice->bCmdRunning != true)
+ return;
- spin_lock_irq(&pDevice->lock);
+ spin_lock_irq(&pDevice->lock);
- switch ( pDevice->eCommandState ) {
+ switch (pDevice->eCommandState) {
- case WLAN_CMD_SCAN_START:
+ case WLAN_CMD_SCAN_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == true) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
-
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID;
-
- if (pMgmt->uScanChannel == 0 ) {
- pMgmt->uScanChannel = pDevice->byMinChannel;
- }
- if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
- pDevice->eCommandState = WLAN_CMD_SCAN_END;
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
-
- } else {
- if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel)) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Invalid channel pMgmt->uScanChannel = %d \n",pMgmt->uScanChannel);
+ if (pDevice->bRadioOff == true) {
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ }
+
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ }
+
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID;
+
+ if (pMgmt->uScanChannel == 0)
+ pMgmt->uScanChannel = pDevice->byMinChannel;
+ if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
+ pDevice->eCommandState = WLAN_CMD_SCAN_END;
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ } else {
+ if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel)) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Invalid channel pMgmt->uScanChannel = %d\n", pMgmt->uScanChannel);
+ pMgmt->uScanChannel++;
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ }
+ if (pMgmt->uScanChannel == pDevice->byMinChannel) {
+ // pMgmt->eScanType = WMAC_SCAN_ACTIVE; //mike mark
+ pMgmt->abyScanBSSID[0] = 0xFF;
+ pMgmt->abyScanBSSID[1] = 0xFF;
+ pMgmt->abyScanBSSID[2] = 0xFF;
+ pMgmt->abyScanBSSID[3] = 0xFF;
+ pMgmt->abyScanBSSID[4] = 0xFF;
+ pMgmt->abyScanBSSID[5] = 0xFF;
+ pItemSSID->byElementID = WLAN_EID_SSID;
+ // clear bssid list
+ /* BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass); */
+ pMgmt->eScanState = WMAC_IS_SCANNING;
+ pDevice->byScanBBType = pDevice->byBBType; //lucas
+ pDevice->bStopDataPkt = true;
+ // Turn off RCR_BSSID filter every time
+ MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_BSSID);
+ pDevice->byRxMode &= ~RCR_BSSID;
+ }
+ //lucas
+ vAdHocBeaconStop(pDevice);
+ if ((pDevice->byBBType != BB_TYPE_11A) &&
+ (pMgmt->uScanChannel > CB_MAX_CHANNEL_24G)) {
+ pDevice->byBBType = BB_TYPE_11A;
+ CARDvSetBSSMode(pDevice);
+ } else if ((pDevice->byBBType == BB_TYPE_11A) &&
+ (pMgmt->uScanChannel <= CB_MAX_CHANNEL_24G)) {
+ pDevice->byBBType = BB_TYPE_11G;
+ CARDvSetBSSMode(pDevice);
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning.... channel: [%d]\n", pMgmt->uScanChannel);
+ // Set channel
+ CARDbSetMediaChannel(pDevice, pMgmt->uScanChannel);
+ // Set Baseband to be more sensitive.
+
+ if (pDevice->bUpdateBBVGA) {
+ BBvSetShortSlotTime(pDevice);
+ BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
+ BBvUpdatePreEDThreshold(pDevice, true);
+ }
pMgmt->uScanChannel++;
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- if (pMgmt->uScanChannel == pDevice->byMinChannel) {
- // pMgmt->eScanType = WMAC_SCAN_ACTIVE; //mike mark
- pMgmt->abyScanBSSID[0] = 0xFF;
- pMgmt->abyScanBSSID[1] = 0xFF;
- pMgmt->abyScanBSSID[2] = 0xFF;
- pMgmt->abyScanBSSID[3] = 0xFF;
- pMgmt->abyScanBSSID[4] = 0xFF;
- pMgmt->abyScanBSSID[5] = 0xFF;
- pItemSSID->byElementID = WLAN_EID_SSID;
- // clear bssid list
- /* BSSvClearBSSList((void *) pDevice,
- pDevice->bLinkPass); */
- pMgmt->eScanState = WMAC_IS_SCANNING;
- pDevice->byScanBBType = pDevice->byBBType; //lucas
- pDevice->bStopDataPkt = true;
- // Turn off RCR_BSSID filter every time
- MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_BSSID);
- pDevice->byRxMode &= ~RCR_BSSID;
-
- }
- //lucas
- vAdHocBeaconStop(pDevice);
- if ((pDevice->byBBType != BB_TYPE_11A) && (pMgmt->uScanChannel > CB_MAX_CHANNEL_24G)) {
- pDevice->byBBType = BB_TYPE_11A;
- CARDvSetBSSMode(pDevice);
- }
- else if ((pDevice->byBBType == BB_TYPE_11A) && (pMgmt->uScanChannel <= CB_MAX_CHANNEL_24G)) {
- pDevice->byBBType = BB_TYPE_11G;
- CARDvSetBSSMode(pDevice);
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning.... channel: [%d]\n", pMgmt->uScanChannel);
- // Set channel
- CARDbSetMediaChannel(pDevice, pMgmt->uScanChannel);
- // Set Baseband to be more sensitive.
-
- if (pDevice->bUpdateBBVGA) {
- BBvSetShortSlotTime(pDevice);
- BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
- BBvUpdatePreEDThreshold(pDevice, true);
- }
- pMgmt->uScanChannel++;
-
- while (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel) &&
- pMgmt->uScanChannel <= pDevice->byMaxChannel ){
- pMgmt->uScanChannel++;
- }
-
- if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
- // Set Baseband to be not sensitive and rescan
- pDevice->eCommandState = WLAN_CMD_SCAN_END;
-
- }
- if ((pMgmt->b11hEnable == false) ||
- (pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
- s_vProbeChannel(pDevice);
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *) pDevice, 100);
- return;
- } else {
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *) pDevice, WCMD_PASSIVE_SCAN_TIME);
- return;
- }
-
- }
-
- break;
-
- case WLAN_CMD_SCAN_END:
-
- // Set Baseband's sensitivity back.
- if (pDevice->byBBType != pDevice->byScanBBType) {
- pDevice->byBBType = pDevice->byScanBBType;
- CARDvSetBSSMode(pDevice);
- }
-
- if (pDevice->bUpdateBBVGA) {
- BBvSetShortSlotTime(pDevice);
- BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
- BBvUpdatePreEDThreshold(pDevice, false);
- }
-
- // Set channel back
- vAdHocBeaconRestart(pDevice);
- // Set channel back
- CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel);
- // Set Filter
- if (pMgmt->bCurrBSSIDFilterOn) {
- MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID);
- pDevice->byRxMode |= RCR_BSSID;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel);
+
+ while (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel) &&
+			pMgmt->uScanChannel <= pDevice->byMaxChannel) {
+ pMgmt->uScanChannel++;
+ }
+
+ if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
+ // Set Baseband to be not sensitive and rescan
+ pDevice->eCommandState = WLAN_CMD_SCAN_END;
+ }
+ if ((pMgmt->b11hEnable == false) ||
+ (pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
+ s_vProbeChannel(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ vCommandTimerWait((void *) pDevice, 100);
+ return;
+ } else {
+ spin_unlock_irq(&pDevice->lock);
+ vCommandTimerWait((void *) pDevice, WCMD_PASSIVE_SCAN_TIME);
+ return;
+ }
+ }
+
+ break;
+
+ case WLAN_CMD_SCAN_END:
+
+ // Set Baseband's sensitivity back.
+ if (pDevice->byBBType != pDevice->byScanBBType) {
+ pDevice->byBBType = pDevice->byScanBBType;
+ CARDvSetBSSMode(pDevice);
+ }
+
+ if (pDevice->bUpdateBBVGA) {
+ BBvSetShortSlotTime(pDevice);
+ BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
+ BBvUpdatePreEDThreshold(pDevice, false);
+ }
+
+ // Set channel back
+ vAdHocBeaconRestart(pDevice);
+ // Set channel back
+ CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel);
+ // Set Filter
+ if (pMgmt->bCurrBSSIDFilterOn) {
+ MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID);
+ pDevice->byRxMode |= RCR_BSSID;
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel);
pMgmt->uScanChannel = 0;
- pMgmt->eScanState = WMAC_NO_SCANNING;
- pDevice->bStopDataPkt = false;
+ pMgmt->eScanState = WMAC_NO_SCANNING;
+ pDevice->bStopDataPkt = false;
/*send scan event to wpa_Supplicant*/
PRINT_K("wireless_send_event--->SIOCGIWSCAN(scan done)\n");
memset(&wrqu, 0, sizeof(wrqu));
wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL);
- s_bCommandComplete(pDevice);
- break;
+ s_bCommandComplete(pDevice);
+ break;
- case WLAN_CMD_DISASSOCIATE_START :
+ case WLAN_CMD_DISASSOCIATE_START:
pDevice->byReAssocCount = 0;
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState != WMAC_STATE_ASSOC)) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- } else {
-
- pDevice->bwextstep0 = false;
- pDevice->bwextstep1 = false;
- pDevice->bwextstep2 = false;
- pDevice->bwextstep3 = false;
- pDevice->bWPASuppWextEnabled = false;
- pDevice->fWPA_Authened = false;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n");
- // reason = 8 : disassoc because sta has left
- vMgrDisassocBeginSta((void *) pDevice,
- pMgmt,
- pMgmt->abyCurrBSSID,
- (8),
- &Status);
- pDevice->bLinkPass = false;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- // unlock command busy
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- pItemSSID->len = 0;
- memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->sNodeDBTable[0].bActive = false;
-// pDevice->bBeaconBufReady = false;
- }
- netif_stop_queue(pDevice->dev);
- if (pDevice->bNeedRadioOFF == true)
- CARDbRadioPowerOff(pDevice);
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_SSID_START:
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
+ (pMgmt->eCurrState != WMAC_STATE_ASSOC)) {
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ } else {
+ pDevice->bwextstep0 = false;
+ pDevice->bwextstep1 = false;
+ pDevice->bwextstep2 = false;
+ pDevice->bwextstep3 = false;
+ pDevice->bWPASuppWextEnabled = false;
+ pDevice->fWPA_Authened = false;
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n");
+ // reason = 8 : disassoc because sta has left
+ vMgrDisassocBeginSta((void *) pDevice,
+ pMgmt,
+ pMgmt->abyCurrBSSID,
+ (8),
+ &Status);
+ pDevice->bLinkPass = false;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
+ // unlock command busy
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
+ pItemSSID->len = 0;
+ memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ pMgmt->sNodeDBTable[0].bActive = false;
+// pDevice->bBeaconBufReady = false;
+ }
+ netif_stop_queue(pDevice->dev);
+ if (pDevice->bNeedRadioOFF == true)
+ CARDbRadioPowerOff(pDevice);
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_SSID_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == true) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
-
- memcpy(pMgmt->abyAdHocSSID,pMgmt->abyDesireSSID,
- ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN);
-
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pItemSSIDCurr = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: desire ssid = %s\n", pItemSSID->abySSID);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: curr ssid = %s\n", pItemSSIDCurr->abySSID);
-
- if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Cmd pMgmt->eCurrState == WMAC_STATE_ASSOC\n");
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSID->len =%d\n",pItemSSID->len);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSIDCurr->len = %d\n",pItemSSIDCurr->len);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" desire ssid = %s\n", pItemSSID->abySSID);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" curr ssid = %s\n", pItemSSIDCurr->abySSID);
- }
-
- if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
- ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)&& (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
-
- if (pItemSSID->len == pItemSSIDCurr->len) {
- if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- }
- netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = false;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- }
- // set initial state
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- PSvDisablePowerSaving((void *) pDevice);
- BSSvClearNodeDBTable(pDevice, 0);
- vMgrJoinBSSBegin((void *) pDevice, &Status);
- // if Infra mode
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) {
- // Call mgr to begin the deauthentication
- // reason = (3) because sta has left ESS
- if (pMgmt->eCurrState >= WMAC_STATE_AUTH) {
- vMgrDeAuthenBeginSta((void *)pDevice,
- pMgmt,
- pMgmt->abyCurrBSSID,
- (3),
- &Status);
- }
- // Call mgr to begin the authentication
- vMgrAuthenBeginSta((void *) pDevice, pMgmt, &Status);
- if (Status == CMD_STATUS_SUCCESS) {
- pDevice->byLinkWaitCount = 0;
- pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT;
- vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT);
- spin_unlock_irq(&pDevice->lock);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n");
- return;
- }
- }
- // if Adhoc mode
- else if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
- if (netif_queue_stopped(pDevice->dev)){
- netif_wake_queue(pDevice->dev);
- }
- pDevice->bLinkPass = true;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
- pMgmt->sNodeDBTable[0].bActive = true;
- pMgmt->sNodeDBTable[0].uInActiveCount = 0;
- }
- else {
- // start own IBSS
- DBG_PRT(MSG_LEVEL_DEBUG,
- KERN_INFO "CreateOwn IBSS by CurrMode = IBSS_STA\n");
- vMgrCreateOwnIBSS((void *) pDevice, &Status);
- if (Status != CMD_STATUS_SUCCESS){
- DBG_PRT(MSG_LEVEL_DEBUG,
- KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n");
- }
- BSSvAddMulticastNode(pDevice);
- }
- s_bClearBSSID_SCAN(pDevice);
- }
- // if SSID not found
- else if (pMgmt->eCurrMode == WMAC_MODE_STANDBY) {
- if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA ||
- pMgmt->eConfigMode == WMAC_CONFIG_AUTO) {
- // start own IBSS
- DBG_PRT(MSG_LEVEL_DEBUG,
- KERN_INFO "CreateOwn IBSS by CurrMode = STANDBY\n");
- vMgrCreateOwnIBSS((void *) pDevice, &Status);
- if (Status != CMD_STATUS_SUCCESS){
- DBG_PRT(MSG_LEVEL_DEBUG,
- KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n");
- }
- BSSvAddMulticastNode(pDevice);
- s_bClearBSSID_SCAN(pDevice);
+ if (pDevice->bRadioOff == true) {
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ }
+
+ memcpy(pMgmt->abyAdHocSSID, pMgmt->abyDesireSSID,
+ ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN);
+
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
+ pItemSSIDCurr = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: desire ssid = %s\n", pItemSSID->abySSID);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: curr ssid = %s\n", pItemSSIDCurr->abySSID);
+
+ if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Cmd pMgmt->eCurrState == WMAC_STATE_ASSOC\n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSID->len =%d\n", pItemSSID->len);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSIDCurr->len = %d\n", pItemSSIDCurr->len);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" desire ssid = %s\n", pItemSSID->abySSID);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" curr ssid = %s\n", pItemSSIDCurr->abySSID);
+ }
+
+ if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
+ ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
+ if (pItemSSID->len == pItemSSIDCurr->len) {
+ if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) {
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ }
+ }
+ netif_stop_queue(pDevice->dev);
+ pDevice->bLinkPass = false;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
+ }
+ // set initial state
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ pMgmt->eCurrMode = WMAC_MODE_STANDBY;
+ PSvDisablePowerSaving((void *) pDevice);
+ BSSvClearNodeDBTable(pDevice, 0);
+ vMgrJoinBSSBegin((void *) pDevice, &Status);
+ // if Infra mode
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) {
+ // Call mgr to begin the deauthentication
+ // reason = (3) because sta has left ESS
+ if (pMgmt->eCurrState >= WMAC_STATE_AUTH) {
+ vMgrDeAuthenBeginSta((void *)pDevice,
+ pMgmt,
+ pMgmt->abyCurrBSSID,
+ (3),
+ &Status);
+ }
+ // Call mgr to begin the authentication
+ vMgrAuthenBeginSta((void *) pDevice, pMgmt, &Status);
+ if (Status == CMD_STATUS_SUCCESS) {
+ pDevice->byLinkWaitCount = 0;
+ pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT;
+ vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT);
+ spin_unlock_irq(&pDevice->lock);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n");
+ return;
+ }
+ }
+ // if Adhoc mode
+ else if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
+ if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
+ if (netif_queue_stopped(pDevice->dev))
+ netif_wake_queue(pDevice->dev);
+ pDevice->bLinkPass = true;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_INTER);
+ pMgmt->sNodeDBTable[0].bActive = true;
+ pMgmt->sNodeDBTable[0].uInActiveCount = 0;
+ } else {
+ // start own IBSS
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "CreateOwn IBSS by CurrMode = IBSS_STA\n");
+ vMgrCreateOwnIBSS((void *) pDevice, &Status);
+ if (Status != CMD_STATUS_SUCCESS) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n");
+ }
+ BSSvAddMulticastNode(pDevice);
+ }
+ s_bClearBSSID_SCAN(pDevice);
+ }
+ // if SSID not found
+ else if (pMgmt->eCurrMode == WMAC_MODE_STANDBY) {
+ if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA ||
+ pMgmt->eConfigMode == WMAC_CONFIG_AUTO) {
+ // start own IBSS
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "CreateOwn IBSS by CurrMode = STANDBY\n");
+ vMgrCreateOwnIBSS((void *) pDevice, &Status);
+ if (Status != CMD_STATUS_SUCCESS) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n");
+ }
+ BSSvAddMulticastNode(pDevice);
+ s_bClearBSSID_SCAN(pDevice);
/*
- pDevice->bLinkPass = true;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
- if (netif_queue_stopped(pDevice->dev)){
- netif_wake_queue(pDevice->dev);
- }
- s_bClearBSSID_SCAN(pDevice);
+ pDevice->bLinkPass = true;
+ ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
+ if (netif_queue_stopped(pDevice->dev)){
+ netif_wake_queue(pDevice->dev);
+ }
+ s_bClearBSSID_SCAN(pDevice);
*/
- }
- else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disconnect SSID none\n");
- // if(pDevice->bWPASuppWextEnabled == true)
- {
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof (wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated:vMgrJoinBSSBegin Fail !!)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
- }
- }
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_AUTHENTICATE_WAIT :
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_AUTHENTICATE_WAIT\n");
- if (pMgmt->eCurrState == WMAC_STATE_AUTH) {
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disconnect SSID none\n");
+ // if(pDevice->bWPASuppWextEnabled == true)
+ {
+ union iwreq_data wrqu;
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated:vMgrJoinBSSBegin Fail !!)\n");
+ wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
+ }
+ }
+ }
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_AUTHENTICATE_WAIT:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_AUTHENTICATE_WAIT\n");
+ if (pMgmt->eCurrState == WMAC_STATE_AUTH) {
+ pDevice->byLinkWaitCount = 0;
+ // Call mgr to begin the association
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_AUTH\n");
+ vMgrAssocBeginSta((void *) pDevice, pMgmt, &Status);
+ if (Status == CMD_STATUS_SUCCESS) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState = WLAN_ASSOCIATE_WAIT\n");
+ pDevice->byLinkWaitCount = 0;
+ pDevice->eCommandState = WLAN_ASSOCIATE_WAIT;
+ vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ }
+ } else if (pMgmt->eCurrState < WMAC_STATE_AUTHPENDING) {
+ printk("WLAN_AUTHENTICATE_WAIT:Authen Fail???\n");
+ } else if (pDevice->byLinkWaitCount <= 4) {
+ //mike add:wait another 2 sec if authenticated_frame delay!
+ pDevice->byLinkWaitCount++;
+ printk("WLAN_AUTHENTICATE_WAIT:wait %d times!!\n", pDevice->byLinkWaitCount);
+ spin_unlock_irq(&pDevice->lock);
+ vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT/2);
+ return;
+ }
pDevice->byLinkWaitCount = 0;
- // Call mgr to begin the association
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_AUTH\n");
- vMgrAssocBeginSta((void *) pDevice, pMgmt, &Status);
- if (Status == CMD_STATUS_SUCCESS) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState = WLAN_ASSOCIATE_WAIT\n");
- pDevice->byLinkWaitCount = 0;
- pDevice->eCommandState = WLAN_ASSOCIATE_WAIT;
- vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- }
- else if(pMgmt->eCurrState < WMAC_STATE_AUTHPENDING) {
- printk("WLAN_AUTHENTICATE_WAIT:Authen Fail???\n");
- }
- else if(pDevice->byLinkWaitCount <= 4){ //mike add:wait another 2 sec if authenticated_frame delay!
- pDevice->byLinkWaitCount ++;
- printk("WLAN_AUTHENTICATE_WAIT:wait %d times!!\n",pDevice->byLinkWaitCount);
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT/2);
- return;
- }
- pDevice->byLinkWaitCount = 0;
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_ASSOCIATE_WAIT :
- if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_ASSOC\n");
- if (pDevice->ePSMode != WMAC_POWER_CAM) {
- PSvEnablePowerSaving((void *) pDevice,
- pMgmt->wListenInterval);
- }
+
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_ASSOCIATE_WAIT:
+ if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_ASSOC\n");
+ if (pDevice->ePSMode != WMAC_POWER_CAM) {
+ PSvEnablePowerSaving((void *) pDevice,
+ pMgmt->wListenInterval);
+ }
/*
- if (pMgmt->eAuthenMode >= WMAC_AUTH_WPA) {
- KeybRemoveAllKey(pDevice, &(pDevice->sKey), pDevice->abyBSSID);
- }
+ if (pMgmt->eAuthenMode >= WMAC_AUTH_WPA) {
+ KeybRemoveAllKey(pDevice, &(pDevice->sKey), pDevice->abyBSSID);
+ }
*/
- pDevice->byLinkWaitCount = 0;
- pDevice->byReAssocCount = 0;
- pDevice->bLinkPass = true;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
- s_bClearBSSID_SCAN(pDevice);
-
- if (netif_queue_stopped(pDevice->dev)){
- netif_wake_queue(pDevice->dev);
- }
-
- }
- else if(pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) {
- printk("WLAN_ASSOCIATE_WAIT:Association Fail???\n");
- }
- else if(pDevice->byLinkWaitCount <= 4){ //mike add:wait another 2 sec if associated_frame delay!
- pDevice->byLinkWaitCount ++;
- printk("WLAN_ASSOCIATE_WAIT:wait %d times!!\n",pDevice->byLinkWaitCount);
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT/2);
- return;
- }
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_AP_MODE_START :
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_AP_MODE_START\n");
-
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- cancel_delayed_work_sync(&pDevice->second_callback_work);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pDevice->bLinkPass = false;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- if (pDevice->bEnableHostWEP == true)
- BSSvClearNodeDBTable(pDevice, 1);
- else
- BSSvClearNodeDBTable(pDevice, 0);
- pDevice->uAssocCount = 0;
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pDevice->bFixRate = false;
-
- vMgrCreateOwnIBSS((void *) pDevice, &Status);
- if (Status != CMD_STATUS_SUCCESS) {
- DBG_PRT(MSG_LEVEL_DEBUG,
- KERN_INFO "vMgrCreateOwnIBSS fail!\n");
- }
- // always turn off unicast bit
- MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_UNICAST);
- pDevice->byRxMode &= ~RCR_UNICAST;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wcmd: rx_mode = %x\n", pDevice->byRxMode );
- BSSvAddMulticastNode(pDevice);
- if (netif_queue_stopped(pDevice->dev)){
- netif_wake_queue(pDevice->dev);
- }
- pDevice->bLinkPass = true;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
- schedule_delayed_work(&pDevice->second_callback_work, HZ);
- }
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_TX_PSPACKET_START :
- // DTIM Multicast tx
- if (pMgmt->sNodeDBTable[0].bRxPSPoll) {
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) {
- if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) {
- pMgmt->abyPSTxMap[0] &= ~byMask[0];
- pDevice->bMoreData = false;
- }
- else {
- pDevice->bMoreData = true;
- }
-
- if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Multicast ps tx fail \n");
- }
-
- pMgmt->sNodeDBTable[0].wEnQueueCnt--;
- }
- }
-
- // PS nodes tx
- for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive &&
- pMgmt->sNodeDBTable[ii].bRxPSPoll) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d Enqueu Cnt= %d\n",
- ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt);
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
- if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
- // clear tx map
- pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
- ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
- pDevice->bMoreData = false;
- }
- else {
- pDevice->bMoreData = true;
- }
-
- if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "sta ps tx fail \n");
- }
-
- pMgmt->sNodeDBTable[ii].wEnQueueCnt--;
- // check if sta ps enable, wait next pspoll
- // if sta ps disable, send all pending buffers.
- if (pMgmt->sNodeDBTable[ii].bPSEnable)
- break;
- }
- if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
- // clear tx map
- pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
- ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d PS queue clear \n", ii);
- }
- pMgmt->sNodeDBTable[ii].bRxPSPoll = false;
- }
- }
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_RADIO_START:
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_RADIO_START\n");
- // if (pDevice->bRadioCmd == true)
- // CARDbRadioPowerOn(pDevice);
- // else
- // CARDbRadioPowerOff(pDevice);
-
- {
- int ntStatus = STATUS_SUCCESS;
- u8 byTmp;
-
- ntStatus = CONTROLnsRequestIn(pDevice,
- MESSAGE_TYPE_READ,
- MAC_REG_GPIOCTL1,
- MESSAGE_REQUEST_MACREG,
- 1,
- &byTmp);
-
- if ( ntStatus != STATUS_SUCCESS ) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- if ( (byTmp & GPIO3_DATA) == 0 ) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_RADIO_START_OFF........................\n");
- // Old commands are useless.
- // empty command Q
- pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
- pDevice->uCmdDequeueIdx = 0;
- pDevice->uCmdEnqueueIdx = 0;
- //0415pDevice->bCmdRunning = false;
- pDevice->bCmdClear = true;
- pDevice->bStopTx0Pkt = false;
- pDevice->bStopDataPkt = true;
-
- pDevice->byKeyIndex = 0;
- pDevice->bTransmitKey = false;
- spin_unlock_irq(&pDevice->lock);
- KeyvInitTable(pDevice,&pDevice->sKey);
- spin_lock_irq(&pDevice->lock);
- pMgmt->byCSSPK = KEY_CTL_NONE;
- pMgmt->byCSSGK = KEY_CTL_NONE;
-
- if (pDevice->bLinkPass == true) {
- // reason = 8 : disassoc because sta has left
- vMgrDisassocBeginSta((void *) pDevice,
- pMgmt,
- pMgmt->abyCurrBSSID,
- (8),
- &Status);
- pDevice->bLinkPass = false;
- // unlock command busy
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->sNodeDBTable[0].bActive = false;
- // if(pDevice->bWPASuppWextEnabled == true)
- {
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof (wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
- }
- pDevice->bwextstep0 = false;
- pDevice->bwextstep1 = false;
- pDevice->bwextstep2 = false;
- pDevice->bwextstep3 = false;
- pDevice->bWPASuppWextEnabled = false;
- //clear current SSID
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- pItemSSID->len = 0;
- memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
- //clear desired SSID
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pItemSSID->len = 0;
- memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
-
- netif_stop_queue(pDevice->dev);
- CARDbRadioPowerOff(pDevice);
- MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD);
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_OFF);
- pDevice->bHWRadioOff = true;
- } else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_RADIO_START_ON........................\n");
- pDevice->bHWRadioOff = false;
- CARDbRadioPowerOn(pDevice);
- MACvRegBitsOff(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD);
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_ON);
- }
- }
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_CHANGE_BBSENSITIVITY_START:
-
- pDevice->bStopDataPkt = true;
- pDevice->byBBVGACurrent = pDevice->byBBVGANew;
- BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Change sensitivity pDevice->byBBVGACurrent = %x\n", pDevice->byBBVGACurrent);
- pDevice->bStopDataPkt = false;
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_TBTT_WAKEUP_START:
- PSbIsNextTBTTWakeUp(pDevice);
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_BECON_SEND_START:
- bMgrPrepareBeaconToSend(pDevice, pMgmt);
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_SETPOWER_START:
-
- RFbSetPower(pDevice, pDevice->wCurrentRate, pMgmt->uCurrChannel);
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_CHANGE_ANTENNA_START:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Change from Antenna%d to", (int)pDevice->dwRxAntennaSel);
- if ( pDevice->dwRxAntennaSel == 0) {
- pDevice->dwRxAntennaSel=1;
- if (pDevice->bTxRxAntInv == true)
- BBvSetAntennaMode(pDevice, ANT_RXA);
- else
- BBvSetAntennaMode(pDevice, ANT_RXB);
- } else {
- pDevice->dwRxAntennaSel=0;
- if (pDevice->bTxRxAntInv == true)
- BBvSetAntennaMode(pDevice, ANT_RXB);
- else
- BBvSetAntennaMode(pDevice, ANT_RXA);
- }
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_REMOVE_ALLKEY_START:
- KeybRemoveAllKey(pDevice, &(pDevice->sKey), pDevice->abyBSSID);
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_MAC_DISPOWERSAVING_START:
- ControlvReadByte (pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PSCTL, &byData);
- if ( (byData & PSCTL_PS) != 0 ) {
- // disable power saving hw function
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_DISABLE_PS,
- 0,
- 0,
- 0,
- NULL
- );
- }
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_11H_CHSW_START:
- CARDbSetMediaChannel(pDevice, pDevice->byNewChannel);
- pDevice->bChannelSwitch = false;
- pMgmt->uCurrChannel = pDevice->byNewChannel;
- pDevice->bStopDataPkt = false;
- s_bCommandComplete(pDevice);
- break;
-
- default:
- s_bCommandComplete(pDevice);
- break;
- } //switch
-
- spin_unlock_irq(&pDevice->lock);
- return;
+ pDevice->byLinkWaitCount = 0;
+ pDevice->byReAssocCount = 0;
+ pDevice->bLinkPass = true;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_INTER);
+ s_bClearBSSID_SCAN(pDevice);
+
+ if (netif_queue_stopped(pDevice->dev))
+ netif_wake_queue(pDevice->dev);
+
+ } else if (pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) {
+ printk("WLAN_ASSOCIATE_WAIT:Association Fail???\n");
+ } else if (pDevice->byLinkWaitCount <= 4) {
+ //mike add:wait another 2 sec if associated_frame delay!
+ pDevice->byLinkWaitCount++;
+ printk("WLAN_ASSOCIATE_WAIT:wait %d times!!\n", pDevice->byLinkWaitCount);
+ spin_unlock_irq(&pDevice->lock);
+ vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT/2);
+ return;
+ }
+
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_AP_MODE_START:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_AP_MODE_START\n");
+
+ if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
+ cancel_delayed_work_sync(&pDevice->second_callback_work);
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ pMgmt->eCurrMode = WMAC_MODE_STANDBY;
+ pDevice->bLinkPass = false;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
+ if (pDevice->bEnableHostWEP == true)
+ BSSvClearNodeDBTable(pDevice, 1);
+ else
+ BSSvClearNodeDBTable(pDevice, 0);
+ pDevice->uAssocCount = 0;
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ pDevice->bFixRate = false;
+
+ vMgrCreateOwnIBSS((void *) pDevice, &Status);
+ if (Status != CMD_STATUS_SUCCESS) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "vMgrCreateOwnIBSS fail!\n");
+ }
+ // always turn off unicast bit
+ MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_UNICAST);
+ pDevice->byRxMode &= ~RCR_UNICAST;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wcmd: rx_mode = %x\n", pDevice->byRxMode);
+ BSSvAddMulticastNode(pDevice);
+ if (netif_queue_stopped(pDevice->dev))
+ netif_wake_queue(pDevice->dev);
+ pDevice->bLinkPass = true;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_INTER);
+ schedule_delayed_work(&pDevice->second_callback_work, HZ);
+ }
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_TX_PSPACKET_START:
+ // DTIM Multicast tx
+ if (pMgmt->sNodeDBTable[0].bRxPSPoll) {
+ while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) {
+ if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) {
+ pMgmt->abyPSTxMap[0] &= ~byMask[0];
+ pDevice->bMoreData = false;
+ } else {
+ pDevice->bMoreData = true;
+ }
+
+ if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Multicast ps tx fail\n");
+
+ pMgmt->sNodeDBTable[0].wEnQueueCnt--;
+ }
+ }
+
+ // PS nodes tx
+ for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
+ if (pMgmt->sNodeDBTable[ii].bActive &&
+ pMgmt->sNodeDBTable[ii].bRxPSPoll) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d Enqueu Cnt= %d\n",
+ ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt);
+ while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
+ if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
+ // clear tx map
+ pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
+ ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
+ pDevice->bMoreData = false;
+ } else {
+ pDevice->bMoreData = true;
+ }
+
+ if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "sta ps tx fail\n");
+
+ pMgmt->sNodeDBTable[ii].wEnQueueCnt--;
+ // check if sta ps enable, wait next pspoll
+ // if sta ps disable, send all pending buffers.
+ if (pMgmt->sNodeDBTable[ii].bPSEnable)
+ break;
+ }
+ if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
+ // clear tx map
+ pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
+ ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d PS queue clear\n", ii);
+ }
+ pMgmt->sNodeDBTable[ii].bRxPSPoll = false;
+ }
+ }
+
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_RADIO_START:
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_RADIO_START\n");
+// if (pDevice->bRadioCmd == true)
+// CARDbRadioPowerOn(pDevice);
+// else
+// CARDbRadioPowerOff(pDevice);
+ {
+ int ntStatus = STATUS_SUCCESS;
+ u8 byTmp;
+
+ ntStatus = CONTROLnsRequestIn(pDevice,
+ MESSAGE_TYPE_READ,
+ MAC_REG_GPIOCTL1,
+ MESSAGE_REQUEST_MACREG,
+ 1,
+ &byTmp);
+
+ if (ntStatus != STATUS_SUCCESS) {
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ }
+ if ((byTmp & GPIO3_DATA) == 0) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_RADIO_START_OFF........................\n");
+ // Old commands are useless.
+ // empty command Q
+ pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
+ pDevice->uCmdDequeueIdx = 0;
+ pDevice->uCmdEnqueueIdx = 0;
+ //0415pDevice->bCmdRunning = false;
+ pDevice->bCmdClear = true;
+ pDevice->bStopTx0Pkt = false;
+ pDevice->bStopDataPkt = true;
+
+ pDevice->byKeyIndex = 0;
+ pDevice->bTransmitKey = false;
+ spin_unlock_irq(&pDevice->lock);
+ KeyvInitTable(pDevice, &pDevice->sKey);
+ spin_lock_irq(&pDevice->lock);
+ pMgmt->byCSSPK = KEY_CTL_NONE;
+ pMgmt->byCSSGK = KEY_CTL_NONE;
+
+ if (pDevice->bLinkPass == true) {
+ // reason = 8 : disassoc because sta has left
+ vMgrDisassocBeginSta((void *) pDevice,
+ pMgmt,
+ pMgmt->abyCurrBSSID,
+ (8),
+ &Status);
+ pDevice->bLinkPass = false;
+ // unlock command busy
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ pMgmt->sNodeDBTable[0].bActive = false;
+ // if(pDevice->bWPASuppWextEnabled == true)
+ {
+ union iwreq_data wrqu;
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
+ wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
+ }
+ }
+ pDevice->bwextstep0 = false;
+ pDevice->bwextstep1 = false;
+ pDevice->bwextstep2 = false;
+ pDevice->bwextstep3 = false;
+ pDevice->bWPASuppWextEnabled = false;
+ //clear current SSID
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
+ pItemSSID->len = 0;
+ memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
+ //clear desired SSID
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
+ pItemSSID->len = 0;
+ memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
+
+ netif_stop_queue(pDevice->dev);
+ CARDbRadioPowerOff(pDevice);
+ MACvRegBitsOn(pDevice, MAC_REG_GPIOCTL1, GPIO3_INTMD);
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_OFF);
+ pDevice->bHWRadioOff = true;
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_RADIO_START_ON........................\n");
+ pDevice->bHWRadioOff = false;
+ CARDbRadioPowerOn(pDevice);
+ MACvRegBitsOff(pDevice, MAC_REG_GPIOCTL1, GPIO3_INTMD);
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_ON);
+ }
+ }
+
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_CHANGE_BBSENSITIVITY_START:
+
+ pDevice->bStopDataPkt = true;
+ pDevice->byBBVGACurrent = pDevice->byBBVGANew;
+ BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Change sensitivity pDevice->byBBVGACurrent = %x\n", pDevice->byBBVGACurrent);
+ pDevice->bStopDataPkt = false;
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_TBTT_WAKEUP_START:
+ PSbIsNextTBTTWakeUp(pDevice);
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_BECON_SEND_START:
+ bMgrPrepareBeaconToSend(pDevice, pMgmt);
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_SETPOWER_START:
+
+ RFbSetPower(pDevice, pDevice->wCurrentRate, pMgmt->uCurrChannel);
+
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_CHANGE_ANTENNA_START:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Change from Antenna%d to", (int)pDevice->dwRxAntennaSel);
+ if (pDevice->dwRxAntennaSel == 0) {
+ pDevice->dwRxAntennaSel = 1;
+ if (pDevice->bTxRxAntInv == true)
+ BBvSetAntennaMode(pDevice, ANT_RXA);
+ else
+ BBvSetAntennaMode(pDevice, ANT_RXB);
+ } else {
+ pDevice->dwRxAntennaSel = 0;
+ if (pDevice->bTxRxAntInv == true)
+ BBvSetAntennaMode(pDevice, ANT_RXB);
+ else
+ BBvSetAntennaMode(pDevice, ANT_RXA);
+ }
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_REMOVE_ALLKEY_START:
+ KeybRemoveAllKey(pDevice, &(pDevice->sKey), pDevice->abyBSSID);
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_MAC_DISPOWERSAVING_START:
+ ControlvReadByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PSCTL, &byData);
+ if ((byData & PSCTL_PS) != 0) {
+ // disable power saving hw function
+ CONTROLnsRequestOut(pDevice,
+ MESSAGE_TYPE_DISABLE_PS,
+ 0,
+ 0,
+ 0,
+ NULL
+ );
+ }
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_11H_CHSW_START:
+ CARDbSetMediaChannel(pDevice, pDevice->byNewChannel);
+ pDevice->bChannelSwitch = false;
+ pMgmt->uCurrChannel = pDevice->byNewChannel;
+ pDevice->bStopDataPkt = false;
+ s_bCommandComplete(pDevice);
+ break;
+
+ default:
+ s_bCommandComplete(pDevice);
+ break;
+ } //switch
+
+ spin_unlock_irq(&pDevice->lock);
+ return;
}
static int s_bCommandComplete(struct vnt_private *pDevice)
@@ -953,152 +925,146 @@ static int s_bCommandComplete(struct vnt_private *pDevice)
int bRadioCmd = false;
int bForceSCAN = true;
- pDevice->eCommandState = WLAN_CMD_IDLE;
- if (pDevice->cbFreeCmdQueue == CMD_Q_SIZE) {
- //Command Queue Empty
- pDevice->bCmdRunning = false;
- return true;
- }
- else {
- pDevice->eCommand = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].eCmd;
- pSSID = (PWLAN_IE_SSID)pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].abyCmdDesireSSID;
- bRadioCmd = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].bRadioCmd;
- bForceSCAN = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].bForceSCAN;
- ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdDequeueIdx, CMD_Q_SIZE);
- pDevice->cbFreeCmdQueue++;
- pDevice->bCmdRunning = true;
- switch ( pDevice->eCommand ) {
- case WLAN_CMD_BSSID_SCAN:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState= WLAN_CMD_BSSID_SCAN\n");
- pDevice->eCommandState = WLAN_CMD_SCAN_START;
- pMgmt->uScanChannel = 0;
- if (pSSID->len != 0) {
- memcpy(pMgmt->abyScanSSID, pSSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- } else {
- memset(pMgmt->abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- }
+ pDevice->eCommandState = WLAN_CMD_IDLE;
+ if (pDevice->cbFreeCmdQueue == CMD_Q_SIZE) {
+ //Command Queue Empty
+ pDevice->bCmdRunning = false;
+ return true;
+ } else {
+ pDevice->eCommand = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].eCmd;
+ pSSID = (PWLAN_IE_SSID)pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].abyCmdDesireSSID;
+ bRadioCmd = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].bRadioCmd;
+ bForceSCAN = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].bForceSCAN;
+ ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdDequeueIdx, CMD_Q_SIZE);
+ pDevice->cbFreeCmdQueue++;
+ pDevice->bCmdRunning = true;
+ switch (pDevice->eCommand) {
+ case WLAN_CMD_BSSID_SCAN:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState= WLAN_CMD_BSSID_SCAN\n");
+ pDevice->eCommandState = WLAN_CMD_SCAN_START;
+ pMgmt->uScanChannel = 0;
+ if (pSSID->len != 0)
+ memcpy(pMgmt->abyScanSSID, pSSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ else
+ memset(pMgmt->abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
/*
- if ((bForceSCAN == false) && (pDevice->bLinkPass == true)) {
- if ((pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) &&
- ( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID, pSSID->len))) {
- pDevice->eCommandState = WLAN_CMD_IDLE;
- }
- }
+ if ((bForceSCAN == false) && (pDevice->bLinkPass == true)) {
+ if ((pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) &&
+ ( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID, pSSID->len))) {
+ pDevice->eCommandState = WLAN_CMD_IDLE;
+ }
+ }
*/
- break;
- case WLAN_CMD_SSID:
- pDevice->eCommandState = WLAN_CMD_SSID_START;
- if (pSSID->len > WLAN_SSID_MAXLEN)
- pSSID->len = WLAN_SSID_MAXLEN;
- if (pSSID->len != 0)
- memcpy(pMgmt->abyDesireSSID, pSSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState= WLAN_CMD_SSID_START\n");
- break;
- case WLAN_CMD_DISASSOCIATE:
- pDevice->eCommandState = WLAN_CMD_DISASSOCIATE_START;
- break;
- case WLAN_CMD_RX_PSPOLL:
- pDevice->eCommandState = WLAN_CMD_TX_PSPACKET_START;
- break;
- case WLAN_CMD_RUN_AP:
- pDevice->eCommandState = WLAN_CMD_AP_MODE_START;
- break;
- case WLAN_CMD_RADIO:
- pDevice->eCommandState = WLAN_CMD_RADIO_START;
- pDevice->bRadioCmd = bRadioCmd;
- break;
- case WLAN_CMD_CHANGE_BBSENSITIVITY:
- pDevice->eCommandState = WLAN_CMD_CHANGE_BBSENSITIVITY_START;
- break;
-
- case WLAN_CMD_TBTT_WAKEUP:
- pDevice->eCommandState = WLAN_CMD_TBTT_WAKEUP_START;
- break;
-
- case WLAN_CMD_BECON_SEND:
- pDevice->eCommandState = WLAN_CMD_BECON_SEND_START;
- break;
-
- case WLAN_CMD_SETPOWER:
- pDevice->eCommandState = WLAN_CMD_SETPOWER_START;
- break;
-
- case WLAN_CMD_CHANGE_ANTENNA:
- pDevice->eCommandState = WLAN_CMD_CHANGE_ANTENNA_START;
- break;
-
- case WLAN_CMD_REMOVE_ALLKEY:
- pDevice->eCommandState = WLAN_CMD_REMOVE_ALLKEY_START;
- break;
-
- case WLAN_CMD_MAC_DISPOWERSAVING:
- pDevice->eCommandState = WLAN_CMD_MAC_DISPOWERSAVING_START;
- break;
-
- case WLAN_CMD_11H_CHSW:
- pDevice->eCommandState = WLAN_CMD_11H_CHSW_START;
- break;
-
- default:
- break;
-
- }
- vCommandTimerWait(pDevice, 0);
- }
-
- return true;
+ break;
+ case WLAN_CMD_SSID:
+ pDevice->eCommandState = WLAN_CMD_SSID_START;
+ if (pSSID->len > WLAN_SSID_MAXLEN)
+ pSSID->len = WLAN_SSID_MAXLEN;
+ if (pSSID->len != 0)
+ memcpy(pMgmt->abyDesireSSID, pSSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState= WLAN_CMD_SSID_START\n");
+ break;
+ case WLAN_CMD_DISASSOCIATE:
+ pDevice->eCommandState = WLAN_CMD_DISASSOCIATE_START;
+ break;
+ case WLAN_CMD_RX_PSPOLL:
+ pDevice->eCommandState = WLAN_CMD_TX_PSPACKET_START;
+ break;
+ case WLAN_CMD_RUN_AP:
+ pDevice->eCommandState = WLAN_CMD_AP_MODE_START;
+ break;
+ case WLAN_CMD_RADIO:
+ pDevice->eCommandState = WLAN_CMD_RADIO_START;
+ pDevice->bRadioCmd = bRadioCmd;
+ break;
+ case WLAN_CMD_CHANGE_BBSENSITIVITY:
+ pDevice->eCommandState = WLAN_CMD_CHANGE_BBSENSITIVITY_START;
+ break;
+
+ case WLAN_CMD_TBTT_WAKEUP:
+ pDevice->eCommandState = WLAN_CMD_TBTT_WAKEUP_START;
+ break;
+
+ case WLAN_CMD_BECON_SEND:
+ pDevice->eCommandState = WLAN_CMD_BECON_SEND_START;
+ break;
+
+ case WLAN_CMD_SETPOWER:
+ pDevice->eCommandState = WLAN_CMD_SETPOWER_START;
+ break;
+
+ case WLAN_CMD_CHANGE_ANTENNA:
+ pDevice->eCommandState = WLAN_CMD_CHANGE_ANTENNA_START;
+ break;
+
+ case WLAN_CMD_REMOVE_ALLKEY:
+ pDevice->eCommandState = WLAN_CMD_REMOVE_ALLKEY_START;
+ break;
+
+ case WLAN_CMD_MAC_DISPOWERSAVING:
+ pDevice->eCommandState = WLAN_CMD_MAC_DISPOWERSAVING_START;
+ break;
+
+ case WLAN_CMD_11H_CHSW:
+ pDevice->eCommandState = WLAN_CMD_11H_CHSW_START;
+ break;
+
+ default:
+ break;
+ }
+ vCommandTimerWait(pDevice, 0);
+ }
+
+ return true;
}
int bScheduleCommand(struct vnt_private *pDevice,
CMD_CODE eCommand, u8 *pbyItem0)
{
- if (pDevice->cbFreeCmdQueue == 0) {
- return (false);
- }
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].eCmd = eCommand;
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = true;
- memset(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID, 0 , WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- if (pbyItem0 != NULL) {
- switch (eCommand) {
- case WLAN_CMD_BSSID_SCAN:
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = false;
- memcpy(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID,
- pbyItem0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- break;
-
- case WLAN_CMD_SSID:
- memcpy(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID,
- pbyItem0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- break;
-
- case WLAN_CMD_DISASSOCIATE:
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bNeedRadioOFF = *((int *)pbyItem0);
- break;
+ if (pDevice->cbFreeCmdQueue == 0)
+ return false;
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].eCmd = eCommand;
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = true;
+ memset(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID, 0 , WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ if (pbyItem0 != NULL) {
+ switch (eCommand) {
+ case WLAN_CMD_BSSID_SCAN:
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = false;
+ memcpy(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID,
+ pbyItem0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ break;
+
+ case WLAN_CMD_SSID:
+ memcpy(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID,
+ pbyItem0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ break;
+
+ case WLAN_CMD_DISASSOCIATE:
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bNeedRadioOFF = *((int *)pbyItem0);
+ break;
/*
- case WLAN_CMD_DEAUTH:
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].wDeAuthenReason = *((u16 *)pbyItem0);
- break;
+ case WLAN_CMD_DEAUTH:
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].wDeAuthenReason = *((u16 *)pbyItem0);
+ break;
*/
- case WLAN_CMD_RADIO:
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bRadioCmd = *((int *)pbyItem0);
- break;
+ case WLAN_CMD_RADIO:
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bRadioCmd = *((int *)pbyItem0);
+ break;
+
+ default:
+ break;
+ }
+ }
- default:
- break;
- }
- }
+ ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdEnqueueIdx, CMD_Q_SIZE);
+ pDevice->cbFreeCmdQueue--;
- ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdEnqueueIdx, CMD_Q_SIZE);
- pDevice->cbFreeCmdQueue--;
+ if (pDevice->bCmdRunning == false)
+ s_bCommandComplete(pDevice);
- if (pDevice->bCmdRunning == false) {
- s_bCommandComplete(pDevice);
- }
- else {
- }
- return (true);
+ return true;
}
@@ -1121,16 +1087,16 @@ static int s_bClearBSSID_SCAN(struct vnt_private *pDevice)
unsigned int uCmdDequeueIdx = pDevice->uCmdDequeueIdx;
unsigned int ii;
- if ((pDevice->cbFreeCmdQueue < CMD_Q_SIZE) && (uCmdDequeueIdx != pDevice->uCmdEnqueueIdx)) {
- for (ii = 0; ii < (CMD_Q_SIZE - pDevice->cbFreeCmdQueue); ii ++) {
- if (pDevice->eCmdQueue[uCmdDequeueIdx].eCmd == WLAN_CMD_BSSID_SCAN)
- pDevice->eCmdQueue[uCmdDequeueIdx].eCmd = WLAN_CMD_IDLE;
- ADD_ONE_WITH_WRAP_AROUND(uCmdDequeueIdx, CMD_Q_SIZE);
- if (uCmdDequeueIdx == pDevice->uCmdEnqueueIdx)
- break;
- }
- }
- return true;
+ if ((pDevice->cbFreeCmdQueue < CMD_Q_SIZE) && (uCmdDequeueIdx != pDevice->uCmdEnqueueIdx)) {
+ for (ii = 0; ii < (CMD_Q_SIZE - pDevice->cbFreeCmdQueue); ii++) {
+ if (pDevice->eCmdQueue[uCmdDequeueIdx].eCmd == WLAN_CMD_BSSID_SCAN)
+ pDevice->eCmdQueue[uCmdDequeueIdx].eCmd = WLAN_CMD_IDLE;
+ ADD_ONE_WITH_WRAP_AROUND(uCmdDequeueIdx, CMD_Q_SIZE);
+ if (uCmdDequeueIdx == pDevice->uCmdEnqueueIdx)
+ break;
+ }
+ }
+ return true;
}
//mike add:reset command timer
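The vt6656 wcmd.c changes above are mostly indentation and brace cleanups around the driver's fixed-size command queue: bScheduleCommand() fills the slot at uCmdEnqueueIdx, s_bCommandComplete() drains from uCmdDequeueIdx, ADD_ONE_WITH_WRAP_AROUND() keeps both indices inside CMD_Q_SIZE, and cbFreeCmdQueue counts the remaining slots (the queue is empty when it equals CMD_Q_SIZE). As a rough standalone illustration of that ring-buffer idiom, not the driver's actual types or macros, a minimal sketch could look like this:

#include <stdbool.h>
#include <stdio.h>

#define CMD_Q_SIZE 32                         /* fixed queue depth, as in the driver */
#define ADD_ONE_WITH_WRAP_AROUND(i, n) ((i) = ((i) + 1) % (n))

struct cmd_item { int cmd; };                 /* placeholder for a queued command */

struct cmd_queue {
	struct cmd_item q[CMD_Q_SIZE];
	unsigned int enqueue_idx;             /* next free slot */
	unsigned int dequeue_idx;             /* oldest pending command */
	unsigned int free_slots;              /* counterpart of cbFreeCmdQueue */
};

static bool cmd_enqueue(struct cmd_queue *cq, int cmd)
{
	if (cq->free_slots == 0)
		return false;                 /* queue full, caller must retry later */
	cq->q[cq->enqueue_idx].cmd = cmd;
	ADD_ONE_WITH_WRAP_AROUND(cq->enqueue_idx, CMD_Q_SIZE);
	cq->free_slots--;
	return true;
}

static bool cmd_dequeue(struct cmd_queue *cq, int *cmd)
{
	if (cq->free_slots == CMD_Q_SIZE)
		return false;                 /* queue empty */
	*cmd = cq->q[cq->dequeue_idx].cmd;
	ADD_ONE_WITH_WRAP_AROUND(cq->dequeue_idx, CMD_Q_SIZE);
	cq->free_slots++;
	return true;
}

int main(void)
{
	struct cmd_queue cq = { .free_slots = CMD_Q_SIZE };
	int cmd;

	cmd_enqueue(&cq, 1);
	cmd_enqueue(&cq, 2);
	while (cmd_dequeue(&cq, &cmd))
		printf("run command %d\n", cmd);
	return 0;
}

The empty test against CMD_Q_SIZE mirrors the early return in s_bCommandComplete() when no command is pending.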
diff --git a/drivers/staging/vt6656/wmgr.c b/drivers/staging/vt6656/wmgr.c
index e26c415..d74b0e7 100644
--- a/drivers/staging/vt6656/wmgr.c
+++ b/drivers/staging/vt6656/wmgr.c
@@ -2961,7 +2961,7 @@ static struct vnt_tx_mgmt *s_MgrMakeBeacon(struct vnt_private *pDevice,
*
-*/
-struct vnt_tx_mgmt *s_MgrMakeProbeResponse(struct vnt_private *pDevice,
+static struct vnt_tx_mgmt *s_MgrMakeProbeResponse(struct vnt_private *pDevice,
struct vnt_manager *pMgmt, u16 wCurrCapInfo, u16 wCurrBeaconPeriod,
u32 uCurrChannel, u16 wCurrATIMWinodw, u8 *pDstAddr,
PWLAN_IE_SSID pCurrSSID, u8 *pCurrBSSID,
@@ -3081,7 +3081,7 @@ struct vnt_tx_mgmt *s_MgrMakeProbeResponse(struct vnt_private *pDevice,
*
-*/
-struct vnt_tx_mgmt *s_MgrMakeAssocRequest(struct vnt_private *pDevice,
+static struct vnt_tx_mgmt *s_MgrMakeAssocRequest(struct vnt_private *pDevice,
struct vnt_manager *pMgmt, u8 *pDAddr, u16 wCurrCapInfo,
u16 wListenInterval,
PWLAN_IE_SSID pCurrSSID,
@@ -3329,7 +3329,7 @@ struct vnt_tx_mgmt *s_MgrMakeAssocRequest(struct vnt_private *pDevice,
*
-*/
-struct vnt_tx_mgmt *s_MgrMakeReAssocRequest(struct vnt_private *pDevice,
+static struct vnt_tx_mgmt *s_MgrMakeReAssocRequest(struct vnt_private *pDevice,
struct vnt_manager *pMgmt, u8 *pDAddr, u16 wCurrCapInfo,
u16 wListenInterval, PWLAN_IE_SSID pCurrSSID,
PWLAN_IE_SUPP_RATES pCurrRates,
@@ -3576,7 +3576,7 @@ struct vnt_tx_mgmt *s_MgrMakeReAssocRequest(struct vnt_private *pDevice,
*
-*/
-struct vnt_tx_mgmt *s_MgrMakeAssocResponse(struct vnt_private *pDevice,
+static struct vnt_tx_mgmt *s_MgrMakeAssocResponse(struct vnt_private *pDevice,
struct vnt_manager *pMgmt, u16 wCurrCapInfo, u16 wAssocStatus,
u16 wAssocAID, u8 *pDstAddr, PWLAN_IE_SUPP_RATES pCurrSuppRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates)
@@ -3642,7 +3642,7 @@ struct vnt_tx_mgmt *s_MgrMakeAssocResponse(struct vnt_private *pDevice,
*
-*/
-struct vnt_tx_mgmt *s_MgrMakeReAssocResponse(struct vnt_private *pDevice,
+static struct vnt_tx_mgmt *s_MgrMakeReAssocResponse(struct vnt_private *pDevice,
struct vnt_manager *pMgmt, u16 wCurrCapInfo, u16 wAssocStatus,
u16 wAssocAID, u8 *pDstAddr, PWLAN_IE_SUPP_RATES pCurrSuppRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates)
diff --git a/drivers/staging/vt6656/wpa.c b/drivers/staging/vt6656/wpa.c
index 01db4e7..403c295 100644
--- a/drivers/staging/vt6656/wpa.c
+++ b/drivers/staging/vt6656/wpa.c
@@ -43,12 +43,12 @@
static int msglevel =MSG_LEVEL_INFO;
-const u8 abyOUI00[4] = { 0x00, 0x50, 0xf2, 0x00 };
-const u8 abyOUI01[4] = { 0x00, 0x50, 0xf2, 0x01 };
-const u8 abyOUI02[4] = { 0x00, 0x50, 0xf2, 0x02 };
-const u8 abyOUI03[4] = { 0x00, 0x50, 0xf2, 0x03 };
-const u8 abyOUI04[4] = { 0x00, 0x50, 0xf2, 0x04 };
-const u8 abyOUI05[4] = { 0x00, 0x50, 0xf2, 0x05 };
+static const u8 abyOUI00[4] = { 0x00, 0x50, 0xf2, 0x00 };
+static const u8 abyOUI01[4] = { 0x00, 0x50, 0xf2, 0x01 };
+static const u8 abyOUI02[4] = { 0x00, 0x50, 0xf2, 0x02 };
+static const u8 abyOUI03[4] = { 0x00, 0x50, 0xf2, 0x03 };
+static const u8 abyOUI04[4] = { 0x00, 0x50, 0xf2, 0x04 };
+static const u8 abyOUI05[4] = { 0x00, 0x50, 0xf2, 0x05 };
/*+
*
diff --git a/drivers/staging/vt6656/wpa2.c b/drivers/staging/vt6656/wpa2.c
index aa221618..df55417 100644
--- a/drivers/staging/vt6656/wpa2.c
+++ b/drivers/staging/vt6656/wpa2.c
@@ -37,14 +37,14 @@
static int msglevel =MSG_LEVEL_INFO;
//static int msglevel =MSG_LEVEL_DEBUG;
-const u8 abyOUIGK[4] = { 0x00, 0x0F, 0xAC, 0x00 };
-const u8 abyOUIWEP40[4] = { 0x00, 0x0F, 0xAC, 0x01 };
-const u8 abyOUIWEP104[4] = { 0x00, 0x0F, 0xAC, 0x05 };
-const u8 abyOUITKIP[4] = { 0x00, 0x0F, 0xAC, 0x02 };
-const u8 abyOUICCMP[4] = { 0x00, 0x0F, 0xAC, 0x04 };
-
-const u8 abyOUI8021X[4] = { 0x00, 0x0F, 0xAC, 0x01 };
-const u8 abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
+static const u8 abyOUIGK[4] = { 0x00, 0x0F, 0xAC, 0x00 };
+static const u8 abyOUIWEP40[4] = { 0x00, 0x0F, 0xAC, 0x01 };
+static const u8 abyOUIWEP104[4] = { 0x00, 0x0F, 0xAC, 0x05 };
+static const u8 abyOUITKIP[4] = { 0x00, 0x0F, 0xAC, 0x02 };
+static const u8 abyOUICCMP[4] = { 0x00, 0x0F, 0xAC, 0x04 };
+
+static const u8 abyOUI8021X[4] = { 0x00, 0x0F, 0xAC, 0x01 };
+static const u8 abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
/*+
*
diff --git a/drivers/staging/vt6656/wpactl.c b/drivers/staging/vt6656/wpactl.c
index 003bd7c..f4a8a5c 100644
--- a/drivers/staging/vt6656/wpactl.c
+++ b/drivers/staging/vt6656/wpactl.c
@@ -86,7 +86,7 @@ int wpa_set_keys(struct vnt_private *pDevice, void *ctx)
return ret;
}
- if (param->u.wpa_key.key && param->u.wpa_key.key_len > sizeof(abyKey))
+ if (param->u.wpa_key.key_len > sizeof(abyKey))
return -EINVAL;
memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
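The wpactl.c hunk above drops the key-pointer condition so the length check alone guards the memcpy() into the fixed-size abyKey buffer. The underlying pattern, rejecting any caller-supplied length larger than the destination before copying, can be sketched in isolation like this (buffer size and names here are illustrative, not the driver's):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_KEY_LEN 64          /* illustrative stand-in for sizeof(abyKey) */

/* Copy a caller-supplied key only after checking its length against the
 * fixed-size destination, the same guard the hunk above keeps in place. */
static int set_key(const unsigned char *key, size_t key_len)
{
	unsigned char buf[MAX_KEY_LEN];

	if (key_len > sizeof(buf))
		return -EINVAL;

	memcpy(buf, key, key_len);
	/* ... hand buf off to the hardware here ... */
	return 0;
}

int main(void)
{
	const unsigned char key[] = { 0xde, 0xad, 0xbe, 0xef };

	if (set_key(key, sizeof(key)) == 0)
		printf("accepted %zu-byte key\n", sizeof(key));
	if (set_key(key, 1000) != 0)
		printf("oversized length rejected\n");
	return 0;
}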
diff --git a/drivers/staging/winbond/mds.c b/drivers/staging/winbond/mds.c
index cac7720..aef0855 100644
--- a/drivers/staging/winbond/mds.c
+++ b/drivers/staging/winbond/mds.c
@@ -56,7 +56,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter,
RTS_on = true; /* Using RTS */
else {
if (pT01->T01_modulation_type) { /* Is using OFDM */
- if (CURRENT_PROTECT_MECHANISM) /* Is using protect */
+ /* Is using protect */
+ if (CURRENT_PROTECT_MECHANISM)
CTS_on = true; /* Using CTS */
}
}
@@ -69,9 +70,9 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter,
* ACK Rate : 24 Mega bps
* ACK frame length = 14 bytes */
Duration = 2*DEFAULT_SIFSTIME +
- 2*PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
- ((BodyLen*8 + 22 + Rate*4 - 1)/(Rate*4))*Tsym +
- ((112 + 22 + 95)/96)*Tsym;
+ 2*PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
+ ((BodyLen*8 + 22 + Rate*4 - 1)/(Rate*4))*Tsym +
+ ((112 + 22 + 95)/96)*Tsym;
} else { /* DSSS */
/* CTS duration
* 2 SIFS + DATA transmit time + 1 ACK
@@ -92,13 +93,15 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter,
* CTS Rate : 24 Mega bps
* CTS frame length = 14 bytes */
Duration += (DEFAULT_SIFSTIME +
- PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
- ((112 + 22 + 95)/96)*Tsym);
+ PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
+ ((112 + 22 + 95)/96)*Tsym);
} else {
/* CTS + 1 SIFS + CTS duration
* CTS Rate : ?? Mega bps
- * CTS frame length = 14 bytes */
- if (pT01->T01_plcp_header_length) /* long preamble */
+ * CTS frame length = 14 bytes
+ */
+ /* long preamble */
+ if (pT01->T01_plcp_header_length)
Duration += LONG_PREAMBLE_PLUS_PLCPHEADER_TIME;
else
Duration += SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME;
@@ -149,8 +152,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter,
+ Rate-1) / Rate +
DEFAULT_SIFSTIME*3);
}
-
- ((u16 *)buffer)[5] = cpu_to_le16(Duration); /* 4 USHOR for skip 8B USB, 2USHORT=FC + Duration */
+ /* 4 USHOR for skip 8B USB, 2USHORT=FC + Duration */
+ ((u16 *)buffer)[5] = cpu_to_le16(Duration);
/* ----20061009 add by anson's endian */
pNextT00->value = cpu_to_le32(pNextT00->value);
@@ -159,7 +162,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter,
buffer += OffsetSize;
pT01 = (struct T01_descriptor *)(buffer+4);
- if (i != 1) /* The last fragment will not have the next fragment */
+ /* The last fragment will not have the next fragment */
+ if (i != 1)
pNextT00 = (struct T00_descriptor *)(buffer+OffsetSize);
}
@@ -189,7 +193,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter,
}
}
- ((u16 *)buffer)[5] = cpu_to_le16(Duration); /* 4 USHOR for skip 8B USB, 2USHORT=FC + Duration */
+ /* 4 USHOR for skip 8B USB, 2USHORT=FC + Duration */
+ ((u16 *)buffer)[5] = cpu_to_le16(Duration);
pT00->value = cpu_to_le32(pT00->value);
pT01->value = cpu_to_le32(pT01->value);
/* --end 20061009 add */
@@ -221,9 +226,10 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter,
CopySize = SizeLeft;
if (SizeLeft > pDes->FragmentThreshold) {
CopySize = pDes->FragmentThreshold;
- pT00->T00_frame_length = 24 + CopySize; /* Set USB length */
- } else
- pT00->T00_frame_length = 24 + SizeLeft; /* Set USB length */
+ /* Set USB length */
+ pT00->T00_frame_length = 24 + CopySize;
+ } else /* Set USB length */
+ pT00->T00_frame_length = 24 + SizeLeft;
SizeLeft -= CopySize;
@@ -267,21 +273,27 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter,
/* 931130.5.n */
if (pMds->MicAdd) {
if (!SizeLeft) {
- pMds->MicWriteAddress[pMds->MicWriteIndex] = buffer - pMds->MicAdd;
- pMds->MicWriteSize[pMds->MicWriteIndex] = pMds->MicAdd;
+ pMds->MicWriteAddress[pMds->MicWriteIndex] =
+ buffer - pMds->MicAdd;
+ pMds->MicWriteSize[pMds->MicWriteIndex] =
+ pMds->MicAdd;
pMds->MicAdd = 0;
} else if (SizeLeft < 8) { /* 931130.5.p */
pMds->MicAdd = SizeLeft;
- pMds->MicWriteAddress[pMds->MicWriteIndex] = buffer - (8 - SizeLeft);
- pMds->MicWriteSize[pMds->MicWriteIndex] = 8 - SizeLeft;
+ pMds->MicWriteAddress[pMds->MicWriteIndex] =
+ buffer - (8 - SizeLeft);
+ pMds->MicWriteSize[pMds->MicWriteIndex] =
+ 8 - SizeLeft;
pMds->MicWriteIndex++;
}
}
/* Does it need to generate the new header for next mpdu? */
if (SizeLeft) {
- buffer = TargetBuffer + Size; /* Get the next 4n start address */
- memcpy(buffer, TargetBuffer, 32); /* Copy 8B USB +24B 802.11 */
+ /* Get the next 4n start address */
+ buffer = TargetBuffer + Size;
+ /* Copy 8B USB +24B 802.11 */
+ memcpy(buffer, TargetBuffer, 32);
pT00 = (struct T00_descriptor *)buffer;
pT00->T00_first_mpdu = 0;
}
@@ -293,7 +305,8 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter,
pT00->T00_IsLastMpdu = 1;
buffer = (u8 *)pT00 + 8; /* +8 for USB hdr */
buffer[1] &= ~0x04; /* Clear more frag bit of 802.11 frame control */
- pDes->FragmentCount = FragmentCount; /* Update the correct fragment number */
+ /* Update the correct fragment number */
+ pDes->FragmentCount = FragmentCount;
return Size;
}
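The Mds_BodyCopy() hunks above mainly re-wrap long comment lines; the surrounding logic is the usual fragmentation loop, copying at most FragmentThreshold bytes per pass, counting fragments, and clearing the more-fragments bit on the last one. A stripped-down standalone sketch of that loop, with an illustrative threshold rather than the driver's value:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define FRAGMENT_THRESHOLD 256  /* illustrative threshold, not the driver's */

/* Split 'len' payload bytes into fragments of at most FRAGMENT_THRESHOLD
 * bytes and return how many fragments were produced. */
static unsigned int copy_fragments(unsigned char *dst, const unsigned char *src,
				   size_t len)
{
	unsigned int count = 0;
	size_t left = len;

	while (left > 0) {
		size_t copy = left > FRAGMENT_THRESHOLD ? FRAGMENT_THRESHOLD : left;

		memcpy(dst, src, copy);
		dst += copy;
		src += copy;
		left -= copy;
		count++;        /* the caller records this like FragmentCount */
	}
	return count;
}

int main(void)
{
	unsigned char in[1000], out[1000];

	memset(in, 0xab, sizeof(in));
	printf("fragments: %u\n", copy_fragments(out, in, sizeof(in)));
	return 0;
}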
@@ -330,7 +343,8 @@ static void Mds_HeaderCopy(struct wbsoft_priv *adapter,
FragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD; /* Do not fragment */
/* Copy full data, the 1'st buffer contain all the data 931130.5.j */
- memcpy(TargetBuffer, src_buffer, DOT_11_MAC_HEADER_SIZE); /* Copy header */
+ /* Copy header */
+ memcpy(TargetBuffer, src_buffer, DOT_11_MAC_HEADER_SIZE);
pDes->buffer_address[0] = src_buffer + DOT_11_MAC_HEADER_SIZE;
pDes->buffer_total_size -= DOT_11_MAC_HEADER_SIZE;
pDes->buffer_size[0] = pDes->buffer_total_size;
@@ -358,8 +372,8 @@ static void Mds_HeaderCopy(struct wbsoft_priv *adapter,
for (i = 0; i < 2; i++) {
if (i == 1)
ctmp1 = ctmpf;
-
- pMds->TxRate[pDes->Descriptor_ID][i] = ctmp1; /* backup the ta rate and fall back rate */
+ /* backup the ta rate and fall back rate */
+ pMds->TxRate[pDes->Descriptor_ID][i] = ctmp1;
if (ctmp1 == 108)
ctmp2 = 7;
@@ -395,7 +409,8 @@ static void Mds_HeaderCopy(struct wbsoft_priv *adapter,
/*
* Set preamble type
*/
- if ((pT01->T01_modulation_type == 0) && (pT01->T01_transmit_rate == 0)) /* RATE_1M */
+ /* RATE_1M */
+ if ((pT01->T01_modulation_type == 0) && (pT01->T01_transmit_rate == 0))
pDes->PreambleMode = WLAN_PREAMBLE_TYPE_LONG;
else
pDes->PreambleMode = CURRENT_PREAMBLE_MODE;
@@ -468,12 +483,14 @@ Mds_Tx(struct wbsoft_priv *adapter)
/* Start to fill the data */
do {
FillIndex = pMds->TxFillIndex;
- if (pMds->TxOwner[FillIndex]) { /* Is owned by software 0:Yes 1:No */
+ /* Is owned by software 0:Yes 1:No */
+ if (pMds->TxOwner[FillIndex]) {
pr_debug("[Mds_Tx] Tx Owner is H/W.\n");
break;
}
- XmitBufAddress = pMds->pTxBuffer + (MAX_USB_TX_BUFFER * FillIndex); /* Get buffer */
+ /* Get buffer */
+ XmitBufAddress = pMds->pTxBuffer + (MAX_USB_TX_BUFFER * FillIndex);
XmitBufSize = 0;
FillCount = 0;
do {
@@ -485,7 +502,8 @@ Mds_Tx(struct wbsoft_priv *adapter)
FragmentThreshold = CURRENT_FRAGMENT_THRESHOLD;
/* 931130.5.b */
FragmentCount = PacketSize/FragmentThreshold + 1;
- stmp = PacketSize + FragmentCount*32 + 8; /* 931130.5.c 8:MIC */
+ /* 931130.5.c 8:MIC */
+ stmp = PacketSize + FragmentCount*32 + 8;
if ((XmitBufSize + stmp) >= MAX_USB_TX_BUFFER)
break; /* buffer is not enough */
@@ -499,18 +517,23 @@ Mds_Tx(struct wbsoft_priv *adapter)
TxDesIndex = pMds->TxDesIndex; /* Get the current ID */
pTxDes->Descriptor_ID = TxDesIndex;
- pMds->TxDesFrom[TxDesIndex] = 2; /* Storing the information of source coming from */
+ /* Storing the information of source coming from */
+ pMds->TxDesFrom[TxDesIndex] = 2;
pMds->TxDesIndex++;
pMds->TxDesIndex %= MAX_USB_TX_DESCRIPTOR;
MLME_GetNextPacket(adapter, pTxDes);
- /* Copy header. 8byte USB + 24byte 802.11Hdr. Set TxRate, Preamble type */
+ /*
+ * Copy header. 8byte USB + 24byte 802.11Hdr.
+ * Set TxRate, Preamble type
+ */
Mds_HeaderCopy(adapter, pTxDes, XmitBufAddress);
/* For speed up Key setting */
if (pTxDes->EapFix) {
- pr_debug("35: EPA 4th frame detected. Size = %d\n", PacketSize);
+ pr_debug("35: EPA 4th frame detected. Size = %d\n",
+ PacketSize);
pHwData->IsKeyPreSet = 1;
}
@@ -524,7 +547,9 @@ Mds_Tx(struct wbsoft_priv *adapter)
XmitBufSize += CurrentSize;
XmitBufAddress += CurrentSize;
- /* Get packet to transmit completed, 1:TESTSTA 2:MLME 3: Ndis data */
+ /* Get packet to transmit completed,
+ * 1:TESTSTA 2:MLME 3: Ndis data
+ */
MLME_SendComplete(adapter, 0, true);
/* Software TSC count 20060214 */
@@ -533,7 +558,12 @@ Mds_Tx(struct wbsoft_priv *adapter)
pMds->TxTsc_2++;
FillCount++; /* 20060928 */
- } while (HAL_USB_MODE_BURST(pHwData)); /* End of multiple MSDU copy loop. false = single true = multiple sending */
+ /*
+ * End of multiple MSDU copy loop.
+ * false = single
+ * true = multiple sending
+ */
+ } while (HAL_USB_MODE_BURST(pHwData));
/* Move to the next one, if necessary */
if (BufferFilled) {
@@ -594,7 +624,8 @@ Mds_SendComplete(struct wbsoft_priv *adapter, struct T02_descriptor *pT02)
pHwData->tx_retry_count[RetryCount] += RetryCount;
else
pHwData->tx_retry_count[7] += RetryCount;
- pr_debug("dto_tx_retry_count =%d\n", pHwData->dto_tx_retry_count);
+ pr_debug("dto_tx_retry_count =%d\n",
+ pHwData->dto_tx_retry_count);
MTO_SetTxCount(adapter, TxRate, RetryCount);
}
pHwData->dto_tx_frag_count += (RetryCount+1);
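Earlier in this mds.c diff, Mds_DurationSet() keeps its OFDM duration formula and only re-indents the continuation lines; the recurring (x + d - 1) / d shape in that formula is integer ceiling division, rounding the data and ACK transmit times up to whole Tsym symbols. A small worked example of that arithmetic, using made-up constants rather than the driver's real timing values:

#include <stdio.h>

/* Integer ceiling division: smallest q with q * d >= x. */
static unsigned int ceil_div(unsigned int x, unsigned int d)
{
	return (x + d - 1) / d;
}

int main(void)
{
	/* Illustrative numbers only: a 1500-byte body at a 24 Mb/s OFDM rate
	 * carries Rate*4 = 96 data bits per symbol. */
	unsigned int body_len = 1500;   /* bytes */
	unsigned int rate = 24;         /* Mb/s, so rate*4 bits per symbol */
	unsigned int tsym = 4;          /* microseconds per OFDM symbol */

	/* 22 service/tail bits are added before rounding up to symbols,
	 * matching the (BodyLen*8 + 22 + Rate*4 - 1)/(Rate*4) expression. */
	unsigned int data_syms = ceil_div(body_len * 8 + 22, rate * 4);
	unsigned int ack_syms  = ceil_div(112 + 22, 96);  /* 14-byte ACK at 24 Mb/s */

	printf("data: %u symbols (%u us), ack: %u symbols (%u us)\n",
	       data_syms, data_syms * tsym, ack_syms, ack_syms * tsym);
	return 0;
}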
diff --git a/drivers/staging/wlags49_h2/debug.h b/drivers/staging/wlags49_h2/debug.h
index 811698f..40f6a3e 100644
--- a/drivers/staging/wlags49_h2/debug.h
+++ b/drivers/staging/wlags49_h2/debug.h
@@ -83,7 +83,8 @@
the types of messages displayed */
#ifndef DBG_LVL
#define DBG_LVL 5 /* yields nothing via init_module,
- original value of 5 yields DBG_TRACE_ON and DBG_VERBOSE_ON */
+ original value of 5 yields
+ DBG_TRACE_ON and DBG_VERBOSE_ON */
#endif /* DBG_LVL*/
@@ -105,46 +106,16 @@
#define DBG_LEVEL(A) ((A)->dbgLevel)
-#ifndef PRINTK
-# define PRINTK(S...) printk(S)
-#endif /* PRINTK */
-
-
#ifndef DBG_PRINT
-# define DBG_PRINT(S...) PRINTK(KERN_DEBUG S)
+# define DBG_PRINT(S...) printk(KERN_DEBUG S)
#endif /* DBG_PRINT */
#ifndef DBG_PRINTC
-# define DBG_PRINTC(S...) PRINTK(S)
+# define DBG_PRINTC(S...) printk(S)
#endif /* DBG_PRINTC */
-#ifndef DBG_TRAP
-# define DBG_TRAP {}
-#endif /* DBG_TRAP */
-
-
-#define _ENTER_STR ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
-#define _LEAVE_STR "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
-
-
-#define _DBG_ENTER(A) \
- DBG_PRINT("%s:%.*s:%s\n", DBG_NAME(A), ++DBG_LEVEL(A), \
- _ENTER_STR, __func__)
-#define _DBG_LEAVE(A) \
- DBG_PRINT("%s:%.*s:%s\n", DBG_NAME(A), DBG_LEVEL(A)--, \
- _LEAVE_STR, __func__)
-
-
-#define DBG_FUNC(F)
-
-#define DBG_ENTER(A) {if (DBG_FLAGS(A) & DBG_TRACE_ON) \
- _DBG_ENTER(A); }
-
-#define DBG_LEAVE(A) {if (DBG_FLAGS(A) & DBG_TRACE_ON) \
- _DBG_LEAVE(A); }
-
#define DBG_PARAM(A, N, F, S...) {if (DBG_FLAGS(A) & DBG_PARAM_ON) \
DBG_PRINT(" %s -- "F"\n", N, S); }
@@ -153,7 +124,6 @@
if (DBG_FLAGS(A) & DBG_ERROR_ON) { \
DBG_PRINT("%s:ERROR:%s ", DBG_NAME(A), __func__); \
DBG_PRINTC(S); \
- DBG_TRAP; \
} } while (0)
@@ -193,26 +163,22 @@
if (!(C)) { \
DBG_PRINT("ASSERT(%s) -- %s#%d (%s)\n", \
#C, __FILE__, __LINE__, __func__); \
- DBG_TRAP; \
} } while (0)
-typedef struct {
- char *dbgName;
- int dbgLevel;
- unsigned long DebugFlag;
-} dbg_info_t;
+struct dbg_info {
+ char *dbgName;
+ int dbgLevel;
+ unsigned long DebugFlag;
+};
+
+extern struct dbg_info *DbgInfo;
/****************************************************************************/
#else /* DBG */
/****************************************************************************/
-#define DBG_DEFN
-#define DBG_TRAP
-#define DBG_FUNC(F)
#define DBG_PRINT(S...)
-#define DBG_ENTER(A)
-#define DBG_LEAVE(A)
#define DBG_PARAM(A, N, F, S...)
#define DBG_ERROR(A, S...)
#define DBG_WARNING(A, S...)
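The debug.h rewrite above removes the PRINTK() indirection and the DBG_ENTER/DBG_LEAVE trace macros, leaving DBG_PRINT() as a thin wrapper around printk() written with the GNU named-variadic form S... . The same macro shape can be exercised outside the kernel with an ordinary compiler; in this sketch fprintf() stands in for printk(), and the flag test mirrors how DBG_PARAM() checks the debug flags before printing (constants are illustrative):

#include <stdio.h>

/* GNU named variadic macro, same shape as
 *   #define DBG_PRINT(S...) printk(KERN_DEBUG S)
 * Compile with gcc; ISO C would spell it DBG_PRINT(...) / __VA_ARGS__. */
#define DBG_PRINT(S...)   fprintf(stderr, "debug: " S)

/* Conditional variant, mirroring how DBG_PARAM checks a flags word
 * before printing. */
#define DBG_FLAG_ON 1
#define DBG_PARAM(flags, name, fmt, S...) do {                         \
		if ((flags) & DBG_FLAG_ON)                             \
			DBG_PRINT("  %s -- " fmt "\n", name, S);       \
	} while (0)

int main(void)
{
	unsigned long flags = DBG_FLAG_ON;

	DBG_PRINT("module loaded, flags=%#lx\n", flags);
	DBG_PARAM(flags, "dev", "%s (%p)", "wlan0", (void *)&flags);
	return 0;
}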
diff --git a/drivers/staging/wlags49_h2/sta_h25.c b/drivers/staging/wlags49_h2/sta_h25.c
index 5b6f670..eccd780 100644
--- a/drivers/staging/wlags49_h2/sta_h25.c
+++ b/drivers/staging/wlags49_h2/sta_h25.c
@@ -5211,7 +5211,7 @@ static const CFG_PROG_STRCT fw_image_code[] = {
0000,
0x000F429B, // Start execution address
},
- { 0000, 0000, 0000, 0000, 00000000, 0000, 00000000}
+ { 0000, 0000, 0000, 0000, 00000000, 0000, NULL}
};
static const CFG_RANGE20_STRCT fw_image_infocompat[] = {
@@ -5247,8 +5247,8 @@ memimage fw_image = {
"FUPU7D37dhfwci\001C", //signature, <format number>, C/Bin type
(CFG_PROG_STRCT *) fw_image_code,
0x000F429B,
- 00000000, //(dummy) pdaplug
- 00000000, //(dummy) priplug
+ NULL, //(dummy) pdaplug
+ NULL, //(dummy) priplug
(CFG_RANGE20_STRCT *) fw_image_infocompat,
(CFG_IDENTITY_STRCT *) fw_image_infoidentity,
};
diff --git a/drivers/staging/wlags49_h2/wl_cs.c b/drivers/staging/wlags49_h2/wl_cs.c
index a458705..3f7cf41 100644
--- a/drivers/staging/wlags49_h2/wl_cs.c
+++ b/drivers/staging/wlags49_h2/wl_cs.c
@@ -100,15 +100,6 @@
#include <wl_netdev.h>
#include <wl_cs.h>
-
-/*******************************************************************************
- * global definitions
- ******************************************************************************/
-#if DBG
-extern dbg_info_t *DbgInfo;
-#endif /* DBG */
-
-
/*******************************************************************************
* wl_adapter_attach()
*******************************************************************************
@@ -133,10 +124,6 @@ static int wl_adapter_attach(struct pcmcia_device *link)
struct net_device *dev;
struct wl_private *lp;
int ret;
- /*--------------------------------------------------------------------*/
-
- DBG_FUNC("wl_adapter_attach");
- DBG_ENTER(DbgInfo);
dev = wl_device_alloc();
if (dev == NULL) {
@@ -158,7 +145,6 @@ static int wl_adapter_attach(struct pcmcia_device *link)
if (ret != 0)
wl_device_dealloc(dev);
- DBG_LEAVE(DbgInfo);
return ret;
} /* wl_adapter_attach */
/*============================================================================*/
@@ -168,10 +154,7 @@ static int wl_adapter_attach(struct pcmcia_device *link)
static void wl_adapter_detach(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- /*--------------------------------------------------------------------*/
- DBG_FUNC("wl_adapter_detach");
- DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "link", "0x%p", link);
wl_adapter_release(link);
@@ -180,24 +163,18 @@ static void wl_adapter_detach(struct pcmcia_device *link)
unregister_netdev(dev);
wl_device_dealloc(dev);
}
-
- DBG_LEAVE(DbgInfo);
} /* wl_adapter_detach */
/*============================================================================*/
void wl_adapter_release(struct pcmcia_device *link)
{
- DBG_FUNC("wl_adapter_release");
- DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "link", "0x%p", link);
/* Stop hardware */
wl_remove(link->priv);
pcmcia_disable_device(link);
-
- DBG_LEAVE(DbgInfo);
} /* wl_adapter_release */
/*============================================================================*/
@@ -229,10 +206,7 @@ int wl_adapter_insert(struct pcmcia_device *link)
{
struct net_device *dev;
int ret;
- /*--------------------------------------------------------------------*/
- DBG_FUNC("wl_adapter_insert");
- DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "link", "0x%p", link);
dev = link->priv;
@@ -259,20 +233,17 @@ int wl_adapter_insert(struct pcmcia_device *link)
SET_NETDEV_DEV(dev, &link->dev);
ret = register_netdev(dev);
if (ret != 0) {
- printk("%s: register_netdev() failed\n", MODULE_NAME);
+ printk("%s: register_netdev() failed\n", KBUILD_MODNAME);
goto failed;
}
printk(KERN_INFO "%s: Wireless, io_addr %#03lx, irq %d, mac_address"
" %pM\n", dev->name, dev->base_addr, dev->irq, dev->dev_addr);
- DBG_LEAVE(DbgInfo);
return 0;
failed:
wl_adapter_release(link);
-
- DBG_LEAVE(DbgInfo);
return ret;
} /* wl_adapter_insert */
/*============================================================================*/
@@ -303,17 +274,12 @@ int wl_adapter_open(struct net_device *dev)
struct pcmcia_device *link = lp->link;
int result = 0;
int hcf_status = HCF_SUCCESS;
- /*--------------------------------------------------------------------*/
- DBG_FUNC("wl_adapter_open");
- DBG_ENTER(DbgInfo);
DBG_PRINT("%s\n", VERSION_INFO);
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- if (!pcmcia_dev_present(link)) {
- DBG_LEAVE(DbgInfo);
+ if (!pcmcia_dev_present(link))
return -ENODEV;
- }
link->open++;
@@ -324,7 +290,6 @@ int wl_adapter_open(struct net_device *dev)
result = -ENODEV;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wl_adapter_open */
/*============================================================================*/
@@ -353,23 +318,17 @@ int wl_adapter_close(struct net_device *dev)
{
struct wl_private *lp = wl_priv(dev);
struct pcmcia_device *link = lp->link;
- /*--------------------------------------------------------------------*/
- DBG_FUNC("wl_adapter_close");
- DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- if (link == NULL) {
- DBG_LEAVE(DbgInfo);
+ if (link == NULL)
return -ENODEV;
- }
DBG_TRACE(DbgInfo, "%s: Shutting down adapter.\n", dev->name);
wl_close(dev);
link->open--;
- DBG_LEAVE(DbgInfo);
return 0;
} /* wl_adapter_close */
/*============================================================================*/
@@ -420,17 +379,7 @@ static struct pcmcia_driver wlags49_driver = {
******************************************************************************/
int wl_adapter_init_module(void)
{
- int ret;
- /*--------------------------------------------------------------------*/
-
- DBG_FUNC("wl_adapter_init_module");
- DBG_ENTER(DbgInfo);
- DBG_TRACE(DbgInfo, "wl_adapter_init_module() -- PCMCIA\n");
-
- ret = pcmcia_register_driver(&wlags49_driver);
-
- DBG_LEAVE(DbgInfo);
- return ret;
+ return pcmcia_register_driver(&wlags49_driver);
} /* wl_adapter_init_module */
/*============================================================================*/
@@ -454,15 +403,7 @@ int wl_adapter_init_module(void)
******************************************************************************/
void wl_adapter_cleanup_module(void)
{
- DBG_FUNC("wl_adapter_cleanup_module");
- DBG_ENTER(DbgInfo);
- DBG_TRACE(DbgInfo, "wl_adapter_cleanup_module() -- PCMCIA\n");
-
-
pcmcia_unregister_driver(&wlags49_driver);
-
- DBG_LEAVE(DbgInfo);
- return;
} /* wl_adapter_cleanup_module */
/*============================================================================*/
diff --git a/drivers/staging/wlags49_h2/wl_cs.h b/drivers/staging/wlags49_h2/wl_cs.h
index 081cc6f..9a597a9 100644
--- a/drivers/staging/wlags49_h2/wl_cs.h
+++ b/drivers/staging/wlags49_h2/wl_cs.h
@@ -86,4 +86,4 @@ const char *DbgEvent( int mask );
-#endif // __WL_CS_H__
+#endif /* __WL_CS_H__ */
diff --git a/drivers/staging/wlags49_h2/wl_enc.c b/drivers/staging/wlags49_h2/wl_enc.c
index 51293d9..389c23b 100644
--- a/drivers/staging/wlags49_h2/wl_enc.c
+++ b/drivers/staging/wlags49_h2/wl_enc.c
@@ -70,21 +70,6 @@
#include <wl_enc.h>
-
-
-
-/*******************************************************************************
- * global definitions
- ******************************************************************************/
-#if DBG
-
-extern dbg_info_t *DbgInfo;
-
-#endif /* DBG */
-
-
-
-
/*******************************************************************************
* wl_wep_code()
*******************************************************************************
diff --git a/drivers/staging/wlags49_h2/wl_enc.h b/drivers/staging/wlags49_h2/wl_enc.h
index 1804611..03a52fb 100644
--- a/drivers/staging/wlags49_h2/wl_enc.h
+++ b/drivers/staging/wlags49_h2/wl_enc.h
@@ -69,7 +69,7 @@
******************************************************************************/
#define CRYPT_CODE "57617665A5D6"
#define ENCRYPTION_LEN 102
-#define ENCRYPTION_MAGIC 0x48576877L // HWhw
+#define ENCRYPTION_MAGIC 0x48576877L /* HWhw */
#define DEF_CRYPT_STR "G?TIUEA]d5MAdZV'eUb&&6.)'&:,'VF/(FR2)6^5*'*8*W6;+GB>,7NA-'ZD-X&G.H2J/8>M0(JP0XVS1HbV29.Y3):\\3YF_4IRb56"
#define DEFAULT_CRYPT_MAC "W\x01\x6B\x66\xA5\x5A"
@@ -115,4 +115,4 @@ int wl_wep_decode( char *szCrypt, void *Dest, char *szData );
-#endif // __WAVELAN2_ENCRYPTION_H__
+#endif /* __WAVELAN2_ENCRYPTION_H__ */
diff --git a/drivers/staging/wlags49_h2/wl_main.c b/drivers/staging/wlags49_h2/wl_main.c
index 4353561..650def8 100644
--- a/drivers/staging/wlags49_h2/wl_main.c
+++ b/drivers/staging/wlags49_h2/wl_main.c
@@ -400,8 +400,8 @@ static p_u32 pc_debug = DBG_LVL;
*/ p_u32 DebugFlag = ~0; //recognizable "undefined value" rather then DBG_DEFAULTS;
//MODULE_PARM(DebugFlag, "l");
-dbg_info_t wl_info = { DBG_MOD_NAME, 0, 0 };
-dbg_info_t *DbgInfo = &wl_info;
+static struct dbg_info wl_info = { KBUILD_MODNAME, 0, 0 };
+struct dbg_info *DbgInfo = &wl_info;
#endif /* DBG */
#ifdef USE_RTS
@@ -434,9 +434,6 @@ int wl_insert( struct net_device *dev )
int i;
unsigned long flags = 0;
struct wl_private *lp = wl_priv(dev);
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_insert" );
- DBG_ENTER( DbgInfo );
/* Initialize the adapter hardware. */
memset( &( lp->hcfCtx ), 0, sizeof( IFB_STRCT ));
@@ -926,7 +923,6 @@ int wl_insert( struct net_device *dev )
proc_mkdir("driver/wlags49", 0);
#endif /* SCULL_USE_PROC */
- DBG_LEAVE( DbgInfo );
return result;
hcf_failed:
@@ -944,8 +940,6 @@ failed:
result = -EFAULT;
-
- DBG_LEAVE( DbgInfo );
return result;
} // wl_insert
/*============================================================================*/
@@ -972,9 +966,7 @@ int wl_reset(struct net_device *dev)
{
struct wl_private *lp = wl_priv(dev);
int hcf_status = HCF_SUCCESS;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_reset" );
- DBG_ENTER( DbgInfo );
+
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
DBG_PARAM( DbgInfo, "dev->base_addr", "(%#03lx)", dev->base_addr );
@@ -1021,7 +1013,6 @@ int wl_reset(struct net_device *dev)
}
out:
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_reset
/*============================================================================*/
@@ -1049,9 +1040,6 @@ int wl_go( struct wl_private *lp )
int hcf_status = HCF_SUCCESS;
char *cp = NULL; //fw_image
int retries = 0;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_go" );
- DBG_ENTER( DbgInfo );
hcf_status = wl_disable( lp );
if ( hcf_status != HCF_SUCCESS ) {
@@ -1148,7 +1136,6 @@ int rc;
}
if ( hcf_status != HCF_SUCCESS ) {
DBG_ERROR( DbgInfo, "Firmware Download failed\n" );
- DBG_LEAVE( DbgInfo );
return hcf_status;
}
}
@@ -1187,7 +1174,6 @@ int rc;
hcf_status = hcf_get_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
if ( hcf_status != HCF_SUCCESS ) {
DBG_ERROR( DbgInfo, "Could not retrieve MAC address\n" );
- DBG_LEAVE( DbgInfo );
return hcf_status;
}
memcpy( lp->MACAddress, &lp->ltvRecord.u.u8[0], ETH_ALEN );
@@ -1206,7 +1192,6 @@ int rc;
#endif // USE_WDS
hcf_status = wl_connect( lp );
}
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_go
/*============================================================================*/
@@ -1234,9 +1219,7 @@ int rc;
void wl_set_wep_keys( struct wl_private *lp )
{
int count = 0;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_set_wep_keys" );
- DBG_ENTER( DbgInfo );
+
DBG_PARAM( DbgInfo, "lp", "%s (0x%p)", lp->dev->name, lp );
if ( lp->EnableEncryption ) {
/* NOTE: CFG_CNF_ENCRYPTION is set in wl_put_ltv() as it's a static
@@ -1274,8 +1257,6 @@ void wl_set_wep_keys( struct wl_private *lp )
DBG_NOTICE( DbgInfo, "encrypt: %d, ID: %d\n", lp->EnableEncryption, lp->TransmitKeyID );
DBG_NOTICE( DbgInfo, "set key: %s(%d) [%d]\n", lp->DefaultKeys.key[lp->TransmitKeyID-1].key, lp->DefaultKeys.key[lp->TransmitKeyID-1].len, lp->TransmitKeyID-1 );
}
-
- DBG_LEAVE( DbgInfo );
} // wl_set_wep_keys
/*============================================================================*/
@@ -1301,9 +1282,7 @@ void wl_set_wep_keys( struct wl_private *lp )
int wl_apply(struct wl_private *lp)
{
int hcf_status = HCF_SUCCESS;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_apply" );
- DBG_ENTER( DbgInfo );
+
DBG_ASSERT( lp != NULL);
DBG_PARAM( DbgInfo, "lp", "%s (0x%p)", lp->dev->name, lp );
@@ -1319,13 +1298,11 @@ int wl_apply(struct wl_private *lp)
hcf_status = wl_disconnect( lp );
if ( hcf_status != HCF_SUCCESS ) {
DBG_ERROR( DbgInfo, "Disconnect failed\n" );
- DBG_LEAVE( DbgInfo );
return -1;
}
hcf_status = wl_disable( lp );
if ( hcf_status != HCF_SUCCESS ) {
DBG_ERROR( DbgInfo, "Disable failed\n" );
- DBG_LEAVE( DbgInfo );
return -1;
} else {
/* Write out configuration to the device, enable, and reconnect.
@@ -1347,7 +1324,6 @@ int wl_apply(struct wl_private *lp)
}
}
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_apply
/*============================================================================*/
@@ -1375,12 +1351,9 @@ int wl_put_ltv_init( struct wl_private *lp )
int i;
int hcf_status;
CFG_RID_LOG_STRCT *RidLog;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_put_ltv_init" );
- DBG_ENTER( DbgInfo );
+
if ( lp == NULL ) {
DBG_ERROR( DbgInfo, "lp pointer is NULL\n" );
- DBG_LEAVE( DbgInfo );
return -1;
}
/* DMA/IO */
@@ -1446,7 +1419,6 @@ int wl_put_ltv_init( struct wl_private *lp )
DBG_TRACE( DbgInfo, "CFG_REG_INFO_LOG\n" );
DBG_TRACE( DbgInfo, "CFG_REG_INFO_LOG result : 0x%04x\n",
hcf_status );
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_put_ltv_init
/*============================================================================*/
@@ -1473,9 +1445,6 @@ int wl_put_ltv( struct wl_private *lp )
{
int len;
int hcf_status;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_put_ltv" );
- DBG_ENTER( DbgInfo );
if ( lp == NULL ) {
DBG_ERROR( DbgInfo, "lp pointer is NULL\n" );
@@ -2013,7 +1982,6 @@ int wl_put_ltv( struct wl_private *lp )
/* Country Code */
/* countryInfo, ltvCountryInfo, CFG_CNF_COUNTRY_INFO */
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_put_ltv
/*============================================================================*/
@@ -2042,7 +2010,6 @@ static int __init wl_module_init( void )
int result;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_module_init" );
#if DBG
/* Convert "standard" PCMCIA parameter pc_debug to a reasonable DebugFlag value.
@@ -2067,7 +2034,6 @@ static int __init wl_module_init( void )
}
#endif /* DBG */
- DBG_ENTER( DbgInfo );
printk(KERN_INFO "%s\n", VERSION_INFO);
printk(KERN_INFO "*** Modified for kernel 2.6 by Henk de Groot <pe1dnn@amsat.org>\n");
printk(KERN_INFO "*** Based on 7.18 version by Andrey Borzenkov <arvidjaar@mail.ru> $Revision: 39 $\n");
@@ -2080,7 +2046,6 @@ static int __init wl_module_init( void )
// #endif /* (HCF_TYPE) & HCF_TYPE_AP */
result = wl_adapter_init_module( );
- DBG_LEAVE( DbgInfo );
return result;
} // init_module
/*============================================================================*/
@@ -2105,16 +2070,10 @@ static int __init wl_module_init( void )
******************************************************************************/
static void __exit wl_module_exit( void )
{
- DBG_FUNC( "wl_module_exit" );
- DBG_ENTER(DbgInfo);
-
wl_adapter_cleanup_module( );
#if 0 //SCULL_USE_PROC /* don't waste space if unused */
remove_proc_entry( "wlags", NULL ); //;?why so a-symmetric compared to location of proc_create_data
#endif
-
- DBG_LEAVE( DbgInfo );
- return;
} // cleanup_module
/*============================================================================*/
@@ -2322,9 +2281,6 @@ void wl_remove( struct net_device *dev )
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_remove" );
- DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
@@ -2356,8 +2312,6 @@ void wl_remove( struct net_device *dev )
#ifdef USE_RTS
if ( lp->useRTS == 1 ) {
wl_unlock( lp, &flags );
-
- DBG_LEAVE( DbgInfo );
return;
}
#endif /* USE_RTS */
@@ -2366,9 +2320,6 @@ void wl_remove( struct net_device *dev )
hcf_connect( &lp->hcfCtx, HCF_DISCONNECT );
wl_unlock( lp, &flags );
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_remove
/*============================================================================*/
@@ -2394,9 +2345,6 @@ void wl_suspend( struct net_device *dev )
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_suspend" );
- DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
@@ -2422,9 +2370,6 @@ void wl_suspend( struct net_device *dev )
lp->portState = WVLAN_PORT_STATE_DISABLED;
wl_unlock( lp, &flags );
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_suspend
/*============================================================================*/
@@ -2450,9 +2395,6 @@ void wl_resume(struct net_device *dev)
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_resume" );
- DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
@@ -2474,9 +2416,6 @@ void wl_resume(struct net_device *dev)
wl_act_int_on( lp );
wl_unlock( lp, &flags );
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_resume
/*============================================================================*/
@@ -2504,9 +2443,6 @@ void wl_resume(struct net_device *dev)
void wl_release( struct net_device *dev )
{
struct wl_private *lp = wl_priv(dev);
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_release" );
- DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
/* If wl_remove() hasn't been called (i.e. when Card Services is shut
@@ -2517,9 +2453,6 @@ void wl_release( struct net_device *dev )
lp->is_registered = FALSE;
}
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_release
/*============================================================================*/
@@ -2593,9 +2526,6 @@ p_s8 * wl_get_irq_list( void )
int wl_enable( struct wl_private *lp )
{
int hcf_status = HCF_SUCCESS;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_enable" );
- DBG_ENTER( DbgInfo );
if ( lp->portState == WVLAN_PORT_STATE_ENABLED ) {
DBG_TRACE( DbgInfo, "No action: Card already enabled\n" );
@@ -2617,7 +2547,6 @@ int wl_enable( struct wl_private *lp )
if ( hcf_status != HCF_SUCCESS ) { //;?make this an assert
DBG_TRACE( DbgInfo, "failed: 0x%x\n", hcf_status );
}
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_enable
/*============================================================================*/
@@ -2643,14 +2572,9 @@ int wl_enable( struct wl_private *lp )
******************************************************************************/
void wl_enable_wds_ports( struct wl_private * lp )
{
-
- DBG_FUNC( "wl_enable_wds_ports" );
- DBG_ENTER( DbgInfo );
if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ){
DBG_ERROR( DbgInfo, "!!!!;? someone misunderstood something !!!!!\n" );
}
- DBG_LEAVE( DbgInfo );
- return;
} // wl_enable_wds_ports
#endif /* USE_WDS */
/*============================================================================*/
@@ -2676,21 +2600,15 @@ void wl_enable_wds_ports( struct wl_private * lp )
int wl_connect( struct wl_private *lp )
{
int hcf_status;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_connect" );
- DBG_ENTER( DbgInfo );
if ( lp->portState != WVLAN_PORT_STATE_ENABLED ) {
DBG_TRACE( DbgInfo, "No action: Not in enabled state\n" );
- DBG_LEAVE( DbgInfo );
return HCF_SUCCESS;
}
hcf_status = hcf_cntl( &lp->hcfCtx, HCF_CNTL_CONNECT );
if ( hcf_status == HCF_SUCCESS ) {
lp->portState = WVLAN_PORT_STATE_CONNECTED;
}
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_connect
/*============================================================================*/
@@ -2716,21 +2634,15 @@ int wl_connect( struct wl_private *lp )
int wl_disconnect( struct wl_private *lp )
{
int hcf_status;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_disconnect" );
- DBG_ENTER( DbgInfo );
if ( lp->portState != WVLAN_PORT_STATE_CONNECTED ) {
DBG_TRACE( DbgInfo, "No action: Not in connected state\n" );
- DBG_LEAVE( DbgInfo );
return HCF_SUCCESS;
}
hcf_status = hcf_cntl( &lp->hcfCtx, HCF_CNTL_DISCONNECT );
if ( hcf_status == HCF_SUCCESS ) {
lp->portState = WVLAN_PORT_STATE_ENABLED;
}
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_disconnect
/*============================================================================*/
@@ -2757,9 +2669,6 @@ int wl_disconnect( struct wl_private *lp )
int wl_disable( struct wl_private *lp )
{
int hcf_status = HCF_SUCCESS;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_disable" );
- DBG_ENTER( DbgInfo );
if ( lp->portState == WVLAN_PORT_STATE_DISABLED ) {
DBG_TRACE( DbgInfo, "No action: Port state is disabled\n" );
@@ -2779,7 +2688,6 @@ int wl_disable( struct wl_private *lp )
if ( hcf_status != HCF_SUCCESS ) {
DBG_TRACE( DbgInfo, "failed: 0x%x\n", hcf_status );
}
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_disable
/*============================================================================*/
@@ -2805,10 +2713,6 @@ int wl_disable( struct wl_private *lp )
******************************************************************************/
void wl_disable_wds_ports( struct wl_private * lp )
{
-
- DBG_FUNC( "wl_disable_wds_ports" );
- DBG_ENTER( DbgInfo );
-
if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ){
DBG_ERROR( DbgInfo, "!!!!;? someone misunderstood something !!!!!\n" );
}
@@ -2820,7 +2724,6 @@ void wl_disable_wds_ports( struct wl_private * lp )
// wl_disable( lp, HCF_PORT_5 );
// wl_disable( lp, HCF_PORT_6 );
// }
- DBG_LEAVE( DbgInfo );
return;
} // wl_disable_wds_ports
#endif // USE_WDS
@@ -2848,9 +2751,7 @@ void wl_disable_wds_ports( struct wl_private * lp )
int wl_mbx( struct wl_private *lp )
{
int hcf_status = HCF_SUCCESS;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_mbx" );
- DBG_ENTER( DbgInfo );
+
DBG_TRACE( DbgInfo, "Mailbox Info: IFB_MBInfoLen: %d\n",
lp->hcfCtx.IFB_MBInfoLen );
@@ -2862,19 +2763,15 @@ int wl_mbx( struct wl_private *lp )
if ( hcf_status != HCF_SUCCESS ) {
DBG_ERROR( DbgInfo, "hcf_get_info returned 0x%x\n", hcf_status );
-
- DBG_LEAVE( DbgInfo );
return hcf_status;
}
- if ( lp->ltvRecord.typ == CFG_MB_INFO ) {
- DBG_LEAVE( DbgInfo );
+ if ( lp->ltvRecord.typ == CFG_MB_INFO )
return hcf_status;
- }
+
/* Endian translate the mailbox data, then process the message */
wl_endian_translate_mailbox( &( lp->ltvRecord ));
wl_process_mailbox( lp );
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_mbx
/*============================================================================*/
@@ -2900,9 +2797,6 @@ int wl_mbx( struct wl_private *lp )
******************************************************************************/
void wl_endian_translate_mailbox( ltv_t *ltv )
{
-
- DBG_FUNC( "wl_endian_translate_mailbox" );
- DBG_ENTER( DbgInfo );
switch( ltv->typ ) {
case CFG_TALLIES:
break;
@@ -2990,9 +2884,6 @@ void wl_endian_translate_mailbox( ltv_t *ltv )
default:
break;
}
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_endian_translate_mailbox
/*============================================================================*/
@@ -3017,9 +2908,7 @@ void wl_process_mailbox( struct wl_private *lp )
{
ltv_t *ltv;
hcf_16 ltv_val = 0xFFFF;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_process_mailbox" );
- DBG_ENTER( DbgInfo );
+
ltv = &( lp->ltvRecord );
switch( ltv->typ ) {
@@ -3448,8 +3337,6 @@ void wl_process_mailbox( struct wl_private *lp )
DBG_TRACE( DbgInfo, "UNKNOWN MESSAGE: 0x%04x\n", ltv->typ );
break;
}
- DBG_LEAVE( DbgInfo );
- return;
} // wl_process_mailbox
/*============================================================================*/
#endif /* ifndef USE_MBOX_SYNC */
@@ -3477,9 +3364,7 @@ void wl_process_mailbox( struct wl_private *lp )
void wl_wds_netdev_register( struct wl_private *lp )
{
int count;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_wds_netdev_register" );
- DBG_ENTER( DbgInfo );
+
//;?why is there no USE_WDS clause like in wl_enable_wds_ports
if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ) {
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
@@ -3496,8 +3381,6 @@ void wl_wds_netdev_register( struct wl_private *lp )
}
}
}
- DBG_LEAVE( DbgInfo );
- return;
} // wl_wds_netdev_register
/*============================================================================*/
@@ -3524,9 +3407,7 @@ void wl_wds_netdev_register( struct wl_private *lp )
void wl_wds_netdev_deregister( struct wl_private *lp )
{
int count;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_wds_netdev_deregister" );
- DBG_ENTER( DbgInfo );
+
if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ) {
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if ( WVLAN_VALID_MAC_ADDRESS( lp->wds_port[count].wdsAddress )) {
@@ -3535,8 +3416,6 @@ void wl_wds_netdev_deregister( struct wl_private *lp )
lp->wds_port[count].is_registered = FALSE;
}
}
- DBG_LEAVE( DbgInfo );
- return;
} // wl_wds_netdev_deregister
/*============================================================================*/
#endif /* USE_WDS */
@@ -3780,9 +3659,6 @@ static int write_int(struct file *file, const char *buffer, unsigned long count,
static char proc_number[11];
unsigned int nr = 0;
- DBG_FUNC( "write_int" );
- DBG_ENTER( DbgInfo );
-
if (count > 9) {
count = -EINVAL;
} else if ( copy_from_user(proc_number, buffer, count) ) {
@@ -3799,7 +3675,6 @@ static int write_int(struct file *file, const char *buffer, unsigned long count,
}
}
DBG_PRINT( "value: %08X\n", nr );
- DBG_LEAVE( DbgInfo );
return count;
} // write_int
@@ -3839,10 +3714,6 @@ void timer_oor( u_long arg )
{
struct wl_private *lp = (struct wl_private *)arg;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "timer_oor" );
- DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "arg", "0x%08lx", arg );
printk(KERN_NOTICE "timer_oor: %ld 0x%04X\n", jiffies, lp->timer_oor_cnt ); //;?remove me 1 day
@@ -3856,8 +3727,6 @@ void timer_oor( u_long arg )
lp->timer_oor.data = (unsigned long)lp;
lp->timer_oor.expires = RUN_AT( (lp->timer_oor_cnt & ~DS_OOR) * HZ );
add_timer( &lp->timer_oor );
-
- DBG_LEAVE( DbgInfo );
} // timer_oor
#endif //DN554
diff --git a/drivers/staging/wlags49_h2/wl_main.h b/drivers/staging/wlags49_h2/wl_main.h
index 3b5acdf..3806e74 100644
--- a/drivers/staging/wlags49_h2/wl_main.h
+++ b/drivers/staging/wlags49_h2/wl_main.h
@@ -135,4 +135,4 @@ void wl_wds_netdev_deregister( struct wl_private *lp );
#define WL_WDS_NETDEV_DEREGISTER( ARG )
#endif /* USE_WDS */
-#endif // __WL_MAIN_H__
+#endif /* __WL_MAIN_H__ */
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 235cc2a..965b1c0 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -115,14 +115,6 @@
#endif /* BUS_PCI */
-/*******************************************************************************
- * global variables
- ******************************************************************************/
-#if DBG
-extern dbg_info_t *DbgInfo;
-#endif /* DBG */
-
-
#if HCF_ENCAP
#define MTU_MAX (HCF_MAX_MSG - ETH_HLEN - 8)
#else
@@ -170,10 +162,6 @@ int wl_init( struct net_device *dev )
{
// unsigned long flags;
// struct wl_private *lp = wl_priv(dev);
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_init" );
- DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
@@ -182,7 +170,6 @@ int wl_init( struct net_device *dev )
// wl_lock( lp, &flags );
// wl_unlock( lp, &flags );
- DBG_LEAVE( DbgInfo );
return 0;
} // wl_init
/*============================================================================*/
@@ -208,9 +195,6 @@ int wl_init( struct net_device *dev )
******************************************************************************/
int wl_config( struct net_device *dev, struct ifmap *map )
{
- DBG_FUNC( "wl_config" );
- DBG_ENTER( DbgInfo );
-
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
DBG_PARAM( DbgInfo, "map", "0x%p", map );
@@ -218,7 +202,6 @@ int wl_config( struct net_device *dev, struct ifmap *map )
ignore the request. */
DBG_TRACE(DbgInfo, "%s: %s called.\n", dev->name, __func__);
- DBG_LEAVE( DbgInfo );
return 0;
} // wl_config
/*============================================================================*/
@@ -249,10 +232,7 @@ struct net_device_stats *wl_stats( struct net_device *dev )
unsigned long flags;
struct net_device_stats *pStats;
struct wl_private *lp = wl_priv(dev);
- /*------------------------------------------------------------------------*/
- //DBG_FUNC( "wl_stats" );
- //DBG_ENTER( DbgInfo );
//DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
pStats = NULL;
@@ -262,8 +242,6 @@ struct net_device_stats *wl_stats( struct net_device *dev )
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
wl_unlock( lp, &flags );
-
- //DBG_LEAVE( DbgInfo );
return NULL;
}
#endif /* USE_RTS */
@@ -286,8 +264,6 @@ struct net_device_stats *wl_stats( struct net_device *dev )
wl_unlock( lp, &flags );
- //DBG_LEAVE( DbgInfo );
-
return pStats;
} // wl_stats
/*============================================================================*/
@@ -315,10 +291,6 @@ int wl_open(struct net_device *dev)
int status = HCF_SUCCESS;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_open" );
- DBG_ENTER( DbgInfo );
wl_lock( lp, &flags );
@@ -326,7 +298,6 @@ int wl_open(struct net_device *dev)
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping device open, in RTS mode\n" );
wl_unlock( lp, &flags );
- DBG_LEAVE( DbgInfo );
return -EIO;
}
#endif /* USE_RTS */
@@ -384,7 +355,6 @@ int wl_open(struct net_device *dev)
wl_unlock( lp, &flags );
- DBG_LEAVE( DbgInfo );
return status;
} // wl_open
/*============================================================================*/
@@ -411,10 +381,7 @@ int wl_close( struct net_device *dev )
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- /*------------------------------------------------------------------------*/
- DBG_FUNC("wl_close");
- DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
/* Mark the adapter as busy */
@@ -440,7 +407,6 @@ int wl_close( struct net_device *dev )
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping device close, in RTS mode\n" );
wl_unlock( lp, &flags );
- DBG_LEAVE( DbgInfo );
return -EIO;
}
#endif /* USE_RTS */
@@ -450,7 +416,6 @@ int wl_close( struct net_device *dev )
wl_unlock( lp, &flags );
- DBG_LEAVE( DbgInfo );
return 0;
} // wl_close
/*============================================================================*/
@@ -504,10 +469,7 @@ int wl_ioctl( struct net_device *dev, struct ifreq *rq, int cmd )
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_ioctl" );
- DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
DBG_PARAM(DbgInfo, "rq", "0x%p", rq);
DBG_PARAM(DbgInfo, "cmd", "0x%04x", cmd);
@@ -563,7 +525,6 @@ out_act_int_on_unlock:
wl_unlock( lp, &flags );
- DBG_LEAVE( DbgInfo );
return ret;
} // wl_ioctl
/*============================================================================*/
@@ -606,10 +567,6 @@ void wl_tx_timeout( struct net_device *dev )
unsigned long flags;
struct wl_private *lp = wl_priv(dev);
struct net_device_stats *pStats = NULL;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_tx_timeout" );
- DBG_ENTER( DbgInfo );
DBG_WARNING( DbgInfo, "%s: Transmit timeout.\n", dev->name );
@@ -619,8 +576,6 @@ void wl_tx_timeout( struct net_device *dev )
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping tx_timeout handler, in RTS mode\n" );
wl_unlock( lp, &flags );
-
- DBG_LEAVE( DbgInfo );
return;
}
#endif /* USE_RTS */
@@ -650,8 +605,6 @@ void wl_tx_timeout( struct net_device *dev )
pStats->tx_errors++;
wl_unlock( lp, &flags );
-
- DBG_LEAVE( DbgInfo );
} // wl_tx_timeout
/*============================================================================*/
@@ -683,8 +636,6 @@ int wl_send( struct wl_private *lp )
int len;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_send" );
-
if( lp == NULL ) {
DBG_ERROR( DbgInfo, "Private adapter struct is NULL\n" );
return FALSE;
@@ -801,8 +752,6 @@ int wl_tx( struct sk_buff *skb, struct net_device *dev, int port )
struct list_head *element;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_tx" );
-
/* Grab the spinlock */
wl_lock( lp, &flags );
@@ -895,7 +844,6 @@ int wl_rx(struct net_device *dev)
DESC_STRCT *desc;
/*------------------------------------------------------------------------*/
- DBG_FUNC("wl_rx")
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
if(!( lp->flags & WVLAN2_UIL_BUSY )) {
@@ -1047,16 +995,11 @@ void wl_multicast( struct net_device *dev )
struct netdev_hw_addr *ha;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_multicast" );
- DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
- if( !wl_adapter_is_open( dev )) {
- DBG_LEAVE( DbgInfo );
+ if( !wl_adapter_is_open( dev ))
return;
- }
#if DBG
if( DBG_FLAGS( DbgInfo ) & DBG_PARAM_ON ) {
@@ -1077,8 +1020,6 @@ void wl_multicast( struct net_device *dev )
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping multicast, in RTS mode\n" );
-
- DBG_LEAVE( DbgInfo );
return;
}
#endif /* USE_RTS */
@@ -1146,7 +1087,6 @@ void wl_multicast( struct net_device *dev )
wl_act_int_on( lp );
wl_unlock( lp, &flags );
}
- DBG_LEAVE( DbgInfo );
#endif /* HCF_STA */
} // wl_multicast
/*============================================================================*/
@@ -1155,16 +1095,11 @@ void wl_multicast( struct net_device *dev )
void wl_multicast( struct net_device *dev, int num_addrs, void *addrs )
{
- DBG_FUNC( "wl_multicast");
- DBG_ENTER(DbgInfo);
-
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
DBG_PARAM( DbgInfo, "num_addrs", "%d", num_addrs );
DBG_PARAM( DbgInfo, "addrs", "0x%p", addrs );
#error Obsolete set multicast interface!
-
- DBG_LEAVE( DbgInfo );
} // wl_multicast
/*============================================================================*/
@@ -1213,10 +1148,6 @@ struct net_device * wl_device_alloc( void )
{
struct net_device *dev = NULL;
struct wl_private *lp = NULL;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_device_alloc" );
- DBG_ENTER( DbgInfo );
/* Alloc a net_device struct */
dev = alloc_etherdev(sizeof(struct wl_private));
@@ -1253,7 +1184,6 @@ struct net_device * wl_device_alloc( void )
/* Allocate virtual devices for WDS support if needed */
WL_WDS_DEVICE_ALLOC( lp );
- DBG_LEAVE( DbgInfo );
return dev;
} // wl_device_alloc
/*============================================================================*/
@@ -1279,17 +1209,11 @@ struct net_device * wl_device_alloc( void )
void wl_device_dealloc( struct net_device *dev )
{
// struct wl_private *lp = wl_priv(dev);
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_device_dealloc" );
- DBG_ENTER( DbgInfo );
/* Dealloc the WDS ports */
WL_WDS_DEVICE_DEALLOC( lp );
free_netdev( dev );
-
- DBG_LEAVE( DbgInfo );
} // wl_device_dealloc
/*============================================================================*/
@@ -1496,10 +1420,6 @@ int wl_tx_port6( struct sk_buff *skb, struct net_device *dev )
void wl_wds_device_alloc( struct wl_private *lp )
{
int count;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_wds_device_alloc" );
- DBG_ENTER( DbgInfo );
/* WDS support requires additional net_device structs to be allocated,
so that user space apps can use these virtual devices to specify the
@@ -1508,10 +1428,8 @@ void wl_wds_device_alloc( struct wl_private *lp )
struct net_device *dev_wds = NULL;
dev_wds = kzalloc(sizeof(struct net_device), GFP_KERNEL);
- if (!dev_wds) {
- DBG_LEAVE(DbgInfo);
+ if (!dev_wds)
return;
- }
ether_setup( dev_wds );
@@ -1542,8 +1460,6 @@ void wl_wds_device_alloc( struct wl_private *lp )
lp->wds_port[5].dev->hard_start_xmit = &wl_tx_port6;
WL_WDS_NETIF_STOP_QUEUE( lp );
-
- DBG_LEAVE( DbgInfo );
} // wl_wds_device_alloc
/*============================================================================*/
@@ -1567,10 +1483,6 @@ void wl_wds_device_alloc( struct wl_private *lp )
void wl_wds_device_dealloc( struct wl_private *lp )
{
int count;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_wds_device_dealloc" );
- DBG_ENTER( DbgInfo );
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
struct net_device *dev_wds = NULL;
@@ -1587,8 +1499,6 @@ void wl_wds_device_dealloc( struct wl_private *lp )
lp->wds_port[count].dev = NULL;
}
}
-
- DBG_LEAVE( DbgInfo );
} // wl_wds_device_dealloc
/*============================================================================*/
@@ -1792,8 +1702,6 @@ int wl_send_dma( struct wl_private *lp, struct sk_buff *skb, int port )
DESC_STRCT *desc_next = NULL;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_send_dma" );
-
if( lp == NULL ) {
DBG_ERROR( DbgInfo, "Private adapter struct is NULL\n" );
return FALSE;
@@ -1882,7 +1790,6 @@ int wl_rx_dma( struct net_device *dev )
//CFG_MB_INFO_RANGE2_STRCT x;
/*------------------------------------------------------------------------*/
- DBG_FUNC("wl_rx")
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
if((( lp = dev->priv ) != NULL ) &&
diff --git a/drivers/staging/wlags49_h2/wl_pci.c b/drivers/staging/wlags49_h2/wl_pci.c
deleted file mode 100644
index 6226e5e..0000000
--- a/drivers/staging/wlags49_h2/wl_pci.c
+++ /dev/null
@@ -1,1578 +0,0 @@
-/*******************************************************************************
- * Agere Systems Inc.
- * Wireless device driver for Linux (wlags49).
- *
- * Copyright (c) 1998-2003 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- * Initially developed by TriplePoint, Inc.
- * http://www.triplepoint.com
- *
- *------------------------------------------------------------------------------
- *
- * This file contains processing and initialization specific to PCI/miniPCI
- * devices.
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2003 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- ******************************************************************************/
-
-/*******************************************************************************
- * include files
- ******************************************************************************/
-#include <wireless/wl_version.h>
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-//#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/bitops.h>
-#include <asm/uaccess.h>
-
-#include <linux/ethtool.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-
-#include <hcf/debug.h>
-
-#include <hcf.h>
-#include <dhf.h>
-#include <hcfdef.h>
-
-#include <wireless/wl_if.h>
-#include <wireless/wl_internal.h>
-#include <wireless/wl_util.h>
-#include <wireless/wl_main.h>
-#include <wireless/wl_netdev.h>
-#include <wireless/wl_pci.h>
-
-
-/*******************************************************************************
- * global variables
- ******************************************************************************/
-#if DBG
-extern dbg_info_t *DbgInfo;
-#endif // DBG
-
-/* define the PCI device Table Cardname and id tables */
-static struct pci_device_id wl_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_0), },
- { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_1), },
- { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_2), },
-
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(pci, wl_pci_tbl);
-
-/*******************************************************************************
- * function prototypes
- ******************************************************************************/
-int wl_pci_probe( struct pci_dev *pdev,
- const struct pci_device_id *ent );
-void wl_pci_remove(struct pci_dev *pdev);
-int wl_pci_setup( struct pci_dev *pdev );
-void wl_pci_enable_cardbus_interrupts( struct pci_dev *pdev );
-
-#ifdef ENABLE_DMA
-int wl_pci_dma_alloc( struct pci_dev *pdev, struct wl_private *lp );
-int wl_pci_dma_free( struct pci_dev *pdev, struct wl_private *lp );
-int wl_pci_dma_alloc_tx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc );
-int wl_pci_dma_free_tx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc );
-int wl_pci_dma_alloc_rx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc );
-int wl_pci_dma_free_rx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc );
-int wl_pci_dma_alloc_desc_and_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc, int size );
-int wl_pci_dma_free_desc_and_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc );
-int wl_pci_dma_alloc_desc( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc );
-int wl_pci_dma_free_desc( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc );
-int wl_pci_dma_alloc_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT *desc, int size );
-int wl_pci_dma_free_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT *desc );
-
-void wl_pci_dma_hcf_reclaim_rx( struct wl_private *lp );
-#endif // ENABLE_DMA
-
-/*******************************************************************************
- * PCI module function registration
- ******************************************************************************/
-static struct pci_driver wl_driver = {
- .name = MODULE_NAME,
- .id_table = wl_pci_tbl,
- .probe = wl_pci_probe,
- .remove = wl_pci_remove,
- .suspend = NULL,
- .resume = NULL
-};
-
-/*******************************************************************************
- * wl_adapter_init_module()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Called by init_module() to perform PCI-specific driver initialization.
- *
- * PARAMETERS:
- *
- * N/A
- *
- * RETURNS:
- *
- * 0
- *
- ******************************************************************************/
-int wl_adapter_init_module( void )
-{
- int result;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_adapter_init_module()" );
- DBG_ENTER( DbgInfo );
- DBG_TRACE( DbgInfo, "wl_adapter_init_module() -- PCI\n" );
-
- result = pci_register_driver( &wl_driver ); //;?replace with pci_module_init, Rubini pg 490
- //;? why not do something with the result
-
- DBG_LEAVE( DbgInfo );
- return 0;
-} // wl_adapter_init_module
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_adapter_cleanup_module()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Called by cleanup_module() to perform PCI-specific driver cleanup.
- *
- * PARAMETERS:
- *
- * N/A
- *
- * RETURNS:
- *
- * N/A
- *
- ******************************************************************************/
-void wl_adapter_cleanup_module( void )
-{
- //;?how come wl_adapter_cleanup_module is located in a seemingly pci specific module
- DBG_FUNC( "wl_adapter_cleanup_module" );
- DBG_ENTER( DbgInfo );
-
- //;?DBG_TRACE below feels like nearly redundant in the light of DBG_ENTER above
- DBG_TRACE( DbgInfo, "wl_adapter_cleanup_module() -- PCI\n" );
-
- pci_unregister_driver( &wl_driver );
-
- DBG_LEAVE( DbgInfo );
- return;
-} // wl_adapter_cleanup_module
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_adapter_insert()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Called by wl_pci_probe() to continue the process of device insertion.
- *
- * PARAMETERS:
- *
- * dev - a pointer to the device's net_device structure
- *
- * RETURNS:
- *
- * TRUE or FALSE
- *
- ******************************************************************************/
-int wl_adapter_insert( struct net_device *dev )
-{
- int result = FALSE;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_adapter_insert" );
- DBG_ENTER( DbgInfo );
-
- DBG_TRACE( DbgInfo, "wl_adapter_insert() -- PCI\n" );
-
- if( dev == NULL ) {
- DBG_ERROR( DbgInfo, "net_device pointer is NULL!!!\n" );
- } else if( dev->priv == NULL ) {
- DBG_ERROR( DbgInfo, "wl_private pointer is NULL!!!\n" );
- } else if( wl_insert( dev ) ) { /* Perform remaining device initialization */
- result = TRUE;
- } else {
- DBG_TRACE( DbgInfo, "wl_insert() FAILED\n" );
- }
- DBG_LEAVE( DbgInfo );
- return result;
-} // wl_adapter_insert
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_adapter_open()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Open the device.
- *
- * PARAMETERS:
- *
- * dev - a pointer to the device's net_device structure
- *
- * RETURNS:
- *
- * 0 on success, -ENODEV if the HCF open fails
- *
- ******************************************************************************/
-int wl_adapter_open( struct net_device *dev )
-{
- int result = 0;
- int hcf_status = HCF_SUCCESS;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_adapter_open" );
- DBG_ENTER( DbgInfo );
-
- DBG_TRACE( DbgInfo, "wl_adapter_open() -- PCI\n" );
-
- hcf_status = wl_open( dev );
-
- if( hcf_status != HCF_SUCCESS ) {
- result = -ENODEV;
- }
-
- DBG_LEAVE( DbgInfo );
- return result;
-} // wl_adapter_open
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_adapter_close()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Close the device
- *
- * PARAMETERS:
- *
- * dev - a pointer to the device's net_device structure
- *
- * RETURNS:
- *
- * 0
- *
- ******************************************************************************/
-int wl_adapter_close( struct net_device *dev )
-{
- DBG_FUNC( "wl_adapter_close" );
- DBG_ENTER( DbgInfo );
-
- DBG_TRACE( DbgInfo, "wl_adapter_close() -- PCI\n" );
- DBG_TRACE( DbgInfo, "%s: Shutting down adapter.\n", dev->name );
-
- wl_close( dev );
-
- DBG_LEAVE( DbgInfo );
- return 0;
-} // wl_adapter_close
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_adapter_is_open()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Check whether this device is open.
- *
- * PARAMETERS:
- *
- * dev - a pointer to the device's net_device structure
- *
- * RETURNS:
- *
- * nonzero if device is open.
- *
- ******************************************************************************/
-int wl_adapter_is_open( struct net_device *dev )
-{
- /* This function is used in PCMCIA to check the status of the 'open' field
- in the dev_link_t structure associated with a network device. There
- doesn't seem to be an analog to this for PCI, and checking the status
- contained in the net_device structure doesn't have the same effect.
- For now, return TRUE, but find out if this is necessary for PCI. */
-
- return TRUE;
-} // wl_adapter_is_open
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_probe()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Registered in the pci_driver structure, this function is called when the
- * PCI subsystem finds a new PCI device which matches the information contained
- * in the pci_device_id table.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * ent - this device's entry in the pci_device_id table
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_probe( struct pci_dev *pdev,
- const struct pci_device_id *ent )
-{
- int result;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_probe" );
- DBG_ENTER( DbgInfo );
- DBG_PRINT( "%s\n", VERSION_INFO );
-
- result = wl_pci_setup( pdev );
-
- DBG_LEAVE( DbgInfo );
-
- return result;
-} // wl_pci_probe
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_remove()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Registered in the pci_driver structure, this function is called when the
- * PCI subsystem detects that a PCI device which matches the information
- * contained in the pci_device_id table has been removed.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- *
- * RETURNS:
- *
- * N/A
- *
- ******************************************************************************/
-void wl_pci_remove(struct pci_dev *pdev)
-{
- struct net_device *dev = NULL;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_remove" );
- DBG_ENTER( DbgInfo );
-
- /* Make sure the pci_dev pointer passed in is valid */
- if( pdev == NULL ) {
- DBG_ERROR( DbgInfo, "PCI subsys passed in an invalid pci_dev pointer\n" );
- return;
- }
-
- dev = pci_get_drvdata( pdev );
- if( dev == NULL ) {
- DBG_ERROR( DbgInfo, "Could not retrieve net_device structure\n" );
- return;
- }
-
- /* Perform device cleanup */
- wl_remove( dev );
- free_irq( dev->irq, dev );
-
-#ifdef ENABLE_DMA
- wl_pci_dma_free( pdev, dev->priv );
-#endif
-
- wl_device_dealloc( dev );
-
- DBG_LEAVE( DbgInfo );
- return;
-} // wl_pci_remove
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_setup()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Called by wl_pci_probe() to begin a device's initialization process.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_setup( struct pci_dev *pdev )
-{
- int result = 0;
- struct net_device *dev = NULL;
- struct wl_private *lp = NULL;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_setup" );
- DBG_ENTER( DbgInfo );
-
- /* Make sure the pci_dev pointer passed in is valid */
- if( pdev == NULL ) {
- DBG_ERROR( DbgInfo, "PCI subsys passed in an invalid pci_dev pointer\n" );
- return -ENODEV;
- }
-
- result = pci_enable_device( pdev );
- if( result != 0 ) {
- DBG_ERROR( DbgInfo, "pci_enable_device() failed\n" );
- DBG_LEAVE( DbgInfo );
- return result;
- }
-
- /* We found our device! Let's register it with the system */
- DBG_TRACE( DbgInfo, "Found our device, now registering\n" );
- dev = wl_device_alloc( );
- if( dev == NULL ) {
- DBG_ERROR( DbgInfo, "Could not register device!!!\n" );
- DBG_LEAVE( DbgInfo );
- return -ENOMEM;
- }
-
- /* Make sure that space was allocated for our private adapter struct */
- if( dev->priv == NULL ) {
- DBG_ERROR( DbgInfo, "Private adapter struct was not allocated!!!\n" );
- wl_device_dealloc(dev);
- DBG_LEAVE( DbgInfo );
- return -ENOMEM;
- }
-
-#ifdef ENABLE_DMA
- /* Allocate DMA Descriptors */
- if( wl_pci_dma_alloc( pdev, dev->priv ) < 0 ) {
- DBG_ERROR( DbgInfo, "Could not allocate DMA descriptor memory!!!\n" );
- wl_device_dealloc(dev);
- DBG_LEAVE( DbgInfo );
- return -ENOMEM;
- }
-#endif
-
- /* Register our private adapter structure with PCI */
- pci_set_drvdata( pdev, dev );
-
- /* Fill out bus specific information in the net_device struct */
- dev->irq = pdev->irq;
- SET_MODULE_OWNER( dev );
-
- DBG_TRACE( DbgInfo, "Device Base Address: %#03lx\n", pdev->resource[0].start );
- dev->base_addr = pdev->resource[0].start;
-
- /* Initialize our device here */
- if( !wl_adapter_insert( dev )) {
- DBG_ERROR( DbgInfo, "wl_adapter_insert() FAILED!!!\n" );
- wl_device_dealloc( dev );
- DBG_LEAVE( DbgInfo );
- return -EINVAL;
- }
-
- /* Register our ISR */
- DBG_TRACE( DbgInfo, "Registering ISR...\n" );
-
- result = request_irq(dev->irq, wl_isr, SA_SHIRQ, dev->name, dev);
- if( result ) {
- DBG_WARNING( DbgInfo, "Could not register ISR!!!\n" );
- wl_remove(dev);
- wl_device_dealloc(dev);
- DBG_LEAVE( DbgInfo );
- return result;
- }
-
- /* Make sure interrupts are enabled properly for CardBus */
- lp = dev->priv;
-
- if( lp->hcfCtx.IFB_BusType == CFG_NIC_BUS_TYPE_CARDBUS ||
- lp->hcfCtx.IFB_BusType == CFG_NIC_BUS_TYPE_PCI ) {
- DBG_TRACE( DbgInfo, "This is a PCI/CardBus card, enable interrupts\n" );
- wl_pci_enable_cardbus_interrupts( pdev );
- }
-
- /* Enable bus mastering */
- pci_set_master( pdev );
-
- DBG_LEAVE( DbgInfo );
- return 0;
-} // wl_pci_setup
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_enable_cardbus_interrupts()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Called by wl_pci_setup() to enable interrupts on a CardBus device. This
- * is done by setting bit 15 in the function event mask register. This
- * CardBus-specific register is located in BAR2 (counting from BAR0), in memory
- * space at byte offset 0x1f4 (0x7f4 for WARP).
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- *
- * RETURNS:
- *
- * N/A
- *
- ******************************************************************************/
-void wl_pci_enable_cardbus_interrupts( struct pci_dev *pdev )
-{
- u32 bar2_reg;
- u32 mem_addr_bus;
- u32 func_evt_mask_reg;
- void *mem_addr_kern = NULL;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_enable_cardbus_interrupts" );
- DBG_ENTER( DbgInfo );
-
- /* Initialize to known bad values */
- bar2_reg = 0xdeadbeef;
- mem_addr_bus = 0xdeadbeef;
-
- /* Read the BAR2 register; this register contains the base address of the
- memory region where the function event mask register lives */
- pci_read_config_dword( pdev, PCI_BASE_ADDRESS_2, &bar2_reg );
- mem_addr_bus = bar2_reg & PCI_BASE_ADDRESS_MEM_MASK;
-
- /* Once the base address is obtained, remap the memory region to kernel
- space so we can retrieve the register */
- mem_addr_kern = ioremap( mem_addr_bus, 0x200 );
-
-#ifdef HERMES25
-#define REG_OFFSET 0x07F4
-#else
-#define REG_OFFSET 0x01F4
-#endif // HERMES25
-
-#define BIT15 0x8000
-
- /* Retrieve the functional event mask register, enable interrupts by
- setting Bit 15, and write back the value */
- func_evt_mask_reg = *(u32 *)( mem_addr_kern + REG_OFFSET );
- func_evt_mask_reg |= BIT15;
- *(u32 *)( mem_addr_kern + REG_OFFSET ) = func_evt_mask_reg;
-
- /* Once complete, unmap the region and exit */
- iounmap( mem_addr_kern );
-
- DBG_LEAVE( DbgInfo );
- return;
-} // wl_pci_enable_cardbus_interrupts
-/*============================================================================*/
-
-#ifdef ENABLE_DMA
-/*******************************************************************************
- * wl_pci_dma_alloc()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Allocates all resources needed for PCI/CardBus DMA operation
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_alloc( struct pci_dev *pdev, struct wl_private *lp )
-{
- int i;
- int status = 0;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_dma_alloc" );
- DBG_ENTER( DbgInfo );
-
-// lp->dma.tx_rsc_ind = lp->dma.rx_rsc_ind = 0;
-//
-// /* Alloc for the Tx chain and its reclaim descriptor */
-// for( i = 0; i < NUM_TX_DESC; i++ ) {
-// status = wl_pci_dma_alloc_tx_packet( pdev, lp, &lp->dma.tx_packet[i] );
-// if( status == 0 ) {
-// DBG_PRINT( "lp->dma.tx_packet[%d] : 0x%p\n", i, lp->dma.tx_packet[i] );
-// DBG_PRINT( "lp->dma.tx_packet[%d]->next_desc_addr : 0x%p\n", i, lp->dma.tx_packet[i]->next_desc_addr );
-// lp->dma.tx_rsc_ind++;
-// } else {
-// DBG_ERROR( DbgInfo, "Could not alloc DMA Tx Packet\n" );
-// break;
-// }
-// }
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_desc( pdev, lp, &lp->dma.tx_reclaim_desc );
-// DBG_PRINT( "lp->dma.tx_reclaim_desc: 0x%p\n", lp->dma.tx_reclaim_desc );
-// }
-// /* Alloc for the Rx chain and its reclaim descriptor */
-// if( status == 0 ) {
-// for( i = 0; i < NUM_RX_DESC; i++ ) {
-// status = wl_pci_dma_alloc_rx_packet( pdev, lp, &lp->dma.rx_packet[i] );
-// if( status == 0 ) {
-// DBG_PRINT( "lp->dma.rx_packet[%d] : 0x%p\n", i, lp->dma.rx_packet[i] );
-// DBG_PRINT( "lp->dma.rx_packet[%d]->next_desc_addr : 0x%p\n", i, lp->dma.rx_packet[i]->next_desc_addr );
-// lp->dma.rx_rsc_ind++;
-// } else {
-// DBG_ERROR( DbgInfo, "Could not alloc DMA Rx Packet\n" );
-// break;
-// }
-// }
-// }
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_desc( pdev, lp, &lp->dma.rx_reclaim_desc );
-// DBG_PRINT( "lp->dma.rx_reclaim_desc: 0x%p\n", lp->dma.rx_reclaim_desc );
-// }
-// /* Store status, as host should not call HCF functions if this fails */
-// lp->dma.status = status; //;?all useages of dma.status have been commented out
-// DBG_LEAVE( DbgInfo );
- return status;
-} // wl_pci_dma_alloc
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_free()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Deallocates all resources needed for PCI/CardBus DMA operation
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_free( struct pci_dev *pdev, struct wl_private *lp )
-{
- int i;
- int status = 0;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_dma_free" );
- DBG_ENTER( DbgInfo );
-
- /* Reclaim all Rx packets that were handed over to the HCF */
- /* Do I need to do this? Before this free is called, I've already disabled
- the port which will call wl_pci_dma_hcf_reclaim */
- //if( lp->dma.status == 0 )
- //{
- // wl_pci_dma_hcf_reclaim( lp );
- //}
-
- /* Free everything needed for DMA Rx */
- for( i = 0; i < NUM_RX_DESC; i++ ) {
- if( lp->dma.rx_packet[i] ) {
- status = wl_pci_dma_free_rx_packet( pdev, lp, &lp->dma.rx_packet[i] );
- if( status != 0 ) {
- DBG_WARNING( DbgInfo, "Problem freeing Rx packet\n" );
- }
- }
- }
- lp->dma.rx_rsc_ind = 0;
-
- if( lp->dma.rx_reclaim_desc ) {
- status = wl_pci_dma_free_desc( pdev, lp, &lp->dma.rx_reclaim_desc );
- if( status != 0 ) {
- DBG_WARNING( DbgInfo, "Problem freeing Rx reclaim descriptor\n" );
- }
- }
-
- /* Free everything needed for DMA Tx */
- for( i = 0; i < NUM_TX_DESC; i++ ) {
- if( lp->dma.tx_packet[i] ) {
- status = wl_pci_dma_free_tx_packet( pdev, lp, &lp->dma.tx_packet[i] );
- if( status != 0 ) {
- DBG_WARNING( DbgInfo, "Problem freeing Tx packet\n" );
- }
- }
- }
- lp->dma.tx_rsc_ind = 0;
-
- if( lp->dma.tx_reclaim_desc ) {
- status = wl_pci_dma_free_desc( pdev, lp, &lp->dma.tx_reclaim_desc );
- if( status != 0 ) {
- DBG_WARNING( DbgInfo, "Problem freeing Tx reclaim descriptor\n" );
- }
- }
-
- DBG_LEAVE( DbgInfo );
- return status;
-} // wl_pci_dma_free
-
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_alloc_tx_packet()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Allocates a single Tx packet, consisting of several descriptors and
- * buffers. Data to transmit is first copied into the 'payload' buffer
- * before being transmitted.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- * desc - a pointer which will reference the descriptor to be alloc'd.
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_alloc_tx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc )
-{
-// int status = 0;
-// /*------------------------------------------------------------------------*/
-//
-// if( desc == NULL ) {
-// status = -EFAULT;
-// }
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_desc_and_buf( pdev, lp, desc,
-// HCF_DMA_TX_BUF1_SIZE );
-//
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_desc_and_buf( pdev, lp,
-// &( (*desc)->next_desc_addr ),
-// HCF_MAX_PACKET_SIZE );
-// }
-// }
-// if( status == 0 ) {
-// (*desc)->next_desc_phys_addr = (*desc)->next_desc_addr->desc_phys_addr;
-// }
-// return status;
-} // wl_pci_dma_alloc_tx_packet
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_free_tx_packet()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Frees a single Tx packet, described in the corresponding alloc function.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- * desc - a pointer which references the descriptor to be freed.
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_free_tx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc )
-{
- int status = 0;
- /*------------------------------------------------------------------------*/
-
- if( *desc == NULL ) {
- DBG_PRINT( "Null descriptor\n" );
- status = -EFAULT;
- }
- //;?the "limited" NDIS strategy, assuming a frame consists ALWAYS out of 2
- //descriptors, make this robust
- if( status == 0 && (*desc)->next_desc_addr ) {
- status = wl_pci_dma_free_desc_and_buf( pdev, lp, &(*desc)->next_desc_addr );
- }
- if( status == 0 ) {
- status = wl_pci_dma_free_desc_and_buf( pdev, lp, desc );
- }
- return status;
-} // wl_pci_dma_free_tx_packet
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_alloc_rx_packet()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Allocates a single Rx packet, consisting of two descriptors and one
- * contiguous buffer. The buffer starts with the hermes-specific header.
- * One descriptor points at the start, the other at offset 0x3a of the
- * buffer.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- * desc - a pointer which will reference the descriptor to be alloc'd.
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_alloc_rx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc )
-{
- int status = 0;
- DESC_STRCT *p;
- /*------------------------------------------------------------------------*/
-
-// if( desc == NULL ) {
-// status = -EFAULT;
-// }
-// //;?the "limited" NDIS strategy, assuming a frame consists ALWAYS out of 2
-// //descriptors, make this robust
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_desc( pdev, lp, desc );
-// }
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_buf( pdev, lp, *desc, HCF_MAX_PACKET_SIZE );
-// }
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_desc( pdev, lp, &p );
-// }
-// if( status == 0 ) {
-// /* Size of 1st descriptor becomes 0x3a bytes */
-// SET_BUF_SIZE( *desc, HCF_DMA_RX_BUF1_SIZE );
-//
-// /* Make 2nd descriptor point at offset 0x3a of the buffer */
-// SET_BUF_SIZE( p, ( HCF_MAX_PACKET_SIZE - HCF_DMA_RX_BUF1_SIZE ));
-// p->buf_addr = (*desc)->buf_addr + HCF_DMA_RX_BUF1_SIZE;
-// p->buf_phys_addr = (*desc)->buf_phys_addr + HCF_DMA_RX_BUF1_SIZE;
-// p->next_desc_addr = NULL;
-//
-// /* Chain 2nd descriptor to 1st descriptor */
-// (*desc)->next_desc_addr = p;
-// (*desc)->next_desc_phys_addr = p->desc_phys_addr;
-// }
-
- return status;
-} // wl_pci_dma_alloc_rx_packet
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_free_rx_packet()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Frees a single Rx packet, described in the corresponding alloc function.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- * desc - a pointer which references the descriptor to be freed.
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_free_rx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc )
-{
- int status = 0;
- DESC_STRCT *p;
- /*------------------------------------------------------------------------*/
-
- if( *desc == NULL ) {
- status = -EFAULT;
- }
- if( status == 0 ) {
- p = (*desc)->next_desc_addr;
-
- /* Free the 2nd descriptor */
- if( p != NULL ) {
- p->buf_addr = NULL;
- p->buf_phys_addr = 0;
-
- status = wl_pci_dma_free_desc( pdev, lp, &p );
- }
- }
-
- /* Free the buffer and 1st descriptor */
- if( status == 0 ) {
- SET_BUF_SIZE( *desc, HCF_MAX_PACKET_SIZE );
- status = wl_pci_dma_free_desc_and_buf( pdev, lp, desc );
- }
- return status;
-} // wl_pci_dma_free_rx_packet
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_alloc_desc_and_buf()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Allocates a DMA descriptor and buffer, and associates them with one
- * another.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- * desc - a pointer which will reference the descriptor to be alloc'd
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_alloc_desc_and_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc, int size )
-{
- int status = 0;
- /*------------------------------------------------------------------------*/
-
-// if( desc == NULL ) {
-// status = -EFAULT;
-// }
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_desc( pdev, lp, desc );
-//
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_buf( pdev, lp, *desc, size );
-// }
-// }
- return status;
-} // wl_pci_dma_alloc_desc_and_buf
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_free_desc_and_buf()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Frees a DMA descriptor and associated buffer.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- * desc - a pointer which references the descriptor to be freed
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_free_desc_and_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc )
-{
- int status = 0;
- /*------------------------------------------------------------------------*/
-
- if( desc == NULL ) {
- status = -EFAULT;
- }
- if( status == 0 && *desc == NULL ) {
- status = -EFAULT;
- }
- if( status == 0 ) {
- status = wl_pci_dma_free_buf( pdev, lp, *desc );
-
- if( status == 0 ) {
- status = wl_pci_dma_free_desc( pdev, lp, desc );
- }
- }
- return status;
-} // wl_pci_dma_free_desc_and_buf
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_alloc_desc()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Allocates one DMA descriptor in cache coherent memory.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_alloc_desc( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc )
-{
-// int status = 0;
-// dma_addr_t pa;
-// /*------------------------------------------------------------------------*/
-//
-// DBG_FUNC( "wl_pci_dma_alloc_desc" );
-// DBG_ENTER( DbgInfo );
-//
-// if( desc == NULL ) {
-// status = -EFAULT;
-// }
-// if( status == 0 ) {
-// *desc = pci_alloc_consistent( pdev, sizeof( DESC_STRCT ), &pa );
-// }
-// if( *desc == NULL ) {
-// DBG_ERROR( DbgInfo, "pci_alloc_consistent() failed\n" );
-// status = -ENOMEM;
-// } else {
-// memset( *desc, 0, sizeof( DESC_STRCT ));
-// (*desc)->desc_phys_addr = cpu_to_le32( pa );
-// }
-// DBG_LEAVE( DbgInfo );
-// return status;
-} // wl_pci_dma_alloc_desc
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_free_desc()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Frees one DMA descriptor in cache coherent memory.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_free_desc( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc )
-{
- int status = 0;
- /*------------------------------------------------------------------------*/
-
- if( *desc == NULL ) {
- status = -EFAULT;
- }
- if( status == 0 ) {
- pci_free_consistent( pdev, sizeof( DESC_STRCT ), *desc,
- (*desc)->desc_phys_addr );
- }
- *desc = NULL;
- return status;
-} // wl_pci_dma_free_desc
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_alloc_buf()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Allocates one DMA buffer in cache coherent memory, and associates a DMA
- * descriptor with this buffer.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_alloc_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT *desc, int size )
-{
- int status = 0;
- dma_addr_t pa;
- /*------------------------------------------------------------------------*/
-
-// DBG_FUNC( "wl_pci_dma_alloc_buf" );
-// DBG_ENTER( DbgInfo );
-//
-// if( desc == NULL ) {
-// status = -EFAULT;
-// }
-// if( status == 0 && desc->buf_addr != NULL ) {
-// status = -EFAULT;
-// }
-// if( status == 0 ) {
-// desc->buf_addr = pci_alloc_consistent( pdev, size, &pa );
-// }
-// if( desc->buf_addr == NULL ) {
-// DBG_ERROR( DbgInfo, "pci_alloc_consistent() failed\n" );
-// status = -ENOMEM;
-// } else {
-// desc->buf_phys_addr = cpu_to_le32( pa );
-// SET_BUF_SIZE( desc, size );
-// }
-// DBG_LEAVE( DbgInfo );
- return status;
-} // wl_pci_dma_alloc_buf
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_free_buf()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Frees one DMA buffer in cache coherent memory and clears its association
- * with the given DMA descriptor.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_free_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT *desc )
-{
- int status = 0;
- /*------------------------------------------------------------------------*/
-
- if( desc == NULL ) {
- status = -EFAULT;
- }
- if( status == 0 && desc->buf_addr == NULL ) {
- status = -EFAULT;
- }
- if( status == 0 ) {
- pci_free_consistent( pdev, GET_BUF_SIZE( desc ), desc->buf_addr,
- desc->buf_phys_addr );
-
- desc->buf_addr = 0;
- desc->buf_phys_addr = 0;
- SET_BUF_SIZE( desc, 0 );
- }
- return status;
-} // wl_pci_dma_free_buf
-/*============================================================================*/
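The two helpers above rely on the basic pairing rule of the coherent DMA API: whatever pci_alloc_consistent() returns must later be handed back to pci_free_consistent() with the same size and the same bus address. A minimal sketch of that round trip, not the driver's code; the descriptor bookkeeping done above with SET_BUF_SIZE()/GET_BUF_SIZE() is left as a placeholder because those macros are defined outside this diff.

static void example_buf_roundtrip(struct pci_dev *pdev, int size)
{
	dma_addr_t pa;
	void *cpu_addr;

	cpu_addr = pci_alloc_consistent(pdev, size, &pa);
	if (cpu_addr == NULL)
		return;

	/* ... record (cpu_addr, pa, size) in a descriptor and use it ... */

	/* Free with the same size and bus address the allocation returned. */
	pci_free_consistent(pdev, size, cpu_addr, pa);
}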
-
-/*******************************************************************************
- * wl_pci_dma_hcf_supply()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Supply HCF with DMA-related resources. These consist of:
- * - buffers and descriptors for receive purposes
- * - one 'reclaim' descriptor for the transmit path, used to fulfill a
- * certain H25 DMA engine requirement
- * - one 'reclaim' descriptor for the receive path, used to fulfill a
- * certain H25 DMA engine requirement
- *
- * This function is called at start-of-day or at re-initialization.
- *
- * PARAMETERS:
- *
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * N/A
- *
- ******************************************************************************/
-void wl_pci_dma_hcf_supply( struct wl_private *lp )
-{
- int i;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_dma_hcf_supply" );
- DBG_ENTER( DbgInfo );
-
- //if( lp->dma.status == 0 );
- //{
- /* Hand over the Rx/Tx reclaim descriptors to the HCF */
- if( lp->dma.tx_reclaim_desc ) {
- DBG_PRINT( "lp->dma.tx_reclaim_desc: 0x%p\n", lp->dma.tx_reclaim_desc );
- hcf_dma_tx_put( &lp->hcfCtx, lp->dma.tx_reclaim_desc, 0 );
- lp->dma.tx_reclaim_desc = NULL;
- DBG_PRINT( "lp->dma.tx_reclaim_desc: 0x%p\n", lp->dma.tx_reclaim_desc );
- }
- if( lp->dma.rx_reclaim_desc ) {
- DBG_PRINT( "lp->dma.rx_reclaim_desc: 0x%p\n", lp->dma.rx_reclaim_desc );
- hcf_dma_rx_put( &lp->hcfCtx, lp->dma.rx_reclaim_desc );
- lp->dma.rx_reclaim_desc = NULL;
- DBG_PRINT( "lp->dma.rx_reclaim_desc: 0x%p\n", lp->dma.rx_reclaim_desc );
- }
- /* Hand over the Rx descriptor chain to the HCF */
- for( i = 0; i < NUM_RX_DESC; i++ ) {
- DBG_PRINT( "lp->dma.rx_packet[%d]: 0x%p\n", i, lp->dma.rx_packet[i] );
- hcf_dma_rx_put( &lp->hcfCtx, lp->dma.rx_packet[i] );
- lp->dma.rx_packet[i] = NULL;
- DBG_PRINT( "lp->dma.rx_packet[%d]: 0x%p\n", i, lp->dma.rx_packet[i] );
- }
- //}
-
- DBG_LEAVE( DbgInfo );
- return;
-} // wl_pci_dma_hcf_supply
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_hcf_reclaim()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Return DMA-related resources from the HCF. These consist of:
- * - buffers and descriptors for receive purposes
- * - buffers and descriptors for transmit purposes
- * - one 'reclaim' descriptor for the transmit path, used to fulfill a
- * certain H25 DMA engine requirement
- * - one 'reclaim' descriptor for the receive path, used to fulfill a
- * certain H25 DMA engine requirement
- *
- * This function is called at end-of-day or at re-initialization.
- *
- * PARAMETERS:
- *
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * N/A
- *
- ******************************************************************************/
-void wl_pci_dma_hcf_reclaim( struct wl_private *lp )
-{
- int i;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_dma_hcf_reclaim" );
- DBG_ENTER( DbgInfo );
-
- wl_pci_dma_hcf_reclaim_rx( lp );
- for( i = 0; i < NUM_RX_DESC; i++ ) {
- DBG_PRINT( "rx_packet[%d] 0x%p\n", i, lp->dma.rx_packet[i] );
-// if( lp->dma.rx_packet[i] == NULL ) {
-// DBG_PRINT( "wl_pci_dma_hcf_reclaim: rx_packet[%d] NULL\n", i );
-// }
- }
-
- wl_pci_dma_hcf_reclaim_tx( lp );
- for( i = 0; i < NUM_TX_DESC; i++ ) {
- DBG_PRINT( "tx_packet[%d] 0x%p\n", i, lp->dma.tx_packet[i] );
-// if( lp->dma.tx_packet[i] == NULL ) {
-// DBG_PRINT( "wl_pci_dma_hcf_reclaim: tx_packet[%d] NULL\n", i );
-// }
- }
-
- DBG_LEAVE( DbgInfo );
- return;
-} // wl_pci_dma_hcf_reclaim
-/*============================================================================*/
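Taken together, the two functions above define the lifecycle of the driver's DMA resources: supply everything to the HCF once it has been allocated, and take everything back before it is freed. A hypothetical sketch of that ordering; the allocation and free steps are placeholders, only the supply/reclaim calls come from the code above.

static int example_dma_bringup(struct wl_private *lp)
{
	/* ... allocate Rx/Tx descriptor chains and the two reclaim descriptors ... */
	wl_pci_dma_hcf_supply(lp);	/* start-of-day: hand resources to the HCF */
	return 0;
}

static void example_dma_teardown(struct wl_private *lp)
{
	wl_pci_dma_hcf_reclaim(lp);	/* end-of-day: take the resources back */
	/* ... free the descriptors and buffers returned above ... */
}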
-
-/*******************************************************************************
- * wl_pci_dma_hcf_reclaim_rx()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Reclaim Rx packets that have already been processed by the HCF.
- *
- * PARAMETERS:
- *
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * N/A
- *
- ******************************************************************************/
-void wl_pci_dma_hcf_reclaim_rx( struct wl_private *lp )
-{
- int i;
- DESC_STRCT *p;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_dma_hcf_reclaim_rx" );
- DBG_ENTER( DbgInfo );
-
- //if( lp->dma.status == 0 )
- //{
- while ( ( p = hcf_dma_rx_get( &lp->hcfCtx ) ) != NULL ) {
- if( p && p->buf_addr == NULL ) {
- /* A reclaim descriptor is being given back by the HCF. Reclaim
- descriptors have a NULL buf_addr */
- lp->dma.rx_reclaim_desc = p;
- DBG_PRINT( "reclaim_descriptor: 0x%p\n", p );
- continue;
- }
- for( i = 0; i < NUM_RX_DESC; i++ ) {
- if( lp->dma.rx_packet[i] == NULL ) {
- break;
- }
- }
- /* An Rx buffer descriptor is being given back by the HCF */
- lp->dma.rx_packet[i] = p;
- lp->dma.rx_rsc_ind++;
- DBG_PRINT( "rx_packet[%d] 0x%p\n", i, lp->dma.rx_packet[i] );
- }
- //}
- DBG_LEAVE( DbgInfo );
-} // wl_pci_dma_hcf_reclaim_rx
-/*============================================================================*/
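Once wl_pci_dma_hcf_reclaim_rx() has refilled lp->dma.rx_packet[], a receive path can consume the completed descriptors and immediately re-arm them with hcf_dma_rx_put(), the same call used in wl_pci_dma_hcf_supply(). The loop below is a hypothetical service routine; the driver's actual Rx handling is not part of this diff.

static void example_rx_service(struct wl_private *lp)
{
	int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		DESC_STRCT *desc = lp->dma.rx_packet[i];

		if (desc == NULL)
			continue;

		/* ... pass desc->buf_addr up the network stack ... */

		lp->dma.rx_packet[i] = NULL;
		lp->dma.rx_rsc_ind--;
		hcf_dma_rx_put(&lp->hcfCtx, desc);	/* re-arm the descriptor */
	}
}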
-
-/*******************************************************************************
- * wl_pci_dma_get_tx_packet()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Obtains a Tx descriptor from the chain to use for Tx.
- *
- * PARAMETERS:
- *
- * lp - a pointer to the device's wl_private structure.
- *
- * RETURNS:
- *
- * A pointer to the retrieved descriptor, or NULL if none is available
- *
- ******************************************************************************/
-DESC_STRCT * wl_pci_dma_get_tx_packet( struct wl_private *lp )
-{
- int i;
- DESC_STRCT *desc = NULL;
- /*------------------------------------------------------------------------*/
-
- for( i = 0; i < NUM_TX_DESC; i++ ) {
- if( lp->dma.tx_packet[i] ) {
- break;
- }
- }
-
- if( i != NUM_TX_DESC ) {
- desc = lp->dma.tx_packet[i];
-
- lp->dma.tx_packet[i] = NULL;
- lp->dma.tx_rsc_ind--;
-
- memset( desc->buf_addr, 0, HCF_DMA_TX_BUF1_SIZE );
- }
-
- return desc;
-} // wl_pci_dma_get_tx_packet
-/*============================================================================*/
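A transmit path would consume this pool by pairing wl_pci_dma_get_tx_packet() with hcf_dma_tx_put(). The sketch below is a hypothetical caller, not the driver's Tx routine: the final argument to hcf_dma_tx_put() is 0 as in wl_pci_dma_hcf_supply(), and how the frame length is recorded in the descriptor is left as a placeholder.

static int example_send_frame(struct wl_private *lp, const void *frame, int len)
{
	DESC_STRCT *desc;

	if (len > HCF_DMA_TX_BUF1_SIZE)
		return -EMSGSIZE;

	desc = wl_pci_dma_get_tx_packet(lp);
	if (desc == NULL)
		return -EBUSY;		/* pool exhausted; wait for a Tx reclaim */

	memcpy(desc->buf_addr, frame, len);
	/* ... record 'len' in the descriptor as the HCF expects ... */
	hcf_dma_tx_put(&lp->hcfCtx, desc, 0);
	return 0;
}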
-
-/*******************************************************************************
- * wl_pci_dma_put_tx_packet()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Returns a Tx descriptor to the chain.
- *
- * PARAMETERS:
- *
- * lp - a pointer to the device's wl_private structure.
- * desc - a pointer to the descriptor to return.
- *
- * RETURNS:
- *
- * N/A
- *
- ******************************************************************************/
-void wl_pci_dma_put_tx_packet( struct wl_private *lp, DESC_STRCT *desc )
-{
- int i;
- /*------------------------------------------------------------------------*/
-
- for( i = 0; i < NUM_TX_DESC; i++ ) {
- if( lp->dma.tx_packet[i] == NULL ) {
- break;
- }
- }
-
- if( i != NUM_TX_DESC ) {
- lp->dma.tx_packet[i] = desc;
- lp->dma.tx_rsc_ind++;
- }
-} // wl_pci_dma_put_tx_packet
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_hcf_reclaim_tx()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Reclaim Tx packets that have been processed by the HCF, due either to a
- * port disable or to a Tx completion.
- *
- * PARAMETERS:
- *
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * N/A
- *
- ******************************************************************************/
-void wl_pci_dma_hcf_reclaim_tx( struct wl_private *lp )
-{
- int i;
- DESC_STRCT *p;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_dma_hcf_reclaim_tx" );
- DBG_ENTER( DbgInfo );
-
- //if( lp->dma.status == 0 )
- //{
- while ( ( p = hcf_dma_tx_get( &lp->hcfCtx ) ) != NULL ) {
-
- if( p != NULL && p->buf_addr == NULL ) {
- /* A Reclaim descriptor is being given back by the HCF. Reclaim
- descriptors have a NULL buf_addr */
- lp->dma.tx_reclaim_desc = p;
- DBG_PRINT( "reclaim_descriptor: 0x%p\n", p );
- continue;
- }
- for( i = 0; i < NUM_TX_DESC; i++ ) {
- if( lp->dma.tx_packet[i] == NULL ) {
- break;
- }
- }
- /* A Tx buffer descriptor is being given back by the HCF */
- lp->dma.tx_packet[i] = p;
- lp->dma.tx_rsc_ind++;
- DBG_PRINT( "tx_packet[%d] 0x%p\n", i, lp->dma.tx_packet[i] );
- }
- //}
-
- if( lp->netif_queue_on == FALSE ) {
- netif_wake_queue( lp->dev );
- WL_WDS_NETIF_WAKE_QUEUE( lp );
- lp->netif_queue_on = TRUE;
- }
- DBG_LEAVE( DbgInfo );
- return;
-} // wl_pci_dma_hcf_reclaim_tx
-/*============================================================================*/
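The netif_wake_queue() call above is one half of the usual flow-control pairing: the transmit side stops the queue when the descriptor pool runs dry, and this reclaim path wakes it once descriptors come back. A hypothetical sketch of the stopping half, reusing the lp->dma.tx_rsc_ind and lp->netif_queue_on bookkeeping visible in this file.

static void example_tx_pool_empty(struct wl_private *lp)
{
	if (lp->dma.tx_rsc_ind == 0 && lp->netif_queue_on == TRUE) {
		netif_stop_queue(lp->dev);	/* no Tx descriptors left */
		lp->netif_queue_on = FALSE;
	}
}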
-#endif // ENABLE_DMA
diff --git a/drivers/staging/wlags49_h2/wl_pci.h b/drivers/staging/wlags49_h2/wl_pci.h
deleted file mode 100644
index 86831f1..0000000
--- a/drivers/staging/wlags49_h2/wl_pci.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*******************************************************************************
- * Agere Systems Inc.
- * Wireless device driver for Linux (wlags49).
- *
- * Copyright (c) 1998-2003 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- * Initially developed by TriplePoint, Inc.
- * http://www.triplepoint.com
- *
- *------------------------------------------------------------------------------
- *
- * Header describing information required for the driver to support PCI.
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2003 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- ******************************************************************************/
-
-#ifndef __WL_PCI_H__
-#define __WL_PCI_H__
-
-
-
-
-/*******************************************************************************
- * constant definitions
- ******************************************************************************/
-#define PCI_VENDOR_IDWL_LKM 0x11C1 /* Lucent Microelectronics */
-#define PCI_DEVICE_ID_WL_LKM_0 0xAB30 /* Mini PCI */
-#define PCI_DEVICE_ID_WL_LKM_1 0xAB34 /* Mini PCI */
-#define PCI_DEVICE_ID_WL_LKM_2 0xAB11 /* WARP CardBus */
-
-
-
-
-/*******************************************************************************
- * function prototypes
- ******************************************************************************/
-int wl_adapter_init_module( void );
-
-void wl_adapter_cleanup_module( void );
-
-int wl_adapter_insert( struct net_device *dev );
-
-int wl_adapter_open( struct net_device *dev );
-
-int wl_adapter_close( struct net_device *dev );
-
-int wl_adapter_is_open( struct net_device *dev );
-
-
-#ifdef ENABLE_DMA
-
-void wl_pci_dma_hcf_supply( struct wl_private *lp );
-
-void wl_pci_dma_hcf_reclaim( struct wl_private *lp );
-
-DESC_STRCT * wl_pci_dma_get_tx_packet( struct wl_private *lp );
-
-void wl_pci_dma_put_tx_packet( struct wl_private *lp, DESC_STRCT *desc );
-
-void wl_pci_dma_hcf_reclaim_tx( struct wl_private *lp );
-
-#endif // ENABLE_DMA
-
-
-#endif // __WL_PCI_H__
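For reference, the vendor/device constants defined in this header are the kind of values that feed a PCI match table. A hypothetical illustration follows; the table and its name are made up, only the constants come from the header above.

static const struct pci_device_id example_wl_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_IDWL_LKM, PCI_DEVICE_ID_WL_LKM_0) },	/* Mini PCI */
	{ PCI_DEVICE(PCI_VENDOR_IDWL_LKM, PCI_DEVICE_ID_WL_LKM_1) },	/* Mini PCI */
	{ PCI_DEVICE(PCI_VENDOR_IDWL_LKM, PCI_DEVICE_ID_WL_LKM_2) },	/* WARP CardBus */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_wl_pci_tbl);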
diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
index 7e10dcd..41f3324 100644
--- a/drivers/staging/wlags49_h2/wl_priv.c
+++ b/drivers/staging/wlags49_h2/wl_priv.c
@@ -94,16 +94,6 @@ int cfg_driver_info(struct uilreq *urq, struct wl_private *lp);
int cfg_driver_identity(struct uilreq *urq, struct wl_private *lp);
-/*******************************************************************************
- * global variables
- ******************************************************************************/
-#if DBG
-extern dbg_info_t *DbgInfo;
-#endif /* DBG */
-
-
-
-
/* If USE_UIL is not defined, then none of the UIL Interface code below will
be included in the build */
#ifdef USE_UIL
@@ -130,10 +120,6 @@ extern dbg_info_t *DbgInfo;
int wvlan_uil(struct uilreq *urq, struct wl_private *lp)
{
int ioctl_ret = 0;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC("wvlan_uil");
- DBG_ENTER(DbgInfo);
switch (urq->command) {
case UIL_FUN_CONNECT:
@@ -165,7 +151,6 @@ int wvlan_uil(struct uilreq *urq, struct wl_private *lp)
ioctl_ret = -EOPNOTSUPP;
break;
}
- DBG_LEAVE(DbgInfo);
return ioctl_ret;
} /* wvlan_uil */
/*============================================================================*/
@@ -195,12 +180,6 @@ int wvlan_uil(struct uilreq *urq, struct wl_private *lp)
int wvlan_uil_connect(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_uil_connect");
- DBG_ENTER(DbgInfo);
-
if (!(lp->flags & WVLAN2_UIL_CONNECTED)) {
lp->flags |= WVLAN2_UIL_CONNECTED;
@@ -211,7 +190,6 @@ int wvlan_uil_connect(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_IN_USE;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_connect */
/*============================================================================*/
@@ -241,12 +219,6 @@ int wvlan_uil_connect(struct uilreq *urq, struct wl_private *lp)
int wvlan_uil_disconnect(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_uil_disconnect");
- DBG_ENTER(DbgInfo);
-
if (urq->hcfCtx == &(lp->hcfCtx)) {
if (lp->flags & WVLAN2_UIL_CONNECTED) {
@@ -266,7 +238,6 @@ int wvlan_uil_disconnect(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_disconnect */
/*============================================================================*/
@@ -297,12 +268,6 @@ int wvlan_uil_action(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
ltv_t *ltv;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_uil_action");
- DBG_ENTER(DbgInfo);
-
if (urq->hcfCtx == &(lp->hcfCtx)) {
/* Make sure there's an LTV in the request buffer */
@@ -344,7 +309,6 @@ int wvlan_uil_action(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_action */
/*============================================================================*/
@@ -376,11 +340,6 @@ int wvlan_uil_action(struct uilreq *urq, struct wl_private *lp)
int wvlan_uil_block(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_uil_block");
- DBG_ENTER(DbgInfo);
if (urq->hcfCtx == &(lp->hcfCtx)) {
if (capable(CAP_NET_ADMIN)) {
@@ -398,7 +357,6 @@ int wvlan_uil_block(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_block */
/*============================================================================*/
@@ -428,11 +386,6 @@ int wvlan_uil_block(struct uilreq *urq, struct wl_private *lp)
int wvlan_uil_unblock(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_uil_unblock");
- DBG_ENTER(DbgInfo);
if (urq->hcfCtx == &(lp->hcfCtx)) {
if (capable(CAP_NET_ADMIN)) {
@@ -451,7 +404,6 @@ int wvlan_uil_unblock(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_unblock */
/*============================================================================*/
@@ -482,11 +434,6 @@ int wvlan_uil_send_diag_msg(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
DESC_STRCT Descp[1];
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_uil_send_diag_msg");
- DBG_ENTER(DbgInfo);
if (urq->hcfCtx == &(lp->hcfCtx)) {
if (capable(CAP_NET_ADMIN)) {
@@ -499,7 +446,6 @@ int wvlan_uil_send_diag_msg(struct uilreq *urq, struct wl_private *lp)
if (result != 0) {
DBG_ERROR(DbgInfo, "verify_area failed, result: %d\n", result);
urq->result = UIL_FAILURE;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -518,7 +464,6 @@ int wvlan_uil_send_diag_msg(struct uilreq *urq, struct wl_private *lp)
DBG_ERROR(DbgInfo, "ENOMEM\n");
urq->result = UIL_FAILURE;
result = -ENOMEM;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -539,7 +484,6 @@ int wvlan_uil_send_diag_msg(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_send_diag_msg */
/*============================================================================*/
@@ -575,10 +519,6 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
#ifdef USE_WDS
hcf_16 hcfPort = HCF_PORT_0;
#endif /* USE_WDS */
- /*------------------------------------------------------------------------*/
- DBG_FUNC("wvlan_uil_put_info");
- DBG_ENTER(DbgInfo);
-
if (urq->hcfCtx == &(lp->hcfCtx)) {
if (capable(CAP_NET_ADMIN)) {
@@ -589,7 +529,6 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_LEN;
DBG_ERROR(DbgInfo, "No Length/Type in LTV!!!\n");
DBG_ERROR(DbgInfo, "UIL_ERR_LEN\n");
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -598,7 +537,6 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
if (result != 0) {
urq->result = UIL_FAILURE;
DBG_ERROR(DbgInfo, "verify_area(), VERIFY_READ FAILED\n");
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -611,7 +549,6 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
urq->len = sizeof(lp->ltvRecord);
urq->result = UIL_ERR_LEN;
DBG_ERROR(DbgInfo, "UIL_ERR_LEN\n");
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -627,7 +564,6 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
urq->len = sizeof(lp->ltvRecord);
urq->result = UIL_ERR_LEN;
result = -ENOMEM;
- DBG_LEAVE(DbgInfo);
return result;
}
} else {
@@ -1161,7 +1097,6 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_put_info */
@@ -1191,10 +1126,6 @@ int wvlan_uil_get_info(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
int i;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC("wvlan_uil_get_info");
- DBG_ENTER(DbgInfo);
if (urq->hcfCtx == &(lp->hcfCtx)) {
if ((urq->data != NULL) && (urq->len != 0)) {
@@ -1207,7 +1138,6 @@ int wvlan_uil_get_info(struct uilreq *urq, struct wl_private *lp)
DBG_ERROR(DbgInfo, "No Length/Type in LTV!!!\n");
DBG_ERROR(DbgInfo, "UIL_ERR_LEN\n");
urq->result = UIL_ERR_LEN;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -1216,7 +1146,6 @@ int wvlan_uil_get_info(struct uilreq *urq, struct wl_private *lp)
if (result != 0) {
DBG_ERROR(DbgInfo, "verify_area(), VERIFY_READ FAILED\n");
urq->result = UIL_FAILURE;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -1229,7 +1158,6 @@ int wvlan_uil_get_info(struct uilreq *urq, struct wl_private *lp)
DBG_ERROR(DbgInfo, "Incoming LTV too big\n");
urq->len = sizeof(lp->ltvRecord);
urq->result = UIL_ERR_LEN;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -1513,7 +1441,6 @@ int wvlan_uil_get_info(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_get_info */
/*============================================================================*/
@@ -1544,18 +1471,11 @@ int wvlan_uil_get_info(struct uilreq *urq, struct wl_private *lp)
int cfg_driver_info(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("cfg_driver_info");
- DBG_ENTER(DbgInfo);
-
/* Make sure that user buffer can handle the driver information buffer */
if (urq->len < sizeof(lp->driverInfo)) {
urq->len = sizeof(lp->driverInfo);
urq->result = UIL_ERR_LEN;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -1563,7 +1483,6 @@ int cfg_driver_info(struct uilreq *urq, struct wl_private *lp)
result = verify_area(VERIFY_WRITE, urq->data, sizeof(lp->driverInfo));
if (result != 0) {
urq->result = UIL_FAILURE;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -1573,7 +1492,6 @@ int cfg_driver_info(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_SUCCESS;
copy_to_user(urq->data, &(lp->driverInfo), sizeof(lp->driverInfo));
- DBG_LEAVE(DbgInfo);
return result;
} /* cfg_driver_info */
/*============================================================================*/
@@ -1603,18 +1521,11 @@ int cfg_driver_info(struct uilreq *urq, struct wl_private *lp)
int cfg_driver_identity(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_driver_identity");
- DBG_ENTER(DbgInfo);
-
/* Make sure that user buffer can handle the driver identity structure. */
if (urq->len < sizeof(lp->driverIdentity)) {
urq->len = sizeof(lp->driverIdentity);
urq->result = UIL_ERR_LEN;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -1622,7 +1533,6 @@ int cfg_driver_identity(struct uilreq *urq, struct wl_private *lp)
result = verify_area(VERIFY_WRITE, urq->data, sizeof(lp->driverIdentity));
if (result != 0) {
urq->result = UIL_FAILURE;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -1630,7 +1540,6 @@ int cfg_driver_identity(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_SUCCESS;
copy_to_user(urq->data, &(lp->driverIdentity), sizeof(lp->driverIdentity));
- DBG_LEAVE(DbgInfo);
return result;
} /* cfg_driver_identity */
/*============================================================================*/
@@ -1672,11 +1581,6 @@ int wvlan_set_netname(struct net_device *dev,
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_set_netname");
- DBG_ENTER(DbgInfo);
wl_lock(lp, &flags);
@@ -1687,7 +1591,6 @@ int wvlan_set_netname(struct net_device *dev,
wl_apply(lp);
wl_unlock(lp, &flags);
- DBG_LEAVE(DbgInfo);
return ret;
} /* wvlan_set_netname */
/*============================================================================*/
@@ -1724,11 +1627,6 @@ int wvlan_get_netname(struct net_device *dev,
int ret = 0;
int status = -1;
wvName_t *pName;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_get_netname");
- DBG_ENTER(DbgInfo);
wl_lock(lp, &flags);
@@ -1751,7 +1649,6 @@ int wvlan_get_netname(struct net_device *dev,
wl_unlock(lp, &flags);
- DBG_LEAVE(DbgInfo);
return ret;
} /* wvlan_get_netname */
/*============================================================================*/
@@ -1787,11 +1684,6 @@ int wvlan_set_station_nickname(struct net_device *dev,
unsigned long flags;
size_t len;
int ret = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_set_station_nickname");
- DBG_ENTER(DbgInfo);
wl_lock(lp, &flags);
@@ -1803,7 +1695,6 @@ int wvlan_set_station_nickname(struct net_device *dev,
wl_apply(lp);
wl_unlock(lp, &flags);
- DBG_LEAVE(DbgInfo);
return ret;
} /* wvlan_set_station_nickname */
/*============================================================================*/
@@ -1840,11 +1731,6 @@ int wvlan_get_station_nickname(struct net_device *dev,
int ret = 0;
int status = -1;
wvName_t *pName;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_get_station_nickname");
- DBG_ENTER(DbgInfo);
wl_lock(lp, &flags);
@@ -1867,7 +1753,6 @@ int wvlan_get_station_nickname(struct net_device *dev,
wl_unlock(lp, &flags);
/* out: */
- DBG_LEAVE(DbgInfo);
return ret;
} /* wvlan_get_station_nickname */
/*============================================================================*/
@@ -1903,11 +1788,6 @@ int wvlan_set_porttype(struct net_device *dev,
unsigned long flags;
int ret = 0;
hcf_16 portType;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_set_porttype");
- DBG_ENTER(DbgInfo);
wl_lock(lp, &flags);
@@ -1928,7 +1808,6 @@ out_unlock:
wl_unlock(lp, &flags);
/* out: */
- DBG_LEAVE(DbgInfo);
return ret;
}
@@ -1965,11 +1844,6 @@ int wvlan_get_porttype(struct net_device *dev,
int status = -1;
hcf_16 *pPortType;
__u32 *pData = (__u32 *)extra;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_get_porttype");
- DBG_ENTER(DbgInfo);
wl_lock(lp, &flags);
@@ -1990,7 +1864,6 @@ int wvlan_get_porttype(struct net_device *dev,
wl_unlock(lp, &flags);
/* out: */
- DBG_LEAVE(DbgInfo);
return ret;
} /* wvlan_get_porttype */
/*============================================================================*/
@@ -2023,12 +1896,6 @@ int wvlan_get_porttype(struct net_device *dev,
int wvlan_rts(struct rtsreq *rrq, __u32 io_base)
{
int ioctl_ret = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_rts");
- DBG_ENTER(DbgInfo);
-
DBG_PRINT("io_base: 0x%08x\n", io_base);
@@ -2060,7 +1927,6 @@ int wvlan_rts(struct rtsreq *rrq, __u32 io_base)
break;
}
- DBG_LEAVE(DbgInfo);
return ioctl_ret;
} /* wvlan_rts */
/*============================================================================*/
diff --git a/drivers/staging/wlags49_h2/wl_profile.c b/drivers/staging/wlags49_h2/wl_profile.c
index beabf59..28cc576 100644
--- a/drivers/staging/wlags49_h2/wl_profile.c
+++ b/drivers/staging/wlags49_h2/wl_profile.c
@@ -101,16 +101,11 @@
#include <wl_profile.h>
-/*******************************************************************************
- * global variables
- ******************************************************************************/
-
/* Definition needed to prevent unresolved external in unistd.h */
static int errno;
#if DBG
extern p_u32 DebugFlag;
-extern dbg_info_t *DbgInfo;
#endif
int parse_yes_no(char *value);
@@ -163,10 +158,6 @@ void parse_config(struct net_device *dev)
mm_segment_t fs;
struct wl_private *wvlan_config = NULL;
ENCSTRCT sEncryption;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC("parse_config");
- DBG_ENTER(DbgInfo);
/* Get the wavelan specific info for this device */
wvlan_config = dev->priv;
@@ -272,7 +263,6 @@ void parse_config(struct net_device *dev)
set_fs(fs); /* Return to the original context */
#endif /* BIN_DL */
- DBG_LEAVE(DbgInfo);
return;
} /* parse_config */
@@ -354,8 +344,6 @@ void translate_option(char *buffer, struct wl_private *lp)
u_char mac_value[ETH_ALEN];
/*------------------------------------------------------------------------*/
- DBG_FUNC("translate_option");
-
if (buffer == NULL || lp == NULL) {
DBG_ERROR(DbgInfo, "Config file buffer and/or wavelan buffer ptr NULL\n");
return;
@@ -959,10 +947,6 @@ void ParseConfigLine(char *pszLine, char **ppszLVal, char **ppszRVal)
{
int i;
int size;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC("ParseConfigLine");
- DBG_ENTER(DbgInfo);
/* get a snapshot of our string size */
size = strlen(pszLine);
@@ -1005,7 +989,6 @@ void ParseConfigLine(char *pszLine, char **ppszLVal, char **ppszRVal)
pszLine[i] = '\0';
}
}
- DBG_LEAVE(DbgInfo);
} /* ParseConfigLine */
/*============================================================================*/
diff --git a/drivers/staging/wlags49_h2/wl_util.c b/drivers/staging/wlags49_h2/wl_util.c
index 404ec7d..4ca6e42 100644
--- a/drivers/staging/wlags49_h2/wl_util.c
+++ b/drivers/staging/wlags49_h2/wl_util.c
@@ -128,13 +128,6 @@ static const long chan_freq_list[][2] =
{161,5805}
};
-#if DBG
-extern dbg_info_t *DbgInfo;
-#endif /* DBG */
-
-
-
-
/*******************************************************************************
* dbm()
*******************************************************************************
@@ -481,10 +474,6 @@ void wl_hcf_error( struct net_device *dev, int hcfStatus )
******************************************************************************/
void wl_endian_translate_event( ltv_t *pLtv )
{
- DBG_FUNC( "wl_endian_translate_event" );
- DBG_ENTER( DbgInfo );
-
-
switch( pLtv->typ ) {
case CFG_TALLIES:
break;
@@ -582,9 +571,6 @@ void wl_endian_translate_event( ltv_t *pLtv )
default:
break;
}
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_endian_translate_event
/*============================================================================*/
@@ -997,10 +983,6 @@ int wl_get_chan_from_freq( long frequency )
void wl_process_link_status( struct wl_private *lp )
{
hcf_16 link_stat;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_process_link_status" );
- DBG_ENTER( DbgInfo );
if( lp != NULL ) {
//link_stat = lp->hcfCtx.IFB_DSLinkStat & CFG_LINK_STAT_FW;
@@ -1027,8 +1009,6 @@ void wl_process_link_status( struct wl_private *lp )
break;
}
}
- DBG_LEAVE( DbgInfo );
- return;
} // wl_process_link_status
/*============================================================================*/
@@ -1058,12 +1038,6 @@ void wl_process_probe_response( struct wl_private *lp )
PROBE_RESP *probe_rsp;
hcf_8 *wpa_ie = NULL;
hcf_16 wpa_ie_len = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wl_process_probe_response" );
- DBG_ENTER( DbgInfo );
-
if( lp != NULL ) {
probe_rsp = (PROBE_RESP *)&lp->ProbeResp;
@@ -1235,9 +1209,6 @@ void wl_process_probe_response( struct wl_private *lp )
}
}
}
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_process_probe_response
/*============================================================================*/
@@ -1263,10 +1234,6 @@ void wl_process_probe_response( struct wl_private *lp )
******************************************************************************/
void wl_process_updated_record( struct wl_private *lp )
{
- DBG_FUNC( "wl_process_updated_record" );
- DBG_ENTER( DbgInfo );
-
-
if( lp != NULL ) {
lp->updatedRecord.u.u16[0] = CNV_LITTLE_TO_INT( lp->updatedRecord.u.u16[0] );
@@ -1286,9 +1253,6 @@ void wl_process_updated_record( struct wl_private *lp )
lp->updatedRecord.u.u16[0] );
}
}
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_process_updated_record
/*============================================================================*/
@@ -1315,12 +1279,6 @@ void wl_process_updated_record( struct wl_private *lp )
void wl_process_assoc_status( struct wl_private *lp )
{
ASSOC_STATUS_STRCT *assoc_stat;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wl_process_assoc_status" );
- DBG_ENTER( DbgInfo );
-
if( lp != NULL ) {
assoc_stat = (ASSOC_STATUS_STRCT *)&lp->assoc_stat;
@@ -1353,9 +1311,6 @@ void wl_process_assoc_status( struct wl_private *lp )
assoc_stat->oldApAddr);
}
}
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_process_assoc_status
/*============================================================================*/
@@ -1382,12 +1337,6 @@ void wl_process_assoc_status( struct wl_private *lp )
void wl_process_security_status( struct wl_private *lp )
{
SECURITY_STATUS_STRCT *sec_stat;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wl_process_security_status" );
- DBG_ENTER( DbgInfo );
-
if( lp != NULL ) {
sec_stat = (SECURITY_STATUS_STRCT *)&lp->sec_stat;
@@ -1425,9 +1374,6 @@ void wl_process_security_status( struct wl_private *lp )
DBG_TRACE(DbgInfo, "Reason : 0x%04x\n", sec_stat->reason);
}
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_process_security_status
/*============================================================================*/
@@ -1438,9 +1384,6 @@ int wl_get_tallies(struct wl_private *lp,
int status;
CFG_HERMES_TALLIES_STRCT *pTallies;
- DBG_FUNC( "wl_get_tallies" );
- DBG_ENTER(DbgInfo);
-
/* Get the current tallies from the adapter */
lp->ltvRecord.len = 1 + HCF_TOT_TAL_CNT * sizeof(hcf_16);
lp->ltvRecord.typ = CFG_TALLIES;
@@ -1456,8 +1399,6 @@ int wl_get_tallies(struct wl_private *lp,
ret = -EFAULT;
}
- DBG_LEAVE( DbgInfo );
-
return ret;
}
diff --git a/drivers/staging/wlags49_h2/wl_version.h b/drivers/staging/wlags49_h2/wl_version.h
index 037b526..bbc484a 100644
--- a/drivers/staging/wlags49_h2/wl_version.h
+++ b/drivers/staging/wlags49_h2/wl_version.h
@@ -115,42 +115,12 @@ err: define bus type;
#define DRV_VARIANT 2
#endif // HERMES25
-#ifdef BUS_PCMCIA
-#if defined HERMES25
-#define MODULE_NAME DRIVER_NAME "_h25_cs"
-#else
-#define MODULE_NAME DRIVER_NAME "_h2_cs"
-#endif /* HERMES25 */
-#elif defined BUS_PCI
-#if defined HERMES25
-#define MODULE_NAME DRIVER_NAME "_h25"
-#else
-#define MODULE_NAME DRIVER_NAME "_h2"
-#endif /* HERMES25 */
-#endif /* BUS_XXX */
-
-#ifdef DBG
-#define MODULE_DATE __DATE__ " " __TIME__
-#else
-#define MODULE_DATE "07/18/2004 13:30:00"
-#endif // DBG
-
-//#define STR2(m) #m
-//#define STR1(m) STR2(m)
-//#define MODULE_NAME STR1( MOD_NAME )
-
-#define VERSION_INFO MODULE_NAME " v" DRV_VERSION_STR \
- " for " BUS_TYPE ", " \
- MODULE_DATE " by " VENDOR_NAME
+#define VERSION_INFO KBUILD_MODNAME " v" DRV_VERSION_STR \
+ " for " BUS_TYPE ", by " VENDOR_NAME
/* The version of wireless extensions we support */
#define WIRELESS_SUPPORT 21
-//#define DBG_MOD_NAME DRIVER_NAME ":" BUS_TYPE ":" HW_TYPE ":" FW_TYPE
-#define DBG_MOD_NAME MODULE_NAME
-
-
-
/*******************************************************************************
* bus architecture specific defines, includes, etc.
******************************************************************************/
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index c731ff2..4a1ddaf5 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -76,14 +76,6 @@
#include <wl_wext.h>
#include <wl_priv.h>
-/*******************************************************************************
- * global definitions
- ******************************************************************************/
-#if DBG
-extern dbg_info_t *DbgInfo;
-#endif // DBG
-
-
/* Set up the LTV to program the appropriate key */
static int hermes_set_tkip_keys(ltv_t *ltv, u16 key_idx, u8 *addr,
int set_tx, u8 *seq, u8 *key, size_t key_len)
@@ -93,8 +85,6 @@ static int hermes_set_tkip_keys(ltv_t *ltv, u16 key_idx, u8 *addr,
hcf_8 tsc[IW_ENCODE_SEQ_MAX_SIZE] =
{ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00 };
- DBG_ENTER(DbgInfo);
-
/*
* Check the key index here; if 0, load as Pairwise Key, otherwise,
* load as a group key. Note that for the Hermes, the RIDs for
@@ -163,7 +153,6 @@ static int hermes_set_tkip_keys(ltv_t *ltv, u16 key_idx, u8 *addr,
break;
}
- DBG_LEAVE(DbgInfo);
return ret;
}
@@ -327,10 +316,6 @@ static int wireless_commit(struct net_device *dev,
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wireless_commit" );
- DBG_ENTER(DbgInfo);
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -348,7 +333,6 @@ static int wireless_commit(struct net_device *dev,
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_commit
/*============================================================================*/
@@ -376,16 +360,12 @@ out:
******************************************************************************/
static int wireless_get_protocol(struct net_device *dev, struct iw_request_info *info, char *name, char *extra)
{
- DBG_FUNC( "wireless_get_protocol" );
- DBG_ENTER( DbgInfo );
-
/* Originally, the driver was placing the string "Wireless" here. However,
the wireless extensions (/linux/wireless.h) indicate this string should
describe the wireless protocol. */
strcpy(name, "IEEE 802.11b");
- DBG_LEAVE(DbgInfo);
return 0;
} // wireless_get_protocol
/*============================================================================*/
@@ -418,11 +398,6 @@ static int wireless_set_frequency(struct net_device *dev, struct iw_request_info
unsigned long flags;
int channel = 0;
int ret = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_set_frequency" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -431,7 +406,6 @@ static int wireless_set_frequency(struct net_device *dev, struct iw_request_info
if( !capable( CAP_NET_ADMIN )) {
ret = -EPERM;
- DBG_LEAVE( DbgInfo );
return ret;
}
@@ -473,7 +447,6 @@ static int wireless_set_frequency(struct net_device *dev, struct iw_request_info
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_frequency
/*============================================================================*/
@@ -505,11 +478,6 @@ static int wireless_get_frequency(struct net_device *dev, struct iw_request_info
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = -1;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_frequency" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -538,7 +506,6 @@ static int wireless_get_frequency(struct net_device *dev, struct iw_request_info
ret = (ret == HCF_SUCCESS ? 0 : -EFAULT);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_frequency
/*============================================================================*/
@@ -576,11 +543,6 @@ static int wireless_get_range(struct net_device *dev, struct iw_request_info *in
int count;
__u16 *pTxRate;
int retries = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_range" );
- DBG_ENTER( DbgInfo );
/* Set range information */
data->length = sizeof(struct iw_range);
@@ -748,7 +710,6 @@ out_unlock:
wl_unlock(lp, &flags);
- DBG_LEAVE(DbgInfo);
return ret;
} // wireless_get_range
/*============================================================================*/
@@ -781,11 +742,6 @@ static int wireless_get_bssid(struct net_device *dev, struct iw_request_info *in
#if 1 //;? (HCF_TYPE) & HCF_TYPE_STA
int status = -1;
#endif /* (HCF_TYPE) & HCF_TYPE_STA */
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_bssid" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -828,7 +784,6 @@ static int wireless_get_bssid(struct net_device *dev, struct iw_request_info *in
wl_unlock(lp, &flags);
out:
- DBG_LEAVE(DbgInfo);
return ret;
} // wireless_get_bssid
/*============================================================================*/
@@ -874,10 +829,6 @@ static int wireless_get_ap_list (struct net_device *dev, struct iw_request_info
#else
ProbeResult *p = &lp->probe_results;
#endif // WARP
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wireless_get_ap_list" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -977,7 +928,6 @@ static int wireless_get_ap_list (struct net_device *dev, struct iw_request_info
}
}
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_ap_list
/*============================================================================*/
@@ -1010,11 +960,6 @@ static int wireless_set_sensitivity(struct net_device *dev, struct iw_request_in
unsigned long flags;
int ret = 0;
int dens = sens->value;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_set_sensitivity" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1038,7 +983,6 @@ static int wireless_set_sensitivity(struct net_device *dev, struct iw_request_in
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_sensitivity
/*============================================================================*/
@@ -1069,12 +1013,6 @@ static int wireless_get_sensitivity(struct net_device *dev, struct iw_request_in
{
struct wl_private *lp = wl_priv(dev);
int ret = 0;
- /*------------------------------------------------------------------------*/
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_sensitivity" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1085,7 +1023,6 @@ static int wireless_get_sensitivity(struct net_device *dev, struct iw_request_in
sens->value = lp->DistanceBetweenAPs;
sens->fixed = 0; /* auto */
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_sensitivity
/*============================================================================*/
@@ -1119,9 +1056,6 @@ static int wireless_set_essid(struct net_device *dev, struct iw_request_info *in
unsigned long flags;
int ret = 0;
- DBG_FUNC( "wireless_set_essid" );
- DBG_ENTER( DbgInfo );
-
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
goto out;
@@ -1165,7 +1099,6 @@ static int wireless_set_essid(struct net_device *dev, struct iw_request_info *in
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_essid
/*============================================================================*/
@@ -1201,11 +1134,6 @@ static int wireless_get_essid(struct net_device *dev, struct iw_request_info *in
int ret = 0;
int status = -1;
wvName_t *pName;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_essid" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1300,7 +1228,6 @@ out_unlock:
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_essid
/*============================================================================*/
@@ -1335,8 +1262,6 @@ static int wireless_set_encode(struct net_device *dev, struct iw_request_info *i
int ret = 0;
bool enable = true;
- DBG_ENTER(DbgInfo);
-
if (lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
goto out;
@@ -1361,7 +1286,6 @@ static int wireless_set_encode(struct net_device *dev, struct iw_request_info *i
wl_unlock(lp, &flags);
out:
- DBG_LEAVE(DbgInfo);
return ret;
}
@@ -1391,11 +1315,7 @@ static int wireless_get_encode(struct net_device *dev, struct iw_request_info *i
unsigned long flags;
int ret = 0;
int index;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wireless_get_encode" );
- DBG_ENTER( DbgInfo );
DBG_NOTICE(DbgInfo, "GIWENCODE: encrypt: %d, ID: %d\n", lp->EnableEncryption, lp->TransmitKeyID);
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
@@ -1406,7 +1326,6 @@ static int wireless_get_encode(struct net_device *dev, struct iw_request_info *i
/* Only super-user can see WEP key */
if( !capable( CAP_NET_ADMIN )) {
ret = -EPERM;
- DBG_LEAVE( DbgInfo );
return ret;
}
@@ -1450,7 +1369,6 @@ out_unlock:
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_encode
/*============================================================================*/
@@ -1482,11 +1400,6 @@ static int wireless_set_nickname(struct net_device *dev, struct iw_request_info
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_set_nickname" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1496,7 +1409,6 @@ static int wireless_set_nickname(struct net_device *dev, struct iw_request_info
#if 0 //;? Needed, was present in original code but not in 7.18 Linux 2.6 kernel version
if( !capable(CAP_NET_ADMIN )) {
ret = -EPERM;
- DBG_LEAVE( DbgInfo );
return ret;
}
#endif
@@ -1523,7 +1435,6 @@ static int wireless_set_nickname(struct net_device *dev, struct iw_request_info
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_nickname
/*============================================================================*/
@@ -1557,11 +1468,6 @@ static int wireless_get_nickname(struct net_device *dev, struct iw_request_info
int ret = 0;
int status = -1;
wvName_t *pName;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_nickname" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1600,7 +1506,6 @@ static int wireless_get_nickname(struct net_device *dev, struct iw_request_info
wl_unlock(lp, &flags);
out:
- DBG_LEAVE(DbgInfo);
return ret;
} // wireless_get_nickname
/*============================================================================*/
@@ -1634,10 +1539,6 @@ static int wireless_set_porttype(struct net_device *dev, struct iw_request_info
int ret = 0;
hcf_16 portType;
hcf_16 createIBSS;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wireless_set_porttype" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1714,7 +1615,6 @@ static int wireless_set_porttype(struct net_device *dev, struct iw_request_info
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_porttype
/*============================================================================*/
@@ -1749,11 +1649,6 @@ static int wireless_get_porttype(struct net_device *dev, struct iw_request_info
int ret = 0;
int status = -1;
hcf_16 *pPortType;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_porttype" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1824,7 +1719,6 @@ static int wireless_get_porttype(struct net_device *dev, struct iw_request_info
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_porttype
/*============================================================================*/
@@ -1856,11 +1750,6 @@ static int wireless_set_power(struct net_device *dev, struct iw_request_info *in
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_set_power" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1872,8 +1761,6 @@ static int wireless_set_power(struct net_device *dev, struct iw_request_info *in
#if 0 //;? Needed, was present in original code but not in 7.18 Linux 2.6 kernel version
if( !capable( CAP_NET_ADMIN )) {
ret = -EPERM;
-
- DBG_LEAVE( DbgInfo );
return ret;
}
#endif
@@ -1897,7 +1784,6 @@ static int wireless_set_power(struct net_device *dev, struct iw_request_info *in
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_power
/*============================================================================*/
@@ -1930,9 +1816,6 @@ static int wireless_get_power(struct net_device *dev, struct iw_request_info *in
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wireless_get_power" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1959,7 +1842,6 @@ static int wireless_get_power(struct net_device *dev, struct iw_request_info *in
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_power
/*============================================================================*/
@@ -1991,9 +1873,6 @@ static int wireless_get_tx_power(struct net_device *dev, struct iw_request_info
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wireless_get_tx_power" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -2019,7 +1898,6 @@ static int wireless_get_tx_power(struct net_device *dev, struct iw_request_info
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_tx_power
/*============================================================================*/
@@ -2052,11 +1930,6 @@ static int wireless_set_rts_threshold (struct net_device *dev, struct iw_request
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int rthr = rts->value;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_set_rts_threshold" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -2090,7 +1963,6 @@ static int wireless_set_rts_threshold (struct net_device *dev, struct iw_request
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_rts_threshold
/*============================================================================*/
@@ -2122,10 +1994,6 @@ static int wireless_get_rts_threshold (struct net_device *dev, struct iw_request
int ret = 0;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wireless_get_rts_threshold" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -2147,7 +2015,6 @@ static int wireless_get_rts_threshold (struct net_device *dev, struct iw_request
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_rts_threshold
/*============================================================================*/
@@ -2184,11 +2051,6 @@ static int wireless_set_rate(struct net_device *dev, struct iw_request_info *inf
int status = -1;
int index = 0;
#endif // WARP
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_set_rate" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -2214,7 +2076,6 @@ static int wireless_set_rate(struct net_device *dev, struct iw_request_info *inf
DBG_PRINT( "Index: %d\n", index );
} else {
DBG_ERROR( DbgInfo, "Could not determine radio frequency\n" );
- DBG_LEAVE( DbgInfo );
ret = -EINVAL;
goto out_unlock;
}
@@ -2375,7 +2236,6 @@ out_unlock:
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_rate
/*============================================================================*/
@@ -2410,11 +2270,6 @@ static int wireless_get_rate(struct net_device *dev, struct iw_request_info *inf
int ret = 0;
int status = -1;
hcf_16 txRate;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_rate" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -2490,7 +2345,6 @@ static int wireless_get_rate(struct net_device *dev, struct iw_request_info *inf
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_rate
/*============================================================================*/
@@ -2522,11 +2376,6 @@ out:
int wireless_get_private_interface( struct iwreq *wrq, struct wl_private *lp )
{
int ret = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_private_interface" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -2547,10 +2396,8 @@ int wireless_get_private_interface( struct iwreq *wrq, struct wl_private *lp )
/* Verify the user buffer */
ret = verify_area( VERIFY_WRITE, wrq->u.data.pointer, sizeof( priv ));
- if( ret != 0 ) {
- DBG_LEAVE( DbgInfo );
+ if( ret != 0 )
return ret;
- }
/* Copy the data into the user's buffer */
wrq->u.data.length = NELEM( priv );
@@ -2558,7 +2405,6 @@ int wireless_get_private_interface( struct iwreq *wrq, struct wl_private *lp )
}
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_private_interface
/*============================================================================*/
@@ -2592,13 +2438,9 @@ static int wireless_set_scan(struct net_device *dev, struct iw_request_info *inf
int ret = 0;
int status = -1;
int retries = 0;
- /*------------------------------------------------------------------------*/
//;? Note: shows results as trace, returns always 0 unless BUSY
- DBG_FUNC( "wireless_set_scan" );
- DBG_ENTER( DbgInfo );
-
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
goto out;
@@ -2694,7 +2536,6 @@ retry:
wl_unlock(lp, &flags);
out:
- DBG_LEAVE(DbgInfo);
return ret;
} // wireless_set_scan
/*============================================================================*/
@@ -2734,11 +2575,6 @@ static int wireless_get_scan(struct net_device *dev, struct iw_request_info *inf
hcf_8 msg[512];
hcf_8 *wpa_ie;
hcf_16 wpa_ie_len;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_scan" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -2888,7 +2724,6 @@ out_unlock:
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_scan
/*============================================================================*/
@@ -2923,9 +2758,6 @@ static int wireless_set_auth(struct net_device *dev,
int iwa_idx = data->flags & IW_AUTH_INDEX;
int iwa_val = data->value;
- DBG_FUNC( "wireless_set_auth" );
- DBG_ENTER( DbgInfo );
-
if (lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
goto out;
@@ -3038,7 +2870,6 @@ static int wireless_set_auth(struct net_device *dev,
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_auth
/*============================================================================*/
@@ -3087,8 +2918,6 @@ static int wireless_set_encodeext(struct net_device *dev,
bool enable = true;
bool set_tx = false;
- DBG_ENTER(DbgInfo);
-
if (lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
goto out;
@@ -3114,7 +2943,6 @@ static int wireless_set_encodeext(struct net_device *dev,
if (sizeof(ext->rx_seq) != 8) {
DBG_TRACE(DbgInfo, "rx_seq size mismatch\n");
- DBG_LEAVE(DbgInfo);
ret = -EINVAL;
goto out_unlock;
}
@@ -3188,7 +3016,6 @@ out_unlock:
wl_unlock(lp, &flags);
out:
- DBG_LEAVE(DbgInfo);
return ret;
}
/*============================================================================*/
@@ -3202,13 +3029,10 @@ static int wireless_set_genie(struct net_device *dev,
{
int ret = 0;
- DBG_ENTER(DbgInfo);
-
/* We can't write this to the card, but apparently this
* operation needs to succeed */
ret = 0;
- DBG_LEAVE(DbgInfo);
return ret;
}
/*============================================================================*/
@@ -3237,11 +3061,7 @@ struct iw_statistics * wl_wireless_stats( struct net_device *dev )
{
struct iw_statistics *pStats;
struct wl_private *lp = wl_priv(dev);
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_wireless_stats" );
- DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
pStats = NULL;
@@ -3302,7 +3122,6 @@ struct iw_statistics * wl_wireless_stats( struct net_device *dev )
}
}
- DBG_LEAVE( DbgInfo );
return pStats;
} // wl_wireless_stats
/*============================================================================*/
@@ -3336,10 +3155,6 @@ struct iw_statistics * wl_get_wireless_stats( struct net_device *dev )
unsigned long flags;
struct wl_private *lp = wl_priv(dev);
struct iw_statistics *pStats = NULL;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_get_wireless_stats" );
- DBG_ENTER(DbgInfo);
wl_lock( lp, &flags );
@@ -3357,7 +3172,6 @@ struct iw_statistics * wl_get_wireless_stats( struct net_device *dev )
wl_unlock(lp, &flags);
- DBG_LEAVE( DbgInfo );
return pStats;
} // wl_get_wireless_stats
diff --git a/drivers/staging/wlags49_h2/wl_wext.h b/drivers/staging/wlags49_h2/wl_wext.h
index 029da52..4a85dc88 100644
--- a/drivers/staging/wlags49_h2/wl_wext.h
+++ b/drivers/staging/wlags49_h2/wl_wext.h
@@ -85,4 +85,4 @@ void wl_wext_event_assoc_ie( struct net_device *dev );
extern const struct iw_handler_def wl_iw_handler_def;
-#endif // __WL_WEXT_H__
+#endif /* __WL_WEXT_H__ */
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index a4fd5c4..a7d24c9 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -73,7 +73,8 @@ static int prism2_result2err(int prism2_result)
static int prism2_domibset_uint32(wlandevice_t *wlandev, u32 did, u32 data)
{
struct p80211msg_dot11req_mibset msg;
- p80211item_uint32_t *mibitem = (p80211item_uint32_t *) &msg.mibattribute.data;
+ p80211item_uint32_t *mibitem =
+ (p80211item_uint32_t *) &msg.mibattribute.data;
msg.msgcode = DIDmsg_dot11req_mibset;
mibitem->did = did;
@@ -86,7 +87,8 @@ static int prism2_domibset_pstr32(wlandevice_t *wlandev,
u32 did, u8 len, u8 *data)
{
struct p80211msg_dot11req_mibset msg;
- p80211item_pstr32_t *mibitem = (p80211item_pstr32_t *) &msg.mibattribute.data;
+ p80211item_pstr32_t *mibitem =
+ (p80211item_pstr32_t *) &msg.mibattribute.data;
msg.msgcode = DIDmsg_dot11req_mibset;
mibitem->did = did;
@@ -182,7 +184,8 @@ static int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
goto exit;
}
- result = prism2_domibset_pstr32(wlandev, did, params->key_len, params->key);
+ result = prism2_domibset_pstr32(wlandev, did,
+ params->key_len, params->key);
if (result)
goto exit;
break;
@@ -328,7 +331,8 @@ static int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
return result;
}
-static int prism2_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+static int prism2_scan(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request)
{
struct net_device *dev;
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
@@ -380,7 +384,8 @@ static int prism2_scan(struct wiphy *wiphy, struct cfg80211_scan_request *reques
(i < request->n_channels) && i < ARRAY_SIZE(prism2_channels);
i++)
msg1.channellist.data.data[i] =
- ieee80211_frequency_to_channel(request->channels[i]->center_freq);
+ ieee80211_frequency_to_channel(
+ request->channels[i]->center_freq);
msg1.channellist.data.len = request->n_channels;
msg1.maxchanneltime.data = 250;
@@ -410,7 +415,8 @@ static int prism2_scan(struct wiphy *wiphy, struct cfg80211_scan_request *reques
ie_len = ie_buf[1] + 2;
memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len);
bss = cfg80211_inform_bss(wiphy,
- ieee80211_get_channel(wiphy, ieee80211_dsss_chan_to_freq(msg2.dschannel.data)),
+ ieee80211_get_channel(wiphy,
+ ieee80211_dsss_chan_to_freq(msg2.dschannel.data)),
(const u8 *) &(msg2.bssid.data.data),
msg2.timestamp.data, msg2.capinfo.data,
msg2.beaconperiod.data,
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 3dfa85c..333a2f6 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -350,10 +350,10 @@ PD Record codes
/*-------------------------------------------------------------*/
/* Commonly used basic types */
-typedef struct hfa384x_bytestr {
+struct hfa384x_bytestr {
u16 len;
u8 data[0];
-} __packed hfa384x_bytestr_t;
+} __packed;
typedef struct hfa384x_bytestr32 {
u16 len;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index d22db43..a9909f6 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -525,7 +525,7 @@ int prism2mgmt_start(wlandevice_t *wlandev, void *msgp)
p80211pstrd_t *pstr;
u8 bytebuf[80];
- hfa384x_bytestr_t *p2bytestr = (hfa384x_bytestr_t *) bytebuf;
+ struct hfa384x_bytestr *p2bytestr = (struct hfa384x_bytestr *) bytebuf;
u16 word;
wlandev->macmode = WLAN_MACMODE_NONE;
@@ -1019,7 +1019,7 @@ int prism2mgmt_autojoin(wlandevice_t *wlandev, void *msgp)
struct p80211msg_lnxreq_autojoin *msg = msgp;
p80211pstrd_t *pstr;
u8 bytebuf[256];
- hfa384x_bytestr_t *p2bytestr = (hfa384x_bytestr_t *) bytebuf;
+ struct hfa384x_bytestr *p2bytestr = (struct hfa384x_bytestr *) bytebuf;
wlandev->macmode = WLAN_MACMODE_NONE;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index 07eeceb..190d390 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -92,8 +92,10 @@ void prism2mgmt_pstr2bytearea(u8 *bytearea, p80211pstrd_t *pstr);
void prism2mgmt_bytearea2pstr(u8 *bytearea, p80211pstrd_t *pstr, int len);
/* byte string conversion functions*/
-void prism2mgmt_pstr2bytestr(hfa384x_bytestr_t *bytestr, p80211pstrd_t *pstr);
-void prism2mgmt_bytestr2pstr(hfa384x_bytestr_t *bytestr, p80211pstrd_t *pstr);
+void prism2mgmt_pstr2bytestr(struct hfa384x_bytestr *bytestr,
+ p80211pstrd_t *pstr);
+void prism2mgmt_bytestr2pstr(struct hfa384x_bytestr *bytestr,
+ p80211pstrd_t *pstr);
/* functions to convert Group Addresses */
void prism2mgmt_get_grpaddr(u32 did, p80211pstrd_t *pstr, hfa384x_t *priv);
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index d3a06fa..9b5f3b7 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -763,7 +763,8 @@ static int prism2mib_priv(struct mibrec *mib,
*
----------------------------------------------------------------*/
-void prism2mgmt_pstr2bytestr(hfa384x_bytestr_t *bytestr, p80211pstrd_t *pstr)
+void prism2mgmt_pstr2bytestr(struct hfa384x_bytestr *bytestr,
+ p80211pstrd_t *pstr)
{
bytestr->len = cpu_to_le16((u16) (pstr->len));
memcpy(bytestr->data, pstr->data, pstr->len);
@@ -804,7 +805,8 @@ void prism2mgmt_pstr2bytearea(u8 *bytearea, p80211pstrd_t *pstr)
*
----------------------------------------------------------------*/
-void prism2mgmt_bytestr2pstr(hfa384x_bytestr_t *bytestr, p80211pstrd_t *pstr)
+void prism2mgmt_bytestr2pstr(struct hfa384x_bytestr *bytestr,
+ p80211pstrd_t *pstr)
{
pstr->len = (u8) (le16_to_cpu((u16) (bytestr->len)));
memcpy(pstr->data, bytestr->data, pstr->len);
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 76374b2..2199f5a 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -55,7 +55,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wireless.h>
#include <linux/netdevice.h>
@@ -1279,7 +1278,7 @@ void prism2sta_processing_defer(struct work_struct *data)
HFA384x_RID_CURRENTSSID, result);
return;
}
- prism2mgmt_bytestr2pstr((hfa384x_bytestr_t *) &ssid,
+ prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *) &ssid,
(p80211pstrd_t *) &
wlandev->ssid);
@@ -1361,7 +1360,7 @@ void prism2sta_processing_defer(struct work_struct *data)
HFA384x_RID_CURRENTSSID, result);
return;
}
- prism2mgmt_bytestr2pstr((hfa384x_bytestr_t *) &ssid,
+ prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *) &ssid,
(p80211pstrd_t *) &wlandev->ssid);
hw->link_status = HFA384x_LINK_CONNECTED;
@@ -2037,7 +2036,7 @@ void prism2sta_commsqual_defer(struct work_struct *data)
HFA384x_RID_CURRENTSSID, result);
return;
}
- prism2mgmt_bytestr2pstr((hfa384x_bytestr_t *) &ssid,
+ prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *) &ssid,
(p80211pstrd_t *) &wlandev->ssid);
/* Reschedule timer */
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index b401974..4739c14 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -140,11 +140,9 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
prism2_reset_holdtime,
prism2_reset_settletime, 0);
if (result != 0) {
- unregister_wlandev(wlandev);
- hfa384x_destroy(hw);
result = -EIO;
dev_err(&interface->dev, "hfa384x_corereset() failed.\n");
- goto failed;
+ goto failed_reset;
}
}
@@ -159,11 +157,15 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
if (register_wlandev(wlandev) != 0) {
dev_err(&interface->dev, "register_wlandev() failed.\n");
result = -EIO;
- goto failed;
+ goto failed_register;
}
goto done;
+failed_register:
+ usb_put_dev(dev);
+failed_reset:
+ wlan_unsetup(wlandev);
failed:
kfree(wlandev);
kfree(hw);
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index c033da4..95ce970 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -11,7 +11,7 @@
#define PCI_DEVICE_ID_XGI_27 0x027
#endif
-static DEFINE_PCI_DEVICE_TABLE(xgifb_pci_table) = {
+static const struct pci_device_id xgifb_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_20)},
{PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_27)},
{PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_40)},
diff --git a/drivers/staging/xillybus/Kconfig b/drivers/staging/xillybus/Kconfig
index b15f778..75c38c8 100644
--- a/drivers/staging/xillybus/Kconfig
+++ b/drivers/staging/xillybus/Kconfig
@@ -4,7 +4,7 @@
config XILLYBUS
tristate "Xillybus generic FPGA interface"
- depends on PCI || (OF_ADDRESS && OF_IRQ) && m
+ depends on PCI || (OF_ADDRESS && OF_IRQ)
help
Xillybus is a generic interface for peripherals designed on
programmable logic (FPGA). The driver probes the hardware for
diff --git a/drivers/staging/xillybus/xillybus_of.c b/drivers/staging/xillybus/xillybus_of.c
index 394bfea..23a609b 100644
--- a/drivers/staging/xillybus/xillybus_of.c
+++ b/drivers/staging/xillybus/xillybus_of.c
@@ -31,7 +31,8 @@ static const char xillyname[] = "xillybus_of";
/* Match table for of_platform binding */
static struct of_device_id xillybus_of_match[] = {
- { .compatible = "xlnx,xillybus-1.00.a", },
+ { .compatible = "xillybus,xillybus-1.00.a", },
+ { .compatible = "xlnx,xillybus-1.00.a", }, /* Deprecated */
{}
};
@@ -53,6 +54,13 @@ static void xilly_dma_sync_single_for_device_of(struct xilly_endpoint *ep,
dma_sync_single_for_device(ep->dev, dma_handle, size, direction);
}
+static void xilly_dma_sync_single_nop(struct xilly_endpoint *ep,
+ dma_addr_t dma_handle,
+ size_t size,
+ int direction)
+{
+}
+
static dma_addr_t xilly_map_single_of(struct xilly_cleanup *mem,
struct xilly_endpoint *ep,
void *ptr,
@@ -101,14 +109,26 @@ static struct xilly_endpoint_hardware of_hw = {
.unmap_single = xilly_unmap_single_of
};
+static struct xilly_endpoint_hardware of_hw_coherent = {
+ .owner = THIS_MODULE,
+ .hw_sync_sgl_for_cpu = xilly_dma_sync_single_nop,
+ .hw_sync_sgl_for_device = xilly_dma_sync_single_nop,
+ .map_single = xilly_map_single_of,
+ .unmap_single = xilly_unmap_single_of
+};
+
static int xilly_drv_probe(struct platform_device *op)
{
struct device *dev = &op->dev;
struct xilly_endpoint *endpoint;
int rc = 0;
int irq;
+ struct xilly_endpoint_hardware *ephw = &of_hw;
- endpoint = xillybus_init_endpoint(NULL, dev, &of_hw);
+ if (of_property_read_bool(dev->of_node, "dma-coherent"))
+ ephw = &of_hw_coherent;
+
+ endpoint = xillybus_init_endpoint(NULL, dev, ephw);
if (!endpoint)
return -ENOMEM;
@@ -131,10 +151,10 @@ static int xilly_drv_probe(struct platform_device *op)
}
endpoint->registers = of_iomap(dev->of_node, 0);
-
if (!endpoint->registers) {
dev_err(endpoint->dev,
"Failed to map I/O memory. Aborting.\n");
+ rc = -EIO;
goto failed_iomap0;
}
diff --git a/drivers/staging/xillybus/xillybus_pcie.c b/drivers/staging/xillybus/xillybus_pcie.c
index 1811aa7..51426d8 100644
--- a/drivers/staging/xillybus/xillybus_pcie.c
+++ b/drivers/staging/xillybus/xillybus_pcie.c
@@ -30,7 +30,7 @@ MODULE_LICENSE("GPL v2");
static const char xillyname[] = "xillybus_pcie";
-static DEFINE_PCI_DEVICE_TABLE(xillyids) = {
+static const struct pci_device_id xillyids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_XILLYBUS)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTERA, PCI_DEVICE_ID_XILLYBUS)},
{PCI_DEVICE(PCI_VENDOR_ID_ACTEL, PCI_DEVICE_ID_XILLYBUS)},
@@ -168,9 +168,9 @@ static int xilly_probe(struct pci_dev *pdev,
}
endpoint->registers = pci_iomap(pdev, 0, 128);
-
if (!endpoint->registers) {
dev_err(endpoint->dev, "Failed to map BAR 0. Aborting.\n");
+ rc = -EIO;
goto failed_iomap0;
}
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
index 0ae13cd0..9d1f2a2 100644
--- a/drivers/staging/zsmalloc/Kconfig
+++ b/drivers/staging/zsmalloc/Kconfig
@@ -9,3 +9,16 @@ config ZSMALLOC
non-standard allocator interface where a handle, not a pointer, is
returned by an alloc(). This handle must be mapped in order to
access the allocated space.
+
+config PGTABLE_MAPPING
+ bool "Use page table mapping to access object in zsmalloc"
+ depends on ZSMALLOC
+ help
+ By default, zsmalloc uses a copy-based object mapping method to
+ access allocations that span two pages. However, if a particular
+ architecture (e.g., ARM) performs VM mapping faster than copying,
+ then you should select this. This causes zsmalloc to use page table
+ mapping rather than copying for object mapping.
+
+ You can check speed with zsmalloc benchmark[1].
+ [1] https://github.com/spartacus06/zsmalloc
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 3b950e5..7660c87 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -10,16 +10,14 @@
* Released under the terms of GNU General Public License Version 2.0
*/
-
/*
- * This allocator is designed for use with zcache and zram. Thus, the
- * allocator is supposed to work well under low memory conditions. In
- * particular, it never attempts higher order page allocation which is
- * very likely to fail under memory pressure. On the other hand, if we
- * just use single (0-order) pages, it would suffer from very high
- * fragmentation -- any object of size PAGE_SIZE/2 or larger would occupy
- * an entire page. This was one of the major issues with its predecessor
- * (xvmalloc).
+ * This allocator is designed for use with zram. Thus, the allocator is
+ * supposed to work well under low memory conditions. In particular, it
+ * never attempts higher order page allocation which is very likely to
+ * fail under memory pressure. On the other hand, if we just use single
+ * (0-order) pages, it would suffer from very high fragmentation --
+ * any object of size PAGE_SIZE/2 or larger would occupy an entire page.
+ * This was one of the major issues with its predecessor (xvmalloc).
*
* To overcome these issues, zsmalloc allocates a bunch of 0-order pages
* and links them together using various 'struct page' fields. These linked
@@ -27,6 +25,21 @@
* page boundaries. The code refers to these linked pages as a single entity
* called zspage.
*
+ * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
+ * since this satisfies the requirements of all its current users (in the
+ * worst case, page is incompressible and is thus stored "as-is" i.e. in
+ * uncompressed form). For allocation requests larger than this size, failure
+ * is returned (see zs_malloc).
+ *
+ * Additionally, zs_malloc() does not return a dereferenceable pointer.
+ * Instead, it returns an opaque handle (unsigned long) which encodes actual
+ * location of the allocated object. The reason for this indirection is that
+ * zsmalloc does not keep zspages permanently mapped since that would cause
+ * issues on 32-bit systems where the VA region for kernel space mappings
+ * is very small. So, before using the allocated memory, the object has to
+ * be mapped using zs_map_object() to get a usable pointer and subsequently
+ * unmapped using zs_unmap_object().
+ *
* Following is how we use various fields and flags of underlying
* struct page(s) to form a zspage.
*
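To make the handle-based API described in the comment above concrete, here is a short, illustrative kernel-style sketch (not part of this patch; the helper name zs_store and its error handling are assumptions). It shows that the value returned by zs_malloc() is never dereferenced directly, only handed back to zs_map_object()/zs_unmap_object():

/* Illustrative sketch only -- not from this patch. */
#include <linux/string.h>
#include "zsmalloc.h"	/* drivers/staging/zsmalloc/zsmalloc.h in this tree */

static unsigned long zs_store(struct zs_pool *pool, const void *src, size_t len)
{
	unsigned long handle;
	void *dst;

	handle = zs_malloc(pool, len);	/* opaque handle, 0 on failure */
	if (!handle)
		return 0;

	dst = zs_map_object(pool, handle, ZS_MM_WO);	/* new object: write-only */
	memcpy(dst, src, len);
	zs_unmap_object(pool, handle);	/* keep mappings short-lived */

	return handle;	/* caller stores this and later passes it to zs_free() */
}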
@@ -67,7 +80,6 @@
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
-#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
@@ -98,7 +110,7 @@
/*
* Object location (<PFN>, <obj_idx>) is encoded as
- * as single (void *) handle value.
+ * as single (unsigned long) handle value.
*
* Note that object index <obj_idx> is relative to system
* page <PFN> it is stored in, so for each sub-page belonging
@@ -218,19 +230,8 @@ struct zs_pool {
#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1)
-/*
- * By default, zsmalloc uses a copy-based object mapping method to access
- * allocations that span two pages. However, if a particular architecture
- * performs VM mapping faster than copying, then it should be added here
- * so that USE_PGTABLE_MAPPING is defined. This causes zsmalloc to use
- * page table mapping rather than copying for object mapping.
- */
-#if defined(CONFIG_ARM) && !defined(MODULE)
-#define USE_PGTABLE_MAPPING
-#endif
-
struct mapping_area {
-#ifdef USE_PGTABLE_MAPPING
+#ifdef CONFIG_PGTABLE_MAPPING
struct vm_struct *vm; /* vm area for mapping object that span pages */
#else
char *vm_buf; /* copy buffer for objects that span pages */
@@ -275,6 +276,13 @@ static void set_zspage_mapping(struct page *page, unsigned int class_idx,
page->mapping = (struct address_space *)m;
}
+/*
+ * zsmalloc divides the pool into various size classes where each
+ * class maintains a list of zspages where each zspage is divided
+ * into equal sized chunks. Each allocation falls into one of these
+ * classes depending on its size. This function returns the index of the
+ * size class whose chunk size is big enough to hold the given size.
+ */
static int get_size_class_index(int size)
{
int idx = 0;
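As a worked example of the size-class comment above: with a 32-byte minimum allocation size and a 16-byte class delta (the values this allocator used on 4 KiB pages at the time, quoted here only for illustration), the index is a rounded-up division, so a 100-byte request lands in the class whose chunk size is 112 bytes. A standalone sketch of the arithmetic (hypothetical EX_* constants, not the driver's own):

/* Illustrative arithmetic only; the allocator's own constants are authoritative. */
#define EX_MIN_ALLOC_SIZE	32	/* assumed ZS_MIN_ALLOC_SIZE */
#define EX_SIZE_CLASS_DELTA	16	/* assumed ZS_SIZE_CLASS_DELTA */

static int ex_size_class_index(int size)
{
	if (size <= EX_MIN_ALLOC_SIZE)
		return 0;
	/* size == 100: (100 - 32 + 15) / 16 == 5, chunk size 32 + 5 * 16 == 112 */
	return (size - EX_MIN_ALLOC_SIZE + EX_SIZE_CLASS_DELTA - 1) /
		EX_SIZE_CLASS_DELTA;
}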
@@ -286,6 +294,13 @@ static int get_size_class_index(int size)
return idx;
}
+/*
+ * For each size class, zspages are divided into different groups
+ * depending on how "full" they are. This was done so that we could
+ * easily find empty or nearly empty zspages when we try to shrink
+ * the pool (not yet implemented). This function returns the fullness
+ * status of the given page.
+ */
static enum fullness_group get_fullness_group(struct page *page)
{
int inuse, max_objects;
@@ -307,6 +322,12 @@ static enum fullness_group get_fullness_group(struct page *page)
return fg;
}
+/*
+ * Each size class maintains various freelists and zspages are assigned
+ * to one of these freelists based on the number of live objects they
+ * have. This function inserts the given zspage into the freelist
+ * identified by <class, fullness_group>.
+ */
static void insert_zspage(struct page *page, struct size_class *class,
enum fullness_group fullness)
{
@@ -324,6 +345,10 @@ static void insert_zspage(struct page *page, struct size_class *class,
*head = page;
}
+/*
+ * This function removes the given zspage from the freelist identified
+ * by <class, fullness_group>.
+ */
static void remove_zspage(struct page *page, struct size_class *class,
enum fullness_group fullness)
{
@@ -345,6 +370,15 @@ static void remove_zspage(struct page *page, struct size_class *class,
list_del_init(&page->lru);
}
+/*
+ * Each size class maintains zspages in different fullness groups depending
+ * on the number of live objects they contain. When allocating or freeing
+ * objects, the fullness status of the page can change, say, from ALMOST_FULL
+ * to ALMOST_EMPTY when freeing an object. This function checks if such
+ * a status change has occurred for the given page and accordingly moves the
+ * page from the freelist of the old fullness group to that of the new
+ * fullness group.
+ */
static enum fullness_group fix_fullness_group(struct zs_pool *pool,
struct page *page)
{
@@ -631,7 +665,7 @@ static struct page *find_get_zspage(struct size_class *class)
return page;
}
-#ifdef USE_PGTABLE_MAPPING
+#ifdef CONFIG_PGTABLE_MAPPING
static inline int __zs_cpu_up(struct mapping_area *area)
{
/*
@@ -669,7 +703,7 @@ static inline void __zs_unmap_object(struct mapping_area *area,
unmap_kernel_range(addr, PAGE_SIZE * 2);
}
-#else /* USE_PGTABLE_MAPPING */
+#else /* CONFIG_PGTABLE_MAPPING */
static inline int __zs_cpu_up(struct mapping_area *area)
{
@@ -747,7 +781,7 @@ out:
pagefault_enable();
}
-#endif /* USE_PGTABLE_MAPPING */
+#endif /* CONFIG_PGTABLE_MAPPING */
static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
void *pcpu)
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/drivers/staging/zsmalloc/zsmalloc.h
index fbe6bec..c2eb174 100644
--- a/drivers/staging/zsmalloc/zsmalloc.h
+++ b/drivers/staging/zsmalloc/zsmalloc.h
@@ -18,12 +18,19 @@
/*
* zsmalloc mapping modes
*
- * NOTE: These only make a difference when a mapped object spans pages
+ * NOTE: These only make a difference when a mapped object spans pages.
+ * They also have no effect when PGTABLE_MAPPING is selected.
*/
enum zs_mapmode {
ZS_MM_RW, /* normal read-write mapping */
ZS_MM_RO, /* read-only (no copy-out at unmap time) */
ZS_MM_WO /* write-only (no copy-in at map time) */
+ /*
+ * NOTE: ZS_MM_WO should only be used for initializing new
+ * (uninitialized) allocations. Partial writes to already
+ * initialized allocations should use ZS_MM_RW to preserve the
+ * existing data.
+ */
};
struct zs_pool;
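The note added to zs_mapmode above has a practical consequence for callers that modify an object in place: a partial update must map with ZS_MM_RW, because the write-only mode may skip copying the existing contents in when the object spans two pages. A minimal, hypothetical sketch (zs_patch is not a real helper):

/* Illustrative sketch only: patch a few bytes inside an existing allocation. */
static void zs_patch(struct zs_pool *pool, unsigned long handle,
		     size_t off, const void *src, size_t len)
{
	/*
	 * ZS_MM_RW copies the current contents in at map time and back out
	 * at unmap time, so the bytes we do not touch are preserved even
	 * when the object straddles a page boundary.
	 */
	void *obj = zs_map_object(pool, handle, ZS_MM_RW);

	memcpy((char *)obj + off, src, len);
	zs_unmap_object(pool, handle);
}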
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 8f181b3..d833c8f 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -438,14 +438,12 @@ static int clamp_thread(void *arg)
*/
local_touch_nmi();
stop_critical_timings();
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- cpu_relax(); /* allow HT sibling to run */
- __mwait(eax, ecx);
+ mwait_idle_with_hints(eax, ecx);
start_critical_timings();
atomic_inc(&idle_wakeup_counter);
}
tick_nohz_idle_exit();
- preempt_enable_no_resched();
+ preempt_enable();
}
del_timer_sync(&wakeup_timer);
clear_bit(cpunr, cpu_clamping_mask);
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 71630a2..979e7c3 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1248,6 +1248,8 @@ static int rs_ioctl(struct tty_struct *tty,
struct async_icount cprev, cnow; /* kernel counter temps */
void __user *argp = (void __user *)arg;
unsigned long flags;
+ DEFINE_WAIT(wait);
+ int ret;
if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
return -ENODEV;
@@ -1288,25 +1290,33 @@ static int rs_ioctl(struct tty_struct *tty,
cprev = info->icount;
local_irq_restore(flags);
while (1) {
- interruptible_sleep_on(&info->tport.delta_msr_wait);
- /* see if a signal did it */
- if (signal_pending(current))
- return -ERESTARTSYS;
+ prepare_to_wait(&info->tport.delta_msr_wait,
+ &wait, TASK_INTERRUPTIBLE);
local_irq_save(flags);
cnow = info->icount; /* atomic copy */
local_irq_restore(flags);
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
- cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
- return -EIO; /* no change => error */
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
+ ret = -EIO; /* no change => error */
+ break;
+ }
if ( ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
((arg & TIOCM_CTS) && (cnow.cts != cprev.cts)) ) {
- return 0;
+ ret = 0;
+ break;
+ }
+ schedule();
+ /* see if a signal did it */
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
}
cprev = cnow;
}
- /* NOTREACHED */
+ finish_wait(&info->tport.delta_msr_wait, &wait);
+ return ret;
case TIOCSERGWILD:
case TIOCSERSWILD:
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 33f83fe..a57bb5a 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -2709,6 +2709,8 @@ cy_ioctl(struct tty_struct *tty,
break;
#ifndef CONFIG_CYZ_INTR
case CYZSETPOLLCYCLE:
+ if (arg > LONG_MAX / HZ)
+ return -ENODEV;
cyz_polling_cycle = (arg * HZ) / 1000;
break;
case CYZGETPOLLCYCLE:
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index f17d2e4..75dc9d2 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -14,7 +14,6 @@
*/
#include <linux/console.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 9eba119..50b4688 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -788,7 +788,7 @@ static int hvc_tiocmset(struct tty_struct *tty,
}
#ifdef CONFIG_CONSOLE_POLL
-int hvc_poll_init(struct tty_driver *driver, int line, char *options)
+static int hvc_poll_init(struct tty_driver *driver, int line, char *options)
{
return 0;
}
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index 347050e..7ae6c29 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -1,5 +1,4 @@
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/console.h>
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index 8fd72ff..ebd5bff 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -15,7 +15,6 @@
* Copyright (C) 2007 David Sterba
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index c0f76da..f34461c 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -194,6 +194,7 @@ struct gsm_control {
struct gsm_mux {
struct tty_struct *tty; /* The tty our ldisc is bound to */
spinlock_t lock;
+ struct mutex mutex;
unsigned int num;
struct kref ref;
@@ -1704,11 +1705,8 @@ static void gsm_dlci_release(struct gsm_dlci *dlci)
gsm_destroy_network(dlci);
mutex_unlock(&dlci->mutex);
- /* tty_vhangup needs the tty_lock, so unlock and
- relock after doing the hangup. */
- tty_unlock(tty);
tty_vhangup(tty);
- tty_lock(tty);
+
tty_port_tty_set(&dlci->port, NULL);
tty_kref_put(tty);
}
@@ -2019,7 +2017,7 @@ static void gsm_error(struct gsm_mux *gsm,
* and then shut down each device hanging up the channels as we go.
*/
-void gsm_cleanup_mux(struct gsm_mux *gsm)
+static void gsm_cleanup_mux(struct gsm_mux *gsm)
{
int i;
struct gsm_dlci *dlci = gsm->dlci[0];
@@ -2054,15 +2052,16 @@ void gsm_cleanup_mux(struct gsm_mux *gsm)
dlci->state == DLCI_CLOSED);
}
/* Free up any link layer users */
+ mutex_lock(&gsm->mutex);
for (i = 0; i < NUM_DLCI; i++)
if (gsm->dlci[i])
gsm_dlci_release(gsm->dlci[i]);
+ mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list)
kfree(txq);
INIT_LIST_HEAD(&gsm->tx_list);
}
-EXPORT_SYMBOL_GPL(gsm_cleanup_mux);
/**
* gsm_activate_mux - generic GSM setup
@@ -2073,7 +2072,7 @@ EXPORT_SYMBOL_GPL(gsm_cleanup_mux);
* finally kick off connecting to DLCI 0 on the modem.
*/
-int gsm_activate_mux(struct gsm_mux *gsm)
+static int gsm_activate_mux(struct gsm_mux *gsm)
{
struct gsm_dlci *dlci;
int i = 0;
@@ -2109,7 +2108,6 @@ int gsm_activate_mux(struct gsm_mux *gsm)
gsm->dead = 0; /* Tty opens are now permissible */
return 0;
}
-EXPORT_SYMBOL_GPL(gsm_activate_mux);
/**
* gsm_free_mux - free up a mux
@@ -2117,13 +2115,12 @@ EXPORT_SYMBOL_GPL(gsm_activate_mux);
*
* Dispose of allocated resources for a dead mux
*/
-void gsm_free_mux(struct gsm_mux *gsm)
+static void gsm_free_mux(struct gsm_mux *gsm)
{
kfree(gsm->txframe);
kfree(gsm->buf);
kfree(gsm);
}
-EXPORT_SYMBOL_GPL(gsm_free_mux);
/**
* gsm_free_muxr - free up a mux
@@ -2153,7 +2150,7 @@ static inline void mux_put(struct gsm_mux *gsm)
* Creates a new mux ready for activation.
*/
-struct gsm_mux *gsm_alloc_mux(void)
+static struct gsm_mux *gsm_alloc_mux(void)
{
struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL);
if (gsm == NULL)
@@ -2170,6 +2167,7 @@ struct gsm_mux *gsm_alloc_mux(void)
return NULL;
}
spin_lock_init(&gsm->lock);
+ mutex_init(&gsm->mutex);
kref_init(&gsm->ref);
INIT_LIST_HEAD(&gsm->tx_list);
@@ -2185,7 +2183,6 @@ struct gsm_mux *gsm_alloc_mux(void)
return gsm;
}
-EXPORT_SYMBOL_GPL(gsm_alloc_mux);
/**
* gsmld_output - write to link
@@ -2269,14 +2266,15 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *f;
int i;
char buf[64];
- char flags;
+ char flags = TTY_NORMAL;
if (debug & 4)
print_hex_dump_bytes("gsmld_receive: ", DUMP_PREFIX_OFFSET,
cp, count);
for (i = count, dp = cp, f = fp; i; i--, dp++) {
- flags = *f++;
+ if (f)
+ flags = *f++;
switch (flags) {
case TTY_NORMAL:
gsm->receive(gsm, *dp);
@@ -2711,7 +2709,7 @@ static void gsm_mux_rx_netchar(struct gsm_dlci *dlci,
return;
}
-int gsm_change_mtu(struct net_device *net, int new_mtu)
+static int gsm_change_mtu(struct net_device *net, int new_mtu)
{
struct gsm_mux_net *mux_net = (struct gsm_mux_net *)netdev_priv(net);
if ((new_mtu < 8) || (new_mtu > mux_net->dlci->gsm->mtu))
@@ -2909,23 +2907,33 @@ static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty)
This is ok from a locking
perspective as we don't have to worry about this
if DLCI0 is lost */
- if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN)
+ mutex_lock(&gsm->mutex);
+ if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN) {
+ mutex_unlock(&gsm->mutex);
return -EL2NSYNC;
+ }
dlci = gsm->dlci[line];
if (dlci == NULL) {
alloc = true;
dlci = gsm_dlci_alloc(gsm, line);
}
- if (dlci == NULL)
+ if (dlci == NULL) {
+ mutex_unlock(&gsm->mutex);
return -ENOMEM;
+ }
ret = tty_port_install(&dlci->port, driver, tty);
if (ret) {
if (alloc)
dlci_put(dlci);
+ mutex_unlock(&gsm->mutex);
return ret;
}
+ dlci_get(dlci);
+ dlci_get(gsm->dlci[0]);
+ mux_get(gsm);
tty->driver_data = dlci;
+ mutex_unlock(&gsm->mutex);
return 0;
}
@@ -2936,9 +2944,6 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
struct tty_port *port = &dlci->port;
port->count++;
- dlci_get(dlci);
- dlci_get(dlci->gsm->dlci[0]);
- mux_get(dlci->gsm);
tty_port_tty_set(port, tty);
dlci->modem_rx = 0;
@@ -2965,7 +2970,7 @@ static void gsmtty_close(struct tty_struct *tty, struct file *filp)
mutex_unlock(&dlci->mutex);
gsm = dlci->gsm;
if (tty_port_close_start(&dlci->port, tty, filp) == 0)
- goto out;
+ return;
gsm_dlci_begin_close(dlci);
if (test_bit(ASYNCB_INITIALIZED, &dlci->port.flags)) {
if (C_HUPCL(tty))
@@ -2973,10 +2978,7 @@ static void gsmtty_close(struct tty_struct *tty, struct file *filp)
}
tty_port_close_end(&dlci->port, tty);
tty_port_tty_set(&dlci->port, NULL);
-out:
- dlci_put(dlci);
- dlci_put(gsm->dlci[0]);
- mux_put(gsm);
+ return;
}
static void gsmtty_hangup(struct tty_struct *tty)
@@ -3153,6 +3155,16 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
return gsmtty_modem_update(dlci, encode);
}
+static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ struct gsm_mux *gsm = dlci->gsm;
+
+ dlci_put(dlci);
+ dlci_put(gsm->dlci[0]);
+ mux_put(gsm);
+ driver->ttys[tty->index] = NULL;
+}
/* Virtual ttys for the demux */
static const struct tty_operations gsmtty_ops = {
@@ -3172,6 +3184,7 @@ static const struct tty_operations gsmtty_ops = {
.tiocmget = gsmtty_tiocmget,
.tiocmset = gsmtty_tiocmset,
.break_ctl = gsmtty_break_ctl,
+ .remove = gsmtty_remove,
};
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 1e64050..8b157d6 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -1244,7 +1244,7 @@ static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
{
struct r3964_info *pInfo = tty->disc_data;
const unsigned char *p;
- char *f, flags = 0;
+ char *f, flags = TTY_NORMAL;
int i;
for (i = count, p = cp, f = fp; i; i--, p++) {
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 34aacaa..cb8017a 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -105,6 +105,7 @@ struct n_tty_data {
/* must hold exclusive termios_rwsem to reset these */
unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1;
+ unsigned char push:1;
/* shared by producer and consumer */
char read_buf[N_TTY_BUF_SIZE];
@@ -275,7 +276,8 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
return;
n_tty_set_room(tty);
n_tty_write_wakeup(tty->link);
- wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
+ if (waitqueue_active(&tty->link->write_wait))
+ wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
return;
}
@@ -342,6 +344,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
ldata->erasing = 0;
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
+ ldata->push = 0;
}
static void n_tty_packet_mode_flush(struct tty_struct *tty)
@@ -351,7 +354,8 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
spin_lock_irqsave(&tty->ctrl_lock, flags);
if (tty->link->packet) {
tty->ctrl_status |= TIOCPKT_FLUSHREAD;
- wake_up_interruptible(&tty->link->read_wait);
+ if (waitqueue_active(&tty->link->read_wait))
+ wake_up_interruptible(&tty->link->read_wait);
}
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
}
@@ -1162,7 +1166,8 @@ static void n_tty_receive_break(struct tty_struct *tty)
put_tty_queue('\0', ldata);
}
put_tty_queue('\0', ldata);
- wake_up_interruptible(&tty->read_wait);
+ if (waitqueue_active(&tty->read_wait))
+ wake_up_interruptible(&tty->read_wait);
}
/**
@@ -1220,7 +1225,8 @@ static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
put_tty_queue('\0', ldata);
else
put_tty_queue(c, ldata);
- wake_up_interruptible(&tty->read_wait);
+ if (waitqueue_active(&tty->read_wait))
+ wake_up_interruptible(&tty->read_wait);
}
static void
@@ -1264,7 +1270,6 @@ static int
n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
- int parmrk;
if (I_IXON(tty)) {
if (c == START_CHAR(tty)) {
@@ -1349,8 +1354,6 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
}
if ((c == EOL_CHAR(tty)) ||
(c == EOL2_CHAR(tty) && L_IEXTEN(tty))) {
- parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty))
- ? 1 : 0;
/*
* XXX are EOL_CHAR and EOL2_CHAR echoed?!?
*/
@@ -1365,7 +1368,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
* XXX does PARMRK doubling happen for
* EOL_CHAR and EOL2_CHAR?
*/
- if (parmrk)
+ if (c == (unsigned char) '\377' && I_PARMRK(tty))
put_tty_queue(c, ldata);
handle_newline:
@@ -1379,7 +1382,6 @@ handle_newline:
}
}
- parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 1 : 0;
if (L_ECHO(tty)) {
finish_erasing(ldata);
if (c == '\n')
@@ -1393,7 +1395,8 @@ handle_newline:
commit_echoes(tty);
}
- if (parmrk)
+ /* PARMRK doubling check */
+ if (c == (unsigned char) '\377' && I_PARMRK(tty))
put_tty_queue(c, ldata);
put_tty_queue(c, ldata);
@@ -1404,7 +1407,6 @@ static inline void
n_tty_receive_char_inline(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
- int parmrk;
if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && I_IXANY(tty)) {
start_tty(tty);
@@ -1418,13 +1420,13 @@ n_tty_receive_char_inline(struct tty_struct *tty, unsigned char c)
echo_char(c, tty);
commit_echoes(tty);
}
- parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 1 : 0;
- if (parmrk)
+ /* PARMRK doubling check */
+ if (c == (unsigned char) '\377' && I_PARMRK(tty))
put_tty_queue(c, ldata);
put_tty_queue(c, ldata);
}
-static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
{
n_tty_receive_char_inline(tty, c);
}
@@ -1449,8 +1451,7 @@ n_tty_receive_char_fast(struct tty_struct *tty, unsigned char c)
put_tty_queue(c, ldata);
}
-static inline void
-n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
{
if (I_ISTRIP(tty))
c &= 0x7f;
@@ -1681,32 +1682,9 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
}
}
-static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
-{
- int room, n;
-
- down_read(&tty->termios_rwsem);
-
- while (1) {
- room = receive_room(tty);
- n = min(count, room);
- if (!n)
- break;
- __receive_buf(tty, cp, fp, n);
- cp += n;
- if (fp)
- fp += n;
- count -= n;
- }
-
- tty->receive_room = room;
- n_tty_check_throttle(tty);
- up_read(&tty->termios_rwsem);
-}
-
-static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
+static int
+n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count, int flow)
{
struct n_tty_data *ldata = tty->disc_data;
int room, n, rcvd = 0;
@@ -1717,7 +1695,7 @@ static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
room = receive_room(tty);
n = min(count, room);
if (!n) {
- if (!room)
+ if (flow && !room)
ldata->no_room = 1;
break;
}
@@ -1736,6 +1714,18 @@ static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
return rcvd;
}
+static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ n_tty_receive_buf_common(tty, cp, fp, count, 0);
+}
+
+static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ return n_tty_receive_buf_common(tty, cp, fp, count, 1);
+}
+
int is_ignored(int sig)
{
return (sigismember(&current->blocked, sig) ||
@@ -1762,7 +1752,16 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
- ldata->line_start = ldata->canon_head = ldata->read_tail;
+ ldata->line_start = ldata->read_tail;
+ if (!L_ICANON(tty) || !read_cnt(ldata)) {
+ ldata->canon_head = ldata->read_tail;
+ ldata->push = 0;
+ } else {
+ set_bit((ldata->read_head - 1) & (N_TTY_BUF_SIZE - 1),
+ ldata->read_flags);
+ ldata->canon_head = ldata->read_head;
+ ldata->push = 1;
+ }
ldata->erasing = 0;
ldata->lnext = 0;
}
@@ -1825,8 +1824,10 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
start_tty(tty);
/* The termios change make the tty ready for I/O */
- wake_up_interruptible(&tty->write_wait);
- wake_up_interruptible(&tty->read_wait);
+ if (waitqueue_active(&tty->write_wait))
+ wake_up_interruptible(&tty->write_wait);
+ if (waitqueue_active(&tty->read_wait))
+ wake_up_interruptible(&tty->read_wait);
}
/**
@@ -1892,14 +1893,15 @@ err:
return -ENOMEM;
}
-static inline int input_available_p(struct tty_struct *tty, int amt)
+static inline int input_available_p(struct tty_struct *tty, int poll)
{
struct n_tty_data *ldata = tty->disc_data;
+ int amt = poll && !TIME_CHAR(tty) ? MIN_CHAR(tty) : 1;
if (ldata->icanon && !L_EXTPROC(tty)) {
if (ldata->canon_head != ldata->read_tail)
return 1;
- } else if (read_cnt(ldata) >= (amt ? amt : 1))
+ } else if (read_cnt(ldata) >= amt)
return 1;
return 0;
@@ -1965,6 +1967,12 @@ static int copy_from_read_buf(struct tty_struct *tty,
* it copies one line of input up to and including the line-delimiting
* character into the user-space buffer.
*
+ * NB: When termios is changed from non-canonical to canonical mode and
+ * the read buffer contains data, n_tty_set_termios() simulates an EOF
+ * push (as if C-d were input) _without_ the DISABLED_CHAR in the buffer.
+ * This causes data already processed as input to be immediately available
+ * as input although a newline has not been received.
+ *
* Called under the atomic_read_lock mutex
*
* n_tty_read()/consumer path:
@@ -2011,7 +2019,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
n += found;
c = n;
- if (found && read_buf(ldata, eol) == __DISABLED_CHAR) {
+ if (found && !ldata->push && read_buf(ldata, eol) == __DISABLED_CHAR) {
n--;
eof_push = !n && ldata->read_tail != ldata->line_start;
}
@@ -2038,7 +2046,10 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
ldata->read_tail += c;
if (found) {
- ldata->line_start = ldata->read_tail;
+ if (!ldata->push)
+ ldata->line_start = ldata->read_tail;
+ else
+ ldata->push = 0;
tty_audit_push(tty);
}
return eof_push ? -EAGAIN : 0;
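The NB comment above describes behaviour that is visible from user space: bytes received while the tty was in non-canonical mode become readable as soon as the line discipline is switched back to canonical mode, even though no newline (and no real EOF character) has arrived. A minimal user-space sketch of that sequence (illustrative only, error handling omitted):

#include <termios.h>
#include <unistd.h>

/* Toggle a tty fd out of and back into canonical mode. */
static void demo_simulated_eof_push(int fd)
{
	struct termios t;
	char buf[64];

	tcgetattr(fd, &t);
	t.c_lflag &= ~(ICANON | ECHO);	/* non-canonical: no line buffering */
	tcsetattr(fd, TCSANOW, &t);

	/* ... the remote end sends "abc" with no newline; it queues up ... */

	t.c_lflag |= ICANON;		/* back to canonical mode */
	tcsetattr(fd, TCSANOW, &t);

	/* read() now returns the buffered "abc" at once, without a newline. */
	read(fd, buf, sizeof(buf));
}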
@@ -2398,7 +2409,7 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
- if (input_available_p(tty, TIME_CHAR(tty) ? 0 : MIN_CHAR(tty)))
+ if (input_available_p(tty, 1))
mask |= POLLIN | POLLRDNORM;
if (tty->packet && tty->link->ctrl_status)
mask |= POLLPRI | POLLIN | POLLRDNORM;
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 354564e..383c4c7 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1744,7 +1744,7 @@ static void rp_flush_buffer(struct tty_struct *tty)
#ifdef CONFIG_PCI
-static DEFINE_PCI_DEVICE_TABLE(rocket_pci_ids) = {
+static const struct pci_device_id rocket_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP4QUAD) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8OCTA) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_URP8OCTA) },
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index e33d38c..61ecd70 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2670,6 +2670,10 @@ static void serial8250_config_port(struct uart_port *port, int flags)
if (port->type == PORT_16550A && port->iotype == UPIO_AU)
up->bugs |= UART_BUG_NOMSR;
+ /* HW bugs may trigger IRQ while IIR == NO_INT */
+ if (port->type == PORT_TEGRA)
+ up->bugs |= UART_BUG_NOMSR;
+
if (port->type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
autoconfig_irq(up);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 06525f1..faa64e6 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -14,7 +14,6 @@
* raised, the LCR needs to be rewritten and the uart status register read.
*/
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/serial_8250.h>
@@ -274,7 +273,6 @@ static int dw8250_probe_of(struct uart_port *p,
return 0;
}
-#ifdef CONFIG_ACPI
static int dw8250_probe_acpi(struct uart_8250_port *up,
struct dw8250_data *data)
{
@@ -302,13 +300,6 @@ static int dw8250_probe_acpi(struct uart_8250_port *up,
return 0;
}
-#else
-static inline int dw8250_probe_acpi(struct uart_8250_port *up,
- struct dw8250_data *data)
-{
- return -ENODEV;
-}
-#endif /* CONFIG_ACPI */
static int dw8250_probe(struct platform_device *pdev)
{
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index d1a9078..56c8723 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -18,7 +18,6 @@
*/
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/serial_8250.h>
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 4697a51..50228ee 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -11,7 +11,6 @@
*/
#undef DEBUG
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
@@ -1259,10 +1258,10 @@ static int pci_quatech_init(struct pci_dev *dev)
unsigned long base = pci_resource_start(dev, 0);
if (base) {
u32 tmp;
- outl(inl(base + 0x38), base + 0x38);
+ outl(inl(base + 0x38) | 0x00002000, base + 0x38);
tmp = inl(base + 0x3c);
outl(tmp | 0x01000000, base + 0x3c);
- outl(tmp, base + 0x3c);
+ outl(tmp &= ~0x01000000, base + 0x3c);
}
}
return 0;
@@ -1744,6 +1743,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_TITAN_800E 0xA014
#define PCI_DEVICE_ID_TITAN_200EI 0xA016
#define PCI_DEVICE_ID_TITAN_200EISI 0xA017
+#define PCI_DEVICE_ID_TITAN_200V3 0xA306
#define PCI_DEVICE_ID_TITAN_400V3 0xA310
#define PCI_DEVICE_ID_TITAN_410V3 0xA312
#define PCI_DEVICE_ID_TITAN_800V3 0xA314
@@ -4427,6 +4427,9 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_oxsemi_2_4000000 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200V3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_921600 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400V3,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_4_921600 },
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index 35d9ab9..682a2fb 100644
--- a/drivers/tty/serial/8250/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
@@ -12,7 +12,6 @@
* the Free Software Foundation; either version 2 of the License.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pnp.h>
#include <linux/string.h>
diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index 1b74b88..4d180c9 100644
--- a/drivers/tty/serial/8250/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index a3817ab..441ada4 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -181,9 +181,8 @@ config SERIAL_KS8695_CONSOLE
config SERIAL_CLPS711X
tristate "CLPS711X serial port support"
- depends on ARCH_CLPS711X
+ depends on ARCH_CLPS711X || COMPILE_TEST
select SERIAL_CORE
- default y
help
This enables the driver for the on-chip UARTs of the Cirrus
Logic EP711x/EP721x/EP731x processors.
@@ -1146,31 +1145,13 @@ config SERIAL_QE
This driver supports the QE serial ports on Freescale embedded
PowerPC that contain a QUICC Engine.
-config SERIAL_SC26XX
- tristate "SC2681/SC2692 serial port support"
- depends on SNI_RM
- select SERIAL_CORE
- help
- This is a driver for the onboard serial ports of
- older RM400 machines.
-
-config SERIAL_SC26XX_CONSOLE
- bool "Console on SC2681/SC2692 serial port"
- depends on SERIAL_SC26XX=y
- select SERIAL_CORE_CONSOLE
- help
- Support for Console on SC2681/SC2692 serial ports.
-
config SERIAL_SCCNXP
tristate "SCCNXP serial port support"
- depends on !SERIAL_SC26XX
select SERIAL_CORE
- default n
help
This selects support for an advanced UART from NXP (Philips).
Supported ICs are SCC2681, SCC2691, SCC2692, SC28L91, SC28L92,
SC28L202, SCC68681 and SCC68692.
- Positioned as a replacement for the driver SC26XX.
config SERIAL_SCCNXP_CONSOLE
bool "Console on SCCNXP serial port"
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 3068c77..3680854 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_SERIAL_M32R_SIO) += m32r_sio.o
obj-$(CONFIG_SERIAL_MPSC) += mpsc.o
obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
obj-$(CONFIG_ETRAX_SERIAL) += crisv10.o
-obj-$(CONFIG_SERIAL_SC26XX) += sc26xx.o
obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o
obj-$(CONFIG_SERIAL_JSM) += jsm/
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 33bd860..01c9e72 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -756,9 +756,10 @@ static int pl010_remove(struct amba_device *dev)
return 0;
}
-static int pl010_suspend(struct amba_device *dev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int pl010_suspend(struct device *dev)
{
- struct uart_amba_port *uap = amba_get_drvdata(dev);
+ struct uart_amba_port *uap = dev_get_drvdata(dev);
if (uap)
uart_suspend_port(&amba_reg, &uap->port);
@@ -766,15 +767,18 @@ static int pl010_suspend(struct amba_device *dev, pm_message_t state)
return 0;
}
-static int pl010_resume(struct amba_device *dev)
+static int pl010_resume(struct device *dev)
{
- struct uart_amba_port *uap = amba_get_drvdata(dev);
+ struct uart_amba_port *uap = dev_get_drvdata(dev);
if (uap)
uart_resume_port(&amba_reg, &uap->port);
return 0;
}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pl010_dev_pm_ops, pl010_suspend, pl010_resume);
static struct amba_id pl010_ids[] = {
{
@@ -789,12 +793,11 @@ MODULE_DEVICE_TABLE(amba, pl010_ids);
static struct amba_driver pl010_driver = {
.drv = {
.name = "uart-pl010",
+ .pm = &pl010_dev_pm_ops,
},
.id_table = pl010_ids,
.probe = pl010_probe,
.remove = pl010_remove,
- .suspend = pl010_suspend,
- .resume = pl010_resume,
};
static int __init pl010_init(void)
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 7203864..d58783d 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -112,8 +112,6 @@ static struct vendor_data vendor_st = {
.get_fifosize = get_fifosize_st,
};
-static struct uart_amba_port *amba_ports[UART_NR];
-
/* Deals with DMA transactions */
struct pl011_sgbuf {
@@ -969,6 +967,8 @@ static void pl011_dma_rx_poll(unsigned long args)
spin_lock_irqsave(&uap->port.lock, flags);
pl011_dma_rx_stop(uap);
+ uap->im |= UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
spin_unlock_irqrestore(&uap->port.lock, flags);
uap->dmarx.running = false;
@@ -1216,8 +1216,8 @@ __acquires(&uap->port.lock)
dev_dbg(uap->port.dev, "could not trigger RX DMA job "
"fall back to interrupt mode again\n");
uap->im |= UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
} else {
- uap->im &= ~UART011_RXIM;
#ifdef CONFIG_DMA_ENGINE
/* Start Rx DMA poll */
if (uap->dmarx.poll_rate) {
@@ -1229,8 +1229,6 @@ __acquires(&uap->port.lock)
}
#endif
}
-
- writew(uap->im, uap->port.membase + UART011_IMSC);
}
spin_lock(&uap->port.lock);
}
@@ -1513,10 +1511,25 @@ static int pl011_hwinit(struct uart_port *port)
return retval;
}
+static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
+{
+ writew(lcr_h, uap->port.membase + uap->lcrh_rx);
+ if (uap->lcrh_rx != uap->lcrh_tx) {
+ int i;
+ /*
+ * Wait 10 PCLKs before writing LCRH_TX register,
+ * to get this delay write read only register 10 times
+ */
+ for (i = 0; i < 10; ++i)
+ writew(0xff, uap->port.membase + UART011_MIS);
+ writew(lcr_h, uap->port.membase + uap->lcrh_tx);
+ }
+}
+
static int pl011_startup(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
- unsigned int cr;
+ unsigned int cr, lcr_h, fbrd, ibrd;
int retval;
retval = pl011_hwinit(port);
@@ -1535,32 +1548,36 @@ static int pl011_startup(struct uart_port *port)
writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
/*
- * Provoke TX FIFO interrupt into asserting.
+ * Provoke TX FIFO interrupt into asserting. Taking care to preserve
+ * baud rate and data format specified by FBRD, IBRD and LCRH as the
+ * UART may already be in use as a console.
*/
+ spin_lock_irq(&uap->port.lock);
+
+ fbrd = readw(uap->port.membase + UART011_FBRD);
+ ibrd = readw(uap->port.membase + UART011_IBRD);
+ lcr_h = readw(uap->port.membase + uap->lcrh_rx);
+
cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
writew(cr, uap->port.membase + UART011_CR);
writew(0, uap->port.membase + UART011_FBRD);
writew(1, uap->port.membase + UART011_IBRD);
- writew(0, uap->port.membase + uap->lcrh_rx);
- if (uap->lcrh_tx != uap->lcrh_rx) {
- int i;
- /*
- * Wait 10 PCLKs before writing LCRH_TX register,
- * to get this delay write read only register 10 times
- */
- for (i = 0; i < 10; ++i)
- writew(0xff, uap->port.membase + UART011_MIS);
- writew(0, uap->port.membase + uap->lcrh_tx);
- }
+ pl011_write_lcr_h(uap, 0);
writew(0, uap->port.membase + UART01x_DR);
while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
barrier();
+ writew(fbrd, uap->port.membase + UART011_FBRD);
+ writew(ibrd, uap->port.membase + UART011_IBRD);
+ pl011_write_lcr_h(uap, lcr_h);
+
/* restore RTS and DTR */
cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
writew(cr, uap->port.membase + UART011_CR);
+ spin_unlock_irq(&uap->port.lock);
+
/*
* initialise the old status of the modem signals
*/
@@ -1629,11 +1646,13 @@ static void pl011_shutdown(struct uart_port *port)
* it during startup().
*/
uap->autorts = false;
+ spin_lock_irq(&uap->port.lock);
cr = readw(uap->port.membase + UART011_CR);
uap->old_cr = cr;
cr &= UART011_CR_RTS | UART011_CR_DTR;
cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
writew(cr, uap->port.membase + UART011_CR);
+ spin_unlock_irq(&uap->port.lock);
/*
* disable break condition and fifos
@@ -1797,17 +1816,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
* UART011_FBRD & UART011_IBRD.
* ----------^----------^----------^----------^-----
*/
- writew(lcr_h, port->membase + uap->lcrh_rx);
- if (uap->lcrh_rx != uap->lcrh_tx) {
- int i;
- /*
- * Wait 10 PCLKs before writing LCRH_TX register,
- * to get this delay write read only register 10 times
- */
- for (i = 0; i < 10; ++i)
- writew(0xff, uap->port.membase + UART011_MIS);
- writew(lcr_h, port->membase + uap->lcrh_tx);
- }
+ pl011_write_lcr_h(uap, lcr_h);
writew(old_cr, port->membase + UART011_CR);
spin_unlock_irqrestore(&port->lock, flags);
@@ -2169,10 +2178,10 @@ static int pl011_remove(struct amba_device *dev)
return 0;
}
-#ifdef CONFIG_PM
-static int pl011_suspend(struct amba_device *dev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int pl011_suspend(struct device *dev)
{
- struct uart_amba_port *uap = amba_get_drvdata(dev);
+ struct uart_amba_port *uap = dev_get_drvdata(dev);
if (!uap)
return -EINVAL;
@@ -2180,9 +2189,9 @@ static int pl011_suspend(struct amba_device *dev, pm_message_t state)
return uart_suspend_port(&amba_reg, &uap->port);
}
-static int pl011_resume(struct amba_device *dev)
+static int pl011_resume(struct device *dev)
{
- struct uart_amba_port *uap = amba_get_drvdata(dev);
+ struct uart_amba_port *uap = dev_get_drvdata(dev);
if (!uap)
return -EINVAL;
@@ -2191,6 +2200,8 @@ static int pl011_resume(struct amba_device *dev)
}
#endif
+static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
+
static struct amba_id pl011_ids[] = {
{
.id = 0x00041011,
@@ -2210,14 +2221,11 @@ MODULE_DEVICE_TABLE(amba, pl011_ids);
static struct amba_driver pl011_driver = {
.drv = {
.name = "uart-pl011",
+ .pm = &pl011_dev_pm_ops,
},
.id_table = pl011_ids,
.probe = pl011_probe,
.remove = pl011_remove,
-#ifdef CONFIG_PM
- .suspend = pl011_suspend,
- .resume = pl011_resume,
-#endif
};
static int __init pl011_init(void)
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index c7d99af..a49f10d 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -825,9 +825,6 @@ static void atmel_release_rx_dma(struct uart_port *port)
atmel_port->desc_rx = NULL;
atmel_port->chan_rx = NULL;
atmel_port->cookie_rx = -EINVAL;
-
- if (!atmel_port->is_usart)
- del_timer_sync(&atmel_port->uart_timer);
}
static void atmel_rx_from_dma(struct uart_port *port)
@@ -1229,9 +1226,6 @@ static void atmel_release_rx_pdc(struct uart_port *port)
DMA_FROM_DEVICE);
kfree(pdc->buf);
}
-
- if (!atmel_port->is_usart)
- del_timer_sync(&atmel_port->uart_timer);
}
static void atmel_rx_from_pdc(struct uart_port *port)
@@ -1604,12 +1598,13 @@ static int atmel_startup(struct uart_port *port)
/* enable xmit & rcvr */
UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ setup_timer(&atmel_port->uart_timer,
+ atmel_uart_timer_callback,
+ (unsigned long)port);
+
if (atmel_use_pdc_rx(port)) {
/* set UART timeout */
if (!atmel_port->is_usart) {
- setup_timer(&atmel_port->uart_timer,
- atmel_uart_timer_callback,
- (unsigned long)port);
mod_timer(&atmel_port->uart_timer,
jiffies + uart_poll_timeout(port));
/* set USART timeout */
@@ -1624,9 +1619,6 @@ static int atmel_startup(struct uart_port *port)
} else if (atmel_use_dma_rx(port)) {
/* set UART timeout */
if (!atmel_port->is_usart) {
- setup_timer(&atmel_port->uart_timer,
- atmel_uart_timer_callback,
- (unsigned long)port);
mod_timer(&atmel_port->uart_timer,
jiffies + uart_poll_timeout(port));
/* set USART timeout */
@@ -1650,12 +1642,30 @@ static int atmel_startup(struct uart_port *port)
static void atmel_shutdown(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
/*
- * Ensure everything is stopped.
+ * Prevent any tasklets from being scheduled during
+ * cleanup
+ */
+ del_timer_sync(&atmel_port->uart_timer);
+
+ /*
+ * Clear out any scheduled tasklets before
+ * we destroy the buffers
+ */
+ tasklet_kill(&atmel_port->tasklet);
+
+ /*
+ * Ensure everything is stopped and
+ * disable all interrupts, port and break condition.
*/
atmel_stop_rx(port);
atmel_stop_tx(port);
+ UART_PUT_CR(port, ATMEL_US_RSTSTA);
+ UART_PUT_IDR(port, -1);
+
/*
* Shut-down the DMA.
*/
@@ -1665,10 +1675,10 @@ static void atmel_shutdown(struct uart_port *port)
atmel_port->release_tx(port);
/*
- * Disable all interrupts, port and break condition.
+ * Reset ring buffer pointers
*/
- UART_PUT_CR(port, ATMEL_US_RSTSTA);
- UART_PUT_IDR(port, -1);
+ atmel_port->rx_ring.head = 0;
+ atmel_port->rx_ring.tail = 0;
/*
* Free the interrupt
@@ -2441,11 +2451,12 @@ static int atmel_serial_remove(struct platform_device *pdev)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int ret = 0;
+ tasklet_kill(&atmel_port->tasklet);
+
device_init_wakeup(&pdev->dev, 0);
ret = uart_remove_one_port(&atmel_uart, port);
- tasklet_kill(&atmel_port->tasklet);
kfree(atmel_port->rx_ring.buf);
/* "port" is allocated statically, so we shouldn't free it */
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 8d0b994..b0eacb8 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -21,44 +21,66 @@
#include <linux/console.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
-#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/ioport.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
-#include <mach/hardware.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/clps711x.h>
-#define UART_CLPS711X_NAME "uart-clps711x"
+#define UART_CLPS711X_DEVNAME "ttyCL"
#define UART_CLPS711X_NR 2
#define UART_CLPS711X_MAJOR 204
#define UART_CLPS711X_MINOR 40
-#define UBRLCR(port) ((port)->line ? UBRLCR2 : UBRLCR1)
-#define UARTDR(port) ((port)->line ? UARTDR2 : UARTDR1)
-#define SYSFLG(port) ((port)->line ? SYSFLG2 : SYSFLG1)
-#define SYSCON(port) ((port)->line ? SYSCON2 : SYSCON1)
-#define TX_IRQ(port) ((port)->line ? IRQ_UTXINT2 : IRQ_UTXINT1)
-#define RX_IRQ(port) ((port)->line ? IRQ_URXINT2 : IRQ_URXINT1)
+#define UARTDR_OFFSET (0x00)
+#define UBRLCR_OFFSET (0x40)
+
+#define UARTDR_FRMERR (1 << 8)
+#define UARTDR_PARERR (1 << 9)
+#define UARTDR_OVERR (1 << 10)
+
+#define UBRLCR_BAUD_MASK ((1 << 12) - 1)
+#define UBRLCR_BREAK (1 << 12)
+#define UBRLCR_PRTEN (1 << 13)
+#define UBRLCR_EVENPRT (1 << 14)
+#define UBRLCR_XSTOP (1 << 15)
+#define UBRLCR_FIFOEN (1 << 16)
+#define UBRLCR_WRDLEN5 (0 << 17)
+#define UBRLCR_WRDLEN6 (1 << 17)
+#define UBRLCR_WRDLEN7 (2 << 17)
+#define UBRLCR_WRDLEN8 (3 << 17)
+#define UBRLCR_WRDLEN_MASK (3 << 17)
struct clps711x_port {
- struct uart_driver uart;
- struct clk *uart_clk;
- struct uart_port port[UART_CLPS711X_NR];
- int tx_enabled[UART_CLPS711X_NR];
-#ifdef CONFIG_SERIAL_CLPS711X_CONSOLE
- struct console console;
-#endif
+ struct uart_port port;
+ unsigned int tx_enabled;
+ int rx_irq;
+ struct regmap *syscon;
+ bool use_ms;
+};
+
+static struct uart_driver clps711x_uart = {
+ .owner = THIS_MODULE,
+ .driver_name = UART_CLPS711X_DEVNAME,
+ .dev_name = UART_CLPS711X_DEVNAME,
+ .major = UART_CLPS711X_MAJOR,
+ .minor = UART_CLPS711X_MINOR,
+ .nr = UART_CLPS711X_NR,
};
static void uart_clps711x_stop_tx(struct uart_port *port)
{
struct clps711x_port *s = dev_get_drvdata(port->dev);
- if (s->tx_enabled[port->line]) {
- disable_irq(TX_IRQ(port));
- s->tx_enabled[port->line] = 0;
+ if (s->tx_enabled) {
+ disable_irq(port->irq);
+ s->tx_enabled = 0;
}
}
@@ -66,33 +88,27 @@ static void uart_clps711x_start_tx(struct uart_port *port)
{
struct clps711x_port *s = dev_get_drvdata(port->dev);
- if (!s->tx_enabled[port->line]) {
- enable_irq(TX_IRQ(port));
- s->tx_enabled[port->line] = 1;
+ if (!s->tx_enabled) {
+ s->tx_enabled = 1;
+ enable_irq(port->irq);
}
}
-static void uart_clps711x_stop_rx(struct uart_port *port)
-{
- disable_irq(RX_IRQ(port));
-}
-
-static void uart_clps711x_enable_ms(struct uart_port *port)
-{
- /* Do nothing */
-}
-
static irqreturn_t uart_clps711x_int_rx(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- unsigned int status, ch, flg;
+ struct clps711x_port *s = dev_get_drvdata(port->dev);
+ unsigned int status, flg;
+ u16 ch;
for (;;) {
- status = clps_readl(SYSFLG(port));
- if (status & SYSFLG_URXFE)
+ u32 sysflg = 0;
+
+ regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
+ if (sysflg & SYSFLG_URXFE)
break;
- ch = clps_readw(UARTDR(port));
+ ch = readw(port->membase + UARTDR_OFFSET);
status = ch & (UARTDR_FRMERR | UARTDR_PARERR | UARTDR_OVERR);
ch &= 0xff;
@@ -138,23 +154,29 @@ static irqreturn_t uart_clps711x_int_tx(int irq, void *dev_id)
struct circ_buf *xmit = &port->state->xmit;
if (port->x_char) {
- clps_writew(port->x_char, UARTDR(port));
+ writew(port->x_char, port->membase + UARTDR_OFFSET);
port->icount.tx++;
port->x_char = 0;
return IRQ_HANDLED;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- disable_irq_nosync(TX_IRQ(port));
- s->tx_enabled[port->line] = 0;
+ if (s->tx_enabled) {
+ disable_irq_nosync(port->irq);
+ s->tx_enabled = 0;
+ }
return IRQ_HANDLED;
}
while (!uart_circ_empty(xmit)) {
- clps_writew(xmit->buf[xmit->tail], UARTDR(port));
+ u32 sysflg = 0;
+
+ writew(xmit->buf[xmit->tail], port->membase + UARTDR_OFFSET);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
- if (clps_readl(SYSFLG(port) & SYSFLG_UTXFF))
+
+ regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
+ if (sysflg & SYSFLG_UTXFF)
break;
}
@@ -166,20 +188,28 @@ static irqreturn_t uart_clps711x_int_tx(int irq, void *dev_id)
static unsigned int uart_clps711x_tx_empty(struct uart_port *port)
{
- return (clps_readl(SYSFLG(port) & SYSFLG_UBUSY)) ? 0 : TIOCSER_TEMT;
+ struct clps711x_port *s = dev_get_drvdata(port->dev);
+ u32 sysflg = 0;
+
+ regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
+
+ return (sysflg & SYSFLG_UBUSY) ? 0 : TIOCSER_TEMT;
}
static unsigned int uart_clps711x_get_mctrl(struct uart_port *port)
{
- unsigned int status, result = 0;
+ struct clps711x_port *s = dev_get_drvdata(port->dev);
+ unsigned int result = 0;
+
+ if (s->use_ms) {
+ u32 sysflg = 0;
- if (port->line == 0) {
- status = clps_readl(SYSFLG1);
- if (status & SYSFLG1_DCD)
+ regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
+ if (sysflg & SYSFLG1_DCD)
result |= TIOCM_CAR;
- if (status & SYSFLG1_DSR)
+ if (sysflg & SYSFLG1_DSR)
result |= TIOCM_DSR;
- if (status & SYSFLG1_CTS)
+ if (sysflg & SYSFLG1_CTS)
result |= TIOCM_CTS;
} else
result = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR;
@@ -194,65 +224,53 @@ static void uart_clps711x_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void uart_clps711x_break_ctl(struct uart_port *port, int break_state)
{
- unsigned long flags;
unsigned int ubrlcr;
- spin_lock_irqsave(&port->lock, flags);
-
- ubrlcr = clps_readl(UBRLCR(port));
+ ubrlcr = readl(port->membase + UBRLCR_OFFSET);
if (break_state)
ubrlcr |= UBRLCR_BREAK;
else
ubrlcr &= ~UBRLCR_BREAK;
- clps_writel(ubrlcr, UBRLCR(port));
+ writel(ubrlcr, port->membase + UBRLCR_OFFSET);
+}
+
+static void uart_clps711x_set_ldisc(struct uart_port *port, int ld)
+{
+ if (!port->line) {
+ struct clps711x_port *s = dev_get_drvdata(port->dev);
- spin_unlock_irqrestore(&port->lock, flags);
+ regmap_update_bits(s->syscon, SYSCON_OFFSET, SYSCON1_SIREN,
+ (ld == N_IRDA) ? SYSCON1_SIREN : 0);
+ }
}
static int uart_clps711x_startup(struct uart_port *port)
{
struct clps711x_port *s = dev_get_drvdata(port->dev);
- int ret;
-
- s->tx_enabled[port->line] = 1;
- /* Allocate the IRQs */
- ret = devm_request_irq(port->dev, TX_IRQ(port), uart_clps711x_int_tx,
- 0, UART_CLPS711X_NAME " TX", port);
- if (ret)
- return ret;
-
- ret = devm_request_irq(port->dev, RX_IRQ(port), uart_clps711x_int_rx,
- 0, UART_CLPS711X_NAME " RX", port);
- if (ret) {
- devm_free_irq(port->dev, TX_IRQ(port), port);
- return ret;
- }
/* Disable break */
- clps_writel(clps_readl(UBRLCR(port)) & ~UBRLCR_BREAK, UBRLCR(port));
+ writel(readl(port->membase + UBRLCR_OFFSET) & ~UBRLCR_BREAK,
+ port->membase + UBRLCR_OFFSET);
/* Enable the port */
- clps_writel(clps_readl(SYSCON(port)) | SYSCON_UARTEN, SYSCON(port));
-
- return 0;
+ return regmap_update_bits(s->syscon, SYSCON_OFFSET,
+ SYSCON_UARTEN, SYSCON_UARTEN);
}
static void uart_clps711x_shutdown(struct uart_port *port)
{
- /* Free the interrupts */
- devm_free_irq(port->dev, TX_IRQ(port), port);
- devm_free_irq(port->dev, RX_IRQ(port), port);
+ struct clps711x_port *s = dev_get_drvdata(port->dev);
/* Disable the port */
- clps_writel(clps_readl(SYSCON(port)) & ~SYSCON_UARTEN, SYSCON(port));
+ regmap_update_bits(s->syscon, SYSCON_OFFSET, SYSCON_UARTEN, 0);
}
static void uart_clps711x_set_termios(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
{
- unsigned int ubrlcr, baud, quot;
- unsigned long flags;
+ u32 ubrlcr;
+ unsigned int baud, quot;
/* Mask termios capabilities we don't support */
termios->c_cflag &= ~CMSPAR;
@@ -291,8 +309,6 @@ static void uart_clps711x_set_termios(struct uart_port *port,
/* Enable FIFO */
ubrlcr |= UBRLCR_FIFOEN;
- spin_lock_irqsave(&port->lock, flags);
-
/* Set read status mask */
port->read_status_mask = UARTDR_OVERR;
if (termios->c_iflag & INPCK)
@@ -306,9 +322,7 @@ static void uart_clps711x_set_termios(struct uart_port *port,
uart_update_timeout(port, termios->c_cflag, baud);
- clps_writel(ubrlcr | (quot - 1), UBRLCR(port));
-
- spin_unlock_irqrestore(&port->lock, flags);
+ writel(ubrlcr | (quot - 1), port->membase + UBRLCR_OFFSET);
}
static const char *uart_clps711x_type(struct uart_port *port)
@@ -322,14 +336,12 @@ static void uart_clps711x_config_port(struct uart_port *port, int flags)
port->type = PORT_CLPS711X;
}
-static void uart_clps711x_release_port(struct uart_port *port)
+static void uart_clps711x_nop_void(struct uart_port *port)
{
- /* Do nothing */
}
-static int uart_clps711x_request_port(struct uart_port *port)
+static int uart_clps711x_nop_int(struct uart_port *port)
{
- /* Do nothing */
return 0;
}
@@ -339,181 +351,237 @@ static const struct uart_ops uart_clps711x_ops = {
.get_mctrl = uart_clps711x_get_mctrl,
.stop_tx = uart_clps711x_stop_tx,
.start_tx = uart_clps711x_start_tx,
- .stop_rx = uart_clps711x_stop_rx,
- .enable_ms = uart_clps711x_enable_ms,
+ .stop_rx = uart_clps711x_nop_void,
+ .enable_ms = uart_clps711x_nop_void,
.break_ctl = uart_clps711x_break_ctl,
+ .set_ldisc = uart_clps711x_set_ldisc,
.startup = uart_clps711x_startup,
.shutdown = uart_clps711x_shutdown,
.set_termios = uart_clps711x_set_termios,
.type = uart_clps711x_type,
.config_port = uart_clps711x_config_port,
- .release_port = uart_clps711x_release_port,
- .request_port = uart_clps711x_request_port,
+ .release_port = uart_clps711x_nop_void,
+ .request_port = uart_clps711x_nop_int,
};
#ifdef CONFIG_SERIAL_CLPS711X_CONSOLE
static void uart_clps711x_console_putchar(struct uart_port *port, int ch)
{
- while (clps_readl(SYSFLG(port)) & SYSFLG_UTXFF)
- barrier();
+ struct clps711x_port *s = dev_get_drvdata(port->dev);
+ u32 sysflg = 0;
+
+ do {
+ regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
+ } while (sysflg & SYSFLG_UTXFF);
- clps_writew(ch, UARTDR(port));
+ writew(ch, port->membase + UARTDR_OFFSET);
}
static void uart_clps711x_console_write(struct console *co, const char *c,
unsigned n)
{
- struct clps711x_port *s = (struct clps711x_port *)co->data;
- struct uart_port *port = &s->port[co->index];
- u32 syscon;
-
- /* Ensure that the port is enabled */
- syscon = clps_readl(SYSCON(port));
- clps_writel(syscon | SYSCON_UARTEN, SYSCON(port));
+ struct uart_port *port = clps711x_uart.state[co->index].uart_port;
+ struct clps711x_port *s = dev_get_drvdata(port->dev);
+ u32 sysflg = 0;
uart_console_write(port, c, n, uart_clps711x_console_putchar);
/* Wait for transmitter to become empty */
- while (clps_readl(SYSFLG(port)) & SYSFLG_UBUSY)
- barrier();
-
- /* Restore the uart state */
- clps_writel(syscon, SYSCON(port));
+ do {
+ regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
+ } while (sysflg & SYSFLG_UBUSY);
}
-static void uart_clps711x_console_get_options(struct uart_port *port,
- int *baud, int *parity,
- int *bits)
+static int uart_clps711x_console_setup(struct console *co, char *options)
{
- if (clps_readl(SYSCON(port)) & SYSCON_UARTEN) {
- unsigned int ubrlcr, quot;
+ int baud = 38400, bits = 8, parity = 'n', flow = 'n';
+ int ret, index = co->index;
+ struct clps711x_port *s;
+ struct uart_port *port;
+ unsigned int quot;
+ u32 ubrlcr;
- ubrlcr = clps_readl(UBRLCR(port));
+ if (index < 0 || index >= UART_CLPS711X_NR)
+ return -EINVAL;
- *parity = 'n';
- if (ubrlcr & UBRLCR_PRTEN) {
- if (ubrlcr & UBRLCR_EVENPRT)
- *parity = 'e';
- else
- *parity = 'o';
- }
+ port = clps711x_uart.state[index].uart_port;
+ if (!port)
+ return -ENODEV;
- if ((ubrlcr & UBRLCR_WRDLEN_MASK) == UBRLCR_WRDLEN7)
- *bits = 7;
- else
- *bits = 8;
+ s = dev_get_drvdata(port->dev);
- quot = ubrlcr & UBRLCR_BAUD_MASK;
- *baud = port->uartclk / (16 * (quot + 1));
- }
-}
+ if (!options) {
+ u32 syscon = 0;
-static int uart_clps711x_console_setup(struct console *co, char *options)
-{
- int baud = 38400, bits = 8, parity = 'n', flow = 'n';
- struct clps711x_port *s = (struct clps711x_port *)co->data;
- struct uart_port *port = &s->port[(co->index > 0) ? co->index : 0];
+ regmap_read(s->syscon, SYSCON_OFFSET, &syscon);
+ if (syscon & SYSCON_UARTEN) {
+ ubrlcr = readl(port->membase + UBRLCR_OFFSET);
+
+ if (ubrlcr & UBRLCR_PRTEN) {
+ if (ubrlcr & UBRLCR_EVENPRT)
+ parity = 'e';
+ else
+ parity = 'o';
+ }
+
+ if ((ubrlcr & UBRLCR_WRDLEN_MASK) == UBRLCR_WRDLEN7)
+ bits = 7;
- if (options)
+ quot = ubrlcr & UBRLCR_BAUD_MASK;
+ baud = port->uartclk / (16 * (quot + 1));
+ }
+ } else
uart_parse_options(options, &baud, &parity, &bits, &flow);
- else
- uart_clps711x_console_get_options(port, &baud, &parity, &bits);
- return uart_set_options(port, co, baud, parity, bits, flow);
+ ret = uart_set_options(port, co, baud, parity, bits, flow);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(s->syscon, SYSCON_OFFSET,
+ SYSCON_UARTEN, SYSCON_UARTEN);
}
+
+static struct console clps711x_console = {
+ .name = UART_CLPS711X_DEVNAME,
+ .device = uart_console_device,
+ .write = uart_clps711x_console_write,
+ .setup = uart_clps711x_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
#endif
static int uart_clps711x_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
+ int ret, index = np ? of_alias_get_id(np, "serial") : pdev->id;
struct clps711x_port *s;
- int ret, i;
+ struct resource *res;
+ struct clk *uart_clk;
- s = devm_kzalloc(&pdev->dev, sizeof(struct clps711x_port), GFP_KERNEL);
- if (!s) {
- dev_err(&pdev->dev, "Error allocating port structure\n");
+ if (index < 0 || index >= UART_CLPS711X_NR)
+ return -EINVAL;
+
+ s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
+ if (!s)
return -ENOMEM;
+
+ uart_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(uart_clk))
+ return PTR_ERR(uart_clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ s->port.membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(s->port.membase))
+ return PTR_ERR(s->port.membase);
+
+ s->port.irq = platform_get_irq(pdev, 0);
+ if (IS_ERR_VALUE(s->port.irq))
+ return s->port.irq;
+
+ s->rx_irq = platform_get_irq(pdev, 1);
+ if (IS_ERR_VALUE(s->rx_irq))
+ return s->rx_irq;
+
+ if (!np) {
+ char syscon_name[9];
+
+ sprintf(syscon_name, "syscon.%i", index + 1);
+ s->syscon = syscon_regmap_lookup_by_pdevname(syscon_name);
+ if (IS_ERR(s->syscon))
+ return PTR_ERR(s->syscon);
+
+ s->use_ms = !index;
+ } else {
+ s->syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
+ if (IS_ERR(s->syscon))
+ return PTR_ERR(s->syscon);
+
+ if (!index)
+ s->use_ms = of_property_read_bool(np, "uart-use-ms");
}
+
+ s->port.line = index;
+ s->port.dev = &pdev->dev;
+ s->port.iotype = UPIO_MEM32;
+ s->port.mapbase = res->start;
+ s->port.type = PORT_CLPS711X;
+ s->port.fifosize = 16;
+ s->port.flags = UPF_SKIP_TEST | UPF_FIXED_TYPE;
+ s->port.uartclk = clk_get_rate(uart_clk);
+ s->port.ops = &uart_clps711x_ops;
+
platform_set_drvdata(pdev, s);
- s->uart_clk = devm_clk_get(&pdev->dev, "uart");
- if (IS_ERR(s->uart_clk)) {
- dev_err(&pdev->dev, "Can't get UART clocks\n");
- return PTR_ERR(s->uart_clk);
- }
+ ret = uart_add_one_port(&clps711x_uart, &s->port);
+ if (ret)
+ return ret;
- s->uart.owner = THIS_MODULE;
- s->uart.dev_name = "ttyCL";
- s->uart.major = UART_CLPS711X_MAJOR;
- s->uart.minor = UART_CLPS711X_MINOR;
- s->uart.nr = UART_CLPS711X_NR;
-#ifdef CONFIG_SERIAL_CLPS711X_CONSOLE
- s->uart.cons = &s->console;
- s->uart.cons->device = uart_console_device;
- s->uart.cons->write = uart_clps711x_console_write;
- s->uart.cons->setup = uart_clps711x_console_setup;
- s->uart.cons->flags = CON_PRINTBUFFER;
- s->uart.cons->index = -1;
- s->uart.cons->data = s;
- strcpy(s->uart.cons->name, "ttyCL");
-#endif
- ret = uart_register_driver(&s->uart);
+ /* Disable port */
+ if (!uart_console(&s->port))
+ regmap_update_bits(s->syscon, SYSCON_OFFSET, SYSCON_UARTEN, 0);
+
+ s->tx_enabled = 1;
+
+ ret = devm_request_irq(&pdev->dev, s->port.irq, uart_clps711x_int_tx, 0,
+ dev_name(&pdev->dev), &s->port);
if (ret) {
- dev_err(&pdev->dev, "Registering UART driver failed\n");
+ uart_remove_one_port(&clps711x_uart, &s->port);
return ret;
}
- for (i = 0; i < UART_CLPS711X_NR; i++) {
- s->port[i].line = i;
- s->port[i].dev = &pdev->dev;
- s->port[i].irq = TX_IRQ(&s->port[i]);
- s->port[i].iobase = SYSCON(&s->port[i]);
- s->port[i].type = PORT_CLPS711X;
- s->port[i].fifosize = 16;
- s->port[i].flags = UPF_SKIP_TEST | UPF_FIXED_TYPE;
- s->port[i].uartclk = clk_get_rate(s->uart_clk);
- s->port[i].ops = &uart_clps711x_ops;
- WARN_ON(uart_add_one_port(&s->uart, &s->port[i]));
- }
+ ret = devm_request_irq(&pdev->dev, s->rx_irq, uart_clps711x_int_rx, 0,
+ dev_name(&pdev->dev), &s->port);
+ if (ret)
+ uart_remove_one_port(&clps711x_uart, &s->port);
- return 0;
+ return ret;
}
static int uart_clps711x_remove(struct platform_device *pdev)
{
struct clps711x_port *s = platform_get_drvdata(pdev);
- int i;
- for (i = 0; i < UART_CLPS711X_NR; i++)
- uart_remove_one_port(&s->uart, &s->port[i]);
-
- uart_unregister_driver(&s->uart);
-
- return 0;
+ return uart_remove_one_port(&clps711x_uart, &s->port);
}
-static struct platform_driver clps711x_uart_driver = {
+static const struct of_device_id __maybe_unused clps711x_uart_dt_ids[] = {
+ { .compatible = "cirrus,clps711x-uart", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, clps711x_uart_dt_ids);
+
+static struct platform_driver clps711x_uart_platform = {
.driver = {
- .name = UART_CLPS711X_NAME,
- .owner = THIS_MODULE,
+ .name = "clps711x-uart",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(clps711x_uart_dt_ids),
},
.probe = uart_clps711x_probe,
.remove = uart_clps711x_remove,
};
-module_platform_driver(clps711x_uart_driver);
-
-static struct platform_device clps711x_uart_device = {
- .name = UART_CLPS711X_NAME,
-};
static int __init uart_clps711x_init(void)
{
- return platform_device_register(&clps711x_uart_device);
+ int ret;
+
+#ifdef CONFIG_SERIAL_CLPS711X_CONSOLE
+ clps711x_uart.cons = &clps711x_console;
+ clps711x_console.data = &clps711x_uart;
+#endif
+
+ ret = uart_register_driver(&clps711x_uart);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&clps711x_uart_platform);
}
module_init(uart_clps711x_init);
static void __exit uart_clps711x_exit(void)
{
- platform_device_unregister(&clps711x_uart_device);
+ platform_driver_unregister(&clps711x_uart_platform);
+ uart_unregister_driver(&clps711x_uart);
}
module_exit(uart_clps711x_exit);
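The clps711x rewrite above replaces the clps_readl()/clps_writel() helpers with per-port MMIO (readw/writew on port->membase) for the UART registers and a shared syscon regmap for the SYSCON/SYSFLG words. A minimal sketch of the regmap side of that pattern; the register and bit names are the ones the driver pulls from <linux/mfd/syscon/clps711x.h>, and foo_uart_enable() is a made-up helper:

    #include <linux/err.h>
    #include <linux/mfd/syscon.h>
    #include <linux/mfd/syscon/clps711x.h>
    #include <linux/of.h>
    #include <linux/regmap.h>

    static int foo_uart_enable(struct device_node *np)
    {
        struct regmap *syscon;
        u32 sysflg = 0;

        /* The DT node points at the system controller via a "syscon" phandle. */
        syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
        if (IS_ERR(syscon))
            return PTR_ERR(syscon);

        /* Status reads go through the shared regmap ... */
        regmap_read(syscon, SYSFLG_OFFSET, &sysflg);

        /* ... and bit updates avoid open-coded read/modify/write cycles. */
        return regmap_update_bits(syscon, SYSCON_OFFSET,
                                  SYSCON_UARTEN, SYSCON_UARTEN);
    }

One practical effect is that accesses are serialised by the regmap's internal locking, something the raw clps_writel() path left entirely to its callers.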
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
index 527a969..6d3b22e 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
@@ -29,7 +29,6 @@
#include <linux/tty.h>
#include <linux/gfp.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/serial.h>
#include <linux/console.h>
#include <linux/sysrq.h>
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
index a4927e6..f46d2ca 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
@@ -29,7 +29,6 @@
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/serial.h>
#include <linux/console.h>
#include <linux/sysrq.h>
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index b2cfdb6..d799140 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -806,6 +806,9 @@ static unsigned int imx_get_mctrl(struct uart_port *port)
if (readl(sport->port.membase + UCR2) & UCR2_CTS)
tmp |= TIOCM_RTS;
+ if (readl(sport->port.membase + uts_reg(sport)) & UTS_LOOP)
+ tmp |= TIOCM_LOOP;
+
return tmp;
}
@@ -821,6 +824,11 @@ static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
temp |= UCR2_CTS;
writel(temp, sport->port.membase + UCR2);
+
+ temp = readl(sport->port.membase + uts_reg(sport)) & ~UTS_LOOP;
+ if (mctrl & TIOCM_LOOP)
+ temp |= UTS_LOOP;
+ writel(temp, sport->port.membase + uts_reg(sport));
}
/*
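With the imx hunks above, TIOCM_LOOP in set_mctrl()/get_mctrl() maps onto the UTS_LOOP bit, so internal loopback can be toggled from userspace through the standard modem-control ioctls. A hypothetical userspace check follows; the device node name and the fallback define are assumptions, not part of this patch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    #ifndef TIOCM_LOOP
    #define TIOCM_LOOP 0x8000           /* value from asm-generic/termios.h */
    #endif

    int main(void)
    {
        int flag = TIOCM_LOOP;
        int fd = open("/dev/ttymxc0", O_RDWR | O_NOCTTY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (ioctl(fd, TIOCMBIS, &flag))     /* sets UTS_LOOP via imx_set_mctrl() */
            perror("TIOCMBIS");
        if (ioctl(fd, TIOCMBIC, &flag))     /* clears it again */
            perror("TIOCMBIC");
        close(fd);
        return 0;
    }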
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index 5dafcf1..5f673b7 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/compiler.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/atomic.h>
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index d8b6fee..aa97fd8 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -734,9 +734,12 @@ static void mxs_auart_reset(struct uart_port *u)
static int mxs_auart_startup(struct uart_port *u)
{
+ int ret;
struct mxs_auart_port *s = to_auart_port(u);
- clk_prepare_enable(s->clk);
+ ret = clk_prepare_enable(s->clk);
+ if (ret)
+ return ret;
writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
@@ -957,7 +960,9 @@ auart_console_setup(struct console *co, char *options)
if (!s)
return -ENODEV;
- clk_prepare_enable(s->clk);
+ ret = clk_prepare_enable(s->clk);
+ if (ret)
+ return ret;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 2caf9c6..9924660 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -9,7 +9,6 @@
* 2 of the License, or (at your option) any later version.
*
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 0aa2b52..9cbd3ac 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1853,7 +1853,6 @@ static void pch_uart_exit_port(struct eg20t_port *priv)
debugfs_remove(priv->debugfs);
#endif
uart_remove_one_port(&pch_uart_driver, &priv->port);
- pci_set_drvdata(priv->pdev, NULL);
free_page((unsigned long)priv->rxbuf.buf);
}
@@ -1907,7 +1906,7 @@ static int pch_uart_pci_resume(struct pci_dev *pdev)
#define pch_uart_pci_resume NULL
#endif
-static DEFINE_PCI_DEVICE_TABLE(pch_uart_pci_id) = {
+static const struct pci_device_id pch_uart_pci_id[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8811),
.driver_data = pch_et20t_uart0},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8812),
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index 328d6de..056f91b 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -810,7 +810,7 @@ static void rp2_remove(struct pci_dev *pdev)
rp2_remove_ports(card);
}
-static DEFINE_PCI_DEVICE_TABLE(rp2_pci_tbl) = {
+static const struct pci_device_id rp2_pci_tbl[] = {
/* RocketPort INFINITY cards */
diff --git a/drivers/tty/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
deleted file mode 100644
index 887b4f7..0000000
--- a/drivers/tty/serial/sc26xx.c
+++ /dev/null
@@ -1,749 +0,0 @@
-/*
- * SC268xx.c: Serial driver for Philiphs SC2681/SC2692 devices.
- *
- * Copyright (C) 2006,2007 Thomas Bogendörfer (tsbogend@alpha.franken.de)
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/major.h>
-#include <linux/circ_buf.h>
-#include <linux/serial.h>
-#include <linux/sysrq.h>
-#include <linux/console.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#warning "Please try migrate to use new driver SCCNXP and report the status" \
- "in the linux-serial mailing list."
-
-#if defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
-#include <linux/serial_core.h>
-
-#define SC26XX_MAJOR 204
-#define SC26XX_MINOR_START 205
-#define SC26XX_NR 2
-
-struct uart_sc26xx_port {
- struct uart_port port[2];
- u8 dsr_mask[2];
- u8 cts_mask[2];
- u8 dcd_mask[2];
- u8 ri_mask[2];
- u8 dtr_mask[2];
- u8 rts_mask[2];
- u8 imr;
-};
-
-/* register common to both ports */
-#define RD_ISR 0x14
-#define RD_IPR 0x34
-
-#define WR_ACR 0x10
-#define WR_IMR 0x14
-#define WR_OPCR 0x34
-#define WR_OPR_SET 0x38
-#define WR_OPR_CLR 0x3C
-
-/* access common register */
-#define READ_SC(p, r) readb((p)->membase + RD_##r)
-#define WRITE_SC(p, r, v) writeb((v), (p)->membase + WR_##r)
-
-/* register per port */
-#define RD_PORT_MRx 0x00
-#define RD_PORT_SR 0x04
-#define RD_PORT_RHR 0x0c
-
-#define WR_PORT_MRx 0x00
-#define WR_PORT_CSR 0x04
-#define WR_PORT_CR 0x08
-#define WR_PORT_THR 0x0c
-
-/* SR bits */
-#define SR_BREAK (1 << 7)
-#define SR_FRAME (1 << 6)
-#define SR_PARITY (1 << 5)
-#define SR_OVERRUN (1 << 4)
-#define SR_TXRDY (1 << 2)
-#define SR_RXRDY (1 << 0)
-
-#define CR_RES_MR (1 << 4)
-#define CR_RES_RX (2 << 4)
-#define CR_RES_TX (3 << 4)
-#define CR_STRT_BRK (6 << 4)
-#define CR_STOP_BRK (7 << 4)
-#define CR_DIS_TX (1 << 3)
-#define CR_ENA_TX (1 << 2)
-#define CR_DIS_RX (1 << 1)
-#define CR_ENA_RX (1 << 0)
-
-/* ISR bits */
-#define ISR_RXRDYB (1 << 5)
-#define ISR_TXRDYB (1 << 4)
-#define ISR_RXRDYA (1 << 1)
-#define ISR_TXRDYA (1 << 0)
-
-/* IMR bits */
-#define IMR_RXRDY (1 << 1)
-#define IMR_TXRDY (1 << 0)
-
-/* access port register */
-static inline u8 read_sc_port(struct uart_port *p, u8 reg)
-{
- return readb(p->membase + p->line * 0x20 + reg);
-}
-
-static inline void write_sc_port(struct uart_port *p, u8 reg, u8 val)
-{
- writeb(val, p->membase + p->line * 0x20 + reg);
-}
-
-#define READ_SC_PORT(p, r) read_sc_port(p, RD_PORT_##r)
-#define WRITE_SC_PORT(p, r, v) write_sc_port(p, WR_PORT_##r, v)
-
-static void sc26xx_enable_irq(struct uart_port *port, int mask)
-{
- struct uart_sc26xx_port *up;
- int line = port->line;
-
- port -= line;
- up = container_of(port, struct uart_sc26xx_port, port[0]);
-
- up->imr |= mask << (line * 4);
- WRITE_SC(port, IMR, up->imr);
-}
-
-static void sc26xx_disable_irq(struct uart_port *port, int mask)
-{
- struct uart_sc26xx_port *up;
- int line = port->line;
-
- port -= line;
- up = container_of(port, struct uart_sc26xx_port, port[0]);
-
- up->imr &= ~(mask << (line * 4));
- WRITE_SC(port, IMR, up->imr);
-}
-
-static bool receive_chars(struct uart_port *port)
-{
- struct tty_port *tport = NULL;
- int limit = 10000;
- unsigned char ch;
- char flag;
- u8 status;
-
- /* FIXME what is this trying to achieve? */
- if (port->state != NULL) /* Unopened serial console */
- tport = &port->state->port;
-
- while (limit-- > 0) {
- status = READ_SC_PORT(port, SR);
- if (!(status & SR_RXRDY))
- break;
- ch = READ_SC_PORT(port, RHR);
-
- flag = TTY_NORMAL;
- port->icount.rx++;
-
- if (unlikely(status & (SR_BREAK | SR_FRAME |
- SR_PARITY | SR_OVERRUN))) {
- if (status & SR_BREAK) {
- status &= ~(SR_PARITY | SR_FRAME);
- port->icount.brk++;
- if (uart_handle_break(port))
- continue;
- } else if (status & SR_PARITY)
- port->icount.parity++;
- else if (status & SR_FRAME)
- port->icount.frame++;
- if (status & SR_OVERRUN)
- port->icount.overrun++;
-
- status &= port->read_status_mask;
- if (status & SR_BREAK)
- flag = TTY_BREAK;
- else if (status & SR_PARITY)
- flag = TTY_PARITY;
- else if (status & SR_FRAME)
- flag = TTY_FRAME;
- }
-
- if (uart_handle_sysrq_char(port, ch))
- continue;
-
- if (status & port->ignore_status_mask)
- continue;
-
- tty_insert_flip_char(tport, ch, flag);
- }
- return !!tport;
-}
-
-static void transmit_chars(struct uart_port *port)
-{
- struct circ_buf *xmit;
-
- if (!port->state)
- return;
-
- xmit = &port->state->xmit;
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- sc26xx_disable_irq(port, IMR_TXRDY);
- return;
- }
- while (!uart_circ_empty(xmit)) {
- if (!(READ_SC_PORT(port, SR) & SR_TXRDY))
- break;
-
- WRITE_SC_PORT(port, THR, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- }
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-}
-
-static irqreturn_t sc26xx_interrupt(int irq, void *dev_id)
-{
- struct uart_sc26xx_port *up = dev_id;
- unsigned long flags;
- bool push;
- u8 isr;
-
- spin_lock_irqsave(&up->port[0].lock, flags);
-
- push = false;
- isr = READ_SC(&up->port[0], ISR);
- if (isr & ISR_TXRDYA)
- transmit_chars(&up->port[0]);
- if (isr & ISR_RXRDYA)
- push = receive_chars(&up->port[0]);
-
- spin_unlock(&up->port[0].lock);
-
- if (push)
- tty_flip_buffer_push(&up->port[0].state->port);
-
- spin_lock(&up->port[1].lock);
-
- push = false;
- if (isr & ISR_TXRDYB)
- transmit_chars(&up->port[1]);
- if (isr & ISR_RXRDYB)
- push = receive_chars(&up->port[1]);
-
- spin_unlock_irqrestore(&up->port[1].lock, flags);
-
- if (push)
- tty_flip_buffer_push(&up->port[1].state->port);
-
- return IRQ_HANDLED;
-}
-
-/* port->lock is not held. */
-static unsigned int sc26xx_tx_empty(struct uart_port *port)
-{
- return (READ_SC_PORT(port, SR) & SR_TXRDY) ? TIOCSER_TEMT : 0;
-}
-
-/* port->lock held by caller. */
-static void sc26xx_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- struct uart_sc26xx_port *up;
- int line = port->line;
-
- port -= line;
- up = container_of(port, struct uart_sc26xx_port, port[0]);
-
- if (up->dtr_mask[line]) {
- if (mctrl & TIOCM_DTR)
- WRITE_SC(port, OPR_SET, up->dtr_mask[line]);
- else
- WRITE_SC(port, OPR_CLR, up->dtr_mask[line]);
- }
- if (up->rts_mask[line]) {
- if (mctrl & TIOCM_RTS)
- WRITE_SC(port, OPR_SET, up->rts_mask[line]);
- else
- WRITE_SC(port, OPR_CLR, up->rts_mask[line]);
- }
-}
-
-/* port->lock is held by caller and interrupts are disabled. */
-static unsigned int sc26xx_get_mctrl(struct uart_port *port)
-{
- struct uart_sc26xx_port *up;
- int line = port->line;
- unsigned int mctrl = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR;
- u8 ipr;
-
- port -= line;
- up = container_of(port, struct uart_sc26xx_port, port[0]);
- ipr = READ_SC(port, IPR) ^ 0xff;
-
- if (up->dsr_mask[line]) {
- mctrl &= ~TIOCM_DSR;
- mctrl |= ipr & up->dsr_mask[line] ? TIOCM_DSR : 0;
- }
- if (up->cts_mask[line]) {
- mctrl &= ~TIOCM_CTS;
- mctrl |= ipr & up->cts_mask[line] ? TIOCM_CTS : 0;
- }
- if (up->dcd_mask[line]) {
- mctrl &= ~TIOCM_CAR;
- mctrl |= ipr & up->dcd_mask[line] ? TIOCM_CAR : 0;
- }
- if (up->ri_mask[line]) {
- mctrl &= ~TIOCM_RNG;
- mctrl |= ipr & up->ri_mask[line] ? TIOCM_RNG : 0;
- }
- return mctrl;
-}
-
-/* port->lock held by caller. */
-static void sc26xx_stop_tx(struct uart_port *port)
-{
- return;
-}
-
-/* port->lock held by caller. */
-static void sc26xx_start_tx(struct uart_port *port)
-{
- struct circ_buf *xmit = &port->state->xmit;
-
- while (!uart_circ_empty(xmit)) {
- if (!(READ_SC_PORT(port, SR) & SR_TXRDY)) {
- sc26xx_enable_irq(port, IMR_TXRDY);
- break;
- }
- WRITE_SC_PORT(port, THR, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- }
-}
-
-/* port->lock held by caller. */
-static void sc26xx_stop_rx(struct uart_port *port)
-{
-}
-
-/* port->lock held by caller. */
-static void sc26xx_enable_ms(struct uart_port *port)
-{
-}
-
-/* port->lock is not held. */
-static void sc26xx_break_ctl(struct uart_port *port, int break_state)
-{
- if (break_state == -1)
- WRITE_SC_PORT(port, CR, CR_STRT_BRK);
- else
- WRITE_SC_PORT(port, CR, CR_STOP_BRK);
-}
-
-/* port->lock is not held. */
-static int sc26xx_startup(struct uart_port *port)
-{
- sc26xx_disable_irq(port, IMR_TXRDY | IMR_RXRDY);
- WRITE_SC(port, OPCR, 0);
-
- /* reset tx and rx */
- WRITE_SC_PORT(port, CR, CR_RES_RX);
- WRITE_SC_PORT(port, CR, CR_RES_TX);
-
- /* start rx/tx */
- WRITE_SC_PORT(port, CR, CR_ENA_TX | CR_ENA_RX);
-
- /* enable irqs */
- sc26xx_enable_irq(port, IMR_RXRDY);
- return 0;
-}
-
-/* port->lock is not held. */
-static void sc26xx_shutdown(struct uart_port *port)
-{
- /* disable interrupst */
- sc26xx_disable_irq(port, IMR_TXRDY | IMR_RXRDY);
-
- /* stop tx/rx */
- WRITE_SC_PORT(port, CR, CR_DIS_TX | CR_DIS_RX);
-}
-
-/* port->lock is not held. */
-static void sc26xx_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
-{
- unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
- unsigned int quot = uart_get_divisor(port, baud);
- unsigned int iflag, cflag;
- unsigned long flags;
- u8 mr1, mr2, csr;
-
- spin_lock_irqsave(&port->lock, flags);
-
- while ((READ_SC_PORT(port, SR) & ((1 << 3) | (1 << 2))) != 0xc)
- udelay(2);
-
- WRITE_SC_PORT(port, CR, CR_DIS_TX | CR_DIS_RX);
-
- iflag = termios->c_iflag;
- cflag = termios->c_cflag;
-
- port->read_status_mask = SR_OVERRUN;
- if (iflag & INPCK)
- port->read_status_mask |= SR_PARITY | SR_FRAME;
- if (iflag & (BRKINT | PARMRK))
- port->read_status_mask |= SR_BREAK;
-
- port->ignore_status_mask = 0;
- if (iflag & IGNBRK)
- port->ignore_status_mask |= SR_BREAK;
- if ((cflag & CREAD) == 0)
- port->ignore_status_mask |= SR_BREAK | SR_FRAME |
- SR_PARITY | SR_OVERRUN;
-
- switch (cflag & CSIZE) {
- case CS5:
- mr1 = 0x00;
- break;
- case CS6:
- mr1 = 0x01;
- break;
- case CS7:
- mr1 = 0x02;
- break;
- default:
- case CS8:
- mr1 = 0x03;
- break;
- }
- mr2 = 0x07;
- if (cflag & CSTOPB)
- mr2 = 0x0f;
- if (cflag & PARENB) {
- if (cflag & PARODD)
- mr1 |= (1 << 2);
- } else
- mr1 |= (2 << 3);
-
- switch (baud) {
- case 50:
- csr = 0x00;
- break;
- case 110:
- csr = 0x11;
- break;
- case 134:
- csr = 0x22;
- break;
- case 200:
- csr = 0x33;
- break;
- case 300:
- csr = 0x44;
- break;
- case 600:
- csr = 0x55;
- break;
- case 1200:
- csr = 0x66;
- break;
- case 2400:
- csr = 0x88;
- break;
- case 4800:
- csr = 0x99;
- break;
- default:
- case 9600:
- csr = 0xbb;
- break;
- case 19200:
- csr = 0xcc;
- break;
- }
-
- WRITE_SC_PORT(port, CR, CR_RES_MR);
- WRITE_SC_PORT(port, MRx, mr1);
- WRITE_SC_PORT(port, MRx, mr2);
-
- WRITE_SC(port, ACR, 0x80);
- WRITE_SC_PORT(port, CSR, csr);
-
- /* reset tx and rx */
- WRITE_SC_PORT(port, CR, CR_RES_RX);
- WRITE_SC_PORT(port, CR, CR_RES_TX);
-
- WRITE_SC_PORT(port, CR, CR_ENA_TX | CR_ENA_RX);
- while ((READ_SC_PORT(port, SR) & ((1 << 3) | (1 << 2))) != 0xc)
- udelay(2);
-
- /* XXX */
- uart_update_timeout(port, cflag,
- (port->uartclk / (16 * quot)));
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static const char *sc26xx_type(struct uart_port *port)
-{
- return "SC26XX";
-}
-
-static void sc26xx_release_port(struct uart_port *port)
-{
-}
-
-static int sc26xx_request_port(struct uart_port *port)
-{
- return 0;
-}
-
-static void sc26xx_config_port(struct uart_port *port, int flags)
-{
-}
-
-static int sc26xx_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
- return -EINVAL;
-}
-
-static struct uart_ops sc26xx_ops = {
- .tx_empty = sc26xx_tx_empty,
- .set_mctrl = sc26xx_set_mctrl,
- .get_mctrl = sc26xx_get_mctrl,
- .stop_tx = sc26xx_stop_tx,
- .start_tx = sc26xx_start_tx,
- .stop_rx = sc26xx_stop_rx,
- .enable_ms = sc26xx_enable_ms,
- .break_ctl = sc26xx_break_ctl,
- .startup = sc26xx_startup,
- .shutdown = sc26xx_shutdown,
- .set_termios = sc26xx_set_termios,
- .type = sc26xx_type,
- .release_port = sc26xx_release_port,
- .request_port = sc26xx_request_port,
- .config_port = sc26xx_config_port,
- .verify_port = sc26xx_verify_port,
-};
-
-static struct uart_port *sc26xx_port;
-
-#ifdef CONFIG_SERIAL_SC26XX_CONSOLE
-static void sc26xx_console_putchar(struct uart_port *port, char c)
-{
- unsigned long flags;
- int limit = 1000000;
-
- spin_lock_irqsave(&port->lock, flags);
-
- while (limit-- > 0) {
- if (READ_SC_PORT(port, SR) & SR_TXRDY) {
- WRITE_SC_PORT(port, THR, c);
- break;
- }
- udelay(2);
- }
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void sc26xx_console_write(struct console *con, const char *s, unsigned n)
-{
- struct uart_port *port = sc26xx_port;
- int i;
-
- for (i = 0; i < n; i++) {
- if (*s == '\n')
- sc26xx_console_putchar(port, '\r');
- sc26xx_console_putchar(port, *s++);
- }
-}
-
-static int __init sc26xx_console_setup(struct console *con, char *options)
-{
- struct uart_port *port = sc26xx_port;
- int baud = 9600;
- int bits = 8;
- int parity = 'n';
- int flow = 'n';
-
- if (port->type != PORT_SC26XX)
- return -1;
-
- printk(KERN_INFO "Console: ttySC%d (SC26XX)\n", con->index);
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
-
- return uart_set_options(port, con, baud, parity, bits, flow);
-}
-
-static struct uart_driver sc26xx_reg;
-static struct console sc26xx_console = {
- .name = "ttySC",
- .write = sc26xx_console_write,
- .device = uart_console_device,
- .setup = sc26xx_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &sc26xx_reg,
-};
-#define SC26XX_CONSOLE &sc26xx_console
-#else
-#define SC26XX_CONSOLE NULL
-#endif
-
-static struct uart_driver sc26xx_reg = {
- .owner = THIS_MODULE,
- .driver_name = "SC26xx",
- .dev_name = "ttySC",
- .major = SC26XX_MAJOR,
- .minor = SC26XX_MINOR_START,
- .nr = SC26XX_NR,
- .cons = SC26XX_CONSOLE,
-};
-
-static u8 sc26xx_flags2mask(unsigned int flags, unsigned int bitpos)
-{
- unsigned int bit = (flags >> bitpos) & 15;
-
- return bit ? (1 << (bit - 1)) : 0;
-}
-
-static void sc26xx_init_masks(struct uart_sc26xx_port *up,
- int line, unsigned int data)
-{
- up->dtr_mask[line] = sc26xx_flags2mask(data, 0);
- up->rts_mask[line] = sc26xx_flags2mask(data, 4);
- up->dsr_mask[line] = sc26xx_flags2mask(data, 8);
- up->cts_mask[line] = sc26xx_flags2mask(data, 12);
- up->dcd_mask[line] = sc26xx_flags2mask(data, 16);
- up->ri_mask[line] = sc26xx_flags2mask(data, 20);
-}
-
-static int sc26xx_probe(struct platform_device *dev)
-{
- struct resource *res;
- struct uart_sc26xx_port *up;
- unsigned int *sc26xx_data = dev_get_platdata(&dev->dev);
- int err;
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- up = kzalloc(sizeof *up, GFP_KERNEL);
- if (unlikely(!up))
- return -ENOMEM;
-
- up->port[0].line = 0;
- up->port[0].ops = &sc26xx_ops;
- up->port[0].type = PORT_SC26XX;
- up->port[0].uartclk = (29491200 / 16); /* arbitrary */
-
- up->port[0].mapbase = res->start;
- up->port[0].membase = ioremap_nocache(up->port[0].mapbase, 0x40);
- up->port[0].iotype = UPIO_MEM;
- up->port[0].irq = platform_get_irq(dev, 0);
-
- up->port[0].dev = &dev->dev;
-
- sc26xx_init_masks(up, 0, sc26xx_data[0]);
-
- sc26xx_port = &up->port[0];
-
- up->port[1].line = 1;
- up->port[1].ops = &sc26xx_ops;
- up->port[1].type = PORT_SC26XX;
- up->port[1].uartclk = (29491200 / 16); /* arbitrary */
-
- up->port[1].mapbase = up->port[0].mapbase;
- up->port[1].membase = up->port[0].membase;
- up->port[1].iotype = UPIO_MEM;
- up->port[1].irq = up->port[0].irq;
-
- up->port[1].dev = &dev->dev;
-
- sc26xx_init_masks(up, 1, sc26xx_data[1]);
-
- err = uart_register_driver(&sc26xx_reg);
- if (err)
- goto out_free_port;
-
- sc26xx_reg.tty_driver->name_base = sc26xx_reg.minor;
-
- err = uart_add_one_port(&sc26xx_reg, &up->port[0]);
- if (err)
- goto out_unregister_driver;
-
- err = uart_add_one_port(&sc26xx_reg, &up->port[1]);
- if (err)
- goto out_remove_port0;
-
- err = request_irq(up->port[0].irq, sc26xx_interrupt, 0, "sc26xx", up);
- if (err)
- goto out_remove_ports;
-
- platform_set_drvdata(dev, up);
- return 0;
-
-out_remove_ports:
- uart_remove_one_port(&sc26xx_reg, &up->port[1]);
-out_remove_port0:
- uart_remove_one_port(&sc26xx_reg, &up->port[0]);
-
-out_unregister_driver:
- uart_unregister_driver(&sc26xx_reg);
-
-out_free_port:
- kfree(up);
- sc26xx_port = NULL;
- return err;
-}
-
-
-static int __exit sc26xx_driver_remove(struct platform_device *dev)
-{
- struct uart_sc26xx_port *up = platform_get_drvdata(dev);
-
- free_irq(up->port[0].irq, up);
-
- uart_remove_one_port(&sc26xx_reg, &up->port[0]);
- uart_remove_one_port(&sc26xx_reg, &up->port[1]);
-
- uart_unregister_driver(&sc26xx_reg);
-
- kfree(up);
- sc26xx_port = NULL;
-
- return 0;
-}
-
-static struct platform_driver sc26xx_driver = {
- .probe = sc26xx_probe,
- .remove = sc26xx_driver_remove,
- .driver = {
- .name = "SC26xx",
- .owner = THIS_MODULE,
- },
-};
-
-module_platform_driver(sc26xx_driver);
-
-MODULE_AUTHOR("Thomas Bogendörfer");
-MODULE_DESCRIPTION("SC681/SC2692 serial driver");
-MODULE_VERSION("1.0");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:SC26xx");
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 0f02351..ece2049 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1830,9 +1830,13 @@ uart_set_options(struct uart_port *port, struct console *co,
/*
* Ensure that the serial console lock is initialised
* early.
+ * If this port is a console, then the spinlock is already
+ * initialised.
*/
- spin_lock_init(&port->lock);
- lockdep_set_class(&port->lock, &port_lock_key);
+ if (!(uart_console(port) && (port->cons->flags & CON_ENABLED))) {
+ spin_lock_init(&port->lock);
+ lockdep_set_class(&port->lock, &port_lock_key);
+ }
memset(&termios, 0, sizeof(struct ktermios));
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index f186a8f..49a2ffd 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -524,9 +524,11 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
unsigned int count;
unsigned long flags;
+ struct dma_tx_state tx_state;
spin_lock_irqsave(&sirfport->rx_lock, flags);
- while (sirfport->rx_completed != sirfport->rx_issued) {
+ while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
+ sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
SIRFSOC_RX_DMA_BUF_SIZE);
sirfport->rx_completed++;
@@ -709,8 +711,10 @@ static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
unsigned long flags;
+ struct dma_tx_state tx_state;
spin_lock_irqsave(&sirfport->rx_lock, flags);
- while (sirfport->rx_completed != sirfport->rx_issued) {
+ while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
+ sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
SIRFSOC_RX_DMA_BUF_SIZE);
if (rd_regl(port, ureg->sirfsoc_int_en_reg) &
@@ -1033,6 +1037,16 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
spin_unlock_irqrestore(&port->lock, flags);
}
+static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ if (!state)
+ clk_prepare_enable(sirfport->clk);
+ else
+ clk_disable_unprepare(sirfport->clk);
+}
+
static unsigned int sirfsoc_uart_init_tx_dma(struct uart_port *port)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
@@ -1264,6 +1278,7 @@ static struct uart_ops sirfsoc_uart_ops = {
.startup = sirfsoc_uart_startup,
.shutdown = sirfsoc_uart_shutdown,
.set_termios = sirfsoc_uart_set_termios,
+ .pm = sirfsoc_uart_pm,
.type = sirfsoc_uart_type,
.release_port = sirfsoc_uart_release_port,
.request_port = sirfsoc_uart_request_port,
@@ -1486,7 +1501,6 @@ usp_no_flow_control:
ret = PTR_ERR(sirfport->clk);
goto err;
}
- clk_prepare_enable(sirfport->clk);
port->uartclk = clk_get_rate(sirfport->clk);
port->ops = &sirfsoc_uart_ops;
@@ -1502,7 +1516,6 @@ usp_no_flow_control:
return 0;
port_err:
- clk_disable_unprepare(sirfport->clk);
clk_put(sirfport->clk);
err:
return ret;
@@ -1512,38 +1525,42 @@ static int sirfsoc_uart_remove(struct platform_device *pdev)
{
struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
struct uart_port *port = &sirfport->port;
- clk_disable_unprepare(sirfport->clk);
clk_put(sirfport->clk);
uart_remove_one_port(&sirfsoc_uart_drv, port);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int
-sirfsoc_uart_suspend(struct platform_device *pdev, pm_message_t state)
+sirfsoc_uart_suspend(struct device *pdev)
{
- struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+ struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
struct uart_port *port = &sirfport->port;
uart_suspend_port(&sirfsoc_uart_drv, port);
return 0;
}
-static int sirfsoc_uart_resume(struct platform_device *pdev)
+static int sirfsoc_uart_resume(struct device *pdev)
{
- struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+ struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
struct uart_port *port = &sirfport->port;
uart_resume_port(&sirfsoc_uart_drv, port);
return 0;
}
+#endif
+
+static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
+};
static struct platform_driver sirfsoc_uart_driver = {
.probe = sirfsoc_uart_probe,
.remove = sirfsoc_uart_remove,
- .suspend = sirfsoc_uart_suspend,
- .resume = sirfsoc_uart_resume,
.driver = {
.name = SIRFUART_PORT_NAME,
.owner = THIS_MODULE,
.of_match_table = sirfsoc_uart_ids,
+ .pm = &sirfsoc_uart_pm_ops,
},
};
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index e1ce141..5ae14b4 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -3404,8 +3404,8 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
/* If port is closing, signal caller to try again */
if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
- if (info->port.flags & ASYNC_CLOSING)
- interruptible_sleep_on(&info->port.close_wait);
+ wait_event_interruptible_tty(tty, info->port.close_wait,
+ !(info->port.flags & ASYNC_CLOSING));
retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS);
goto cleanup;
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 1abf946..c359a91 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -674,8 +674,8 @@ static int open(struct tty_struct *tty, struct file *filp)
/* If port is closing, signal caller to try again */
if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
- if (info->port.flags & ASYNC_CLOSING)
- interruptible_sleep_on(&info->port.close_wait);
+ wait_event_interruptible_tty(tty, info->port.close_wait,
+ !(info->port.flags & ASYNC_CLOSING));
retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS);
goto cleanup;
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index dc6e969..144202e 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -754,8 +754,8 @@ static int open(struct tty_struct *tty, struct file *filp)
/* If port is closing, signal caller to try again */
if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
- if (info->port.flags & ASYNC_CLOSING)
- interruptible_sleep_on(&info->port.close_wait);
+ wait_event_interruptible_tty(tty, info->port.close_wait,
+ !(info->port.flags & ASYNC_CLOSING));
retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS);
goto cleanup;
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index c043136f..765125d 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -11,7 +11,6 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
@@ -26,7 +25,7 @@
* Byte threshold to limit memory consumption for flip buffers.
* The actual memory limit is > 2x this amount.
*/
-#define TTYB_MEM_LIMIT 65536
+#define TTYB_DEFAULT_MEM_LIMIT 65536
/*
* We default to dicing tty buffer allocations to this many characters
@@ -89,9 +88,10 @@ void tty_buffer_unlock_exclusive(struct tty_port *port)
int tty_buffer_space_avail(struct tty_port *port)
{
- int space = TTYB_MEM_LIMIT - atomic_read(&port->buf.memory_used);
+ int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used);
return max(space, 0);
}
+EXPORT_SYMBOL_GPL(tty_buffer_space_avail);
static void tty_buffer_reset(struct tty_buffer *p, size_t size)
{
@@ -100,6 +100,7 @@ static void tty_buffer_reset(struct tty_buffer *p, size_t size)
p->next = NULL;
p->commit = 0;
p->read = 0;
+ p->flags = 0;
}
/**
@@ -129,7 +130,7 @@ void tty_buffer_free_all(struct tty_port *port)
buf->head = &buf->sentinel;
buf->tail = &buf->sentinel;
- atomic_set(&buf->memory_used, 0);
+ atomic_set(&buf->mem_used, 0);
}
/**
@@ -162,7 +163,7 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
/* Should possibly check if this fails for the largest buffer we
have queued and recycle that ? */
- if (atomic_read(&port->buf.memory_used) > TTYB_MEM_LIMIT)
+ if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
return NULL;
p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
if (p == NULL)
@@ -170,7 +171,7 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
found:
tty_buffer_reset(p, size);
- atomic_add(size, &port->buf.memory_used);
+ atomic_add(size, &port->buf.mem_used);
return p;
}
@@ -188,7 +189,7 @@ static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
struct tty_bufhead *buf = &port->buf;
/* Dumb strategy for now - should keep some stats */
- WARN_ON(atomic_sub_return(b->size, &buf->memory_used) < 0);
+ WARN_ON(atomic_sub_return(b->size, &buf->mem_used) < 0);
if (b->size > MIN_TTYB_SIZE)
kfree(b);
@@ -200,9 +201,7 @@ static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
* tty_buffer_flush - flush full tty buffers
* @tty: tty to flush
*
- * flush all the buffers containing receive data. If the buffer is
- * being processed by flush_to_ldisc then we defer the processing
- * to that function
+ * flush all the buffers containing receive data.
*
* Locking: takes buffer lock to ensure single-threaded flip buffer
* 'consumer'
@@ -230,31 +229,49 @@ void tty_buffer_flush(struct tty_struct *tty)
* tty_buffer_request_room - grow tty buffer if needed
* @tty: tty structure
* @size: size desired
+ * @flags: buffer flags if new buffer allocated (default = 0)
*
* Make at least size bytes of linear space available for the tty
* buffer. If we fail return the size we managed to find.
+ *
+ * Will change over to a new buffer if the current buffer is encoded as
+ * TTY_NORMAL (so has no flags buffer) and the new buffer requires
+ * a flags buffer.
*/
-int tty_buffer_request_room(struct tty_port *port, size_t size)
+static int __tty_buffer_request_room(struct tty_port *port, size_t size,
+ int flags)
{
struct tty_bufhead *buf = &port->buf;
struct tty_buffer *b, *n;
- int left;
+ int left, change;
b = buf->tail;
- left = b->size - b->used;
+ if (b->flags & TTYB_NORMAL)
+ left = 2 * b->size - b->used;
+ else
+ left = b->size - b->used;
- if (left < size) {
+ change = (b->flags & TTYB_NORMAL) && (~flags & TTYB_NORMAL);
+ if (change || left < size) {
/* This is the slow path - looking for new buffers to use */
if ((n = tty_buffer_alloc(port, size)) != NULL) {
+ n->flags = flags;
buf->tail = n;
b->commit = b->used;
smp_mb();
b->next = n;
- } else
+ } else if (change)
+ size = 0;
+ else
size = left;
}
return size;
}
+
+int tty_buffer_request_room(struct tty_port *port, size_t size)
+{
+ return __tty_buffer_request_room(port, size, 0);
+}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
/**
@@ -274,12 +291,14 @@ int tty_insert_flip_string_fixed_flag(struct tty_port *port,
int copied = 0;
do {
int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
- int space = tty_buffer_request_room(port, goal);
+ int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
+ int space = __tty_buffer_request_room(port, goal, flags);
struct tty_buffer *tb = port->buf.tail;
if (unlikely(space == 0))
break;
memcpy(char_buf_ptr(tb, tb->used), chars, space);
- memset(flag_buf_ptr(tb, tb->used), flag, space);
+ if (~tb->flags & TTYB_NORMAL)
+ memset(flag_buf_ptr(tb, tb->used), flag, space);
tb->used += space;
copied += space;
chars += space;
@@ -362,52 +381,28 @@ EXPORT_SYMBOL(tty_schedule_flip);
int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
size_t size)
{
- int space = tty_buffer_request_room(port, size);
+ int space = __tty_buffer_request_room(port, size, TTYB_NORMAL);
if (likely(space)) {
struct tty_buffer *tb = port->buf.tail;
*chars = char_buf_ptr(tb, tb->used);
- memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
+ if (~tb->flags & TTYB_NORMAL)
+ memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
tb->used += space;
}
return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
-/**
- * tty_prepare_flip_string_flags - make room for characters
- * @port: tty port
- * @chars: return pointer for character write area
- * @flags: return pointer for status flag write area
- * @size: desired size
- *
- * Prepare a block of space in the buffer for data. Returns the length
- * available and buffer pointer to the space which is now allocated and
- * accounted for as ready for characters. This is used for drivers
- * that need their own block copy routines into the buffer. There is no
- * guarantee the buffer is a DMA target!
- */
-
-int tty_prepare_flip_string_flags(struct tty_port *port,
- unsigned char **chars, char **flags, size_t size)
-{
- int space = tty_buffer_request_room(port, size);
- if (likely(space)) {
- struct tty_buffer *tb = port->buf.tail;
- *chars = char_buf_ptr(tb, tb->used);
- *flags = flag_buf_ptr(tb, tb->used);
- tb->used += space;
- }
- return space;
-}
-EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
-
static int
receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
{
struct tty_ldisc *disc = tty->ldisc;
unsigned char *p = char_buf_ptr(head, head->read);
- char *f = flag_buf_ptr(head, head->read);
+ char *f = NULL;
+
+ if (~head->flags & TTYB_NORMAL)
+ f = flag_buf_ptr(head, head->read);
if (disc->ops->receive_buf2)
count = disc->ops->receive_buf2(tty, p, f, count);
@@ -533,7 +528,25 @@ void tty_buffer_init(struct tty_port *port)
buf->head = &buf->sentinel;
buf->tail = &buf->sentinel;
init_llist_head(&buf->free);
- atomic_set(&buf->memory_used, 0);
+ atomic_set(&buf->mem_used, 0);
atomic_set(&buf->priority, 0);
INIT_WORK(&buf->work, flush_to_ldisc);
+ buf->mem_limit = TTYB_DEFAULT_MEM_LIMIT;
+}
+
+/**
+ * tty_buffer_set_limit - change the tty buffer memory limit
+ * @port: tty port to change
+ * @limit: new memory limit in bytes
+ *
+ * Change the tty buffer memory limit.
+ * Must be called before the other tty buffer functions are used.
+ */
+
+int tty_buffer_set_limit(struct tty_port *port, int limit)
+{
+ if (limit < MIN_TTYB_SIZE)
+ return -EINVAL;
+ port->buf.mem_limit = limit;
+ return 0;
}
+EXPORT_SYMBOL_GPL(tty_buffer_set_limit);
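The tty_buffer changes above rename memory_used to mem_used, make the flip-buffer limit a per-port field (mem_limit, defaulting to 64 KiB in tty_buffer_init()), and export tty_buffer_set_limit() alongside tty_buffer_space_avail(). A hedged sketch of how a driver could raise the budget for a fast port, assuming the matching prototype added to <linux/tty.h> in this series; foo_port_setup() is illustrative only:

    #include <linux/tty.h>

    static int foo_port_setup(struct tty_port *port)
    {
        tty_port_init(port);    /* runs tty_buffer_init(), 64 KiB default */

        /* Must be called before the flip buffers are put to work. */
        return tty_buffer_set_limit(port, 256 * 1024);
    }

Limits below MIN_TTYB_SIZE are rejected with -EINVAL, per the check in the new helper.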
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 6458e11..2d822aa 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -11,7 +11,6 @@
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/wait.h>
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index c94d234..3f746c8 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -12,7 +12,6 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index f7beb6e..a673e5b 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -847,7 +847,7 @@ int __uio_register_device(struct module *owner,
info->uio_dev = idev;
if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
- ret = devm_request_irq(parent, info->irq, uio_interrupt,
+ ret = devm_request_irq(idev->dev, info->irq, uio_interrupt,
info->irq_flags, info->name, idev);
if (ret)
goto err_request_irq;
diff --git a/drivers/uio/uio_mf624.c b/drivers/uio/uio_mf624.c
index f764adb..d1f95a1 100644
--- a/drivers/uio/uio_mf624.c
+++ b/drivers/uio/uio_mf624.c
@@ -228,7 +228,7 @@ static void mf624_pci_remove(struct pci_dev *dev)
kfree(info);
}
-static DEFINE_PCI_DEVICE_TABLE(mf624_pci_id) = {
+static const struct pci_device_id mf624_pci_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUMUSOFT, PCI_DEVICE_ID_MF624) },
{ 0, }
};
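The same DEFINE_PCI_DEVICE_TABLE() conversion recurs in several drivers below; for reference, a hedged sketch of the now-preferred open-coded form, with made-up IDs and a hypothetical driver name:

	static const struct pci_device_id example_pci_ids[] = {
		{ PCI_DEVICE(0x1234, 0x5678) },	/* made-up vendor/device IDs */
		{ 0, }
	};
	MODULE_DEVICE_TABLE(pci, example_pci_ids);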
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index a34fb98..2e6b832 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -108,6 +108,8 @@ source "drivers/usb/musb/Kconfig"
source "drivers/usb/dwc3/Kconfig"
+source "drivers/usb/dwc2/Kconfig"
+
source "drivers/usb/chipidea/Kconfig"
comment "USB port drivers"
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 70d7c5b..1ae2bf3 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -7,6 +7,7 @@
obj-$(CONFIG_USB) += core/
obj-$(CONFIG_USB_DWC3) += dwc3/
+obj-$(CONFIG_USB_DWC2) += dwc2/
obj-$(CONFIG_USB_MON) += mon/
diff --git a/drivers/staging/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index be947d6..be947d6 100644
--- a/drivers/staging/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
diff --git a/drivers/staging/dwc2/Makefile b/drivers/usb/dwc2/Makefile
index 11529d3..11529d3 100644
--- a/drivers/staging/dwc2/Makefile
+++ b/drivers/usb/dwc2/Makefile
diff --git a/drivers/staging/dwc2/core.c b/drivers/usb/dwc2/core.c
index 6d001b5..8565d87 100644
--- a/drivers/staging/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -114,7 +114,7 @@ static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
* Do a soft reset of the core. Be careful with this because it
* resets all the internal state machines of the core.
*/
-static void dwc2_core_reset(struct dwc2_hsotg *hsotg)
+static int dwc2_core_reset(struct dwc2_hsotg *hsotg)
{
u32 greset;
int count = 0;
@@ -129,7 +129,7 @@ static void dwc2_core_reset(struct dwc2_hsotg *hsotg)
dev_warn(hsotg->dev,
"%s() HANG! AHB Idle GRSTCTL=%0x\n",
__func__, greset);
- return;
+ return -EBUSY;
}
} while (!(greset & GRSTCTL_AHBIDLE));
@@ -144,7 +144,7 @@ static void dwc2_core_reset(struct dwc2_hsotg *hsotg)
dev_warn(hsotg->dev,
"%s() HANG! Soft Reset GRSTCTL=%0x\n",
__func__, greset);
- break;
+ return -EBUSY;
}
} while (greset & GRSTCTL_CSFTRST);
@@ -153,11 +153,14 @@ static void dwc2_core_reset(struct dwc2_hsotg *hsotg)
* not stay in host mode after a connector ID change!
*/
usleep_range(150000, 200000);
+
+ return 0;
}
-static void dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
u32 usbcfg, i2cctl;
+ int retval = 0;
/*
* core_init() is now called on every switch so only call the
@@ -170,7 +173,12 @@ static void dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
writel(usbcfg, hsotg->regs + GUSBCFG);
/* Reset after a PHY select */
- dwc2_core_reset(hsotg);
+ retval = dwc2_core_reset(hsotg);
+ if (retval) {
+ dev_err(hsotg->dev, "%s() Reset failed, aborting",
+ __func__);
+ return retval;
+ }
}
/*
@@ -198,14 +206,17 @@ static void dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
i2cctl |= GI2CCTL_I2CEN;
writel(i2cctl, hsotg->regs + GI2CCTL);
}
+
+ return retval;
}
-static void dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
u32 usbcfg;
+ int retval = 0;
if (!select_phy)
- return;
+ return -ENODEV;
usbcfg = readl(hsotg->regs + GUSBCFG);
@@ -238,20 +249,32 @@ static void dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
writel(usbcfg, hsotg->regs + GUSBCFG);
/* Reset after setting the PHY parameters */
- dwc2_core_reset(hsotg);
+ retval = dwc2_core_reset(hsotg);
+ if (retval) {
+ dev_err(hsotg->dev, "%s() Reset failed, aborting",
+ __func__);
+ return retval;
+ }
+
+ return retval;
}
-static void dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
u32 usbcfg;
+ int retval = 0;
if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
/* If FS mode with FS PHY */
- dwc2_fs_phy_init(hsotg, select_phy);
+ retval = dwc2_fs_phy_init(hsotg, select_phy);
+ if (retval)
+ return retval;
} else {
/* High speed PHY */
- dwc2_hs_phy_init(hsotg, select_phy);
+ retval = dwc2_hs_phy_init(hsotg, select_phy);
+ if (retval)
+ return retval;
}
if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
@@ -268,6 +291,8 @@ static void dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
writel(usbcfg, hsotg->regs + GUSBCFG);
}
+
+ return retval;
}
static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
@@ -382,12 +407,19 @@ int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
writel(usbcfg, hsotg->regs + GUSBCFG);
/* Reset the Controller */
- dwc2_core_reset(hsotg);
+ retval = dwc2_core_reset(hsotg);
+ if (retval) {
+ dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
+ __func__);
+ return retval;
+ }
/*
* This needs to happen in FS mode before any other programming occurs
*/
- dwc2_phy_init(hsotg, select_phy);
+ retval = dwc2_phy_init(hsotg, select_phy);
+ if (retval)
+ return retval;
/* Program the GAHBCFG Register */
retval = dwc2_gahbcfg_init(hsotg);
@@ -451,9 +483,6 @@ void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
writel(0, hsotg->regs + GINTMSK);
writel(0, hsotg->regs + HAINTMSK);
- /* Clear any pending interrupts */
- writel(0xffffffff, hsotg->regs + GINTSTS);
-
/* Enable the common interrupts */
dwc2_enable_common_interrupts(hsotg);
@@ -1912,13 +1941,12 @@ void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
udelay(1);
}
-#define DWC2_PARAM_TEST(a, b, c) ((a) < (b) || (a) > (c))
+#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))
/* Parameter access functions */
-int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
switch (val) {
case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
@@ -1964,17 +1992,14 @@ int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
break;
}
dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->otg_cap = val;
- return retval;
}
-int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
valid = 0;
@@ -1988,17 +2013,14 @@ int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->dma_enable = val;
- return retval;
}
-int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
!hsotg->hw_params.dma_desc_enable))
@@ -2014,19 +2036,15 @@ int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
val = (hsotg->core_params->dma_enable > 0 &&
hsotg->hw_params.dma_desc_enable);
dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->dma_desc_enable = val;
- return retval;
}
-int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
- int val)
+void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
+ int val)
{
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev,
"Wrong value for host_support_fs_low_power\n");
@@ -2036,17 +2054,14 @@ int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
val = 0;
dev_dbg(hsotg->dev,
"Setting host_support_fs_low_power to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->host_support_fs_ls_low_power = val;
- return retval;
}
-int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
valid = 0;
@@ -2060,17 +2075,14 @@ int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.enable_dynamic_fifo;
dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->enable_dynamic_fifo = val;
- return retval;
}
-int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
valid = 0;
@@ -2082,17 +2094,14 @@ int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.host_rx_fifo_size;
dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->host_rx_fifo_size = val;
- return retval;
}
-int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
valid = 0;
@@ -2105,17 +2114,14 @@ int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
val = hsotg->hw_params.host_nperio_tx_fifo_size;
dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
val);
- retval = -EINVAL;
}
hsotg->core_params->host_nperio_tx_fifo_size = val;
- return retval;
}
-int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
valid = 0;
@@ -2128,17 +2134,14 @@ int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
val = hsotg->hw_params.host_perio_tx_fifo_size;
dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
val);
- retval = -EINVAL;
}
hsotg->core_params->host_perio_tx_fifo_size = val;
- return retval;
}
-int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
valid = 0;
@@ -2150,17 +2153,14 @@ int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.max_transfer_size;
dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->max_transfer_size = val;
- return retval;
}
-int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val < 15 || val > hsotg->hw_params.max_packet_count)
valid = 0;
@@ -2172,17 +2172,14 @@ int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.max_packet_count;
dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->max_packet_count = val;
- return retval;
}
-int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val < 1 || val > hsotg->hw_params.host_channels)
valid = 0;
@@ -2194,38 +2191,26 @@ int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.host_channels;
dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->host_channels = val;
- return retval;
}
-int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
{
-#ifndef NO_FS_PHY_HW_CHECKS
int valid = 0;
u32 hs_phy_type, fs_phy_type;
-#endif
- int retval = 0;
- if (DWC2_PARAM_TEST(val, DWC2_PHY_TYPE_PARAM_FS,
- DWC2_PHY_TYPE_PARAM_ULPI)) {
+ if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
+ DWC2_PHY_TYPE_PARAM_ULPI)) {
if (val >= 0) {
dev_err(hsotg->dev, "Wrong value for phy_type\n");
dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
}
-#ifndef NO_FS_PHY_HW_CHECKS
valid = 0;
-#else
- val = DWC2_PHY_TYPE_PARAM_FS;
- dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
- retval = -EINVAL;
-#endif
}
-#ifndef NO_FS_PHY_HW_CHECKS
hs_phy_type = hsotg->hw_params.hs_phy_type;
fs_phy_type = hsotg->hw_params.fs_phy_type;
if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
@@ -2254,12 +2239,9 @@ int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
val = DWC2_PHY_TYPE_PARAM_ULPI;
}
dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
- retval = -EINVAL;
}
-#endif
hsotg->core_params->phy_type = val;
- return retval;
}
static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
@@ -2267,12 +2249,11 @@ static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
return hsotg->core_params->phy_type;
}
-int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev, "Wrong value for speed parameter\n");
dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
@@ -2292,20 +2273,17 @@ int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->speed = val;
- return retval;
}
-int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
- if (DWC2_PARAM_TEST(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
- DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
+ if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
+ DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
if (val >= 0) {
dev_err(hsotg->dev,
"Wrong value for host_ls_low_power_phy_clk parameter\n");
@@ -2329,36 +2307,28 @@ int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
: DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
val);
- retval = -EINVAL;
}
hsotg->core_params->host_ls_low_power_phy_clk = val;
- return retval;
}
-int dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
{
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
}
val = 0;
dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->phy_ulpi_ddr = val;
- return retval;
}
-int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
{
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev,
"Wrong value for phy_ulpi_ext_vbus\n");
@@ -2367,17 +2337,14 @@ int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
}
val = 0;
dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->phy_ulpi_ext_vbus = val;
- return retval;
}
-int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
{
int valid = 0;
- int retval = 0;
switch (hsotg->hw_params.utmi_phy_data_width) {
case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
@@ -2400,72 +2367,52 @@ int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
val = (hsotg->hw_params.utmi_phy_data_width ==
GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->phy_utmi_width = val;
- return retval;
}
-int dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
{
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
}
val = 0;
dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->ulpi_fs_ls = val;
- return retval;
}
-int dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
{
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev, "Wrong value for ts_dline\n");
dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
}
val = 0;
dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->ts_dline = val;
- return retval;
}
-int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
{
-#ifndef NO_FS_PHY_HW_CHECKS
int valid = 1;
-#endif
- int retval = 0;
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
}
-#ifndef NO_FS_PHY_HW_CHECKS
valid = 0;
-#else
- val = 0;
- dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
- retval = -EINVAL;
-#endif
}
-#ifndef NO_FS_PHY_HW_CHECKS
if (val == 1 && !(hsotg->hw_params.i2c_enable))
valid = 0;
@@ -2476,20 +2423,16 @@ int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.i2c_enable;
dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
- retval = -EINVAL;
}
-#endif
hsotg->core_params->i2c_enable = val;
- return retval;
}
-int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev,
"Wrong value for en_multiple_tx_fifo,\n");
@@ -2509,19 +2452,16 @@ int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.en_multiple_tx_fifo;
dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->en_multiple_tx_fifo = val;
- return retval;
}
-int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev,
"'%d' invalid for parameter reload_ctl\n", val);
@@ -2540,28 +2480,23 @@ int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->reload_ctl = val;
- return retval;
}
-int dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
{
if (val != -1)
hsotg->core_params->ahbcfg = val;
else
hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
GAHBCFG_HBSTLEN_SHIFT;
- return 0;
}
-int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
{
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev,
"'%d' invalid for parameter otg_ver\n", val);
@@ -2570,11 +2505,71 @@ int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
}
val = 0;
dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->otg_ver = val;
- return retval;
+}
+
+static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter uframe_sched\n",
+ val);
+ dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
+ }
+ val = 1;
+ dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
+ }
+
+ hsotg->core_params->uframe_sched = val;
+}
+
+/*
+ * This function is called during module initialization to pass module
+ * parameters for the DWC_otg core.
+ */
+void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
+ const struct dwc2_core_params *params)
+{
+ dev_dbg(hsotg->dev, "%s()\n", __func__);
+
+ dwc2_set_param_otg_cap(hsotg, params->otg_cap);
+ dwc2_set_param_dma_enable(hsotg, params->dma_enable);
+ dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
+ dwc2_set_param_host_support_fs_ls_low_power(hsotg,
+ params->host_support_fs_ls_low_power);
+ dwc2_set_param_enable_dynamic_fifo(hsotg,
+ params->enable_dynamic_fifo);
+ dwc2_set_param_host_rx_fifo_size(hsotg,
+ params->host_rx_fifo_size);
+ dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
+ params->host_nperio_tx_fifo_size);
+ dwc2_set_param_host_perio_tx_fifo_size(hsotg,
+ params->host_perio_tx_fifo_size);
+ dwc2_set_param_max_transfer_size(hsotg,
+ params->max_transfer_size);
+ dwc2_set_param_max_packet_count(hsotg,
+ params->max_packet_count);
+ dwc2_set_param_host_channels(hsotg, params->host_channels);
+ dwc2_set_param_phy_type(hsotg, params->phy_type);
+ dwc2_set_param_speed(hsotg, params->speed);
+ dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
+ params->host_ls_low_power_phy_clk);
+ dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
+ dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
+ params->phy_ulpi_ext_vbus);
+ dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
+ dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
+ dwc2_set_param_ts_dline(hsotg, params->ts_dline);
+ dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
+ dwc2_set_param_en_multiple_tx_fifo(hsotg,
+ params->en_multiple_tx_fifo);
+ dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
+ dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
+ dwc2_set_param_otg_ver(hsotg, params->otg_ver);
+ dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
}
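A minimal sketch, assuming hsotg already points at a mapped controller, of how the setters are driven now that they return void; in the real driver dwc2_hcd_init() does this internally:

	struct dwc2_core_params defparams;

	dwc2_set_all_params(&defparams, -1);	/* -1 == autodetect from hardware */
	dwc2_set_parameters(hsotg, &defparams);	/* invalid values are clamped, not rejected */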
/**
@@ -2736,88 +2731,17 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
return 0;
}
-int dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
-{
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter uframe_sched\n",
- val);
- dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
- }
- val = 1;
- dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
- retval = -EINVAL;
- }
-
- hsotg->core_params->uframe_sched = val;
- return retval;
-}
-
-/*
- * This function is called during module intialization to pass module parameters
- * for the DWC_otg core. It returns non-0 if any parameters are invalid.
- */
-int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
- const struct dwc2_core_params *params)
-{
- int retval = 0;
-
- dev_dbg(hsotg->dev, "%s()\n", __func__);
-
- retval |= dwc2_set_param_otg_cap(hsotg, params->otg_cap);
- retval |= dwc2_set_param_dma_enable(hsotg, params->dma_enable);
- retval |= dwc2_set_param_dma_desc_enable(hsotg,
- params->dma_desc_enable);
- retval |= dwc2_set_param_host_support_fs_ls_low_power(hsotg,
- params->host_support_fs_ls_low_power);
- retval |= dwc2_set_param_enable_dynamic_fifo(hsotg,
- params->enable_dynamic_fifo);
- retval |= dwc2_set_param_host_rx_fifo_size(hsotg,
- params->host_rx_fifo_size);
- retval |= dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
- params->host_nperio_tx_fifo_size);
- retval |= dwc2_set_param_host_perio_tx_fifo_size(hsotg,
- params->host_perio_tx_fifo_size);
- retval |= dwc2_set_param_max_transfer_size(hsotg,
- params->max_transfer_size);
- retval |= dwc2_set_param_max_packet_count(hsotg,
- params->max_packet_count);
- retval |= dwc2_set_param_host_channels(hsotg, params->host_channels);
- retval |= dwc2_set_param_phy_type(hsotg, params->phy_type);
- retval |= dwc2_set_param_speed(hsotg, params->speed);
- retval |= dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
- params->host_ls_low_power_phy_clk);
- retval |= dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
- retval |= dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
- params->phy_ulpi_ext_vbus);
- retval |= dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
- retval |= dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
- retval |= dwc2_set_param_ts_dline(hsotg, params->ts_dline);
- retval |= dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
- retval |= dwc2_set_param_en_multiple_tx_fifo(hsotg,
- params->en_multiple_tx_fifo);
- retval |= dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
- retval |= dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
- retval |= dwc2_set_param_otg_ver(hsotg, params->otg_ver);
- retval |= dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
-
- return retval;
-}
-
u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
{
- return (u16)(hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103);
+ return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
}
-int dwc2_check_core_status(struct dwc2_hsotg *hsotg)
+bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
{
if (readl(hsotg->regs + GSNPSID) == 0xffffffff)
- return -1;
+ return false;
else
- return 0;
+ return true;
}
/**
diff --git a/drivers/staging/dwc2/core.h b/drivers/usb/dwc2/core.h
index fab718d..648519c 100644
--- a/drivers/staging/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -544,7 +544,7 @@ extern void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg);
extern void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg);
extern u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg);
-extern int dwc2_check_core_status(struct dwc2_hsotg *hsotg);
+extern bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg);
/*
* Common core Functions.
@@ -571,7 +571,7 @@ extern irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
* 1 - SRP Only capable
* 2 - No HNP/SRP capable
*/
-extern int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val);
#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0
#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1
#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
@@ -583,7 +583,7 @@ extern int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val);
* 0 - Slave
* 1 - DMA (default, if available)
*/
-extern int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val);
/*
* When DMA mode is enabled specifies whether to use
@@ -593,7 +593,7 @@ extern int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val);
* 0 - address DMA
* 1 - DMA Descriptor(default, if available)
*/
-extern int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val);
/*
* Specifies the maximum speed of operation in host and device mode.
@@ -603,7 +603,7 @@ extern int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val);
* 0 - High Speed (default)
* 1 - Full Speed
*/
-extern int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val);
#define DWC2_SPEED_PARAM_HIGH 0
#define DWC2_SPEED_PARAM_FULL 1
@@ -614,8 +614,8 @@ extern int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val);
* 0 - Don't support low power mode (default)
* 1 - Support low power mode
*/
-extern int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
- int val);
+extern void dwc2_set_param_host_support_fs_ls_low_power(
+ struct dwc2_hsotg *hsotg, int val);
/*
* Specifies the PHY clock rate in low power mode when connected to a
@@ -626,8 +626,8 @@ extern int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
* 0 - 48 MHz
* 1 - 6 MHz
*/
-extern int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg,
- int val);
+extern void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg,
+ int val);
#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
@@ -635,50 +635,50 @@ extern int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg,
* 0 - Use cC FIFO size parameters
* 1 - Allow dynamic FIFO sizing (default)
*/
-extern int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg,
- int val);
+extern void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg,
+ int val);
/*
* Number of 4-byte words in the Rx FIFO in host mode when dynamic
* FIFO sizing is enabled.
* 16 to 32768 (default 1024)
*/
-extern int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val);
/*
* Number of 4-byte words in the non-periodic Tx FIFO in host mode
* when Dynamic FIFO sizing is enabled in the core.
* 16 to 32768 (default 256)
*/
-extern int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg,
- int val);
+extern void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg,
+ int val);
/*
* Number of 4-byte words in the host periodic Tx FIFO when dynamic
* FIFO sizing is enabled.
* 16 to 32768 (default 256)
*/
-extern int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg,
- int val);
+extern void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg,
+ int val);
/*
* The maximum transfer size supported in bytes.
* 2047 to 65,535 (default 65,535)
*/
-extern int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val);
/*
* The maximum number of packets in a transfer.
* 15 to 511 (default 511)
*/
-extern int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val);
/*
* The number of host channel registers to use.
* 1 to 16 (default 11)
* Note: The FPGA configuration supports a maximum of 11 host channels.
*/
-extern int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val);
/*
* Specifies the type of PHY interface to use. By default, the driver
@@ -688,7 +688,7 @@ extern int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val);
* 1 - UTMI+ (default)
* 2 - ULPI
*/
-extern int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val);
#define DWC2_PHY_TYPE_PARAM_FS 0
#define DWC2_PHY_TYPE_PARAM_UTMI 1
#define DWC2_PHY_TYPE_PARAM_ULPI 2
@@ -704,7 +704,7 @@ extern int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val);
*
* 8 or 16 bits (default 16)
*/
-extern int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val);
/*
* Specifies whether the ULPI operates at double or single
@@ -716,13 +716,13 @@ extern int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val);
* 1 - double data rate ULPI interface with 4 bit wide data
* bus
*/
-extern int dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val);
/*
* Specifies whether to use the internal or external supply to
* drive the vbus with a ULPI phy.
*/
-extern int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val);
#define DWC2_PHY_ULPI_INTERNAL_VBUS 0
#define DWC2_PHY_ULPI_EXTERNAL_VBUS 1
@@ -732,11 +732,11 @@ extern int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val);
* 0 - No (default)
* 1 - Yes
*/
-extern int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val);
-extern int dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val);
-extern int dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val);
/*
* Specifies whether dedicated transmit FIFOs are
@@ -744,14 +744,14 @@ extern int dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val);
* 0 - No
* 1 - Yes
*/
-extern int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg,
- int val);
+extern void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg,
+ int val);
-extern int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val);
-extern int dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val);
-extern int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val);
/*
* Dump core registers and SPRAM
diff --git a/drivers/staging/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 07cfa2f..8205799 100644
--- a/drivers/staging/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -55,7 +55,6 @@
static const char *dwc2_op_state_str(struct dwc2_hsotg *hsotg)
{
-#ifdef DEBUG
switch (hsotg->op_state) {
case OTG_STATE_A_HOST:
return "a_host";
@@ -70,9 +69,6 @@ static const char *dwc2_op_state_str(struct dwc2_hsotg *hsotg)
default:
return "unknown";
}
-#else
- return "";
-#endif
}
/**
@@ -419,12 +415,10 @@ static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg)
gintmsk = readl(hsotg->regs + GINTMSK);
gahbcfg = readl(hsotg->regs + GAHBCFG);
-#ifdef DEBUG
/* If any common interrupts set */
if (gintsts & gintmsk_common)
dev_dbg(hsotg->dev, "gintsts=%08x gintmsk=%08x\n",
gintsts, gintmsk);
-#endif
if (gahbcfg & GAHBCFG_GLBL_INTR_EN)
return gintsts & gintmsk & gintmsk_common;
@@ -451,8 +445,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
u32 gintsts;
irqreturn_t retval = IRQ_NONE;
- if (dwc2_check_core_status(hsotg) < 0) {
- dev_warn(hsotg->dev, "Controller is disconnected\n");
+ if (!dwc2_is_controller_alive(hsotg)) {
+ dev_warn(hsotg->dev, "Controller is dead\n");
goto out;
}
diff --git a/drivers/staging/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 078cd91..f59484d 100644
--- a/drivers/staging/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -355,6 +355,7 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
unsigned long flags;
u32 intr_mask;
int retval;
+ int dev_speed;
if (!hsotg->flags.b.port_connect_status) {
/* No longer connected */
@@ -362,6 +363,19 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
return -ENODEV;
}
+ dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
+
+ /* Some configurations cannot support LS traffic on a FS root port */
+ if ((dev_speed == USB_SPEED_LOW) &&
+ (hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) &&
+ (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI)) {
+ u32 hprt0 = readl(hsotg->regs + HPRT0);
+ u32 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
+
+ if (prtspd == HPRT0_SPD_FULL_SPEED)
+ return -ENODEV;
+ }
+
qtd = kzalloc(sizeof(*qtd), mem_flags);
if (!qtd)
return -ENOMEM;
@@ -369,7 +383,7 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
dwc2_hcd_qtd_init(qtd, urb);
retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle,
mem_flags);
- if (retval < 0) {
+ if (retval) {
dev_err(hsotg->dev,
"DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
retval);
@@ -378,7 +392,7 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
}
intr_mask = readl(hsotg->regs + GINTMSK);
- if (!(intr_mask & GINTSTS_SOF) && retval == 0) {
+ if (!(intr_mask & GINTSTS_SOF)) {
enum dwc2_transaction_type tr_type;
if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK &&
@@ -396,7 +410,7 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
spin_unlock_irqrestore(&hsotg->lock, flags);
}
- return retval;
+ return 0;
}
/* Must be called with interrupt disabled and spinlock held */
@@ -1795,7 +1809,7 @@ int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
{
- return (hsotg->op_state == OTG_STATE_B_HOST);
+ return hsotg->op_state == OTG_STATE_B_HOST;
}
static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
diff --git a/drivers/staging/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 89a5484..fdc6d48 100644
--- a/drivers/staging/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -452,8 +452,8 @@ static inline u8 dwc2_hcd_is_pipe_out(struct dwc2_hcd_pipe_info *pipe)
extern int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
const struct dwc2_core_params *params);
extern void dwc2_hcd_remove(struct dwc2_hsotg *hsotg);
-extern int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
- const struct dwc2_core_params *params);
+extern void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
+ const struct dwc2_core_params *params);
extern void dwc2_set_all_params(struct dwc2_core_params *params, int value);
extern int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
diff --git a/drivers/staging/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index c7d4345..3376177 100644
--- a/drivers/staging/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -621,8 +621,8 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
int len = chan->xfer_len;
- if (len > MAX_DMA_DESC_SIZE)
- len = MAX_DMA_DESC_SIZE - chan->max_packet + 1;
+ if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
+ len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);
if (chan->ep_is_in) {
int num_packets;
@@ -1103,8 +1103,10 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
for (i = 0; i < qtd->n_desc; i++) {
if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
desc_num, halt_status,
- &xfer_done))
+ &xfer_done)) {
+ qtd = NULL;
break;
+ }
desc_num++;
}
}
diff --git a/drivers/staging/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index dda1854..012f17e 100644
--- a/drivers/staging/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -935,7 +935,7 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
frame_desc->actual_length += len;
- if (chan->align_buf && len) {
+ if (chan->align_buf) {
dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
dma_sync_single_for_cpu(hsotg->dev, qtd->urb->dma,
qtd->urb->length, DMA_FROM_DEVICE);
@@ -2059,8 +2059,8 @@ irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
u32 gintsts, dbg_gintsts;
irqreturn_t retval = IRQ_NONE;
- if (dwc2_check_core_status(hsotg) < 0) {
- dev_warn(hsotg->dev, "Controller is disconnected\n");
+ if (!dwc2_is_controller_alive(hsotg)) {
+ dev_warn(hsotg->dev, "Controller is dead\n");
return retval;
}
diff --git a/drivers/staging/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index f200f1f..9540f7e 100644
--- a/drivers/staging/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -344,25 +344,17 @@ void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
unsigned short utime = qh->usecs;
- int done = 0;
- int i = 0;
- int ret = -1;
+ int i;
- while (!done) {
+ for (i = 0; i < 8; i++) {
/* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
if (utime <= hsotg->frame_usecs[i]) {
hsotg->frame_usecs[i] -= utime;
qh->frame_usecs[i] += utime;
- ret = i;
- done = 1;
- } else {
- i++;
- if (i == 8)
- done = 1;
+ return i;
}
}
-
- return ret;
+ return -ENOSPC;
}
/*
@@ -372,21 +364,14 @@ static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
unsigned short utime = qh->usecs;
unsigned short xtime;
- int t_left = utime;
- int done = 0;
- int i = 0;
+ int t_left;
+ int i;
int j;
- int ret = -1;
-
- while (!done) {
- if (hsotg->frame_usecs[i] <= 0) {
- i++;
- if (i == 8) {
- ret = -1;
- done = 1;
- }
+ int k;
+
+ for (i = 0; i < 8; i++) {
+ if (hsotg->frame_usecs[i] <= 0)
continue;
- }
/*
* we need n consecutive slots so use j as a start slot
@@ -400,50 +385,35 @@ static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
*/
if (xtime + hsotg->frame_usecs[j] < utime) {
if (hsotg->frame_usecs[j] <
- max_uframe_usecs[j]) {
- ret = -1;
- break;
- }
+ max_uframe_usecs[j])
+ continue;
}
if (xtime >= utime) {
- ret = i;
- break;
+ t_left = utime;
+ for (k = i; k < 8; k++) {
+ t_left -= hsotg->frame_usecs[k];
+ if (t_left <= 0) {
+ qh->frame_usecs[k] +=
+ hsotg->frame_usecs[k]
+ + t_left;
+ hsotg->frame_usecs[k] = -t_left;
+ return i;
+ } else {
+ qh->frame_usecs[k] +=
+ hsotg->frame_usecs[k];
+ hsotg->frame_usecs[k] = 0;
+ }
+ }
}
/* add the frame time to x time */
xtime += hsotg->frame_usecs[j];
/* we must have a fully available next frame or break */
if (xtime < utime &&
- hsotg->frame_usecs[j] == max_uframe_usecs[j]) {
- ret = -1;
- break;
- }
- }
- if (ret >= 0) {
- t_left = utime;
- for (j = i; t_left > 0 && j < 8; j++) {
- t_left -= hsotg->frame_usecs[j];
- if (t_left <= 0) {
- qh->frame_usecs[j] +=
- hsotg->frame_usecs[j] + t_left;
- hsotg->frame_usecs[j] = -t_left;
- ret = i;
- done = 1;
- } else {
- qh->frame_usecs[j] +=
- hsotg->frame_usecs[j];
- hsotg->frame_usecs[j] = 0;
- }
- }
- } else {
- i++;
- if (i == 8) {
- ret = -1;
- done = 1;
- }
+ hsotg->frame_usecs[j] == max_uframe_usecs[j])
+ continue;
}
}
-
- return ret;
+ return -ENOSPC;
}
static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
@@ -517,12 +487,12 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
frame = status - 1;
/* Set the new frame up */
- if (frame > -1) {
+ if (frame >= 0) {
qh->sched_frame &= ~0x7;
qh->sched_frame |= (frame & 7);
}
- if (status != -1)
+ if (status > 0)
status = 0;
} else {
status = dwc2_periodic_channel_available(hsotg);
@@ -609,7 +579,7 @@ static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
*/
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
- int status = 0;
+ int status;
u32 intr_mask;
if (dbg_qh(qh))
@@ -617,26 +587,27 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
if (!list_empty(&qh->qh_list_entry))
/* QH already in a schedule */
- return status;
+ return 0;
/* Add the new QH to the appropriate schedule */
if (dwc2_qh_is_non_per(qh)) {
/* Always start in inactive schedule */
list_add_tail(&qh->qh_list_entry,
&hsotg->non_periodic_sched_inactive);
- } else {
- status = dwc2_schedule_periodic(hsotg, qh);
- if (status == 0) {
- if (!hsotg->periodic_qh_count) {
- intr_mask = readl(hsotg->regs + GINTMSK);
- intr_mask |= GINTSTS_SOF;
- writel(intr_mask, hsotg->regs + GINTMSK);
- }
- hsotg->periodic_qh_count++;
- }
+ return 0;
}
- return status;
+ status = dwc2_schedule_periodic(hsotg, qh);
+ if (status)
+ return status;
+ if (!hsotg->periodic_qh_count) {
+ intr_mask = readl(hsotg->regs + GINTMSK);
+ intr_mask |= GINTSTS_SOF;
+ writel(intr_mask, hsotg->regs + GINTMSK);
+ }
+ hsotg->periodic_qh_count++;
+
+ return 0;
}
/**
@@ -661,14 +632,15 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
hsotg->non_periodic_qh_ptr =
hsotg->non_periodic_qh_ptr->next;
list_del_init(&qh->qh_list_entry);
- } else {
- dwc2_deschedule_periodic(hsotg, qh);
- hsotg->periodic_qh_count--;
- if (!hsotg->periodic_qh_count) {
- intr_mask = readl(hsotg->regs + GINTMSK);
- intr_mask &= ~GINTSTS_SOF;
- writel(intr_mask, hsotg->regs + GINTMSK);
- }
+ return;
+ }
+
+ dwc2_deschedule_periodic(hsotg, qh);
+ hsotg->periodic_qh_count--;
+ if (!hsotg->periodic_qh_count) {
+ intr_mask = readl(hsotg->regs + GINTMSK);
+ intr_mask &= ~GINTSTS_SOF;
+ writel(intr_mask, hsotg->regs + GINTMSK);
}
}
@@ -723,6 +695,8 @@ static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
int sched_next_periodic_split)
{
+ u16 frame_number;
+
if (dbg_qh(qh))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
@@ -731,37 +705,36 @@ void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
if (!list_empty(&qh->qtd_list))
/* Add back to inactive non-periodic schedule */
dwc2_hcd_qh_add(hsotg, qh);
+ return;
+ }
+
+ frame_number = dwc2_hcd_get_frame_number(hsotg);
+
+ if (qh->do_split) {
+ dwc2_sched_periodic_split(hsotg, qh, frame_number,
+ sched_next_periodic_split);
} else {
- u16 frame_number = dwc2_hcd_get_frame_number(hsotg);
-
- if (qh->do_split) {
- dwc2_sched_periodic_split(hsotg, qh, frame_number,
- sched_next_periodic_split);
- } else {
- qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
- qh->interval);
- if (dwc2_frame_num_le(qh->sched_frame, frame_number))
- qh->sched_frame = frame_number;
- }
+ qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
+ qh->interval);
+ if (dwc2_frame_num_le(qh->sched_frame, frame_number))
+ qh->sched_frame = frame_number;
+ }
- if (list_empty(&qh->qtd_list)) {
- dwc2_hcd_qh_unlink(hsotg, qh);
- } else {
- /*
- * Remove from periodic_sched_queued and move to
- * appropriate queue
- */
- if ((hsotg->core_params->uframe_sched > 0 &&
- dwc2_frame_num_le(qh->sched_frame, frame_number))
- || (hsotg->core_params->uframe_sched <= 0 &&
- qh->sched_frame == frame_number))
- list_move(&qh->qh_list_entry,
- &hsotg->periodic_sched_ready);
- else
- list_move(&qh->qh_list_entry,
- &hsotg->periodic_sched_inactive);
- }
+ if (list_empty(&qh->qtd_list)) {
+ dwc2_hcd_qh_unlink(hsotg, qh);
+ return;
}
+ /*
+ * Remove from periodic_sched_queued and move to
+ * appropriate queue
+ */
+ if ((hsotg->core_params->uframe_sched > 0 &&
+ dwc2_frame_num_le(qh->sched_frame, frame_number)) ||
+ (hsotg->core_params->uframe_sched <= 0 &&
+ qh->sched_frame == frame_number))
+ list_move(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
+ else
+ list_move(&qh->qh_list_entry, &hsotg->periodic_sched_inactive);
}
/**
diff --git a/drivers/staging/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 9c92a3c..9c92a3c 100644
--- a/drivers/staging/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
diff --git a/drivers/staging/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index 3d14c88..c291fca 100644
--- a/drivers/staging/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -152,7 +152,7 @@ static int dwc2_driver_probe(struct pci_dev *dev,
return retval;
}
-static DEFINE_PCI_DEVICE_TABLE(dwc2_pci_ids) = {
+static const struct pci_device_id dwc2_pci_ids[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, PCI_PRODUCT_ID_HAPS_HSOTG),
},
diff --git a/drivers/staging/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 83ca105..d01d0d3 100644
--- a/drivers/staging/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "core.h"
@@ -46,6 +47,34 @@
static const char dwc2_driver_name[] = "dwc2";
+static const struct dwc2_core_params params_bcm2835 = {
+ .otg_cap = 0, /* HNP/SRP capable */
+ .otg_ver = 0, /* 1.3 */
+ .dma_enable = 1,
+ .dma_desc_enable = 0,
+ .speed = 0, /* High Speed */
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = 1,
+ .host_rx_fifo_size = 774, /* 774 DWORDs */
+ .host_nperio_tx_fifo_size = 256, /* 256 DWORDs */
+ .host_perio_tx_fifo_size = 512, /* 512 DWORDs */
+ .max_transfer_size = 65535,
+ .max_packet_count = 511,
+ .host_channels = 8,
+ .phy_type = 1, /* UTMI */
+ .phy_utmi_width = 8, /* 8 bits */
+ .phy_ulpi_ddr = 0, /* Single */
+ .phy_ulpi_ext_vbus = 0,
+ .i2c_enable = 0,
+ .ulpi_fs_ls = 0,
+ .host_support_fs_ls_low_power = 0,
+ .host_ls_low_power_phy_clk = 0, /* 48 MHz */
+ .ts_dline = 0,
+ .reload_ctl = 0,
+ .ahbcfg = 0x10,
+ .uframe_sched = 0,
+};
+
/**
* dwc2_driver_remove() - Called when the DWC_otg core is unregistered with the
* DWC_otg driver
@@ -66,6 +95,13 @@ static int dwc2_driver_remove(struct platform_device *dev)
return 0;
}
+static const struct of_device_id dwc2_of_match_table[] = {
+ { .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
+ { .compatible = "snps,dwc2", .data = NULL },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
+
/**
* dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg
* driver
@@ -80,14 +116,22 @@ static int dwc2_driver_remove(struct platform_device *dev)
*/
static int dwc2_driver_probe(struct platform_device *dev)
{
+ const struct of_device_id *match;
+ const struct dwc2_core_params *params;
+ struct dwc2_core_params defparams;
struct dwc2_hsotg *hsotg;
struct resource *res;
int retval;
int irq;
- struct dwc2_core_params params;
- /* Default all params to autodetect */
- dwc2_set_all_params(&params, -1);
+ match = of_match_device(dwc2_of_match_table, &dev->dev);
+ if (match && match->data) {
+ params = match->data;
+ } else {
+ /* Default all params to autodetect */
+ dwc2_set_all_params(&defparams, -1);
+ params = &defparams;
+ }
hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL);
if (!hsotg)
@@ -118,7 +162,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
(unsigned long)res->start, hsotg->regs);
- retval = dwc2_hcd_init(hsotg, irq, &params);
+ retval = dwc2_hcd_init(hsotg, irq, params);
if (retval)
return retval;
@@ -127,15 +171,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
return retval;
}
-static const struct of_device_id dwc2_of_match_table[] = {
- { .compatible = "snps,dwc2" },
- {},
-};
-MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
-
static struct platform_driver dwc2_platform_driver = {
.driver = {
- .name = (char *)dwc2_driver_name,
+ .name = dwc2_driver_name,
.of_match_table = dwc2_of_match_table,
},
.probe = dwc2_driver_probe,
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index 0dac36c..518f790 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -3710,7 +3710,7 @@ default_chipset:
if (!videomemory) {
dev_warn(&pdev->dev,
"Unable to map videomem cached writethrough\n");
- info->screen_base = (char *)ZTWO_VADDR(info->fix.smem_start);
+ info->screen_base = ZTWO_VADDR(info->fix.smem_start);
} else
info->screen_base = (char *)videomemory;
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 5aab9b9..d992aa5 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -2256,7 +2256,7 @@ static int cirrusfb_zorro_register(struct zorro_dev *z,
info->fix.mmio_start = regbase;
cinfo->regbase = regbase > 16 * MB_ ? ioremap(regbase, 64 * 1024)
- : (caddr_t)ZTWO_VADDR(regbase);
+ : ZTWO_VADDR(regbase);
if (!cinfo->regbase) {
dev_err(info->device, "Cannot map registers\n");
error = -EIO;
@@ -2266,7 +2266,7 @@ static int cirrusfb_zorro_register(struct zorro_dev *z,
info->fix.smem_start = rambase;
info->screen_size = ramsize;
info->screen_base = rambase > 16 * MB_ ? ioremap(rambase, ramsize)
- : (caddr_t)ZTWO_VADDR(rambase);
+ : ZTWO_VADDR(rambase);
if (!info->screen_base) {
dev_err(info->device, "Cannot map video RAM\n");
error = -EIO;
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index 5bd2eb8..cda7587 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -34,7 +34,6 @@
#include <linux/fb.h>
#include <asm/setup.h>
-#include <asm/bootinfo.h>
#include <asm/macintosh.h>
#include <asm/io.h>
diff --git a/drivers/video/valkyriefb.c b/drivers/video/valkyriefb.c
index e287ebc..97cb9bd 100644
--- a/drivers/video/valkyriefb.c
+++ b/drivers/video/valkyriefb.c
@@ -56,7 +56,6 @@
#include <linux/cuda.h>
#include <asm/io.h>
#ifdef CONFIG_MAC
-#include <asm/bootinfo.h>
#include <asm/macintosh.h>
#else
#include <asm/prom.h>
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c444654..5c4a95b 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -285,7 +285,7 @@ static void update_balloon_size(struct virtio_balloon *vb)
{
__le32 actual = cpu_to_le32(vb->num_pages);
- virtio_cwrite(vb->vdev, struct virtio_balloon_config, num_pages,
+ virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
&actual);
}
diff --git a/drivers/vme/Kconfig b/drivers/vme/Kconfig
index c5c2246..a6a6f95 100644
--- a/drivers/vme/Kconfig
+++ b/drivers/vme/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig VME_BUS
- tristate "VME bridge support"
+ bool "VME bridge support"
depends on PCI
---help---
If you say Y here you get support for the VME bridge Framework.
diff --git a/drivers/vme/boards/vme_vmivme7805.c b/drivers/vme/boards/vme_vmivme7805.c
index cf74aee..ac42212 100644
--- a/drivers/vme/boards/vme_vmivme7805.c
+++ b/drivers/vme/boards/vme_vmivme7805.c
@@ -27,7 +27,7 @@ static void __iomem *vmic_base;
static const char driver_name[] = "vmivme_7805";
-static DEFINE_PCI_DEVICE_TABLE(vmic_ids) = {
+static const struct pci_device_id vmic_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VMIC, PCI_DEVICE_ID_VTIMR) },
{ },
};
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index f844857..a06edbf 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -42,7 +42,7 @@ static int geoid;
static const char driver_name[] = "vme_ca91cx42";
-static DEFINE_PCI_DEVICE_TABLE(ca91cx42_ids) = {
+static const struct pci_device_id ca91cx42_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
{ },
};
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 9cf8833..16830d8 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -45,7 +45,7 @@ static int geoid;
static const char driver_name[] = "vme_tsi148";
-static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
+static const struct pci_device_id tsi148_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
{ },
};
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index f6856b4..7516030 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -1274,7 +1274,7 @@ void vme_lm_free(struct vme_resource *resource)
}
EXPORT_SYMBOL(vme_lm_free);
-int vme_slot_get(struct vme_dev *vdev)
+int vme_slot_num(struct vme_dev *vdev)
{
struct vme_bridge *bridge;
@@ -1285,14 +1285,27 @@ int vme_slot_get(struct vme_dev *vdev)
}
if (bridge->slot_get == NULL) {
- printk(KERN_WARNING "vme_slot_get not supported\n");
+ printk(KERN_WARNING "vme_slot_num not supported\n");
return -EINVAL;
}
return bridge->slot_get(bridge);
}
-EXPORT_SYMBOL(vme_slot_get);
+EXPORT_SYMBOL(vme_slot_num);
+int vme_bus_num(struct vme_dev *vdev)
+{
+ struct vme_bridge *bridge;
+
+ bridge = vdev->bridge;
+ if (bridge == NULL) {
+ pr_err("Can't find VME bus\n");
+ return -EINVAL;
+ }
+
+ return bridge->num;
+}
+EXPORT_SYMBOL(vme_bus_num);
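An illustrative sketch, not from this patch, of a hypothetical VME driver probe using the renamed and new accessors:

	static int example_vme_probe(struct vme_dev *vdev)
	{
		pr_info("VME device on bus %d, slot %d\n",
			vme_bus_num(vdev), vme_slot_num(vdev));
		return 0;
	}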
/* - Bridge Registration --------------------------------------------------- */
@@ -1512,9 +1525,5 @@ static void __exit vme_exit(void)
bus_unregister(&vme_bus_type);
}
-MODULE_DESCRIPTION("VME bridge driver framework");
-MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
-MODULE_LICENSE("GPL");
-
-module_init(vme_init);
+subsys_initcall(vme_init);
module_exit(vme_exit);
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 15c7251..1e5d94c 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -46,7 +46,6 @@
struct mxc_w1_device {
void __iomem *regs;
- unsigned int clkdiv;
struct clk *clk;
struct w1_bus_master bus_master;
};
@@ -106,8 +105,10 @@ static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
static int mxc_w1_probe(struct platform_device *pdev)
{
struct mxc_w1_device *mdev;
+ unsigned long clkrate;
struct resource *res;
- int err = 0;
+ unsigned int clkdiv;
+ int err;
mdev = devm_kzalloc(&pdev->dev, sizeof(struct mxc_w1_device),
GFP_KERNEL);
@@ -118,27 +119,39 @@ static int mxc_w1_probe(struct platform_device *pdev)
if (IS_ERR(mdev->clk))
return PTR_ERR(mdev->clk);
- mdev->clkdiv = (clk_get_rate(mdev->clk) / 1000000) - 1;
+ clkrate = clk_get_rate(mdev->clk);
+ if (clkrate < 10000000)
+ dev_warn(&pdev->dev,
+ "Low clock frequency causes improper function\n");
+
+ clkdiv = DIV_ROUND_CLOSEST(clkrate, 1000000);
+ clkrate /= clkdiv;
+ if ((clkrate < 980000) || (clkrate > 1020000))
+ dev_warn(&pdev->dev,
+ "Incorrect time base frequency %lu Hz\n", clkrate);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdev->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mdev->regs))
return PTR_ERR(mdev->regs);
- clk_prepare_enable(mdev->clk);
- __raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
+ err = clk_prepare_enable(mdev->clk);
+ if (err)
+ return err;
+
+ __raw_writeb(clkdiv - 1, mdev->regs + MXC_W1_TIME_DIVIDER);
mdev->bus_master.data = mdev;
mdev->bus_master.reset_bus = mxc_w1_ds2_reset_bus;
mdev->bus_master.touch_bit = mxc_w1_ds2_touch_bit;
- err = w1_add_master_device(&mdev->bus_master);
+ platform_set_drvdata(pdev, mdev);
+ err = w1_add_master_device(&mdev->bus_master);
if (err)
- return err;
+ clk_disable_unprepare(mdev->clk);
- platform_set_drvdata(pdev, mdev);
- return 0;
+ return err;
}
/*
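
The probe rework above derives the divider with DIV_ROUND_CLOSEST() and then sanity-checks that the resulting 1-Wire time base stays between 980 kHz and 1.02 MHz, writing clkdiv - 1 to the divider register. A standalone userspace sketch of that arithmetic (DIV_ROUND_CLOSEST re-implemented locally for positive values; the sample clock rates are invented):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long rates[] = { 66500000, 33250000, 8000000 };

	for (int i = 0; i < 3; i++) {
		unsigned long clkrate = rates[i];
		unsigned int clkdiv = DIV_ROUND_CLOSEST(clkrate, 1000000);
		unsigned long timebase = clkrate / clkdiv;

		/* e.g. 66.5 MHz -> divider 67, time base ~992.5 kHz */
		printf("clk %lu Hz -> divider %u (reg %u), time base %lu Hz%s\n",
		       clkrate, clkdiv, clkdiv - 1, timebase,
		       (timebase < 980000 || timebase > 1020000) ?
				" (out of spec)" : "");
	}
	return 0;
}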
diff --git a/drivers/zorro/Makefile b/drivers/zorro/Makefile
index f621726..7dc5332f 100644
--- a/drivers/zorro/Makefile
+++ b/drivers/zorro/Makefile
@@ -2,8 +2,9 @@
# Makefile for the Zorro bus specific drivers.
#
-obj-$(CONFIG_ZORRO) += zorro.o zorro-driver.o zorro-sysfs.o names.o
+obj-$(CONFIG_ZORRO) += zorro.o zorro-driver.o zorro-sysfs.o
obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_ZORRO_NAMES) += names.o
hostprogs-y := gen-devlist
diff --git a/drivers/zorro/names.c b/drivers/zorro/names.c
index e8517c3..6f3fd99 100644
--- a/drivers/zorro/names.c
+++ b/drivers/zorro/names.c
@@ -15,8 +15,6 @@
#include <linux/zorro.h>
-#ifdef CONFIG_ZORRO_NAMES
-
struct zorro_prod_info {
__u16 prod;
unsigned short seen;
@@ -69,7 +67,6 @@ void __init zorro_name_device(struct zorro_dev *dev)
} while (--i);
/* Couldn't find either the manufacturer nor the product */
- sprintf(name, "Zorro device %08x", dev->id);
return;
match_manuf: {
@@ -98,11 +95,3 @@ void __init zorro_name_device(struct zorro_dev *dev)
}
}
}
-
-#else
-
-void __init zorro_name_device(struct zorro_dev *dev)
-{
-}
-
-#endif
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index ea1ce82..6ac2579 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -14,6 +14,8 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/export.h>
+
+#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/amigahw.h>
#include <asm/setup.h>
@@ -41,10 +43,10 @@ proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *
/* Construct a ConfigDev */
memset(&cd, 0, sizeof(cd));
cd.cd_Rom = z->rom;
- cd.cd_SlotAddr = z->slotaddr;
- cd.cd_SlotSize = z->slotsize;
- cd.cd_BoardAddr = (void *)zorro_resource_start(z);
- cd.cd_BoardSize = zorro_resource_len(z);
+ cd.cd_SlotAddr = cpu_to_be16(z->slotaddr);
+ cd.cd_SlotSize = cpu_to_be16(z->slotsize);
+ cd.cd_BoardAddr = cpu_to_be32(zorro_resource_start(z));
+ cd.cd_BoardSize = cpu_to_be32(zorro_resource_len(z));
if (copy_to_user(buf, (void *)&cd + pos, nbytes))
return -EFAULT;
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index ac1db7f..eacae14 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -161,11 +161,12 @@ static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env)
}
struct bus_type zorro_bus_type = {
- .name = "zorro",
- .match = zorro_bus_match,
- .uevent = zorro_uevent,
- .probe = zorro_device_probe,
- .remove = zorro_device_remove,
+ .name = "zorro",
+ .dev_name = "zorro",
+ .match = zorro_bus_match,
+ .uevent = zorro_uevent,
+ .probe = zorro_device_probe,
+ .remove = zorro_device_remove,
};
EXPORT_SYMBOL(zorro_bus_type);
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 26f7184..36b210f 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -16,6 +16,8 @@
#include <linux/stat.h>
#include <linux/string.h>
+#include <asm/byteorder.h>
+
#include "zorro.h"
@@ -33,10 +35,20 @@ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
zorro_config_attr(id, id, "0x%08x\n");
zorro_config_attr(type, rom.er_Type, "0x%02x\n");
-zorro_config_attr(serial, rom.er_SerialNumber, "0x%08x\n");
zorro_config_attr(slotaddr, slotaddr, "0x%04x\n");
zorro_config_attr(slotsize, slotsize, "0x%04x\n");
+static ssize_t
+show_serial(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct zorro_dev *z;
+
+ z = to_zorro_dev(dev);
+ return sprintf(buf, "0x%08x\n", be32_to_cpu(z->rom.er_SerialNumber));
+}
+
+static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
+
static ssize_t zorro_show_resource(struct device *dev, struct device_attribute *attr, char *buf)
{
struct zorro_dev *z = to_zorro_dev(dev);
@@ -60,10 +72,10 @@ static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
/* Construct a ConfigDev */
memset(&cd, 0, sizeof(cd));
cd.cd_Rom = z->rom;
- cd.cd_SlotAddr = z->slotaddr;
- cd.cd_SlotSize = z->slotsize;
- cd.cd_BoardAddr = (void *)zorro_resource_start(z);
- cd.cd_BoardSize = zorro_resource_len(z);
+ cd.cd_SlotAddr = cpu_to_be16(z->slotaddr);
+ cd.cd_SlotSize = cpu_to_be16(z->slotsize);
+ cd.cd_BoardAddr = cpu_to_be32(zorro_resource_start(z));
+ cd.cd_BoardSize = cpu_to_be32(zorro_resource_len(z));
return memory_read_from_buffer(buf, count, &off, &cd, sizeof(cd));
}
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 858c971..707c1a5 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <asm/byteorder.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
@@ -29,7 +30,8 @@
*/
unsigned int zorro_num_autocon;
-struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
+struct zorro_dev_init zorro_autocon_init[ZORRO_NUM_AUTO] __initdata;
+struct zorro_dev *zorro_autocon;
/*
@@ -38,6 +40,7 @@ struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
struct zorro_bus {
struct device dev;
+ struct zorro_dev devices[0];
};
@@ -125,18 +128,22 @@ static struct resource __init *zorro_find_parent_resource(
static int __init amiga_zorro_probe(struct platform_device *pdev)
{
struct zorro_bus *bus;
+ struct zorro_dev_init *zi;
struct zorro_dev *z;
struct resource *r;
unsigned int i;
int error;
/* Initialize the Zorro bus */
- bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+ bus = kzalloc(sizeof(*bus) +
+ zorro_num_autocon * sizeof(bus->devices[0]),
+ GFP_KERNEL);
if (!bus)
return -ENOMEM;
+ zorro_autocon = bus->devices;
bus->dev.parent = &pdev->dev;
- dev_set_name(&bus->dev, "zorro");
+ dev_set_name(&bus->dev, zorro_bus_type.name);
error = device_register(&bus->dev);
if (error) {
pr_err("Zorro: Error registering zorro_bus\n");
@@ -151,15 +158,23 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
/* First identify all devices ... */
for (i = 0; i < zorro_num_autocon; i++) {
+ zi = &zorro_autocon_init[i];
z = &zorro_autocon[i];
- z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
+
+ z->rom = zi->rom;
+ z->id = (be16_to_cpu(z->rom.er_Manufacturer) << 16) |
+ (z->rom.er_Product << 8);
if (z->id == ZORRO_PROD_GVP_EPC_BASE) {
/* GVP quirk */
- unsigned long magic = zorro_resource_start(z)+0x8000;
+ unsigned long magic = zi->boardaddr + 0x8000;
z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK;
}
+ z->slotaddr = zi->slotaddr;
+ z->slotsize = zi->slotsize;
sprintf(z->name, "Zorro device %08x", z->id);
zorro_name_device(z);
+ z->resource.start = zi->boardaddr;
+ z->resource.end = zi->boardaddr + zi->boardsize - 1;
z->resource.name = z->name;
r = zorro_find_parent_resource(pdev, z);
error = request_resource(r, &z->resource);
@@ -167,9 +182,9 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
dev_err(&bus->dev,
"Address space collision on device %s %pR\n",
z->name, &z->resource);
- dev_set_name(&z->dev, "%02x", i);
z->dev.parent = &bus->dev;
z->dev.bus = &zorro_bus_type;
+ z->dev.id = i;
}
/* ... then register them */
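
The zorro.c hunk above replaces the static zorro_autocon[] array with a per-bus array allocated in the same block as struct zorro_bus, then points the global at bus->devices. A rough standalone sketch of that single-allocation pattern (plain userspace C; the real structures are reduced to placeholders and calloc() stands in for kzalloc()):

#include <stdio.h>
#include <stdlib.h>

struct example_dev { unsigned int id; };

struct example_bus {
	int placeholder;		/* stands in for struct device dev */
	struct example_dev devices[];	/* trailing array, sized at alloc time */
};

int main(void)
{
	unsigned int num_autocon = 4;	/* discovered earlier during boot */
	struct example_bus *bus;
	struct example_dev *autocon;	/* plays the role of zorro_autocon */

	bus = calloc(1, sizeof(*bus) + num_autocon * sizeof(bus->devices[0]));
	if (!bus)
		return 1;

	autocon = bus->devices;
	for (unsigned int i = 0; i < num_autocon; i++)
		autocon[i].id = i;

	printf("%u devices allocated in one block with the bus\n", num_autocon);
	free(bus);
	return 0;
}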
diff --git a/drivers/zorro/zorro.h b/drivers/zorro/zorro.h
index b682d5c..34119fb 100644
--- a/drivers/zorro/zorro.h
+++ b/drivers/zorro/zorro.h
@@ -1,4 +1,9 @@
+#ifdef CONFIG_ZORRO_NAMES
extern void zorro_name_device(struct zorro_dev *z);
+#else
+static inline void zorro_name_device(struct zorro_dev *dev) { }
+#endif
+
extern int zorro_create_sysfs_dev_files(struct zorro_dev *z);
diff --git a/fs/Makefile b/fs/Makefile
index 4fe6df3..39a824f 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -53,7 +53,7 @@ obj-$(CONFIG_FHANDLE) += fhandle.o
obj-y += quota/
obj-$(CONFIG_PROC_FS) += proc/
-obj-$(CONFIG_SYSFS) += sysfs/
+obj-$(CONFIG_SYSFS) += sysfs/ kernfs/
obj-$(CONFIG_CONFIGFS_FS) += configfs/
obj-y += devpts/
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index aa33976..2c29db6 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -477,9 +477,10 @@ extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
-extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr,
- const unsigned char *path,
- struct cifs_sb_info *cifs_sb, unsigned int xid);
+extern int CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
+ struct cifs_fattr *fattr,
+ const unsigned char *path);
extern int mdfour(unsigned char *, unsigned char *, int);
extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
const struct nls_table *codepage);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 124aa02..d707edb 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -4010,7 +4010,7 @@ QFileInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
+		cifs_dbg(FYI, "Send error in QFileInfo = %d\n", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4179,7 +4179,7 @@ UnixQFileInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
+		cifs_dbg(FYI, "Send error in UnixQFileInfo = %d\n", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4263,7 +4263,7 @@ UnixQPathInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
+		cifs_dbg(FYI, "Send error in UnixQPathInfo = %d\n", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 11ff5f1..a514e0a 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -193,7 +193,7 @@ check_name(struct dentry *direntry)
static int
cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
struct tcon_link *tlink, unsigned oflags, umode_t mode,
- __u32 *oplock, struct cifs_fid *fid, int *created)
+ __u32 *oplock, struct cifs_fid *fid)
{
int rc = -ENOENT;
int create_options = CREATE_NOT_DIR;
@@ -349,7 +349,6 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
.device = 0,
};
- *created |= FILE_CREATED;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
args.uid = current_fsuid();
if (inode->i_mode & S_ISGID)
@@ -480,13 +479,16 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
cifs_add_pending_open(&fid, tlink, &open);
rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
- &oplock, &fid, opened);
+ &oplock, &fid);
if (rc) {
cifs_del_pending_open(&open);
goto out;
}
+ if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+ *opened |= FILE_CREATED;
+
rc = finish_open(file, direntry, generic_file_open, opened);
if (rc) {
if (server->ops->close)
@@ -529,7 +531,6 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
struct TCP_Server_Info *server;
struct cifs_fid fid;
__u32 oplock;
- int created = FILE_CREATED;
cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p\n",
inode, direntry->d_name.name, direntry);
@@ -546,7 +547,7 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
server->ops->new_lease_key(&fid);
rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
- &oplock, &fid, &created);
+ &oplock, &fid);
if (!rc && server->ops->close)
server->ops->close(xid, tcon, &fid);
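
The cifs_atomic_open() change above only reports FILE_CREATED when the caller asked for both O_CREAT and O_EXCL, the one combination where a successful open proves the file did not already exist. A tiny standalone sketch of that flag test (userspace C; the helper name is invented):

#include <fcntl.h>
#include <stdio.h>

static int definitely_created(int oflags)
{
	return (oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL);
}

int main(void)
{
	printf("O_CREAT alone    -> %d\n", definitely_created(O_CREAT));
	printf("O_CREAT | O_EXCL -> %d\n", definitely_created(O_CREAT | O_EXCL));
	return 0;
}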
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 36f9ebb..49719b8 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -383,7 +383,8 @@ int cifs_get_inode_info_unix(struct inode **pinode,
/* check for Minshall+French symlinks */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
- int tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
+ int tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
+ full_path);
if (tmprc)
cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
}
@@ -799,7 +800,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
/* check for Minshall+French symlinks */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
- tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
+ tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
+ full_path);
if (tmprc)
cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
}
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index cc023471..92aee08 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -354,34 +354,30 @@ open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
int
-CIFSCheckMFSymlink(struct cifs_fattr *fattr,
- const unsigned char *path,
- struct cifs_sb_info *cifs_sb, unsigned int xid)
+CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
+ const unsigned char *path)
{
- int rc = 0;
+ int rc;
u8 *buf = NULL;
unsigned int link_len = 0;
unsigned int bytes_read = 0;
- struct cifs_tcon *ptcon;
if (!CIFSCouldBeMFSymlink(fattr))
/* it's not a symlink */
return 0;
buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
- if (!buf) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!buf)
+ return -ENOMEM;
- ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
- if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
- rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
- &bytes_read, cifs_sb, xid);
+ if (tcon->ses->server->ops->query_mf_symlink)
+ rc = tcon->ses->server->ops->query_mf_symlink(path, buf,
+ &bytes_read, cifs_sb, xid);
else
- goto out;
+ rc = -ENOSYS;
- if (rc != 0)
+ if (rc)
goto out;
if (bytes_read == 0) /* not a symlink */
diff --git a/fs/dcache.c b/fs/dcache.c
index 6055d61..cb4a106 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3061,8 +3061,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
* thus don't need to be hashed. They also don't need a name until a
* user wants to identify the object in /proc/pid/fd/. The little hack
* below allows us to generate a name for these objects on demand:
+ *
+ * Some pseudo inodes are mountable. When they are mounted
+ * path->dentry == path->mnt->mnt_root. In that case don't call d_dname
+ * and instead have d_path return the mounted path.
*/
- if (path->dentry->d_op && path->dentry->d_op->d_dname)
+ if (path->dentry->d_op && path->dentry->d_op->d_dname &&
+ (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
rcu_read_lock();
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 8b5e258..af90312 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1907,10 +1907,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
}
}
}
- if (op == EPOLL_CTL_DEL && is_file_epoll(tf.file)) {
- tep = tf.file->private_data;
- mutex_lock_nested(&tep->mtx, 1);
- }
/*
* Try to lookup the file inside our RB tree, Since we grabbed "mtx"
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 2885349..20d6697 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
sb->s_blocksize - offset : towrite;
tmp_bh.b_state = 0;
+ tmp_bh.b_size = sb->s_blocksize;
err = ext2_get_block(inode, blk, &tmp_bh, 1);
if (err < 0)
goto out;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index e618503..ece5556 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -268,6 +268,16 @@ struct ext4_io_submit {
/* Translate # of blks to # of clusters */
#define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \
(sbi)->s_cluster_bits)
+/* Mask out the low bits to get the starting block of the cluster */
+#define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \
+ ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
+#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \
+ ~((ext4_lblk_t) (s)->s_cluster_ratio - 1))
+/* Get the cluster offset */
+#define EXT4_PBLK_COFF(s, pblk) ((pblk) & \
+ ((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
+#define EXT4_LBLK_COFF(s, lblk) ((lblk) & \
+ ((ext4_lblk_t) (s)->s_cluster_ratio - 1))
/*
* Structure of a blocks group descriptor
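
To make the arithmetic behind the new cluster helpers concrete: with bigalloc the cluster ratio is a power of two, so the CMASK macros round a block number down to the start of its cluster and the COFF macros extract the offset within it. A standalone sketch (userspace C, with the superblock argument replaced by the raw ratio and an arbitrary block number):

#include <stdio.h>

typedef unsigned long long ext4_fsblk_t;

#define PBLK_CMASK(ratio, pblk) ((pblk) & ~((ext4_fsblk_t)(ratio) - 1))
#define PBLK_COFF(ratio, pblk)  ((pblk) & ((ext4_fsblk_t)(ratio) - 1))

int main(void)
{
	unsigned int ratio = 16;	/* 16 blocks per cluster */
	ext4_fsblk_t pblk = 1000003;

	/* prints: block 1000003: cluster starts at 1000000, offset 3 */
	printf("block %llu: cluster starts at %llu, offset %llu\n",
	       pblk, PBLK_CMASK(ratio, pblk), PBLK_COFF(ratio, pblk));
	return 0;
}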
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 17ac112..3fe29de 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -259,6 +259,15 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
if (WARN_ON_ONCE(err)) {
ext4_journal_abort_handle(where, line, __func__, bh,
handle, err);
+ ext4_error_inode(inode, where, line,
+ bh->b_blocknr,
+ "journal_dirty_metadata failed: "
+ "handle type %u started at line %u, "
+ "credits %u/%u, errcode %d",
+ handle->h_type,
+ handle->h_line_no,
+ handle->h_requested_credits,
+ handle->h_buffer_credits, err);
}
} else {
if (inode)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 35f65cf..3384dc4 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -360,8 +360,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
ext4_fsblk_t block = ext4_ext_pblock(ext);
int len = ext4_ext_get_actual_len(ext);
+ ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
+ ext4_lblk_t last = lblock + len - 1;
- if (len == 0)
+ if (lblock > last)
return 0;
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
@@ -387,11 +389,26 @@ static int ext4_valid_extent_entries(struct inode *inode,
if (depth == 0) {
/* leaf entries */
struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
+ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+ ext4_fsblk_t pblock = 0;
+ ext4_lblk_t lblock = 0;
+ ext4_lblk_t prev = 0;
+ int len = 0;
while (entries) {
if (!ext4_valid_extent(inode, ext))
return 0;
+
+ /* Check for overlapping extents */
+ lblock = le32_to_cpu(ext->ee_block);
+ len = ext4_ext_get_actual_len(ext);
+ if ((lblock <= prev) && prev) {
+ pblock = ext4_ext_pblock(ext);
+ es->s_last_error_block = cpu_to_le64(pblock);
+ return 0;
+ }
ext++;
entries--;
+ prev = lblock + len - 1;
}
} else {
struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
@@ -1834,8 +1851,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
depth = ext_depth(inode);
if (!path[depth].p_ext)
goto out;
- b2 = le32_to_cpu(path[depth].p_ext->ee_block);
- b2 &= ~(sbi->s_cluster_ratio - 1);
+ b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
/*
* get the next allocated block if the extent in the path
@@ -1845,7 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
b2 = ext4_ext_next_allocated_block(path);
if (b2 == EXT_MAX_BLOCKS)
goto out;
- b2 &= ~(sbi->s_cluster_ratio - 1);
+ b2 = EXT4_LBLK_CMASK(sbi, b2);
}
/* check for wrap through zero on extent logical start block*/
@@ -2504,7 +2520,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
* extent, we have to mark the cluster as used (store negative
* cluster number in partial_cluster).
*/
- unaligned = pblk & (sbi->s_cluster_ratio - 1);
+ unaligned = EXT4_PBLK_COFF(sbi, pblk);
if (unaligned && (ee_len == num) &&
(*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
*partial_cluster = EXT4_B2C(sbi, pblk);
@@ -2598,7 +2614,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
* accidentally freeing it later on
*/
pblk = ext4_ext_pblock(ex);
- if (pblk & (sbi->s_cluster_ratio - 1))
+ if (EXT4_PBLK_COFF(sbi, pblk))
*partial_cluster =
-((long long)EXT4_B2C(sbi, pblk));
ex--;
@@ -3753,7 +3769,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t lblk_start, lblk_end;
- lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
+ lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
@@ -3812,9 +3828,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
/* Check towards left side */
- c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
+ c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
if (c_offset) {
- lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
+ lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
lblk_to = lblk_from + c_offset - 1;
if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
@@ -3822,7 +3838,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
}
/* Now check towards right. */
- c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
+ c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
if (allocated_clusters && c_offset) {
lblk_from = lblk_start + num_blks;
lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
@@ -4030,7 +4046,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
struct ext4_ext_path *path)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
+ ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
ext4_lblk_t ex_cluster_start, ex_cluster_end;
ext4_lblk_t rr_cluster_start;
ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
@@ -4048,8 +4064,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
(rr_cluster_start == ex_cluster_start)) {
if (rr_cluster_start == ex_cluster_end)
ee_start += ee_len - 1;
- map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
- c_offset;
+ map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
map->m_len = min(map->m_len,
(unsigned) sbi->s_cluster_ratio - c_offset);
/*
@@ -4203,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
*/
map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
newex.ee_block = cpu_to_le32(map->m_lblk);
- cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
+ cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
/*
* If we are doing bigalloc, check to see if the extent returned
@@ -4271,7 +4286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
* needed so that future calls to get_implied_cluster_alloc()
* work correctly.
*/
- offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
+ offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
ar.goal -= offset;
ar.logical -= offset;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0757634..61d49ff 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1206,7 +1206,6 @@ static int ext4_journalled_write_end(struct file *file,
*/
static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
{
- int retries = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int md_needed;
@@ -1218,7 +1217,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
* in order to allocate nrblocks
* worse case is one extent per block
*/
-repeat:
spin_lock(&ei->i_block_reservation_lock);
/*
* ext4_calc_metadata_amount() has side effects, which we have
@@ -1238,10 +1236,6 @@ repeat:
ei->i_da_metadata_calc_len = save_len;
ei->i_da_metadata_calc_last_lblock = save_last_lblock;
spin_unlock(&ei->i_block_reservation_lock);
- if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
- cond_resched();
- goto repeat;
- }
return -ENOSPC;
}
ei->i_reserved_meta_blocks += md_needed;
@@ -1255,7 +1249,6 @@ repeat:
*/
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
- int retries = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int md_needed;
@@ -1277,7 +1270,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
* in order to allocate nrblocks
* worse case is one extent per block
*/
-repeat:
spin_lock(&ei->i_block_reservation_lock);
/*
* ext4_calc_metadata_amount() has side effects, which we have
@@ -1297,10 +1289,6 @@ repeat:
ei->i_da_metadata_calc_len = save_len;
ei->i_da_metadata_calc_last_lblock = save_last_lblock;
spin_unlock(&ei->i_block_reservation_lock);
- if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
- cond_resched();
- goto repeat;
- }
dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
return -ENOSPC;
}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 4d113ef..04a5c75 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3442,6 +3442,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head)
{
struct ext4_prealloc_space *pa;
pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
+
+ BUG_ON(atomic_read(&pa->pa_count));
+ BUG_ON(pa->pa_deleted == 0);
kmem_cache_free(ext4_pspace_cachep, pa);
}
@@ -3455,11 +3458,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
ext4_group_t grp;
ext4_fsblk_t grp_blk;
- if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
- return;
-
/* in this short window concurrent discard can set pa_deleted */
spin_lock(&pa->pa_lock);
+ if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
+ spin_unlock(&pa->pa_lock);
+ return;
+ }
+
if (pa->pa_deleted == 1) {
spin_unlock(&pa->pa_lock);
return;
@@ -4121,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
ext4_get_group_no_and_offset(sb, goal, &group, &block);
/* set up allocation goals */
- ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
+ ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
ac->ac_status = AC_STATUS_CONTINUE;
ac->ac_sb = sb;
ac->ac_inode = ar->inode;
@@ -4663,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
* blocks at the beginning or the end unless we are explicitly
* requested to avoid doing so.
*/
- overflow = block & (sbi->s_cluster_ratio - 1);
+ overflow = EXT4_PBLK_COFF(sbi, block);
if (overflow) {
if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
overflow = sbi->s_cluster_ratio - overflow;
@@ -4677,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
count += overflow;
}
}
- overflow = count & (sbi->s_cluster_ratio - 1);
+ overflow = EXT4_LBLK_COFF(sbi, count);
if (overflow) {
if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
if (count > overflow)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c977f4e..1f7784d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -792,7 +792,7 @@ static void ext4_put_super(struct super_block *sb)
}
ext4_es_unregister_shrinker(sbi);
- del_timer(&sbi->s_err_report);
+ del_timer_sync(&sbi->s_err_report);
ext4_release_system_zone(sb);
ext4_mb_release(sb);
ext4_ext_release(sb);
@@ -3316,11 +3316,19 @@ int ext4_calculate_overhead(struct super_block *sb)
}
-static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
+static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb)
{
ext4_fsblk_t resv_clusters;
/*
+ * There's no need to reserve anything when we aren't using extents.
+ * The space estimates are exact, there are no unwritten extents,
+ * hole punching doesn't need new metadata... This is needed especially
+ * to keep ext2/3 backward compatibility.
+ */
+ if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
+ return 0;
+ /*
* By default we reserve 2% or 4096 clusters, whichever is smaller.
* This should cover the situations where we can not afford to run
* out of space like for example punch hole, or converting
@@ -3328,7 +3336,8 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
* allocation would require 1, or 2 blocks, higher numbers are
* very rare.
*/
- resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
+ resv_clusters = ext4_blocks_count(EXT4_SB(sb)->s_es) >>
+ EXT4_SB(sb)->s_cluster_bits;
do_div(resv_clusters, 50);
resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
@@ -4071,10 +4080,10 @@ no_journal:
"available");
}
- err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi));
+ err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sb));
if (err) {
ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
- "reserved pool", ext4_calculate_resv_clusters(sbi));
+ "reserved pool", ext4_calculate_resv_clusters(sb));
goto failed_mount4a;
}
@@ -4184,7 +4193,7 @@ failed_mount_wq:
}
failed_mount3:
ext4_es_unregister_shrinker(sbi);
- del_timer(&sbi->s_err_report);
+ del_timer_sync(&sbi->s_err_report);
if (sbi->s_flex_groups)
ext4_kvfree(sbi->s_flex_groups);
percpu_counter_destroy(&sbi->s_freeclusters_counter);
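
ext4_calculate_resv_clusters() above reserves 2% of the cluster count or 4096 clusters, whichever is smaller, and skips the reservation entirely on non-extent (ext2/3 compatible) filesystems. A standalone sketch of that calculation with invented sizes:

#include <stdio.h>

static unsigned long long calc_resv_clusters(unsigned long long blocks,
					     unsigned int cluster_bits,
					     int has_extents)
{
	unsigned long long resv;

	if (!has_extents)
		return 0;

	resv = (blocks >> cluster_bits) / 50;	/* 2% of the clusters */
	return resv < 4096 ? resv : 4096;	/* capped at 4096 */
}

int main(void)
{
	printf("%llu\n", calc_resv_clusters(100000ULL, 0, 1));	/* 2000 */
	printf("%llu\n", calc_resv_clusters(1ULL << 30, 0, 1));	/* 4096 */
	printf("%llu\n", calc_resv_clusters(1ULL << 30, 0, 0));	/* 0 */
	return 0;
}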
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 1f4a10e..e0259a1 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -516,13 +516,16 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
}
WARN_ON(inode->i_state & I_SYNC);
/*
- * Skip inode if it is clean. We don't want to mess with writeback
- * lists in this function since flusher thread may be doing for example
- * sync in parallel and if we move the inode, it could get skipped. So
- * here we make sure inode is on some writeback list and leave it there
- * unless we have completely cleaned the inode.
+ * Skip inode if it is clean and we have no outstanding writeback in
+ * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
+ * function since the flusher thread may, for example, be doing sync in
+ * parallel and if we move the inode, it could get skipped. So here we
+ * make sure the inode is on some writeback list and leave it there unless
+ * we have completely cleaned the inode.
*/
- if (!(inode->i_state & I_DIRTY))
+ if (!(inode->i_state & I_DIRTY) &&
+ (wbc->sync_mode != WB_SYNC_ALL ||
+ !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
goto out;
inode->i_state |= I_SYNC;
spin_unlock(&inode->i_lock);
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index b7fc035..73f3e4e 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -986,6 +986,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
+ struct address_space *mapping = inode->i_mapping;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int rv;
@@ -1006,6 +1007,35 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
if (rv != 1)
goto out; /* dio not valid, fall back to buffered i/o */
+ /*
+ * Now since we are holding a deferred (CW) lock at this point, you
+ * might be wondering why this is ever needed. There is a case however
+ * where we've granted a deferred local lock against a cached exclusive
+ * glock. That is ok provided all granted local locks are deferred, but
+ * it also means that it is possible to encounter pages which are
+ * cached and possibly also mapped. So here we check for that and sort
+ * them out ahead of the dio. The glock state machine will take care of
+ * everything else.
+ *
+ * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
+ * the first place, mapping->nrpages will always be zero.
+ */
+ if (mapping->nrpages) {
+ loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
+ loff_t len = iov_length(iov, nr_segs);
+ loff_t end = PAGE_ALIGN(offset + len) - 1;
+
+ rv = 0;
+ if (len == 0)
+ goto out;
+ if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+ unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
+ rv = filemap_write_and_wait_range(mapping, lstart, end);
+ if (rv)
+ return rv;
+ truncate_inode_pages_range(mapping, lstart, end);
+ }
+
rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs, gfs2_get_block_direct,
NULL, NULL, 0);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c8420f7..6f7a47c 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1655,6 +1655,7 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
struct task_struct *gh_owner = NULL;
char flags_buf[32];
+ rcu_read_lock();
if (gh->gh_owner_pid)
gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
@@ -1664,6 +1665,7 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
gh_owner ? gh_owner->comm : "(ended)",
(void *)gh->gh_ip);
+ rcu_read_unlock();
return 0;
}
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index db908f6..f88dcd9 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -192,8 +192,11 @@ static void inode_go_sync(struct gfs2_glock *gl)
if (ip && !S_ISREG(ip->i_inode.i_mode))
ip = NULL;
- if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
- unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
+ if (ip) {
+ if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+ unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
+ inode_dio_wait(&ip->i_inode);
+ }
if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
return;
@@ -410,6 +413,9 @@ static int inode_go_lock(struct gfs2_holder *gh)
return error;
}
+ if (gh->gh_state != LM_ST_DEFERRED)
+ inode_dio_wait(&ip->i_inode);
+
if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
(gl->gl_state == LM_ST_EXCLUSIVE) &&
(gh->gh_state == LM_ST_EXCLUSIVE)) {
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 610613f..9dcb977 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -551,10 +551,10 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
struct buffer_head *bh = bd->bd_bh;
struct gfs2_glock *gl = bd->bd_gl;
- gfs2_remove_from_ail(bd);
- bd->bd_bh = NULL;
bh->b_private = NULL;
bd->bd_blkno = bh->b_blocknr;
+ gfs2_remove_from_ail(bd); /* drops ref on bh */
+ bd->bd_bh = NULL;
bd->bd_ops = &gfs2_revoke_lops;
sdp->sd_log_num_revoke++;
atomic_inc(&gl->gl_revokes);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 9324150..52f177b 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -258,6 +258,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
struct address_space *mapping = bh->b_page->mapping;
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct gfs2_bufdata *bd = bh->b_private;
+ int was_pinned = 0;
if (test_clear_buffer_pinned(bh)) {
trace_gfs2_pin(bd, 0);
@@ -273,12 +274,16 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
tr->tr_num_databuf_rm++;
}
tr->tr_touched = 1;
+ was_pinned = 1;
brelse(bh);
}
if (bd) {
spin_lock(&sdp->sd_ail_lock);
if (bd->bd_tr) {
gfs2_trans_add_revoke(sdp, bd);
+ } else if (was_pinned) {
+ bh->b_private = NULL;
+ kmem_cache_free(gfs2_bufdata_cachep, bd);
}
spin_unlock(&sdp->sd_ail_lock);
}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 82303b4..52fa883 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1366,8 +1366,18 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
if (IS_ERR(s))
goto error_bdev;
- if (s->s_root)
+ if (s->s_root) {
+ /*
+ * s_umount nests inside bd_mutex during
+ * __invalidate_device(). blkdev_put() acquires
+ * bd_mutex and can't be called under s_umount. Drop
+ * s_umount temporarily. This is safe as we're
+ * holding an active reference.
+ */
+ up_write(&s->s_umount);
blkdev_put(bdev, mode);
+ down_write(&s->s_umount);
+ }
memset(&args, 0, sizeof(args));
args.ar_quota = GFS2_QUOTA_DEFAULT;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 5203264..5fa344a 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -702,7 +702,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
read_lock(&journal->j_state_lock);
#ifdef CONFIG_JBD2_DEBUG
if (!tid_geq(journal->j_commit_request, tid)) {
- printk(KERN_EMERG
+ printk(KERN_ERR
"%s: error: j_commit_request=%d, tid=%d\n",
__func__, journal->j_commit_request, tid);
}
@@ -718,10 +718,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
}
read_unlock(&journal->j_state_lock);
- if (unlikely(is_journal_aborted(journal))) {
- printk(KERN_EMERG "journal commit I/O error\n");
+ if (unlikely(is_journal_aborted(journal)))
err = -EIO;
- }
return err;
}
@@ -1527,13 +1525,13 @@ static int journal_get_superblock(journal_t *journal)
if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
/* Can't have checksum v1 and v2 on at the same time! */
- printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 "
+ printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 "
"at the same time!\n");
goto out;
}
if (!jbd2_verify_csum_type(journal, sb)) {
- printk(KERN_ERR "JBD: Unknown checksum type\n");
+ printk(KERN_ERR "JBD2: Unknown checksum type\n");
goto out;
}
@@ -1541,7 +1539,7 @@ static int journal_get_superblock(journal_t *journal)
if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
if (IS_ERR(journal->j_chksum_driver)) {
- printk(KERN_ERR "JBD: Cannot load crc32c driver.\n");
+ printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
err = PTR_ERR(journal->j_chksum_driver);
journal->j_chksum_driver = NULL;
goto out;
@@ -1550,7 +1548,7 @@ static int journal_get_superblock(journal_t *journal)
/* Check superblock checksum */
if (!jbd2_superblock_csum_verify(journal, sb)) {
- printk(KERN_ERR "JBD: journal checksum error\n");
+ printk(KERN_ERR "JBD2: journal checksum error\n");
goto out;
}
@@ -1836,7 +1834,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
journal->j_chksum_driver = crypto_alloc_shash("crc32c",
0, 0);
if (IS_ERR(journal->j_chksum_driver)) {
- printk(KERN_ERR "JBD: Cannot load crc32c "
+ printk(KERN_ERR "JBD2: Cannot load crc32c "
"driver.\n");
journal->j_chksum_driver = NULL;
return 0;
@@ -2645,7 +2643,7 @@ static void __exit journal_exit(void)
#ifdef CONFIG_JBD2_DEBUG
int n = atomic_read(&nr_journal_heads);
if (n)
- printk(KERN_EMERG "JBD2: leaked %d journal_heads!\n", n);
+ printk(KERN_ERR "JBD2: leaked %d journal_heads!\n", n);
#endif
jbd2_remove_jbd_stats_proc_entry();
jbd2_journal_destroy_caches();
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 3929c50..3b6bb19 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -594,7 +594,7 @@ static int do_one_pass(journal_t *journal,
be32_to_cpu(tmp->h_sequence))) {
brelse(obh);
success = -EIO;
- printk(KERN_ERR "JBD: Invalid "
+ printk(KERN_ERR "JBD2: Invalid "
"checksum recovering "
"block %llu in log\n",
blocknr);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 7aa9a32..8360674 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -932,7 +932,7 @@ repeat:
jbd2_alloc(jh2bh(jh)->b_size,
GFP_NOFS);
if (!frozen_buffer) {
- printk(KERN_EMERG
+ printk(KERN_ERR
"%s: OOM for frozen_buffer\n",
__func__);
JBUFFER_TRACE(jh, "oom!");
@@ -1166,7 +1166,7 @@ repeat:
if (!jh->b_committed_data) {
committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
if (!committed_data) {
- printk(KERN_EMERG "%s: No memory for committed data\n",
+ printk(KERN_ERR "%s: No memory for committed data\n",
__func__);
err = -ENOMEM;
goto out;
@@ -1290,7 +1290,10 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
* once a transaction -bzzz
*/
jh->b_modified = 1;
- J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
+ if (handle->h_buffer_credits <= 0) {
+ ret = -ENOSPC;
+ goto out_unlock_bh;
+ }
handle->h_buffer_credits--;
}
@@ -1305,7 +1308,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "fastpath");
if (unlikely(jh->b_transaction !=
journal->j_running_transaction)) {
- printk(KERN_EMERG "JBD: %s: "
+ printk(KERN_ERR "JBD2: %s: "
"jh->b_transaction (%llu, %p, %u) != "
"journal->j_running_transaction (%p, %u)",
journal->j_devname,
@@ -1332,7 +1335,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "already on other transaction");
if (unlikely(jh->b_transaction !=
journal->j_committing_transaction)) {
- printk(KERN_EMERG "JBD: %s: "
+ printk(KERN_ERR "JBD2: %s: "
"jh->b_transaction (%llu, %p, %u) != "
"journal->j_committing_transaction (%p, %u)",
journal->j_devname,
@@ -1345,7 +1348,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
ret = -EINVAL;
}
if (unlikely(jh->b_next_transaction != transaction)) {
- printk(KERN_EMERG "JBD: %s: "
+ printk(KERN_ERR "JBD2: %s: "
"jh->b_next_transaction (%llu, %p, %u) != "
"transaction (%p, %u)",
journal->j_devname,
@@ -1373,7 +1376,6 @@ out_unlock_bh:
jbd2_journal_put_journal_head(jh);
out:
JBUFFER_TRACE(jh, "exit");
- WARN_ON(ret); /* All errors are bugs, so dump the stack */
return ret;
}
diff --git a/fs/kernfs/Makefile b/fs/kernfs/Makefile
new file mode 100644
index 0000000..674337c
--- /dev/null
+++ b/fs/kernfs/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the kernfs pseudo filesystem
+#
+
+obj-y := mount.o inode.o dir.o file.o symlink.o
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
new file mode 100644
index 0000000..5104cf5
--- /dev/null
+++ b/fs/kernfs/dir.c
@@ -0,0 +1,1073 @@
+/*
+ * fs/kernfs/dir.c - kernfs directory implementation
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/hash.h>
+
+#include "kernfs-internal.h"
+
+DEFINE_MUTEX(kernfs_mutex);
+
+#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
+
+/**
+ * kernfs_name_hash
+ * @name: Null terminated string to hash
+ * @ns: Namespace tag to hash
+ *
+ * Returns 31 bit hash of ns + name (so it fits in an off_t)
+ */
+static unsigned int kernfs_name_hash(const char *name, const void *ns)
+{
+ unsigned long hash = init_name_hash();
+ unsigned int len = strlen(name);
+ while (len--)
+ hash = partial_name_hash(*name++, hash);
+ hash = (end_name_hash(hash) ^ hash_ptr((void *)ns, 31));
+ hash &= 0x7fffffffU;
+ /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
+ if (hash < 1)
+ hash += 2;
+ if (hash >= INT_MAX)
+ hash = INT_MAX - 1;
+ return hash;
+}
+
+static int kernfs_name_compare(unsigned int hash, const char *name,
+ const void *ns, const struct kernfs_node *kn)
+{
+ if (hash != kn->hash)
+ return hash - kn->hash;
+ if (ns != kn->ns)
+ return ns - kn->ns;
+ return strcmp(name, kn->name);
+}
+
+static int kernfs_sd_compare(const struct kernfs_node *left,
+ const struct kernfs_node *right)
+{
+ return kernfs_name_compare(left->hash, left->name, left->ns, right);
+}
+
+/**
+ * kernfs_link_sibling - link kernfs_node into sibling rbtree
+ * @kn: kernfs_node of interest
+ *
+ * Link @kn into its sibling rbtree which starts from
+ * @kn->parent->dir.children.
+ *
+ * Locking:
+ * mutex_lock(kernfs_mutex)
+ *
+ * RETURNS:
+ * 0 on success, -EEXIST on failure.
+ */
+static int kernfs_link_sibling(struct kernfs_node *kn)
+{
+ struct rb_node **node = &kn->parent->dir.children.rb_node;
+ struct rb_node *parent = NULL;
+
+ if (kernfs_type(kn) == KERNFS_DIR)
+ kn->parent->dir.subdirs++;
+
+ while (*node) {
+ struct kernfs_node *pos;
+ int result;
+
+ pos = rb_to_kn(*node);
+ parent = *node;
+ result = kernfs_sd_compare(kn, pos);
+ if (result < 0)
+ node = &pos->rb.rb_left;
+ else if (result > 0)
+ node = &pos->rb.rb_right;
+ else
+ return -EEXIST;
+ }
+ /* add new node and rebalance the tree */
+ rb_link_node(&kn->rb, parent, node);
+ rb_insert_color(&kn->rb, &kn->parent->dir.children);
+ return 0;
+}
+
+/**
+ * kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree
+ * @kn: kernfs_node of interest
+ *
+ * Unlink @kn from its sibling rbtree which starts from
+ * kn->parent->dir.children.
+ *
+ * Locking:
+ * mutex_lock(kernfs_mutex)
+ */
+static void kernfs_unlink_sibling(struct kernfs_node *kn)
+{
+ if (kernfs_type(kn) == KERNFS_DIR)
+ kn->parent->dir.subdirs--;
+
+ rb_erase(&kn->rb, &kn->parent->dir.children);
+}
+
+/**
+ * kernfs_get_active - get an active reference to kernfs_node
+ * @kn: kernfs_node to get an active reference to
+ *
+ * Get an active reference of @kn. This function is a noop if @kn
+ * is NULL.
+ *
+ * RETURNS:
+ * Pointer to @kn on success, NULL on failure.
+ */
+struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
+{
+ if (unlikely(!kn))
+ return NULL;
+
+ if (!atomic_inc_unless_negative(&kn->active))
+ return NULL;
+
+ if (kn->flags & KERNFS_LOCKDEP)
+ rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
+ return kn;
+}
+
+/**
+ * kernfs_put_active - put an active reference to kernfs_node
+ * @kn: kernfs_node to put an active reference to
+ *
+ * Put an active reference to @kn. This function is a noop if @kn
+ * is NULL.
+ */
+void kernfs_put_active(struct kernfs_node *kn)
+{
+ int v;
+
+ if (unlikely(!kn))
+ return;
+
+ if (kn->flags & KERNFS_LOCKDEP)
+ rwsem_release(&kn->dep_map, 1, _RET_IP_);
+ v = atomic_dec_return(&kn->active);
+ if (likely(v != KN_DEACTIVATED_BIAS))
+ return;
+
+ /*
+ * atomic_dec_return() is a mb(), we'll always see the updated
+ * kn->u.completion.
+ */
+ complete(kn->u.completion);
+}
+
+/**
+ * kernfs_deactivate - deactivate kernfs_node
+ * @kn: kernfs_node to deactivate
+ *
+ * Deny new active references and drain existing ones.
+ */
+static void kernfs_deactivate(struct kernfs_node *kn)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ int v;
+
+ BUG_ON(!(kn->flags & KERNFS_REMOVED));
+
+ if (!(kernfs_type(kn) & KERNFS_ACTIVE_REF))
+ return;
+
+ kn->u.completion = (void *)&wait;
+
+ rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
+ /* atomic_add_return() is a mb(), put_active() will always see
+ * the updated kn->u.completion.
+ */
+ v = atomic_add_return(KN_DEACTIVATED_BIAS, &kn->active);
+
+ if (v != KN_DEACTIVATED_BIAS) {
+ lock_contended(&kn->dep_map, _RET_IP_);
+ wait_for_completion(&wait);
+ }
+
+ lock_acquired(&kn->dep_map, _RET_IP_);
+ rwsem_release(&kn->dep_map, 1, _RET_IP_);
+}
+
+/**
+ * kernfs_get - get a reference count on a kernfs_node
+ * @kn: the target kernfs_node
+ */
+void kernfs_get(struct kernfs_node *kn)
+{
+ if (kn) {
+ WARN_ON(!atomic_read(&kn->count));
+ atomic_inc(&kn->count);
+ }
+}
+EXPORT_SYMBOL_GPL(kernfs_get);
+
+/**
+ * kernfs_put - put a reference count on a kernfs_node
+ * @kn: the target kernfs_node
+ *
+ * Put a reference count of @kn and destroy it if it reached zero.
+ */
+void kernfs_put(struct kernfs_node *kn)
+{
+ struct kernfs_node *parent;
+ struct kernfs_root *root;
+
+ if (!kn || !atomic_dec_and_test(&kn->count))
+ return;
+ root = kernfs_root(kn);
+ repeat:
+ /* Moving/renaming is always done while holding reference.
+ * kn->parent won't change beneath us.
+ */
+ parent = kn->parent;
+
+ WARN(!(kn->flags & KERNFS_REMOVED), "kernfs: free using entry: %s/%s\n",
+ parent ? parent->name : "", kn->name);
+
+ if (kernfs_type(kn) == KERNFS_LINK)
+ kernfs_put(kn->symlink.target_kn);
+ if (!(kn->flags & KERNFS_STATIC_NAME))
+ kfree(kn->name);
+ if (kn->iattr) {
+ if (kn->iattr->ia_secdata)
+ security_release_secctx(kn->iattr->ia_secdata,
+ kn->iattr->ia_secdata_len);
+ simple_xattrs_free(&kn->iattr->xattrs);
+ }
+ kfree(kn->iattr);
+ ida_simple_remove(&root->ino_ida, kn->ino);
+ kmem_cache_free(kernfs_node_cache, kn);
+
+ kn = parent;
+ if (kn) {
+ if (atomic_dec_and_test(&kn->count))
+ goto repeat;
+ } else {
+ /* just released the root kn, free @root too */
+ ida_destroy(&root->ino_ida);
+ kfree(root);
+ }
+}
+EXPORT_SYMBOL_GPL(kernfs_put);
+
+static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ struct kernfs_node *kn;
+
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ /* Always perform fresh lookup for negatives */
+ if (!dentry->d_inode)
+ goto out_bad_unlocked;
+
+ kn = dentry->d_fsdata;
+ mutex_lock(&kernfs_mutex);
+
+ /* The kernfs node has been deleted */
+ if (kn->flags & KERNFS_REMOVED)
+ goto out_bad;
+
+ /* The kernfs node has been moved? */
+ if (dentry->d_parent->d_fsdata != kn->parent)
+ goto out_bad;
+
+ /* The kernfs node has been renamed */
+ if (strcmp(dentry->d_name.name, kn->name) != 0)
+ goto out_bad;
+
+ /* The kernfs node has been moved to a different namespace */
+ if (kn->parent && kernfs_ns_enabled(kn->parent) &&
+ kernfs_info(dentry->d_sb)->ns != kn->ns)
+ goto out_bad;
+
+ mutex_unlock(&kernfs_mutex);
+out_valid:
+ return 1;
+out_bad:
+ mutex_unlock(&kernfs_mutex);
+out_bad_unlocked:
+ /*
+ * @dentry doesn't match the underlying kernfs node, drop the
+ * dentry and force lookup. If we have submounts we must allow the
+ * vfs caches to lie about the state of the filesystem to prevent
+ * leaks and other nasty things, so use check_submounts_and_drop()
+ * instead of d_drop().
+ */
+ if (check_submounts_and_drop(dentry) != 0)
+ goto out_valid;
+
+ return 0;
+}
+
+static void kernfs_dop_release(struct dentry *dentry)
+{
+ kernfs_put(dentry->d_fsdata);
+}
+
+const struct dentry_operations kernfs_dops = {
+ .d_revalidate = kernfs_dop_revalidate,
+ .d_release = kernfs_dop_release,
+};
+
+static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
+ const char *name, umode_t mode,
+ unsigned flags)
+{
+ char *dup_name = NULL;
+ struct kernfs_node *kn;
+ int ret;
+
+ if (!(flags & KERNFS_STATIC_NAME)) {
+ name = dup_name = kstrdup(name, GFP_KERNEL);
+ if (!name)
+ return NULL;
+ }
+
+ kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
+ if (!kn)
+ goto err_out1;
+
+ ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
+ if (ret < 0)
+ goto err_out2;
+ kn->ino = ret;
+
+ atomic_set(&kn->count, 1);
+ atomic_set(&kn->active, 0);
+
+ kn->name = name;
+ kn->mode = mode;
+ kn->flags = flags | KERNFS_REMOVED;
+
+ return kn;
+
+ err_out2:
+ kmem_cache_free(kernfs_node_cache, kn);
+ err_out1:
+ kfree(dup_name);
+ return NULL;
+}
+
+struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
+ const char *name, umode_t mode,
+ unsigned flags)
+{
+ struct kernfs_node *kn;
+
+ kn = __kernfs_new_node(kernfs_root(parent), name, mode, flags);
+ if (kn) {
+ kernfs_get(parent);
+ kn->parent = parent;
+ }
+ return kn;
+}
+
+/**
+ * kernfs_addrm_start - prepare for kernfs_node add/remove
+ * @acxt: pointer to kernfs_addrm_cxt to be used
+ *
+ * This function is called when the caller is about to add or remove
+ * kernfs_node. This function acquires kernfs_mutex. @acxt is used
+ * to keep and pass context to other addrm functions.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep). kernfs_mutex is locked on
+ * return.
+ */
+void kernfs_addrm_start(struct kernfs_addrm_cxt *acxt)
+ __acquires(kernfs_mutex)
+{
+ memset(acxt, 0, sizeof(*acxt));
+
+ mutex_lock(&kernfs_mutex);
+}
+
+/**
+ * kernfs_add_one - add kernfs_node to parent without warning
+ * @acxt: addrm context to use
+ * @kn: kernfs_node to be added
+ *
+ * The caller must already have initialized @kn->parent. This
+ * function increments nlink of the parent's inode if @kn is a
+ * directory and links @kn into the children list of the parent.
+ *
+ * This function should be called between calls to
+ * kernfs_addrm_start() and kernfs_addrm_finish() and should be passed
+ * the same @acxt as passed to kernfs_addrm_start().
+ *
+ * LOCKING:
+ * Determined by kernfs_addrm_start().
+ *
+ * RETURNS:
+ * 0 on success, -EEXIST if entry with the given name already
+ * exists.
+ */
+int kernfs_add_one(struct kernfs_addrm_cxt *acxt, struct kernfs_node *kn)
+{
+ struct kernfs_node *parent = kn->parent;
+ bool has_ns = kernfs_ns_enabled(parent);
+ struct kernfs_iattrs *ps_iattr;
+ int ret;
+
+ if (has_ns != (bool)kn->ns) {
+ WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
+ has_ns ? "required" : "invalid", parent->name, kn->name);
+ return -EINVAL;
+ }
+
+ if (kernfs_type(parent) != KERNFS_DIR)
+ return -EINVAL;
+
+ if (parent->flags & KERNFS_REMOVED)
+ return -ENOENT;
+
+ kn->hash = kernfs_name_hash(kn->name, kn->ns);
+
+ ret = kernfs_link_sibling(kn);
+ if (ret)
+ return ret;
+
+ /* Update timestamps on the parent */
+ ps_iattr = parent->iattr;
+ if (ps_iattr) {
+ struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
+ ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME;
+ }
+
+ /* Mark the entry added into directory tree */
+ kn->flags &= ~KERNFS_REMOVED;
+
+ return 0;
+}
+
+/**
+ * kernfs_remove_one - remove kernfs_node from parent
+ * @acxt: addrm context to use
+ * @kn: kernfs_node to be removed
+ *
+ * Mark @kn removed and drop nlink of parent inode if @kn is a
+ * directory. @kn is unlinked from the children list.
+ *
+ * This function should be called between calls to
+ * kernfs_addrm_start() and kernfs_addrm_finish() and should be
+ * passed the same @acxt as passed to kernfs_addrm_start().
+ *
+ * LOCKING:
+ * Determined by kernfs_addrm_start().
+ */
+static void kernfs_remove_one(struct kernfs_addrm_cxt *acxt,
+ struct kernfs_node *kn)
+{
+ struct kernfs_iattrs *ps_iattr;
+
+ /*
+ * Removal can be called multiple times on the same node. Only the
+ * first invocation is effective and puts the base ref.
+ */
+ if (kn->flags & KERNFS_REMOVED)
+ return;
+
+ if (kn->parent) {
+ kernfs_unlink_sibling(kn);
+
+ /* Update timestamps on the parent */
+ ps_iattr = kn->parent->iattr;
+ if (ps_iattr) {
+ ps_iattr->ia_iattr.ia_ctime = CURRENT_TIME;
+ ps_iattr->ia_iattr.ia_mtime = CURRENT_TIME;
+ }
+ }
+
+ kn->flags |= KERNFS_REMOVED;
+ kn->u.removed_list = acxt->removed;
+ acxt->removed = kn;
+}
+
+/**
+ * kernfs_addrm_finish - finish up kernfs_node add/remove
+ * @acxt: addrm context to finish up
+ *
+ * Finish up kernfs_node add/remove. Resources acquired by
+ * kernfs_addrm_start() are released and removed kernfs_nodes are
+ * cleaned up.
+ *
+ * LOCKING:
+ * kernfs_mutex is released.
+ */
+void kernfs_addrm_finish(struct kernfs_addrm_cxt *acxt)
+ __releases(kernfs_mutex)
+{
+ /* release resources acquired by kernfs_addrm_start() */
+ mutex_unlock(&kernfs_mutex);
+
+ /* kill removed kernfs_nodes */
+ while (acxt->removed) {
+ struct kernfs_node *kn = acxt->removed;
+
+ acxt->removed = kn->u.removed_list;
+
+ kernfs_deactivate(kn);
+ kernfs_unmap_bin_file(kn);
+ kernfs_put(kn);
+ }
+}
+
+/**
+ * kernfs_find_ns - find kernfs_node with the given name
+ * @parent: kernfs_node to search under
+ * @name: name to look for
+ * @ns: the namespace tag to use
+ *
+ * Look for kernfs_node with name @name under @parent. Returns pointer to
+ * the found kernfs_node on success, %NULL on failure.
+ */
+static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
+ const unsigned char *name,
+ const void *ns)
+{
+ struct rb_node *node = parent->dir.children.rb_node;
+ bool has_ns = kernfs_ns_enabled(parent);
+ unsigned int hash;
+
+ lockdep_assert_held(&kernfs_mutex);
+
+ if (has_ns != (bool)ns) {
+ WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
+ has_ns ? "required" : "invalid", parent->name, name);
+ return NULL;
+ }
+
+ hash = kernfs_name_hash(name, ns);
+ while (node) {
+ struct kernfs_node *kn;
+ int result;
+
+ kn = rb_to_kn(node);
+ result = kernfs_name_compare(hash, name, ns, kn);
+ if (result < 0)
+ node = node->rb_left;
+ else if (result > 0)
+ node = node->rb_right;
+ else
+ return kn;
+ }
+ return NULL;
+}
+
+/**
+ * kernfs_find_and_get_ns - find and get kernfs_node with the given name
+ * @parent: kernfs_node to search under
+ * @name: name to look for
+ * @ns: the namespace tag to use
+ *
+ * Look for kernfs_node with name @name under @parent and get a reference
+ * if found. This function may sleep and returns pointer to the found
+ * kernfs_node on success, %NULL on failure.
+ */
+struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
+ const char *name, const void *ns)
+{
+ struct kernfs_node *kn;
+
+ mutex_lock(&kernfs_mutex);
+ kn = kernfs_find_ns(parent, name, ns);
+ kernfs_get(kn);
+ mutex_unlock(&kernfs_mutex);
+
+ return kn;
+}
+EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);
+
+/**
+ * kernfs_create_root - create a new kernfs hierarchy
+ * @kdops: optional directory syscall operations for the hierarchy
+ * @priv: opaque data associated with the new directory
+ *
+ * Returns the root of the new hierarchy on success, ERR_PTR() value on
+ * failure.
+ */
+struct kernfs_root *kernfs_create_root(struct kernfs_dir_ops *kdops, void *priv)
+{
+ struct kernfs_root *root;
+ struct kernfs_node *kn;
+
+ root = kzalloc(sizeof(*root), GFP_KERNEL);
+ if (!root)
+ return ERR_PTR(-ENOMEM);
+
+ ida_init(&root->ino_ida);
+
+ kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO,
+ KERNFS_DIR);
+ if (!kn) {
+ ida_destroy(&root->ino_ida);
+ kfree(root);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ kn->flags &= ~KERNFS_REMOVED;
+ kn->priv = priv;
+ kn->dir.root = root;
+
+ root->dir_ops = kdops;
+ root->kn = kn;
+
+ return root;
+}
+
+/**
+ * kernfs_destroy_root - destroy a kernfs hierarchy
+ * @root: root of the hierarchy to destroy
+ *
+ * Destroy the hierarchy anchored at @root by removing all existing
+ * directories and destroying @root.
+ */
+void kernfs_destroy_root(struct kernfs_root *root)
+{
+ kernfs_remove(root->kn); /* will also free @root */
+}
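+
+/*
+ * Usage sketch (illustrative only): a kernfs user typically pairs the
+ * two calls above, creating the hierarchy at init time and destroying
+ * it on teardown.
+ *
+ *	struct kernfs_root *root;
+ *
+ *	root = kernfs_create_root(NULL, NULL);
+ *	if (IS_ERR(root))
+ *		return PTR_ERR(root);
+ *	...
+ *	kernfs_destroy_root(root);
+ */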
+
+/**
+ * kernfs_create_dir_ns - create a directory
+ * @parent: parent in which to create a new directory
+ * @name: name of the new directory
+ * @mode: mode of the new directory
+ * @priv: opaque data associated with the new directory
+ * @ns: optional namespace tag of the directory
+ *
+ * Returns the created node on success, ERR_PTR() value on failure.
+ */
+struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
+ const char *name, umode_t mode,
+ void *priv, const void *ns)
+{
+ struct kernfs_addrm_cxt acxt;
+ struct kernfs_node *kn;
+ int rc;
+
+ /* allocate */
+ kn = kernfs_new_node(parent, name, mode | S_IFDIR, KERNFS_DIR);
+ if (!kn)
+ return ERR_PTR(-ENOMEM);
+
+ kn->dir.root = parent->dir.root;
+ kn->ns = ns;
+ kn->priv = priv;
+
+ /* link in */
+ kernfs_addrm_start(&acxt);
+ rc = kernfs_add_one(&acxt, kn);
+ kernfs_addrm_finish(&acxt);
+
+ if (!rc)
+ return kn;
+
+ kernfs_put(kn);
+ return ERR_PTR(rc);
+}
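+
+/*
+ * Usage sketch (illustrative only; "root" and "widgets" are
+ * hypothetical): creating a directory directly under a hierarchy's
+ * root node.
+ *
+ *	struct kernfs_node *dir;
+ *
+ *	dir = kernfs_create_dir_ns(root->kn, "widgets", 0755, NULL, NULL);
+ *	if (IS_ERR(dir))
+ *		return PTR_ERR(dir);
+ */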
+
+static struct dentry *kernfs_iop_lookup(struct inode *dir,
+ struct dentry *dentry,
+ unsigned int flags)
+{
+ struct dentry *ret;
+ struct kernfs_node *parent = dentry->d_parent->d_fsdata;
+ struct kernfs_node *kn;
+ struct inode *inode;
+ const void *ns = NULL;
+
+ mutex_lock(&kernfs_mutex);
+
+ if (kernfs_ns_enabled(parent))
+ ns = kernfs_info(dir->i_sb)->ns;
+
+ kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
+
+ /* no such entry */
+ if (!kn) {
+ ret = NULL;
+ goto out_unlock;
+ }
+ kernfs_get(kn);
+ dentry->d_fsdata = kn;
+
+ /* attach dentry and inode */
+ inode = kernfs_get_inode(dir->i_sb, kn);
+ if (!inode) {
+ ret = ERR_PTR(-ENOMEM);
+ goto out_unlock;
+ }
+
+ /* instantiate and hash dentry */
+ ret = d_materialise_unique(dentry, inode);
+ out_unlock:
+ mutex_unlock(&kernfs_mutex);
+ return ret;
+}
+
+static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+{
+ struct kernfs_node *parent = dir->i_private;
+ struct kernfs_dir_ops *kdops = kernfs_root(parent)->dir_ops;
+
+ if (!kdops || !kdops->mkdir)
+ return -EPERM;
+
+ return kdops->mkdir(parent, dentry->d_name.name, mode);
+}
+
+static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ struct kernfs_node *kn = dentry->d_fsdata;
+ struct kernfs_dir_ops *kdops = kernfs_root(kn)->dir_ops;
+
+ if (!kdops || !kdops->rmdir)
+ return -EPERM;
+
+ return kdops->rmdir(kn);
+}
+
+static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct kernfs_node *kn = old_dentry->d_fsdata;
+ struct kernfs_node *new_parent = new_dir->i_private;
+ struct kernfs_dir_ops *kdops = kernfs_root(kn)->dir_ops;
+
+ if (!kdops || !kdops->rename)
+ return -EPERM;
+
+ return kdops->rename(kn, new_parent, new_dentry->d_name.name);
+}
+
+const struct inode_operations kernfs_dir_iops = {
+ .lookup = kernfs_iop_lookup,
+ .permission = kernfs_iop_permission,
+ .setattr = kernfs_iop_setattr,
+ .getattr = kernfs_iop_getattr,
+ .setxattr = kernfs_iop_setxattr,
+ .removexattr = kernfs_iop_removexattr,
+ .getxattr = kernfs_iop_getxattr,
+ .listxattr = kernfs_iop_listxattr,
+
+ .mkdir = kernfs_iop_mkdir,
+ .rmdir = kernfs_iop_rmdir,
+ .rename = kernfs_iop_rename,
+};
+
+static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
+{
+ struct kernfs_node *last;
+
+ while (true) {
+ struct rb_node *rbn;
+
+ last = pos;
+
+ if (kernfs_type(pos) != KERNFS_DIR)
+ break;
+
+ rbn = rb_first(&pos->dir.children);
+ if (!rbn)
+ break;
+
+ pos = rb_to_kn(rbn);
+ }
+
+ return last;
+}
+
+/**
+ * kernfs_next_descendant_post - find the next descendant for post-order walk
+ * @pos: the current position (%NULL to initiate traversal)
+ * @root: kernfs_node whose descendants to walk
+ *
+ * Find the next descendant to visit for post-order traversal of @root's
+ * descendants. @root is included in the iteration and is the last node to be
+ * visited.
+ */
+static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
+ struct kernfs_node *root)
+{
+ struct rb_node *rbn;
+
+ lockdep_assert_held(&kernfs_mutex);
+
+ /* if first iteration, visit leftmost descendant which may be root */
+ if (!pos)
+ return kernfs_leftmost_descendant(root);
+
+ /* if we visited @root, we're done */
+ if (pos == root)
+ return NULL;
+
+ /* if there's an unvisited sibling, visit its leftmost descendant */
+ rbn = rb_next(&pos->rb);
+ if (rbn)
+ return kernfs_leftmost_descendant(rb_to_kn(rbn));
+
+ /* no sibling left, visit parent */
+ return pos->parent;
+}
+
+static void __kernfs_remove(struct kernfs_addrm_cxt *acxt,
+ struct kernfs_node *kn)
+{
+ struct kernfs_node *pos, *next;
+
+ if (!kn)
+ return;
+
+ pr_debug("kernfs %s: removing\n", kn->name);
+
+ next = NULL;
+ do {
+ pos = next;
+ next = kernfs_next_descendant_post(pos, kn);
+ if (pos)
+ kernfs_remove_one(acxt, pos);
+ } while (next);
+}
+
+/**
+ * kernfs_remove - remove a kernfs_node recursively
+ * @kn: the kernfs_node to remove
+ *
+ * Remove @kn along with all its subdirectories and files.
+ */
+void kernfs_remove(struct kernfs_node *kn)
+{
+ struct kernfs_addrm_cxt acxt;
+
+ kernfs_addrm_start(&acxt);
+ __kernfs_remove(&acxt, kn);
+ kernfs_addrm_finish(&acxt);
+}
+
+/**
+ * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it
+ * @parent: parent of the target
+ * @name: name of the kernfs_node to remove
+ * @ns: namespace tag of the kernfs_node to remove
+ *
+ * Look for the kernfs_node with @name and @ns under @parent and remove it.
+ * Returns 0 on success, -ENOENT if such entry doesn't exist.
+ */
+int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
+ const void *ns)
+{
+ struct kernfs_addrm_cxt acxt;
+ struct kernfs_node *kn;
+
+ if (!parent) {
+ WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
+ name);
+ return -ENOENT;
+ }
+
+ kernfs_addrm_start(&acxt);
+
+ kn = kernfs_find_ns(parent, name, ns);
+ if (kn)
+ __kernfs_remove(&acxt, kn);
+
+ kernfs_addrm_finish(&acxt);
+
+ if (kn)
+ return 0;
+ else
+ return -ENOENT;
+}
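+
+/*
+ * Usage sketch (illustrative only; "dir", "parent" and "widgets" are
+ * hypothetical): kernfs_remove() tears down a node the caller already
+ * holds a pointer to, recursively, while kernfs_remove_by_name_ns()
+ * looks the node up by name first.
+ *
+ *	kernfs_remove(dir);
+ *
+ *	if (kernfs_remove_by_name_ns(parent, "widgets", NULL))
+ *		pr_warn("widgets was already gone\n");
+ */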
+
+/**
+ * kernfs_rename_ns - move and rename a kernfs_node
+ * @kn: target node
+ * @new_parent: new parent to put @kn under
+ * @new_name: new name
+ * @new_ns: new namespace tag
+ */
+int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
+ const char *new_name, const void *new_ns)
+{
+ int error;
+
+ mutex_lock(&kernfs_mutex);
+
+ error = -ENOENT;
+ if ((kn->flags | new_parent->flags) & KERNFS_REMOVED)
+ goto out;
+
+ error = 0;
+ if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
+ (strcmp(kn->name, new_name) == 0))
+ goto out; /* nothing to rename */
+
+ error = -EEXIST;
+ if (kernfs_find_ns(new_parent, new_name, new_ns))
+ goto out;
+
+ /* rename kernfs_node */
+ if (strcmp(kn->name, new_name) != 0) {
+ error = -ENOMEM;
+ new_name = kstrdup(new_name, GFP_KERNEL);
+ if (!new_name)
+ goto out;
+
+ if (kn->flags & KERNFS_STATIC_NAME)
+ kn->flags &= ~KERNFS_STATIC_NAME;
+ else
+ kfree(kn->name);
+
+ kn->name = new_name;
+ }
+
+ /*
+ * Move to the appropriate place in the appropriate directories rbtree.
+ */
+ kernfs_unlink_sibling(kn);
+ kernfs_get(new_parent);
+ kernfs_put(kn->parent);
+ kn->ns = new_ns;
+ kn->hash = kernfs_name_hash(kn->name, kn->ns);
+ kn->parent = new_parent;
+ kernfs_link_sibling(kn);
+
+ error = 0;
+ out:
+ mutex_unlock(&kernfs_mutex);
+ return error;
+}
+
+/* Relationship between s_mode and the DT_xxx types */
+static inline unsigned char dt_type(struct kernfs_node *kn)
+{
+ return (kn->mode >> 12) & 15;
+}
+
+static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
+{
+ kernfs_put(filp->private_data);
+ return 0;
+}
+
+static struct kernfs_node *kernfs_dir_pos(const void *ns,
+ struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
+{
+ if (pos) {
+ int valid = !(pos->flags & KERNFS_REMOVED) &&
+ pos->parent == parent && hash == pos->hash;
+ kernfs_put(pos);
+ if (!valid)
+ pos = NULL;
+ }
+ if (!pos && (hash > 1) && (hash < INT_MAX)) {
+ struct rb_node *node = parent->dir.children.rb_node;
+ while (node) {
+ pos = rb_to_kn(node);
+
+ if (hash < pos->hash)
+ node = node->rb_left;
+ else if (hash > pos->hash)
+ node = node->rb_right;
+ else
+ break;
+ }
+ }
+ /* Skip over entries in the wrong namespace */
+ while (pos && pos->ns != ns) {
+ struct rb_node *node = rb_next(&pos->rb);
+ if (!node)
+ pos = NULL;
+ else
+ pos = rb_to_kn(node);
+ }
+ return pos;
+}
+
+static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
+ struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
+{
+ pos = kernfs_dir_pos(ns, parent, ino, pos);
+ if (pos)
+ do {
+ struct rb_node *node = rb_next(&pos->rb);
+ if (!node)
+ pos = NULL;
+ else
+ pos = rb_to_kn(node);
+ } while (pos && pos->ns != ns);
+ return pos;
+}
+
+static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
+{
+ struct dentry *dentry = file->f_path.dentry;
+ struct kernfs_node *parent = dentry->d_fsdata;
+ struct kernfs_node *pos = file->private_data;
+ const void *ns = NULL;
+
+ if (!dir_emit_dots(file, ctx))
+ return 0;
+ mutex_lock(&kernfs_mutex);
+
+ if (kernfs_ns_enabled(parent))
+ ns = kernfs_info(dentry->d_sb)->ns;
+
+ for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
+ pos;
+ pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
+ const char *name = pos->name;
+ unsigned int type = dt_type(pos);
+ int len = strlen(name);
+ ino_t ino = pos->ino;
+
+ ctx->pos = pos->hash;
+ file->private_data = pos;
+ kernfs_get(pos);
+
+ mutex_unlock(&kernfs_mutex);
+ if (!dir_emit(ctx, name, len, ino, type))
+ return 0;
+ mutex_lock(&kernfs_mutex);
+ }
+ mutex_unlock(&kernfs_mutex);
+ file->private_data = NULL;
+ ctx->pos = INT_MAX;
+ return 0;
+}
+
+static loff_t kernfs_dir_fop_llseek(struct file *file, loff_t offset,
+ int whence)
+{
+ struct inode *inode = file_inode(file);
+ loff_t ret;
+
+ mutex_lock(&inode->i_mutex);
+ ret = generic_file_llseek(file, offset, whence);
+ mutex_unlock(&inode->i_mutex);
+
+ return ret;
+}
+
+const struct file_operations kernfs_dir_fops = {
+ .read = generic_read_dir,
+ .iterate = kernfs_fop_readdir,
+ .release = kernfs_dir_fop_release,
+ .llseek = kernfs_dir_fop_llseek,
+};
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
new file mode 100644
index 0000000..dbf397b
--- /dev/null
+++ b/fs/kernfs/file.c
@@ -0,0 +1,867 @@
+/*
+ * fs/kernfs/file.c - kernfs file implementation
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+
+#include "kernfs-internal.h"
+
+/*
+ * There's one kernfs_open_file for each open file and one kernfs_open_node
+ * for each kernfs_node with one or more open files.
+ *
+ * kernfs_node->attr.open points to kernfs_open_node. attr.open is
+ * protected by kernfs_open_node_lock.
+ *
+ * filp->private_data points to seq_file whose ->private points to
+ * kernfs_open_file. kernfs_open_files are chained at
+ * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
+ */
+static DEFINE_SPINLOCK(kernfs_open_node_lock);
+static DEFINE_MUTEX(kernfs_open_file_mutex);
+
+struct kernfs_open_node {
+ atomic_t refcnt;
+ atomic_t event;
+ wait_queue_head_t poll;
+ struct list_head files; /* goes through kernfs_open_file.list */
+};
+
+static struct kernfs_open_file *kernfs_of(struct file *file)
+{
+ return ((struct seq_file *)file->private_data)->private;
+}
+
+/*
+ * Determine the kernfs_ops for the given kernfs_node. This function must
+ * be called while holding an active reference.
+ */
+static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
+{
+ if (kn->flags & KERNFS_LOCKDEP)
+ lockdep_assert_held(kn);
+ return kn->attr.ops;
+}
+
+/*
+ * As kernfs_seq_stop() is also called after kernfs_seq_start() or
+ * kernfs_seq_next() failure, it needs to distinguish whether it's stopping
+ * a seq_file iteration which is fully initialized with an active reference
+ * or an aborted kernfs_seq_start() due to get_active failure. The
+ * position pointer is the only context for each seq_file iteration and
+ * thus the stop condition should be encoded in it. As the return value is
+ * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
+ * choice to indicate get_active failure.
+ *
+ * Unfortunately, this is complicated due to the optional custom seq_file
+ * operations which may return ERR_PTR(-ENODEV) too. kernfs_seq_stop()
+ * can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or
+ * custom seq_file operations and thus can't decide whether put_active
+ * should be performed or not only on ERR_PTR(-ENODEV).
+ *
+ * This is worked around by factoring out the custom seq_stop() and
+ * put_active part into kernfs_seq_stop_active(), skipping it from
+ * kernfs_seq_stop() if ERR_PTR(-ENODEV) while invoking it directly after
+ * custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures
+ * that kernfs_seq_stop_active() is skipped only after get_active failure.
+ */
+static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
+{
+ struct kernfs_open_file *of = sf->private;
+ const struct kernfs_ops *ops = kernfs_ops(of->kn);
+
+ if (ops->seq_stop)
+ ops->seq_stop(sf, v);
+ kernfs_put_active(of->kn);
+}
+
+static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
+{
+ struct kernfs_open_file *of = sf->private;
+ const struct kernfs_ops *ops;
+
+ /*
+ * @of->mutex nests outside active ref and is just to ensure that
+ * the ops aren't called concurrently for the same open file.
+ */
+ mutex_lock(&of->mutex);
+ if (!kernfs_get_active(of->kn))
+ return ERR_PTR(-ENODEV);
+
+ ops = kernfs_ops(of->kn);
+ if (ops->seq_start) {
+ void *next = ops->seq_start(sf, ppos);
+ /* see the comment above kernfs_seq_stop_active() */
+ if (next == ERR_PTR(-ENODEV))
+ kernfs_seq_stop_active(sf, next);
+ return next;
+ } else {
+ /*
+ * The same behavior and code as single_open(). Returns
+ * !NULL if pos is at the beginning; otherwise, NULL.
+ */
+ return NULL + !*ppos;
+ }
+}
+
+static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
+{
+ struct kernfs_open_file *of = sf->private;
+ const struct kernfs_ops *ops = kernfs_ops(of->kn);
+
+ if (ops->seq_next) {
+ void *next = ops->seq_next(sf, v, ppos);
+ /* see the comment above kernfs_seq_stop_active() */
+ if (next == ERR_PTR(-ENODEV))
+ kernfs_seq_stop_active(sf, next);
+ return next;
+ } else {
+ /*
+ * The same behavior and code as single_open(), always
+ * terminate after the initial read.
+ */
+ ++*ppos;
+ return NULL;
+ }
+}
+
+static void kernfs_seq_stop(struct seq_file *sf, void *v)
+{
+ struct kernfs_open_file *of = sf->private;
+
+ if (v != ERR_PTR(-ENODEV))
+ kernfs_seq_stop_active(sf, v);
+ mutex_unlock(&of->mutex);
+}
+
+static int kernfs_seq_show(struct seq_file *sf, void *v)
+{
+ struct kernfs_open_file *of = sf->private;
+
+ of->event = atomic_read(&of->kn->attr.open->event);
+
+ return of->kn->attr.ops->seq_show(sf, v);
+}
+
+static const struct seq_operations kernfs_seq_ops = {
+ .start = kernfs_seq_start,
+ .next = kernfs_seq_next,
+ .stop = kernfs_seq_stop,
+ .show = kernfs_seq_show,
+};
+
+/*
+ * As reading a bin file can have side-effects, the exact offset and bytes
+ * specified in the read(2) call should be passed to the read callback, making
+ * it difficult to use seq_file. Implement simplistic custom buffering for
+ * bin files.
+ */
+static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ ssize_t len = min_t(size_t, count, PAGE_SIZE);
+ const struct kernfs_ops *ops;
+ char *buf;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /*
+ * @of->mutex nests outside active ref and is just to ensure that
+ * the ops aren't called concurrently for the same open file.
+ */
+ mutex_lock(&of->mutex);
+ if (!kernfs_get_active(of->kn)) {
+ len = -ENODEV;
+ mutex_unlock(&of->mutex);
+ goto out_free;
+ }
+
+ ops = kernfs_ops(of->kn);
+ if (ops->read)
+ len = ops->read(of, buf, len, *ppos);
+ else
+ len = -EINVAL;
+
+ kernfs_put_active(of->kn);
+ mutex_unlock(&of->mutex);
+
+ if (len < 0)
+ goto out_free;
+
+ if (copy_to_user(user_buf, buf, len)) {
+ len = -EFAULT;
+ goto out_free;
+ }
+
+ *ppos += len;
+
+ out_free:
+ kfree(buf);
+ return len;
+}
+
+/**
+ * kernfs_fop_read - kernfs vfs read callback
+ * @file: file pointer
+ * @user_buf: buffer to read data into
+ * @count: number of bytes
+ * @ppos: starting offset
+ */
+static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct kernfs_open_file *of = kernfs_of(file);
+
+ if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
+ return seq_read(file, user_buf, count, ppos);
+ else
+ return kernfs_file_direct_read(of, user_buf, count, ppos);
+}
+
+/**
+ * kernfs_fop_write - kernfs vfs write callback
+ * @file: file pointer
+ * @user_buf: data to write
+ * @count: number of bytes
+ * @ppos: starting offset
+ *
+ * Copy data in from userland and pass it to the matching kernfs write
+ * operation.
+ *
+ * There is no easy way for us to know if userspace is only doing a partial
+ * write, so we don't support them. We expect the entire buffer to come on
+ * the first write. Hint: if you're writing a value, first read the file,
+ * modify only the value you're changing, then write the entire buffer
+ * back.
+ */
+static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct kernfs_open_file *of = kernfs_of(file);
+ ssize_t len = min_t(size_t, count, PAGE_SIZE);
+ const struct kernfs_ops *ops;
+ char *buf;
+
+ buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, user_buf, len)) {
+ len = -EFAULT;
+ goto out_free;
+ }
+ buf[len] = '\0'; /* guarantee string termination */
+
+ /*
+ * @of->mutex nests outside active ref and is just to ensure that
+ * the ops aren't called concurrently for the same open file.
+ */
+ mutex_lock(&of->mutex);
+ if (!kernfs_get_active(of->kn)) {
+ mutex_unlock(&of->mutex);
+ len = -ENODEV;
+ goto out_free;
+ }
+
+ ops = kernfs_ops(of->kn);
+ if (ops->write)
+ len = ops->write(of, buf, len, *ppos);
+ else
+ len = -EINVAL;
+
+ kernfs_put_active(of->kn);
+ mutex_unlock(&of->mutex);
+
+ if (len > 0)
+ *ppos += len;
+out_free:
+ kfree(buf);
+ return len;
+}
+
+static void kernfs_vma_open(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+ struct kernfs_open_file *of = kernfs_of(file);
+
+ if (!of->vm_ops)
+ return;
+
+ if (!kernfs_get_active(of->kn))
+ return;
+
+ if (of->vm_ops->open)
+ of->vm_ops->open(vma);
+
+ kernfs_put_active(of->kn);
+}
+
+static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct file *file = vma->vm_file;
+ struct kernfs_open_file *of = kernfs_of(file);
+ int ret;
+
+ if (!of->vm_ops)
+ return VM_FAULT_SIGBUS;
+
+ if (!kernfs_get_active(of->kn))
+ return VM_FAULT_SIGBUS;
+
+ ret = VM_FAULT_SIGBUS;
+ if (of->vm_ops->fault)
+ ret = of->vm_ops->fault(vma, vmf);
+
+ kernfs_put_active(of->kn);
+ return ret;
+}
+
+static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ struct file *file = vma->vm_file;
+ struct kernfs_open_file *of = kernfs_of(file);
+ int ret;
+
+ if (!of->vm_ops)
+ return VM_FAULT_SIGBUS;
+
+ if (!kernfs_get_active(of->kn))
+ return VM_FAULT_SIGBUS;
+
+ ret = 0;
+ if (of->vm_ops->page_mkwrite)
+ ret = of->vm_ops->page_mkwrite(vma, vmf);
+ else
+ file_update_time(file);
+
+ kernfs_put_active(of->kn);
+ return ret;
+}
+
+static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct file *file = vma->vm_file;
+ struct kernfs_open_file *of = kernfs_of(file);
+ int ret;
+
+ if (!of->vm_ops)
+ return -EINVAL;
+
+ if (!kernfs_get_active(of->kn))
+ return -EINVAL;
+
+ ret = -EINVAL;
+ if (of->vm_ops->access)
+ ret = of->vm_ops->access(vma, addr, buf, len, write);
+
+ kernfs_put_active(of->kn);
+ return ret;
+}
+
+#ifdef CONFIG_NUMA
+static int kernfs_vma_set_policy(struct vm_area_struct *vma,
+ struct mempolicy *new)
+{
+ struct file *file = vma->vm_file;
+ struct kernfs_open_file *of = kernfs_of(file);
+ int ret;
+
+ if (!of->vm_ops)
+ return 0;
+
+ if (!kernfs_get_active(of->kn))
+ return -EINVAL;
+
+ ret = 0;
+ if (of->vm_ops->set_policy)
+ ret = of->vm_ops->set_policy(vma, new);
+
+ kernfs_put_active(of->kn);
+ return ret;
+}
+
+static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct file *file = vma->vm_file;
+ struct kernfs_open_file *of = kernfs_of(file);
+ struct mempolicy *pol;
+
+ if (!of->vm_ops)
+ return vma->vm_policy;
+
+ if (!kernfs_get_active(of->kn))
+ return vma->vm_policy;
+
+ pol = vma->vm_policy;
+ if (of->vm_ops->get_policy)
+ pol = of->vm_ops->get_policy(vma, addr);
+
+ kernfs_put_active(of->kn);
+ return pol;
+}
+
+static int kernfs_vma_migrate(struct vm_area_struct *vma,
+ const nodemask_t *from, const nodemask_t *to,
+ unsigned long flags)
+{
+ struct file *file = vma->vm_file;
+ struct kernfs_open_file *of = kernfs_of(file);
+ int ret;
+
+ if (!of->vm_ops)
+ return 0;
+
+ if (!kernfs_get_active(of->kn))
+ return 0;
+
+ ret = 0;
+ if (of->vm_ops->migrate)
+ ret = of->vm_ops->migrate(vma, from, to, flags);
+
+ kernfs_put_active(of->kn);
+ return ret;
+}
+#endif
+
+static const struct vm_operations_struct kernfs_vm_ops = {
+ .open = kernfs_vma_open,
+ .fault = kernfs_vma_fault,
+ .page_mkwrite = kernfs_vma_page_mkwrite,
+ .access = kernfs_vma_access,
+#ifdef CONFIG_NUMA
+ .set_policy = kernfs_vma_set_policy,
+ .get_policy = kernfs_vma_get_policy,
+ .migrate = kernfs_vma_migrate,
+#endif
+};
+
+static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct kernfs_open_file *of = kernfs_of(file);
+ const struct kernfs_ops *ops;
+ int rc;
+
+ /*
+ * mmap path and of->mutex are prone to triggering spurious lockdep
+ * warnings and we don't want to add spurious locking dependency
+ * between the two. Check whether mmap is actually implemented
+ * without grabbing @of->mutex by testing HAS_MMAP flag. See the
+ * comment in kernfs_file_open() for more details.
+ */
+ if (!(of->kn->flags & KERNFS_HAS_MMAP))
+ return -ENODEV;
+
+ mutex_lock(&of->mutex);
+
+ rc = -ENODEV;
+ if (!kernfs_get_active(of->kn))
+ goto out_unlock;
+
+ ops = kernfs_ops(of->kn);
+ rc = ops->mmap(of, vma);
+
+ /*
+ * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
+ * to satisfy versions of X which crash if the mmap fails: that
+ * substitutes a new vm_file, and we don't then want bin_vm_ops.
+ */
+ if (vma->vm_file != file)
+ goto out_put;
+
+ rc = -EINVAL;
+ if (of->mmapped && of->vm_ops != vma->vm_ops)
+ goto out_put;
+
+ /*
+ * It is not possible to successfully wrap close.
+ * So error if someone is trying to use close.
+ */
+ rc = -EINVAL;
+ if (vma->vm_ops && vma->vm_ops->close)
+ goto out_put;
+
+ rc = 0;
+ of->mmapped = 1;
+ of->vm_ops = vma->vm_ops;
+ vma->vm_ops = &kernfs_vm_ops;
+out_put:
+ kernfs_put_active(of->kn);
+out_unlock:
+ mutex_unlock(&of->mutex);
+
+ return rc;
+}
+
+/**
+ * kernfs_get_open_node - get or create kernfs_open_node
+ * @kn: target kernfs_node
+ * @of: kernfs_open_file for this instance of open
+ *
+ * If @kn->attr.open exists, increment its reference count; otherwise,
+ * create one. @of is chained to the files list.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int kernfs_get_open_node(struct kernfs_node *kn,
+ struct kernfs_open_file *of)
+{
+ struct kernfs_open_node *on, *new_on = NULL;
+
+ retry:
+ mutex_lock(&kernfs_open_file_mutex);
+ spin_lock_irq(&kernfs_open_node_lock);
+
+ if (!kn->attr.open && new_on) {
+ kn->attr.open = new_on;
+ new_on = NULL;
+ }
+
+ on = kn->attr.open;
+ if (on) {
+ atomic_inc(&on->refcnt);
+ list_add_tail(&of->list, &on->files);
+ }
+
+ spin_unlock_irq(&kernfs_open_node_lock);
+ mutex_unlock(&kernfs_open_file_mutex);
+
+ if (on) {
+ kfree(new_on);
+ return 0;
+ }
+
+ /* not there, initialize a new one and retry */
+ new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
+ if (!new_on)
+ return -ENOMEM;
+
+ atomic_set(&new_on->refcnt, 0);
+ atomic_set(&new_on->event, 1);
+ init_waitqueue_head(&new_on->poll);
+ INIT_LIST_HEAD(&new_on->files);
+ goto retry;
+}
+
+/**
+ * kernfs_put_open_node - put kernfs_open_node
+ * @kn: target kernfs_node
+ * @of: associated kernfs_open_file
+ *
+ * Put @kn->attr.open and unlink @of from the files list. If
+ * the reference count reaches zero, disassociate and free it.
+ *
+ * LOCKING:
+ * None.
+ */
+static void kernfs_put_open_node(struct kernfs_node *kn,
+ struct kernfs_open_file *of)
+{
+ struct kernfs_open_node *on = kn->attr.open;
+ unsigned long flags;
+
+ mutex_lock(&kernfs_open_file_mutex);
+ spin_lock_irqsave(&kernfs_open_node_lock, flags);
+
+ if (of)
+ list_del(&of->list);
+
+ if (atomic_dec_and_test(&on->refcnt))
+ kn->attr.open = NULL;
+ else
+ on = NULL;
+
+ spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
+ mutex_unlock(&kernfs_open_file_mutex);
+
+ kfree(on);
+}
+
+static int kernfs_fop_open(struct inode *inode, struct file *file)
+{
+ struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
+ const struct kernfs_ops *ops;
+ struct kernfs_open_file *of;
+ bool has_read, has_write, has_mmap;
+ int error = -EACCES;
+
+ if (!kernfs_get_active(kn))
+ return -ENODEV;
+
+ ops = kernfs_ops(kn);
+
+ has_read = ops->seq_show || ops->read || ops->mmap;
+ has_write = ops->write || ops->mmap;
+ has_mmap = ops->mmap;
+
+ /* check perms and supported operations */
+ if ((file->f_mode & FMODE_WRITE) &&
+ (!(inode->i_mode & S_IWUGO) || !has_write))
+ goto err_out;
+
+ if ((file->f_mode & FMODE_READ) &&
+ (!(inode->i_mode & S_IRUGO) || !has_read))
+ goto err_out;
+
+ /* allocate a kernfs_open_file for the file */
+ error = -ENOMEM;
+ of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
+ if (!of)
+ goto err_out;
+
+ /*
+ * The following is done to give a different lockdep key to
+ * @of->mutex for files which implement mmap. This is a rather
+ * crude way to avoid false positive lockdep warning around
+ * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
+ * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
+ * which mm->mmap_sem nests, while holding @of->mutex. As each
+ * open file has a separate mutex, it's okay as long as those don't
+ * happen on the same file. At this point, we can't easily give
+ * each file a separate locking class. Let's differentiate on
+ * whether the file has mmap or not for now.
+ *
+ * Both paths of the branch look the same. They're supposed to
+ * look that way and give @of->mutex different static lockdep keys.
+ */
+ if (has_mmap)
+ mutex_init(&of->mutex);
+ else
+ mutex_init(&of->mutex);
+
+ of->kn = kn;
+ of->file = file;
+
+ /*
+ * Always instantiate seq_file even if read access doesn't use
+ * seq_file or is not requested. This unifies private data access
+ * and readable regular files are the vast majority anyway.
+ */
+ if (ops->seq_show)
+ error = seq_open(file, &kernfs_seq_ops);
+ else
+ error = seq_open(file, NULL);
+ if (error)
+ goto err_free;
+
+ ((struct seq_file *)file->private_data)->private = of;
+
+ /* seq_file clears PWRITE unconditionally, restore it if WRITE */
+ if (file->f_mode & FMODE_WRITE)
+ file->f_mode |= FMODE_PWRITE;
+
+ /* make sure we have open node struct */
+ error = kernfs_get_open_node(kn, of);
+ if (error)
+ goto err_close;
+
+ /* open succeeded, put the active reference */
+ kernfs_put_active(kn);
+ return 0;
+
+err_close:
+ seq_release(inode, file);
+err_free:
+ kfree(of);
+err_out:
+ kernfs_put_active(kn);
+ return error;
+}
+
+static int kernfs_fop_release(struct inode *inode, struct file *filp)
+{
+ struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
+ struct kernfs_open_file *of = kernfs_of(filp);
+
+ kernfs_put_open_node(kn, of);
+ seq_release(inode, filp);
+ kfree(of);
+
+ return 0;
+}
+
+void kernfs_unmap_bin_file(struct kernfs_node *kn)
+{
+ struct kernfs_open_node *on;
+ struct kernfs_open_file *of;
+
+ if (!(kn->flags & KERNFS_HAS_MMAP))
+ return;
+
+ spin_lock_irq(&kernfs_open_node_lock);
+ on = kn->attr.open;
+ if (on)
+ atomic_inc(&on->refcnt);
+ spin_unlock_irq(&kernfs_open_node_lock);
+ if (!on)
+ return;
+
+ mutex_lock(&kernfs_open_file_mutex);
+ list_for_each_entry(of, &on->files, list) {
+ struct inode *inode = file_inode(of->file);
+ unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+ }
+ mutex_unlock(&kernfs_open_file_mutex);
+
+ kernfs_put_open_node(kn, NULL);
+}
+
+/*
+ * Kernfs attribute files are pollable. The idea is that you read
+ * the content and then you use 'poll' or 'select' to wait for
+ * the content to change. When the content changes (assuming the
+ * manager for the kobject supports notification), poll will
+ * return POLLERR|POLLPRI, and select will return the fd whether
+ * it is waiting for read, write, or exceptions.
+ * Once poll/select indicates that the value has changed, you
+ * need to close and re-open the file, or seek to 0 and read again.
+ * Reminder: this only works for attributes which actively support
+ * it, and it is not possible to test an attribute from userspace
+ * to see if it supports poll (Neither 'poll' nor 'select' return
+ * an appropriate error code). When in doubt, set a suitable timeout value.
+ */
+static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
+{
+ struct kernfs_open_file *of = kernfs_of(filp);
+ struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
+ struct kernfs_open_node *on = kn->attr.open;
+
+ /* need parent for the kobj, grab both */
+ if (!kernfs_get_active(kn))
+ goto trigger;
+
+ poll_wait(filp, &on->poll, wait);
+
+ kernfs_put_active(kn);
+
+ if (of->event != atomic_read(&on->event))
+ goto trigger;
+
+ return DEFAULT_POLLMASK;
+
+ trigger:
+ return DEFAULT_POLLMASK|POLLERR|POLLPRI;
+}
+
+/**
+ * kernfs_notify - notify a kernfs file
+ * @kn: file to notify
+ *
+ * Notify @kn such that poll(2) on @kn wakes up.
+ */
+void kernfs_notify(struct kernfs_node *kn)
+{
+ struct kernfs_open_node *on;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kernfs_open_node_lock, flags);
+
+ if (!WARN_ON(kernfs_type(kn) != KERNFS_FILE)) {
+ on = kn->attr.open;
+ if (on) {
+ atomic_inc(&on->event);
+ wake_up_interruptible(&on->poll);
+ }
+ }
+
+ spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
+}
+EXPORT_SYMBOL_GPL(kernfs_notify);
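+
+/*
+ * Usage sketch (illustrative only; "kn" is a hypothetical, already
+ * created attribute node): the kernel side fires kernfs_notify() after
+ * updating the exposed value; userspace re-reads from offset 0 once
+ * poll indicates a change, as described above kernfs_fop_poll().
+ *
+ * Kernel side:
+ *
+ *	...update the exposed value...
+ *	kernfs_notify(kn);
+ *
+ * Userspace side:
+ *
+ *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
+ *
+ *	read(fd, buf, sizeof(buf));
+ *	poll(&pfd, 1, -1);
+ *	lseek(fd, 0, SEEK_SET);
+ *	read(fd, buf, sizeof(buf));
+ */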
+
+const struct file_operations kernfs_file_fops = {
+ .read = kernfs_fop_read,
+ .write = kernfs_fop_write,
+ .llseek = generic_file_llseek,
+ .mmap = kernfs_fop_mmap,
+ .open = kernfs_fop_open,
+ .release = kernfs_fop_release,
+ .poll = kernfs_fop_poll,
+};
+
+/**
+ * __kernfs_create_file - kernfs internal function to create a file
+ * @parent: directory to create the file in
+ * @name: name of the file
+ * @mode: mode of the file
+ * @size: size of the file
+ * @ops: kernfs operations for the file
+ * @priv: private data for the file
+ * @ns: optional namespace tag of the file
+ * @name_is_static: don't copy file name
+ * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
+ *
+ * Returns the created node on success, ERR_PTR() value on error.
+ */
+struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
+ const char *name,
+ umode_t mode, loff_t size,
+ const struct kernfs_ops *ops,
+ void *priv, const void *ns,
+ bool name_is_static,
+ struct lock_class_key *key)
+{
+ struct kernfs_addrm_cxt acxt;
+ struct kernfs_node *kn;
+ unsigned flags;
+ int rc;
+
+ flags = KERNFS_FILE;
+ if (name_is_static)
+ flags |= KERNFS_STATIC_NAME;
+
+ kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
+ if (!kn)
+ return ERR_PTR(-ENOMEM);
+
+ kn->attr.ops = ops;
+ kn->attr.size = size;
+ kn->ns = ns;
+ kn->priv = priv;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ if (key) {
+ lockdep_init_map(&kn->dep_map, "s_active", key, 0);
+ kn->flags |= KERNFS_LOCKDEP;
+ }
+#endif
+
+ /*
+ * kn->attr.ops is accessible only while holding an active ref. We
+ * need to know whether some ops are implemented outside active
+ * ref. Cache their existence in flags.
+ */
+ if (ops->seq_show)
+ kn->flags |= KERNFS_HAS_SEQ_SHOW;
+ if (ops->mmap)
+ kn->flags |= KERNFS_HAS_MMAP;
+
+ kernfs_addrm_start(&acxt);
+ rc = kernfs_add_one(&acxt, kn);
+ kernfs_addrm_finish(&acxt);
+
+ if (rc) {
+ kernfs_put(kn);
+ return ERR_PTR(rc);
+ }
+ return kn;
+}
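+
+/*
+ * Usage sketch (illustrative only; "foo_show", "foo_store", "foo_value",
+ * "foo_ops", "dir" and "foo" are hypothetical names): a minimal
+ * read/write attribute backed by kernfs_ops.
+ *
+ *	static int foo_show(struct seq_file *sf, void *v)
+ *	{
+ *		seq_printf(sf, "%d\n", foo_value);
+ *		return 0;
+ *	}
+ *
+ *	static ssize_t foo_store(struct kernfs_open_file *of, char *buf,
+ *				 size_t bytes, loff_t off)
+ *	{
+ *		return kstrtoint(buf, 0, &foo_value) ?: bytes;
+ *	}
+ *
+ *	static const struct kernfs_ops foo_ops = {
+ *		.seq_show	= foo_show,
+ *		.write		= foo_store,
+ *	};
+ *
+ *	kn = __kernfs_create_file(dir, "foo", 0644, 0, &foo_ops,
+ *				  NULL, NULL, false, NULL);
+ */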
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
new file mode 100644
index 0000000..e55126f
--- /dev/null
+++ b/fs/kernfs/inode.c
@@ -0,0 +1,377 @@
+/*
+ * fs/kernfs/inode.c - kernfs inode implementation
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/pagemap.h>
+#include <linux/backing-dev.h>
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/xattr.h>
+#include <linux/security.h>
+
+#include "kernfs-internal.h"
+
+static const struct address_space_operations kernfs_aops = {
+ .readpage = simple_readpage,
+ .write_begin = simple_write_begin,
+ .write_end = simple_write_end,
+};
+
+static struct backing_dev_info kernfs_bdi = {
+ .name = "kernfs",
+ .ra_pages = 0, /* No readahead */
+ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
+};
+
+static const struct inode_operations kernfs_iops = {
+ .permission = kernfs_iop_permission,
+ .setattr = kernfs_iop_setattr,
+ .getattr = kernfs_iop_getattr,
+ .setxattr = kernfs_iop_setxattr,
+ .removexattr = kernfs_iop_removexattr,
+ .getxattr = kernfs_iop_getxattr,
+ .listxattr = kernfs_iop_listxattr,
+};
+
+void __init kernfs_inode_init(void)
+{
+ if (bdi_init(&kernfs_bdi))
+ panic("failed to init kernfs_bdi");
+}
+
+static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn)
+{
+ struct iattr *iattrs;
+
+ if (kn->iattr)
+ return kn->iattr;
+
+ kn->iattr = kzalloc(sizeof(struct kernfs_iattrs), GFP_KERNEL);
+ if (!kn->iattr)
+ return NULL;
+ iattrs = &kn->iattr->ia_iattr;
+
+ /* assign default attributes */
+ iattrs->ia_mode = kn->mode;
+ iattrs->ia_uid = GLOBAL_ROOT_UID;
+ iattrs->ia_gid = GLOBAL_ROOT_GID;
+ iattrs->ia_atime = iattrs->ia_mtime = iattrs->ia_ctime = CURRENT_TIME;
+
+ simple_xattrs_init(&kn->iattr->xattrs);
+
+ return kn->iattr;
+}
+
+static int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
+{
+ struct kernfs_iattrs *attrs;
+ struct iattr *iattrs;
+ unsigned int ia_valid = iattr->ia_valid;
+
+ attrs = kernfs_iattrs(kn);
+ if (!attrs)
+ return -ENOMEM;
+
+ iattrs = &attrs->ia_iattr;
+
+ if (ia_valid & ATTR_UID)
+ iattrs->ia_uid = iattr->ia_uid;
+ if (ia_valid & ATTR_GID)
+ iattrs->ia_gid = iattr->ia_gid;
+ if (ia_valid & ATTR_ATIME)
+ iattrs->ia_atime = iattr->ia_atime;
+ if (ia_valid & ATTR_MTIME)
+ iattrs->ia_mtime = iattr->ia_mtime;
+ if (ia_valid & ATTR_CTIME)
+ iattrs->ia_ctime = iattr->ia_ctime;
+ if (ia_valid & ATTR_MODE) {
+ umode_t mode = iattr->ia_mode;
+ iattrs->ia_mode = kn->mode = mode;
+ }
+ return 0;
+}
+
+/**
+ * kernfs_setattr - set iattr on a node
+ * @kn: target node
+ * @iattr: iattr to set
+ *
+ * Returns 0 on success, -errno on failure.
+ */
+int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
+{
+ int ret;
+
+ mutex_lock(&kernfs_mutex);
+ ret = __kernfs_setattr(kn, iattr);
+ mutex_unlock(&kernfs_mutex);
+ return ret;
+}
+
+int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+ struct inode *inode = dentry->d_inode;
+ struct kernfs_node *kn = dentry->d_fsdata;
+ int error;
+
+ if (!kn)
+ return -EINVAL;
+
+ mutex_lock(&kernfs_mutex);
+ error = inode_change_ok(inode, iattr);
+ if (error)
+ goto out;
+
+ error = __kernfs_setattr(kn, iattr);
+ if (error)
+ goto out;
+
+ /* this ignores size changes */
+ setattr_copy(inode, iattr);
+
+out:
+ mutex_unlock(&kernfs_mutex);
+ return error;
+}
+
+static int kernfs_node_setsecdata(struct kernfs_node *kn, void **secdata,
+ u32 *secdata_len)
+{
+ struct kernfs_iattrs *attrs;
+ void *old_secdata;
+ size_t old_secdata_len;
+
+ attrs = kernfs_iattrs(kn);
+ if (!attrs)
+ return -ENOMEM;
+
+ old_secdata = attrs->ia_secdata;
+ old_secdata_len = attrs->ia_secdata_len;
+
+ attrs->ia_secdata = *secdata;
+ attrs->ia_secdata_len = *secdata_len;
+
+ *secdata = old_secdata;
+ *secdata_len = old_secdata_len;
+ return 0;
+}
+
+int kernfs_iop_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct kernfs_node *kn = dentry->d_fsdata;
+ struct kernfs_iattrs *attrs;
+ void *secdata;
+ int error;
+ u32 secdata_len = 0;
+
+ attrs = kernfs_iattrs(kn);
+ if (!attrs)
+ return -ENOMEM;
+
+ if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) {
+ const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
+ error = security_inode_setsecurity(dentry->d_inode, suffix,
+ value, size, flags);
+ if (error)
+ return error;
+ error = security_inode_getsecctx(dentry->d_inode,
+ &secdata, &secdata_len);
+ if (error)
+ return error;
+
+ mutex_lock(&kernfs_mutex);
+ error = kernfs_node_setsecdata(kn, &secdata, &secdata_len);
+ mutex_unlock(&kernfs_mutex);
+
+ if (secdata)
+ security_release_secctx(secdata, secdata_len);
+ return error;
+ } else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) {
+ return simple_xattr_set(&attrs->xattrs, name, value, size,
+ flags);
+ }
+
+ return -EINVAL;
+}
+
+int kernfs_iop_removexattr(struct dentry *dentry, const char *name)
+{
+ struct kernfs_node *kn = dentry->d_fsdata;
+ struct kernfs_iattrs *attrs;
+
+ attrs = kernfs_iattrs(kn);
+ if (!attrs)
+ return -ENOMEM;
+
+ return simple_xattr_remove(&attrs->xattrs, name);
+}
+
+ssize_t kernfs_iop_getxattr(struct dentry *dentry, const char *name, void *buf,
+ size_t size)
+{
+ struct kernfs_node *kn = dentry->d_fsdata;
+ struct kernfs_iattrs *attrs;
+
+ attrs = kernfs_iattrs(kn);
+ if (!attrs)
+ return -ENOMEM;
+
+ return simple_xattr_get(&attrs->xattrs, name, buf, size);
+}
+
+ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size)
+{
+ struct kernfs_node *kn = dentry->d_fsdata;
+ struct kernfs_iattrs *attrs;
+
+ attrs = kernfs_iattrs(kn);
+ if (!attrs)
+ return -ENOMEM;
+
+ return simple_xattr_list(&attrs->xattrs, buf, size);
+}
+
+static inline void set_default_inode_attr(struct inode *inode, umode_t mode)
+{
+ inode->i_mode = mode;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+}
+
+static inline void set_inode_attr(struct inode *inode, struct iattr *iattr)
+{
+ inode->i_uid = iattr->ia_uid;
+ inode->i_gid = iattr->ia_gid;
+ inode->i_atime = iattr->ia_atime;
+ inode->i_mtime = iattr->ia_mtime;
+ inode->i_ctime = iattr->ia_ctime;
+}
+
+static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode)
+{
+ struct kernfs_iattrs *attrs = kn->iattr;
+
+ inode->i_mode = kn->mode;
+ if (attrs) {
+ /*
+ * kernfs_node has non-default attributes; get them from the
+ * persistent copy in kernfs_node.
+ */
+ set_inode_attr(inode, &attrs->ia_iattr);
+ security_inode_notifysecctx(inode, attrs->ia_secdata,
+ attrs->ia_secdata_len);
+ }
+
+ if (kernfs_type(kn) == KERNFS_DIR)
+ set_nlink(inode, kn->dir.subdirs + 2);
+}
+
+int kernfs_iop_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+{
+ struct kernfs_node *kn = dentry->d_fsdata;
+ struct inode *inode = dentry->d_inode;
+
+ mutex_lock(&kernfs_mutex);
+ kernfs_refresh_inode(kn, inode);
+ mutex_unlock(&kernfs_mutex);
+
+ generic_fillattr(inode, stat);
+ return 0;
+}
+
+static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
+{
+ kernfs_get(kn);
+ inode->i_private = kn;
+ inode->i_mapping->a_ops = &kernfs_aops;
+ inode->i_mapping->backing_dev_info = &kernfs_bdi;
+ inode->i_op = &kernfs_iops;
+
+ set_default_inode_attr(inode, kn->mode);
+ kernfs_refresh_inode(kn, inode);
+
+ /* initialize inode according to type */
+ switch (kernfs_type(kn)) {
+ case KERNFS_DIR:
+ inode->i_op = &kernfs_dir_iops;
+ inode->i_fop = &kernfs_dir_fops;
+ break;
+ case KERNFS_FILE:
+ inode->i_size = kn->attr.size;
+ inode->i_fop = &kernfs_file_fops;
+ break;
+ case KERNFS_LINK:
+ inode->i_op = &kernfs_symlink_iops;
+ break;
+ default:
+ BUG();
+ }
+
+ unlock_new_inode(inode);
+}
+
+/**
+ * kernfs_get_inode - get inode for kernfs_node
+ * @sb: super block
+ * @kn: kernfs_node to allocate inode for
+ *
+ * Get inode for @kn. If such inode doesn't exist, a new inode is
+ * allocated and basics are initialized. New inode is returned
+ * locked.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * Pointer to allocated inode on success, NULL on failure.
+ */
+struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn)
+{
+ struct inode *inode;
+
+ inode = iget_locked(sb, kn->ino);
+ if (inode && (inode->i_state & I_NEW))
+ kernfs_init_inode(kn, inode);
+
+ return inode;
+}
+
+/*
+ * The kernfs_node serves as both an inode and a directory entry for
+ * kernfs. To prevent the kernfs inode numbers from being freed
+ * prematurely we take a reference to kernfs_node from the kernfs inode. A
+ * super_operations.evict_inode() implementation is needed to drop that
+ * reference upon inode destruction.
+ */
+void kernfs_evict_inode(struct inode *inode)
+{
+ struct kernfs_node *kn = inode->i_private;
+
+ truncate_inode_pages(&inode->i_data, 0);
+ clear_inode(inode);
+ kernfs_put(kn);
+}
+
+int kernfs_iop_permission(struct inode *inode, int mask)
+{
+ struct kernfs_node *kn;
+
+ if (mask & MAY_NOT_BLOCK)
+ return -ECHILD;
+
+ kn = inode->i_private;
+
+ mutex_lock(&kernfs_mutex);
+ kernfs_refresh_inode(kn, inode);
+ mutex_unlock(&kernfs_mutex);
+
+ return generic_permission(inode, mask);
+}
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
new file mode 100644
index 0000000..eb536b7
--- /dev/null
+++ b/fs/kernfs/kernfs-internal.h
@@ -0,0 +1,122 @@
+/*
+ * fs/kernfs/kernfs-internal.h - kernfs internal header file
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007, 2013 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef __KERNFS_INTERNAL_H
+#define __KERNFS_INTERNAL_H
+
+#include <linux/lockdep.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/xattr.h>
+
+#include <linux/kernfs.h>
+
+struct kernfs_iattrs {
+ struct iattr ia_iattr;
+ void *ia_secdata;
+ u32 ia_secdata_len;
+
+ struct simple_xattrs xattrs;
+};
+
+#define KN_DEACTIVATED_BIAS INT_MIN
+
+/* KERNFS_TYPE_MASK and types are defined in include/linux/kernfs.h */
+
+/**
+ * kernfs_root - find out the kernfs_root a kernfs_node belongs to
+ * @kn: kernfs_node of interest
+ *
+ * Return the kernfs_root @kn belongs to.
+ */
+static inline struct kernfs_root *kernfs_root(struct kernfs_node *kn)
+{
+ /* if parent exists, it's always a dir; otherwise, @kn is a dir */
+ if (kn->parent)
+ kn = kn->parent;
+ return kn->dir.root;
+}
+
+/*
+ * Context structure to be used while adding/removing nodes.
+ */
+struct kernfs_addrm_cxt {
+ struct kernfs_node *removed;
+};
+
+/*
+ * mount.c
+ */
+struct kernfs_super_info {
+ /*
+ * The root associated with this super_block. Each super_block is
+ * identified by the root and ns it's associated with.
+ */
+ struct kernfs_root *root;
+
+ /*
+ * Each sb is associated with one namespace tag, currently the
+ * network namespace of the task which mounted this kernfs
+ * instance. If multiple tags become necessary, make the following
+ * an array and compare kernfs_node tag against every entry.
+ */
+ const void *ns;
+};
+#define kernfs_info(SB) ((struct kernfs_super_info *)(SB->s_fs_info))
+
+extern struct kmem_cache *kernfs_node_cache;
+
+/*
+ * inode.c
+ */
+struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn);
+void kernfs_evict_inode(struct inode *inode);
+int kernfs_iop_permission(struct inode *inode, int mask);
+int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr);
+int kernfs_iop_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat);
+int kernfs_iop_setxattr(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags);
+int kernfs_iop_removexattr(struct dentry *dentry, const char *name);
+ssize_t kernfs_iop_getxattr(struct dentry *dentry, const char *name, void *buf,
+ size_t size);
+ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size);
+void kernfs_inode_init(void);
+
+/*
+ * dir.c
+ */
+extern struct mutex kernfs_mutex;
+extern const struct dentry_operations kernfs_dops;
+extern const struct file_operations kernfs_dir_fops;
+extern const struct inode_operations kernfs_dir_iops;
+
+struct kernfs_node *kernfs_get_active(struct kernfs_node *kn);
+void kernfs_put_active(struct kernfs_node *kn);
+void kernfs_addrm_start(struct kernfs_addrm_cxt *acxt);
+int kernfs_add_one(struct kernfs_addrm_cxt *acxt, struct kernfs_node *kn);
+void kernfs_addrm_finish(struct kernfs_addrm_cxt *acxt);
+struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
+ const char *name, umode_t mode,
+ unsigned flags);
+
+/*
+ * file.c
+ */
+extern const struct file_operations kernfs_file_fops;
+
+void kernfs_unmap_bin_file(struct kernfs_node *kn);
+
+/*
+ * symlink.c
+ */
+extern const struct inode_operations kernfs_symlink_iops;
+
+#endif /* __KERNFS_INTERNAL_H */
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
new file mode 100644
index 0000000..0d6ce89
--- /dev/null
+++ b/fs/kernfs/mount.c
@@ -0,0 +1,165 @@
+/*
+ * fs/kernfs/mount.c - kernfs mount implementation
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/init.h>
+#include <linux/magic.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+
+#include "kernfs-internal.h"
+
+struct kmem_cache *kernfs_node_cache;
+
+static const struct super_operations kernfs_sops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+ .evict_inode = kernfs_evict_inode,
+};
+
+static int kernfs_fill_super(struct super_block *sb)
+{
+ struct kernfs_super_info *info = kernfs_info(sb);
+ struct inode *inode;
+ struct dentry *root;
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = SYSFS_MAGIC;
+ sb->s_op = &kernfs_sops;
+ sb->s_time_gran = 1;
+
+ /* get root inode, initialize and unlock it */
+ mutex_lock(&kernfs_mutex);
+ inode = kernfs_get_inode(sb, info->root->kn);
+ mutex_unlock(&kernfs_mutex);
+ if (!inode) {
+ pr_debug("kernfs: could not get root inode\n");
+ return -ENOMEM;
+ }
+
+ /* instantiate and link root dentry */
+ root = d_make_root(inode);
+ if (!root) {
+ pr_debug("%s: could not get root dentry!\n", __func__);
+ return -ENOMEM;
+ }
+ kernfs_get(info->root->kn);
+ root->d_fsdata = info->root->kn;
+ sb->s_root = root;
+ sb->s_d_op = &kernfs_dops;
+ return 0;
+}
+
+static int kernfs_test_super(struct super_block *sb, void *data)
+{
+ struct kernfs_super_info *sb_info = kernfs_info(sb);
+ struct kernfs_super_info *info = data;
+
+ return sb_info->root == info->root && sb_info->ns == info->ns;
+}
+
+static int kernfs_set_super(struct super_block *sb, void *data)
+{
+ int error;
+ error = set_anon_super(sb, data);
+ if (!error)
+ sb->s_fs_info = data;
+ return error;
+}
+
+/**
+ * kernfs_super_ns - determine the namespace tag of a kernfs super_block
+ * @sb: super_block of interest
+ *
+ * Return the namespace tag associated with kernfs super_block @sb.
+ */
+const void *kernfs_super_ns(struct super_block *sb)
+{
+ struct kernfs_super_info *info = kernfs_info(sb);
+
+ return info->ns;
+}
+
+/**
+ * kernfs_mount_ns - kernfs mount helper
+ * @fs_type: file_system_type of the fs being mounted
+ * @flags: mount flags specified for the mount
+ * @root: kernfs_root of the hierarchy being mounted
+ * @ns: optional namespace tag of the mount
+ *
+ * This is to be called from each kernfs user's file_system_type->mount()
+ * implementation, which should pass through the specified @fs_type and
+ * @flags, and specify the hierarchy and namespace tag to mount via @root
+ * and @ns, respectively.
+ *
+ * The return value can be passed to the vfs layer verbatim.
+ */
+struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
+ struct kernfs_root *root, const void *ns)
+{
+ struct super_block *sb;
+ struct kernfs_super_info *info;
+ int error;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ info->root = root;
+ info->ns = ns;
+
+ sb = sget(fs_type, kernfs_test_super, kernfs_set_super, flags, info);
+ if (IS_ERR(sb) || sb->s_fs_info != info)
+ kfree(info);
+ if (IS_ERR(sb))
+ return ERR_CAST(sb);
+ if (!sb->s_root) {
+ error = kernfs_fill_super(sb);
+ if (error) {
+ deactivate_locked_super(sb);
+ return ERR_PTR(error);
+ }
+ sb->s_flags |= MS_ACTIVE;
+ }
+
+ return dget(sb->s_root);
+}
+
+/**
+ * kernfs_kill_sb - kill_sb for kernfs
+ * @sb: super_block being killed
+ *
+ * This can be used directly for file_system_type->kill_sb(). If a kernfs
+ * user needs extra cleanup, it can implement its own kill_sb() and call
+ * this function at the end.
+ */
+void kernfs_kill_sb(struct super_block *sb)
+{
+ struct kernfs_super_info *info = kernfs_info(sb);
+ struct kernfs_node *root_kn = sb->s_root->d_fsdata;
+
+ /*
+ * Remove the superblock from fs_supers/s_instances
+ * so we can't find it, before freeing kernfs_super_info.
+ */
+ kill_anon_super(sb);
+ kfree(info);
+ kernfs_put(root_kn);
+}
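+
+/*
+ * Usage sketch (illustrative only; "foofs" and "foofs_root" are
+ * hypothetical): a kernfs user wires the two helpers above into its
+ * file_system_type, as the comments above suggest.
+ *
+ *	static struct dentry *foofs_mount(struct file_system_type *fs_type,
+ *					  int flags, const char *dev_name,
+ *					  void *data)
+ *	{
+ *		return kernfs_mount_ns(fs_type, flags, foofs_root, NULL);
+ *	}
+ *
+ *	static struct file_system_type foofs_fs_type = {
+ *		.name		= "foofs",
+ *		.mount		= foofs_mount,
+ *		.kill_sb	= kernfs_kill_sb,
+ *	};
+ */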
+
+void __init kernfs_init(void)
+{
+ kernfs_node_cache = kmem_cache_create("kernfs_node_cache",
+ sizeof(struct kernfs_node),
+ 0, SLAB_PANIC, NULL);
+ kernfs_inode_init();
+}
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
new file mode 100644
index 0000000..4d45705
--- /dev/null
+++ b/fs/kernfs/symlink.c
@@ -0,0 +1,151 @@
+/*
+ * fs/kernfs/symlink.c - kernfs symlink implementation
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/namei.h>
+
+#include "kernfs-internal.h"
+
+/**
+ * kernfs_create_link - create a symlink
+ * @parent: directory to create the symlink in
+ * @name: name of the symlink
+ * @target: target node for the symlink to point to
+ *
+ * Returns the created node on success, ERR_PTR() value on error.
+ */
+struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
+ const char *name,
+ struct kernfs_node *target)
+{
+ struct kernfs_node *kn;
+ struct kernfs_addrm_cxt acxt;
+ int error;
+
+ kn = kernfs_new_node(parent, name, S_IFLNK|S_IRWXUGO, KERNFS_LINK);
+ if (!kn)
+ return ERR_PTR(-ENOMEM);
+
+ if (kernfs_ns_enabled(parent))
+ kn->ns = target->ns;
+ kn->symlink.target_kn = target;
+ kernfs_get(target); /* ref owned by symlink */
+
+ kernfs_addrm_start(&acxt);
+ error = kernfs_add_one(&acxt, kn);
+ kernfs_addrm_finish(&acxt);
+
+ if (!error)
+ return kn;
+
+ kernfs_put(kn);
+ return ERR_PTR(error);
+}
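+
+/*
+ * Usage sketch (illustrative only; "dir", "target" and "current" are
+ * hypothetical): linking an existing node under another directory.
+ *
+ *	struct kernfs_node *link;
+ *
+ *	link = kernfs_create_link(dir, "current", target);
+ *	if (IS_ERR(link))
+ *		return PTR_ERR(link);
+ */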
+
+static int kernfs_get_target_path(struct kernfs_node *parent,
+ struct kernfs_node *target, char *path)
+{
+ struct kernfs_node *base, *kn;
+ char *s = path;
+ int len = 0;
+
+ /* go up to the root, stop at the base */
+ base = parent;
+ while (base->parent) {
+ kn = target->parent;
+ while (kn->parent && base != kn)
+ kn = kn->parent;
+
+ if (base == kn)
+ break;
+
+ strcpy(s, "../");
+ s += 3;
+ base = base->parent;
+ }
+
+ /* determine end of target string for reverse fillup */
+ kn = target;
+ while (kn->parent && kn != base) {
+ len += strlen(kn->name) + 1;
+ kn = kn->parent;
+ }
+
+ /* check limits */
+ if (len < 2)
+ return -EINVAL;
+ len--;
+ if ((s - path) + len > PATH_MAX)
+ return -ENAMETOOLONG;
+
+ /* reverse fillup of target string from target to base */
+ kn = target;
+ while (kn->parent && kn != base) {
+ int slen = strlen(kn->name);
+
+ len -= slen;
+ strncpy(s + len, kn->name, slen);
+ if (len)
+ s[--len] = '/';
+
+ kn = kn->parent;
+ }
+
+ return 0;
+}
+
+static int kernfs_getlink(struct dentry *dentry, char *path)
+{
+ struct kernfs_node *kn = dentry->d_fsdata;
+ struct kernfs_node *parent = kn->parent;
+ struct kernfs_node *target = kn->symlink.target_kn;
+ int error;
+
+ mutex_lock(&kernfs_mutex);
+ error = kernfs_get_target_path(parent, target, path);
+ mutex_unlock(&kernfs_mutex);
+
+ return error;
+}
+
+static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ int error = -ENOMEM;
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
+ if (page) {
+ error = kernfs_getlink(dentry, (char *) page);
+ if (error < 0)
+ free_page((unsigned long)page);
+ }
+ nd_set_link(nd, error ? ERR_PTR(error) : (char *)page);
+ return NULL;
+}
+
+static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
+{
+ char *page = nd_get_link(nd);
+ if (!IS_ERR(page))
+ free_page((unsigned long)page);
+}
+
+const struct inode_operations kernfs_symlink_iops = {
+ .setxattr = kernfs_iop_setxattr,
+ .removexattr = kernfs_iop_removexattr,
+ .getxattr = kernfs_iop_getxattr,
+ .listxattr = kernfs_iop_listxattr,
+ .readlink = generic_readlink,
+ .follow_link = kernfs_iop_follow_link,
+ .put_link = kernfs_iop_put_link,
+ .setattr = kernfs_iop_setattr,
+ .getattr = kernfs_iop_getattr,
+ .permission = kernfs_iop_permission,
+};
diff --git a/fs/namespace.c b/fs/namespace.c
index ac2ce8a..22e5367 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2790,6 +2790,8 @@ void __init mnt_init(void)
for (u = 0; u < HASH_SIZE; u++)
INIT_LIST_HEAD(&mountpoint_hashtable[u]);
+ kernfs_init();
+
err = sysfs_init();
if (err)
printk(KERN_WARNING "%s: sysfs_init error: %d\n",
@@ -2886,7 +2888,7 @@ bool fs_fully_visible(struct file_system_type *type)
struct inode *inode = child->mnt_mountpoint->d_inode;
if (!S_ISDIR(inode->i_mode))
goto next;
- if (inode->i_nlink != 2)
+ if (inode->i_nlink > 2)
goto next;
}
visible = true;
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 9f6b486..a1a1916 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1440,17 +1440,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
nilfs_clear_logs(&sci->sc_segbufs);
- err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
- if (unlikely(err))
- return err;
-
if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
sci->sc_freesegs,
sci->sc_nfreesegs,
NULL);
WARN_ON(err); /* do not happen */
+ sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
}
+
+ err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
+ if (unlikely(err))
+ return err;
+
nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
sci->sc_stage = prev_stage;
}
diff --git a/fs/sysfs/Makefile b/fs/sysfs/Makefile
index 8876ac1..6eff6e1 100644
--- a/fs/sysfs/Makefile
+++ b/fs/sysfs/Makefile
@@ -2,4 +2,4 @@
# Makefile for the sysfs virtual filesystem
#
-obj-y := inode.o file.o dir.o symlink.o mount.o group.o
+obj-y := file.o dir.o symlink.o mount.o group.o
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 5e73d66..ee0d761 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -13,465 +13,31 @@
#undef DEBUG
#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/module.h>
#include <linux/kobject.h>
-#include <linux/namei.h>
-#include <linux/idr.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/security.h>
-#include <linux/hash.h>
#include "sysfs.h"
-DEFINE_MUTEX(sysfs_mutex);
DEFINE_SPINLOCK(sysfs_symlink_target_lock);
-#define to_sysfs_dirent(X) rb_entry((X), struct sysfs_dirent, s_rb)
-
-static DEFINE_SPINLOCK(sysfs_ino_lock);
-static DEFINE_IDA(sysfs_ino_ida);
-
-/**
- * sysfs_name_hash
- * @name: Null terminated string to hash
- * @ns: Namespace tag to hash
- *
- * Returns a 31-bit hash of ns + name (so it fits in an off_t)
- */
-static unsigned int sysfs_name_hash(const char *name, const void *ns)
-{
- unsigned long hash = init_name_hash();
- unsigned int len = strlen(name);
- while (len--)
- hash = partial_name_hash(*name++, hash);
- hash = (end_name_hash(hash) ^ hash_ptr((void *)ns, 31));
- hash &= 0x7fffffffU;
- /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
- if (hash < 1)
- hash += 2;
- if (hash >= INT_MAX)
- hash = INT_MAX - 1;
- return hash;
-}
-
-static int sysfs_name_compare(unsigned int hash, const char *name,
- const void *ns, const struct sysfs_dirent *sd)
-{
- if (hash != sd->s_hash)
- return hash - sd->s_hash;
- if (ns != sd->s_ns)
- return ns - sd->s_ns;
- return strcmp(name, sd->s_name);
-}
-
-static int sysfs_sd_compare(const struct sysfs_dirent *left,
- const struct sysfs_dirent *right)
-{
- return sysfs_name_compare(left->s_hash, left->s_name, left->s_ns,
- right);
-}
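
Editor's note: the helpers removed above squeeze the (namespace, name) pair into a 31-bit hash because the value serves both as the rbtree key and as the directory offset reported by readdir(), which is why 0, 1 and INT_MAX stay reserved for the dot entries and EOF. Those offsets are visible from user space; a small illustration (exact values depend on the kernel version):

#include <stdio.h>
#include <dirent.h>

int main(void)
{
	DIR *d = opendir("/sys/kernel");	/* any sysfs directory */
	struct dirent *de;

	if (!d) {
		perror("opendir");
		return 1;
	}
	while ((de = readdir(d)))
		/* the reported offsets track the per-entry name hashes */
		printf("%-20s off=%ld\n", de->d_name, telldir(d));
	closedir(d);
	return 0;
}
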
-
-/**
- * sysfs_link_sibling - link sysfs_dirent into sibling rbtree
- * @sd: sysfs_dirent of interest
- *
- * Link @sd into its sibling rbtree which starts from
- * sd->s_parent->s_dir.children.
- *
- * Locking:
- * mutex_lock(sysfs_mutex)
- *
- * RETURNS:
- * 0 on success, -EEXIST on failure.
- */
-static int sysfs_link_sibling(struct sysfs_dirent *sd)
-{
- struct rb_node **node = &sd->s_parent->s_dir.children.rb_node;
- struct rb_node *parent = NULL;
-
- if (sysfs_type(sd) == SYSFS_DIR)
- sd->s_parent->s_dir.subdirs++;
-
- while (*node) {
- struct sysfs_dirent *pos;
- int result;
-
- pos = to_sysfs_dirent(*node);
- parent = *node;
- result = sysfs_sd_compare(sd, pos);
- if (result < 0)
- node = &pos->s_rb.rb_left;
- else if (result > 0)
- node = &pos->s_rb.rb_right;
- else
- return -EEXIST;
- }
- /* add new node and rebalance the tree */
- rb_link_node(&sd->s_rb, parent, node);
- rb_insert_color(&sd->s_rb, &sd->s_parent->s_dir.children);
- return 0;
-}
-
-/**
- * sysfs_unlink_sibling - unlink sysfs_dirent from sibling rbtree
- * @sd: sysfs_dirent of interest
- *
- * Unlink @sd from its sibling rbtree which starts from
- * sd->s_parent->s_dir.children.
- *
- * Locking:
- * mutex_lock(sysfs_mutex)
- */
-static void sysfs_unlink_sibling(struct sysfs_dirent *sd)
-{
- if (sysfs_type(sd) == SYSFS_DIR)
- sd->s_parent->s_dir.subdirs--;
-
- rb_erase(&sd->s_rb, &sd->s_parent->s_dir.children);
-}
-
-/**
- * sysfs_get_active - get an active reference to sysfs_dirent
- * @sd: sysfs_dirent to get an active reference to
- *
- * Get an active reference of @sd. This function is noop if @sd
- * is NULL.
- *
- * RETURNS:
- * Pointer to @sd on success, NULL on failure.
- */
-struct sysfs_dirent *sysfs_get_active(struct sysfs_dirent *sd)
-{
- if (unlikely(!sd))
- return NULL;
-
- if (!atomic_inc_unless_negative(&sd->s_active))
- return NULL;
-
- if (likely(!sysfs_ignore_lockdep(sd)))
- rwsem_acquire_read(&sd->dep_map, 0, 1, _RET_IP_);
- return sd;
-}
-
-/**
- * sysfs_put_active - put an active reference to sysfs_dirent
- * @sd: sysfs_dirent to put an active reference to
- *
- * Put an active reference to @sd. This function is noop if @sd
- * is NULL.
- */
-void sysfs_put_active(struct sysfs_dirent *sd)
-{
- int v;
-
- if (unlikely(!sd))
- return;
-
- if (likely(!sysfs_ignore_lockdep(sd)))
- rwsem_release(&sd->dep_map, 1, _RET_IP_);
- v = atomic_dec_return(&sd->s_active);
- if (likely(v != SD_DEACTIVATED_BIAS))
- return;
-
- /* atomic_dec_return() is a mb(), we'll always see the updated
- * sd->u.completion.
- */
- complete(sd->u.completion);
-}
-
-/**
- * sysfs_deactivate - deactivate sysfs_dirent
- * @sd: sysfs_dirent to deactivate
- *
- * Deny new active references and drain existing ones.
- */
-static void sysfs_deactivate(struct sysfs_dirent *sd)
-{
- DECLARE_COMPLETION_ONSTACK(wait);
- int v;
-
- BUG_ON(!(sd->s_flags & SYSFS_FLAG_REMOVED));
-
- if (!(sysfs_type(sd) & SYSFS_ACTIVE_REF))
- return;
-
- sd->u.completion = (void *)&wait;
-
- rwsem_acquire(&sd->dep_map, 0, 0, _RET_IP_);
- /* atomic_add_return() is a mb(), put_active() will always see
- * the updated sd->u.completion.
- */
- v = atomic_add_return(SD_DEACTIVATED_BIAS, &sd->s_active);
-
- if (v != SD_DEACTIVATED_BIAS) {
- lock_contended(&sd->dep_map, _RET_IP_);
- wait_for_completion(&wait);
- }
-
- lock_acquired(&sd->dep_map, _RET_IP_);
- rwsem_release(&sd->dep_map, 1, _RET_IP_);
-}
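
Editor's note: the three helpers above implement a bias-based drain. Adding a large negative bias to the active counter both refuses new references and lets the final put detect that it must wake the remover. A stripped-down sketch of the same pattern, with hypothetical names rather than the sysfs code itself:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define NODE_DEACTIVATED_BIAS	INT_MIN

struct node {
	atomic_t		active;		/* starts at 0 */
	struct completion	*drained;
};

static bool node_get_active(struct node *n)
{
	/* fails as soon as the counter has been biased negative */
	return atomic_inc_unless_negative(&n->active);
}

static void node_put_active(struct node *n)
{
	if (atomic_dec_return(&n->active) == NODE_DEACTIVATED_BIAS)
		complete(n->drained);	/* we were the last active user */
}

static void node_deactivate(struct node *n)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	n->drained = &wait;
	/* bias the counter: new gets now fail, existing ones drain */
	if (atomic_add_return(NODE_DEACTIVATED_BIAS, &n->active) !=
	    NODE_DEACTIVATED_BIAS)
		wait_for_completion(&wait);
}
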
-
-static int sysfs_alloc_ino(unsigned int *pino)
-{
- int ino, rc;
-
- retry:
- spin_lock(&sysfs_ino_lock);
- rc = ida_get_new_above(&sysfs_ino_ida, 2, &ino);
- spin_unlock(&sysfs_ino_lock);
-
- if (rc == -EAGAIN) {
- if (ida_pre_get(&sysfs_ino_ida, GFP_KERNEL))
- goto retry;
- rc = -ENOMEM;
- }
-
- *pino = ino;
- return rc;
-}
-
-static void sysfs_free_ino(unsigned int ino)
-{
- spin_lock(&sysfs_ino_lock);
- ida_remove(&sysfs_ino_ida, ino);
- spin_unlock(&sysfs_ino_lock);
-}
-
-void release_sysfs_dirent(struct sysfs_dirent *sd)
-{
- struct sysfs_dirent *parent_sd;
-
- repeat:
- /* Moving/renaming is always done while holding reference.
- * sd->s_parent won't change beneath us.
- */
- parent_sd = sd->s_parent;
-
- WARN(!(sd->s_flags & SYSFS_FLAG_REMOVED),
- "sysfs: free using entry: %s/%s\n",
- parent_sd ? parent_sd->s_name : "", sd->s_name);
-
- if (sysfs_type(sd) == SYSFS_KOBJ_LINK)
- sysfs_put(sd->s_symlink.target_sd);
- if (sysfs_type(sd) & SYSFS_COPY_NAME)
- kfree(sd->s_name);
- if (sd->s_iattr && sd->s_iattr->ia_secdata)
- security_release_secctx(sd->s_iattr->ia_secdata,
- sd->s_iattr->ia_secdata_len);
- kfree(sd->s_iattr);
- sysfs_free_ino(sd->s_ino);
- kmem_cache_free(sysfs_dir_cachep, sd);
-
- sd = parent_sd;
- if (sd && atomic_dec_and_test(&sd->s_count))
- goto repeat;
-}
-
-static int sysfs_dentry_delete(const struct dentry *dentry)
-{
- struct sysfs_dirent *sd = dentry->d_fsdata;
- return !(sd && !(sd->s_flags & SYSFS_FLAG_REMOVED));
-}
-
-static int sysfs_dentry_revalidate(struct dentry *dentry, unsigned int flags)
-{
- struct sysfs_dirent *sd;
- int type;
-
- if (flags & LOOKUP_RCU)
- return -ECHILD;
-
- sd = dentry->d_fsdata;
- mutex_lock(&sysfs_mutex);
-
- /* The sysfs dirent has been deleted */
- if (sd->s_flags & SYSFS_FLAG_REMOVED)
- goto out_bad;
-
- /* The sysfs dirent has been moved? */
- if (dentry->d_parent->d_fsdata != sd->s_parent)
- goto out_bad;
-
- /* The sysfs dirent has been renamed */
- if (strcmp(dentry->d_name.name, sd->s_name) != 0)
- goto out_bad;
-
- /* The sysfs dirent has been moved to a different namespace */
- type = KOBJ_NS_TYPE_NONE;
- if (sd->s_parent) {
- type = sysfs_ns_type(sd->s_parent);
- if (type != KOBJ_NS_TYPE_NONE &&
- sysfs_info(dentry->d_sb)->ns[type] != sd->s_ns)
- goto out_bad;
- }
-
- mutex_unlock(&sysfs_mutex);
-out_valid:
- return 1;
-out_bad:
- /* Remove the dentry from the dcache hashes.
- * If this is a deleted dentry we use d_drop instead of d_delete
- * so sysfs doesn't need to cope with negative dentries.
- *
- * If this is a dentry that has simply been renamed we
- * use d_drop to remove it from the dcache lookup on its
- * old parent. If this dentry persists later when a lookup
- * is performed at its new name the dentry will be readded
- * to the dcache hashes.
- */
- mutex_unlock(&sysfs_mutex);
-
- /* If we have submounts we must allow the vfs caches
- * to lie about the state of the filesystem to prevent
- * leaks and other nasty things.
- */
- if (check_submounts_and_drop(dentry) != 0)
- goto out_valid;
-
- return 0;
-}
-
-static void sysfs_dentry_release(struct dentry *dentry)
-{
- sysfs_put(dentry->d_fsdata);
-}
-
-const struct dentry_operations sysfs_dentry_ops = {
- .d_revalidate = sysfs_dentry_revalidate,
- .d_delete = sysfs_dentry_delete,
- .d_release = sysfs_dentry_release,
-};
-
-struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type)
-{
- char *dup_name = NULL;
- struct sysfs_dirent *sd;
-
- if (type & SYSFS_COPY_NAME) {
- name = dup_name = kstrdup(name, GFP_KERNEL);
- if (!name)
- return NULL;
- }
-
- sd = kmem_cache_zalloc(sysfs_dir_cachep, GFP_KERNEL);
- if (!sd)
- goto err_out1;
-
- if (sysfs_alloc_ino(&sd->s_ino))
- goto err_out2;
-
- atomic_set(&sd->s_count, 1);
- atomic_set(&sd->s_active, 0);
-
- sd->s_name = name;
- sd->s_mode = mode;
- sd->s_flags = type | SYSFS_FLAG_REMOVED;
-
- return sd;
-
- err_out2:
- kmem_cache_free(sysfs_dir_cachep, sd);
- err_out1:
- kfree(dup_name);
- return NULL;
-}
-
-/**
- * sysfs_addrm_start - prepare for sysfs_dirent add/remove
- * @acxt: pointer to sysfs_addrm_cxt to be used
- *
- * This function is called when the caller is about to add or remove
- * sysfs_dirent. This function acquires sysfs_mutex. @acxt is used
- * to keep and pass context to other addrm functions.
- *
- * LOCKING:
- * Kernel thread context (may sleep). sysfs_mutex is locked on
- * return.
- */
-void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt)
- __acquires(sysfs_mutex)
-{
- memset(acxt, 0, sizeof(*acxt));
-
- mutex_lock(&sysfs_mutex);
-}
-
-/**
- * __sysfs_add_one - add sysfs_dirent to parent without warning
- * @acxt: addrm context to use
- * @sd: sysfs_dirent to be added
- * @parent_sd: the parent sysfs_dirent to add @sd to
- *
- * Get @parent_sd and set @sd->s_parent to it and increment nlink of
- * the parent inode if @sd is a directory and link into the children
- * list of the parent.
- *
- * This function should be called between calls to
- * sysfs_addrm_start() and sysfs_addrm_finish() and should be
- * passed the same @acxt as passed to sysfs_addrm_start().
- *
- * LOCKING:
- * Determined by sysfs_addrm_start().
- *
- * RETURNS:
- * 0 on success, -EEXIST if entry with the given name already
- * exists.
- */
-int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd,
- struct sysfs_dirent *parent_sd)
-{
- struct sysfs_inode_attrs *ps_iattr;
- int ret;
-
- if (!!sysfs_ns_type(parent_sd) != !!sd->s_ns) {
- WARN(1, KERN_WARNING "sysfs: ns %s in '%s' for '%s'\n",
- sysfs_ns_type(parent_sd) ? "required" : "invalid",
- parent_sd->s_name, sd->s_name);
- return -EINVAL;
- }
-
- sd->s_hash = sysfs_name_hash(sd->s_name, sd->s_ns);
- sd->s_parent = sysfs_get(parent_sd);
-
- ret = sysfs_link_sibling(sd);
- if (ret)
- return ret;
-
- /* Update timestamps on the parent */
- ps_iattr = parent_sd->s_iattr;
- if (ps_iattr) {
- struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
- ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME;
- }
-
- /* Mark the entry added into directory tree */
- sd->s_flags &= ~SYSFS_FLAG_REMOVED;
-
- return 0;
-}
-
/**
* sysfs_pathname - return full path to sysfs dirent
- * @sd: sysfs_dirent whose path we want
+ * @kn: kernfs_node whose path we want
* @path: caller allocated buffer of size PATH_MAX
*
* Gives the name "/" to the sysfs_root entry; any path returned
* is relative to wherever sysfs is mounted.
*/
-static char *sysfs_pathname(struct sysfs_dirent *sd, char *path)
+static char *sysfs_pathname(struct kernfs_node *kn, char *path)
{
- if (sd->s_parent) {
- sysfs_pathname(sd->s_parent, path);
+ if (kn->parent) {
+ sysfs_pathname(kn->parent, path);
strlcat(path, "/", PATH_MAX);
}
- strlcat(path, sd->s_name, PATH_MAX);
+ strlcat(path, kn->name, PATH_MAX);
return path;
}
-void sysfs_warn_dup(struct sysfs_dirent *parent, const char *name)
+void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
{
char *path;
@@ -489,445 +55,34 @@ void sysfs_warn_dup(struct sysfs_dirent *parent, const char *name)
}
/**
- * sysfs_add_one - add sysfs_dirent to parent
- * @acxt: addrm context to use
- * @sd: sysfs_dirent to be added
- * @parent_sd: the parent sysfs_dirent to add @sd to
- *
- * Get @parent_sd and set @sd->s_parent to it and increment nlink of
- * the parent inode if @sd is a directory and link into the children
- * list of the parent.
- *
- * This function should be called between calls to
- * sysfs_addrm_start() and sysfs_addrm_finish() and should be
- * passed the same @acxt as passed to sysfs_addrm_start().
- *
- * LOCKING:
- * Determined by sysfs_addrm_start().
- *
- * RETURNS:
- * 0 on success, -EEXIST if entry with the given name already
- * exists.
- */
-int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd,
- struct sysfs_dirent *parent_sd)
-{
- int ret;
-
- ret = __sysfs_add_one(acxt, sd, parent_sd);
-
- if (ret == -EEXIST)
- sysfs_warn_dup(parent_sd, sd->s_name);
- return ret;
-}
-
-/**
- * sysfs_remove_one - remove sysfs_dirent from parent
- * @acxt: addrm context to use
- * @sd: sysfs_dirent to be removed
- *
- * Mark @sd removed and drop nlink of parent inode if @sd is a
- * directory. @sd is unlinked from the children list.
- *
- * This function should be called between calls to
- * sysfs_addrm_start() and sysfs_addrm_finish() and should be
- * passed the same @acxt as passed to sysfs_addrm_start().
- *
- * LOCKING:
- * Determined by sysfs_addrm_start().
- */
-static void sysfs_remove_one(struct sysfs_addrm_cxt *acxt,
- struct sysfs_dirent *sd)
-{
- struct sysfs_inode_attrs *ps_iattr;
-
- /*
- * Removal can be called multiple times on the same node. Only the
- * first invocation is effective and puts the base ref.
- */
- if (sd->s_flags & SYSFS_FLAG_REMOVED)
- return;
-
- sysfs_unlink_sibling(sd);
-
- /* Update timestamps on the parent */
- ps_iattr = sd->s_parent->s_iattr;
- if (ps_iattr) {
- struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
- ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME;
- }
-
- sd->s_flags |= SYSFS_FLAG_REMOVED;
- sd->u.removed_list = acxt->removed;
- acxt->removed = sd;
-}
-
-/**
- * sysfs_addrm_finish - finish up sysfs_dirent add/remove
- * @acxt: addrm context to finish up
- *
- * Finish up sysfs_dirent add/remove. Resources acquired by
- * sysfs_addrm_start() are released and removed sysfs_dirents are
- * cleaned up.
- *
- * LOCKING:
- * sysfs_mutex is released.
- */
-void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
- __releases(sysfs_mutex)
-{
- /* release resources acquired by sysfs_addrm_start() */
- mutex_unlock(&sysfs_mutex);
-
- /* kill removed sysfs_dirents */
- while (acxt->removed) {
- struct sysfs_dirent *sd = acxt->removed;
-
- acxt->removed = sd->u.removed_list;
-
- sysfs_deactivate(sd);
- sysfs_unmap_bin_file(sd);
- sysfs_put(sd);
- }
-}
-
-/**
- * sysfs_find_dirent - find sysfs_dirent with the given name
- * @parent_sd: sysfs_dirent to search under
- * @name: name to look for
- * @ns: the namespace tag to use
- *
- * Look for sysfs_dirent with name @name under @parent_sd.
- *
- * LOCKING:
- * mutex_lock(sysfs_mutex)
- *
- * RETURNS:
- * Pointer to sysfs_dirent if found, NULL if not.
- */
-struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
- const unsigned char *name,
- const void *ns)
-{
- struct rb_node *node = parent_sd->s_dir.children.rb_node;
- unsigned int hash;
-
- if (!!sysfs_ns_type(parent_sd) != !!ns) {
- WARN(1, KERN_WARNING "sysfs: ns %s in '%s' for '%s'\n",
- sysfs_ns_type(parent_sd) ? "required" : "invalid",
- parent_sd->s_name, name);
- return NULL;
- }
-
- hash = sysfs_name_hash(name, ns);
- while (node) {
- struct sysfs_dirent *sd;
- int result;
-
- sd = to_sysfs_dirent(node);
- result = sysfs_name_compare(hash, name, ns, sd);
- if (result < 0)
- node = node->rb_left;
- else if (result > 0)
- node = node->rb_right;
- else
- return sd;
- }
- return NULL;
-}
-
-/**
- * sysfs_get_dirent_ns - find and get sysfs_dirent with the given name
- * @parent_sd: sysfs_dirent to search under
- * @name: name to look for
- * @ns: the namespace tag to use
- *
- * Look for sysfs_dirent with name @name under @parent_sd and get
- * it if found.
- *
- * LOCKING:
- * Kernel thread context (may sleep). Grabs sysfs_mutex.
- *
- * RETURNS:
- * Pointer to sysfs_dirent if found, NULL if not.
- */
-struct sysfs_dirent *sysfs_get_dirent_ns(struct sysfs_dirent *parent_sd,
- const unsigned char *name,
- const void *ns)
-{
- struct sysfs_dirent *sd;
-
- mutex_lock(&sysfs_mutex);
- sd = sysfs_find_dirent(parent_sd, name, ns);
- sysfs_get(sd);
- mutex_unlock(&sysfs_mutex);
-
- return sd;
-}
-EXPORT_SYMBOL_GPL(sysfs_get_dirent_ns);
-
-static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
- enum kobj_ns_type type,
- const char *name, const void *ns,
- struct sysfs_dirent **p_sd)
-{
- umode_t mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
- struct sysfs_addrm_cxt acxt;
- struct sysfs_dirent *sd;
- int rc;
-
- /* allocate */
- sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
- if (!sd)
- return -ENOMEM;
-
- sd->s_flags |= (type << SYSFS_NS_TYPE_SHIFT);
- sd->s_ns = ns;
- sd->s_dir.kobj = kobj;
-
- /* link in */
- sysfs_addrm_start(&acxt);
- rc = sysfs_add_one(&acxt, sd, parent_sd);
- sysfs_addrm_finish(&acxt);
-
- if (rc == 0)
- *p_sd = sd;
- else
- sysfs_put(sd);
-
- return rc;
-}
-
-int sysfs_create_subdir(struct kobject *kobj, const char *name,
- struct sysfs_dirent **p_sd)
-{
- return create_dir(kobj, kobj->sd,
- KOBJ_NS_TYPE_NONE, name, NULL, p_sd);
-}
-
-/**
- * sysfs_read_ns_type: return associated ns_type
- * @kobj: the kobject being queried
- *
- * Each kobject can be tagged with exactly one namespace type
- * (i.e. network or user). Return the ns_type associated with
- * this object if any
- */
-static enum kobj_ns_type sysfs_read_ns_type(struct kobject *kobj)
-{
- const struct kobj_ns_type_operations *ops;
- enum kobj_ns_type type;
-
- ops = kobj_child_ns_ops(kobj);
- if (!ops)
- return KOBJ_NS_TYPE_NONE;
-
- type = ops->type;
- BUG_ON(type <= KOBJ_NS_TYPE_NONE);
- BUG_ON(type >= KOBJ_NS_TYPES);
- BUG_ON(!kobj_ns_type_registered(type));
-
- return type;
-}
-
-/**
* sysfs_create_dir_ns - create a directory for an object with a namespace tag
* @kobj: object we're creating directory for
* @ns: the namespace tag to use
*/
int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
{
- enum kobj_ns_type type;
- struct sysfs_dirent *parent_sd, *sd;
- int error = 0;
+ struct kernfs_node *parent, *kn;
BUG_ON(!kobj);
if (kobj->parent)
- parent_sd = kobj->parent->sd;
+ parent = kobj->parent->sd;
else
- parent_sd = &sysfs_root;
+ parent = sysfs_root_kn;
- if (!parent_sd)
+ if (!parent)
return -ENOENT;
- type = sysfs_read_ns_type(kobj);
-
- error = create_dir(kobj, parent_sd, type, kobject_name(kobj), ns, &sd);
- if (!error)
- kobj->sd = sd;
- return error;
-}
-
-static struct dentry *sysfs_lookup(struct inode *dir, struct dentry *dentry,
- unsigned int flags)
-{
- struct dentry *ret = NULL;
- struct dentry *parent = dentry->d_parent;
- struct sysfs_dirent *parent_sd = parent->d_fsdata;
- struct sysfs_dirent *sd;
- struct inode *inode;
- enum kobj_ns_type type;
- const void *ns;
-
- mutex_lock(&sysfs_mutex);
-
- type = sysfs_ns_type(parent_sd);
- ns = sysfs_info(dir->i_sb)->ns[type];
-
- sd = sysfs_find_dirent(parent_sd, dentry->d_name.name, ns);
-
- /* no such entry */
- if (!sd) {
- ret = ERR_PTR(-ENOENT);
- goto out_unlock;
- }
- dentry->d_fsdata = sysfs_get(sd);
-
- /* attach dentry and inode */
- inode = sysfs_get_inode(dir->i_sb, sd);
- if (!inode) {
- ret = ERR_PTR(-ENOMEM);
- goto out_unlock;
- }
-
- /* instantiate and hash dentry */
- ret = d_materialise_unique(dentry, inode);
- out_unlock:
- mutex_unlock(&sysfs_mutex);
- return ret;
-}
-
-const struct inode_operations sysfs_dir_inode_operations = {
- .lookup = sysfs_lookup,
- .permission = sysfs_permission,
- .setattr = sysfs_setattr,
- .getattr = sysfs_getattr,
- .setxattr = sysfs_setxattr,
-};
-
-static struct sysfs_dirent *sysfs_leftmost_descendant(struct sysfs_dirent *pos)
-{
- struct sysfs_dirent *last;
-
- while (true) {
- struct rb_node *rbn;
-
- last = pos;
-
- if (sysfs_type(pos) != SYSFS_DIR)
- break;
-
- rbn = rb_first(&pos->s_dir.children);
- if (!rbn)
- break;
-
- pos = to_sysfs_dirent(rbn);
- }
-
- return last;
-}
-
-/**
- * sysfs_next_descendant_post - find the next descendant for post-order walk
- * @pos: the current position (%NULL to initiate traversal)
- * @root: sysfs_dirent whose descendants to walk
- *
- * Find the next descendant to visit for post-order traversal of @root's
- * descendants. @root is included in the iteration and the last node to be
- * visited.
- */
-static struct sysfs_dirent *sysfs_next_descendant_post(struct sysfs_dirent *pos,
- struct sysfs_dirent *root)
-{
- struct rb_node *rbn;
-
- lockdep_assert_held(&sysfs_mutex);
-
- /* if first iteration, visit leftmost descendant which may be root */
- if (!pos)
- return sysfs_leftmost_descendant(root);
-
- /* if we visited @root, we're done */
- if (pos == root)
- return NULL;
-
- /* if there's an unvisited sibling, visit its leftmost descendant */
- rbn = rb_next(&pos->s_rb);
- if (rbn)
- return sysfs_leftmost_descendant(to_sysfs_dirent(rbn));
-
- /* no sibling left, visit parent */
- return pos->s_parent;
-}
-
-static void __sysfs_remove(struct sysfs_addrm_cxt *acxt,
- struct sysfs_dirent *sd)
-{
- struct sysfs_dirent *pos, *next;
-
- if (!sd)
- return;
-
- pr_debug("sysfs %s: removing\n", sd->s_name);
-
- next = NULL;
- do {
- pos = next;
- next = sysfs_next_descendant_post(pos, sd);
- if (pos)
- sysfs_remove_one(acxt, pos);
- } while (next);
-}
-
-/**
- * sysfs_remove - remove a sysfs_dirent recursively
- * @sd: the sysfs_dirent to remove
- *
- * Remove @sd along with all its subdirectories and files.
- */
-void sysfs_remove(struct sysfs_dirent *sd)
-{
- struct sysfs_addrm_cxt acxt;
-
- sysfs_addrm_start(&acxt);
- __sysfs_remove(&acxt, sd);
- sysfs_addrm_finish(&acxt);
-}
-
-/**
- * sysfs_hash_and_remove - find a sysfs_dirent by name and remove it
- * @dir_sd: parent of the target
- * @name: name of the sysfs_dirent to remove
- * @ns: namespace tag of the sysfs_dirent to remove
- *
- * Look for the sysfs_dirent with @name and @ns under @dir_sd and remove
- * it. Returns 0 on success, -ENOENT if such entry doesn't exist.
- */
-int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name,
- const void *ns)
-{
- struct sysfs_addrm_cxt acxt;
- struct sysfs_dirent *sd;
-
- if (!dir_sd) {
- WARN(1, KERN_WARNING "sysfs: can not remove '%s', no directory\n",
- name);
- return -ENOENT;
+ kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
+ S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
+ if (IS_ERR(kn)) {
+ if (PTR_ERR(kn) == -EEXIST)
+ sysfs_warn_dup(parent, kobject_name(kobj));
+ return PTR_ERR(kn);
}
- sysfs_addrm_start(&acxt);
-
- sd = sysfs_find_dirent(dir_sd, name, ns);
- if (sd)
- __sysfs_remove(&acxt, sd);
-
- sysfs_addrm_finish(&acxt);
-
- if (sd)
- return 0;
- else
- return -ENOENT;
+ kobj->sd = kn;
+ return 0;
}
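
Editor's note: after the conversion above, sysfs_create_dir_ns() is a thin wrapper. kernfs_create_dir_ns() allocates the node, stores the kobject as its private pointer, and a duplicate name comes back as -EEXIST. The usual way this path is exercised is through the kobject API; a minimal, hypothetical module-style sketch:

#include <linux/kobject.h>
#include <linux/module.h>

static struct kobject *demo_kobj;	/* hypothetical example object */

static int __init demo_init(void)
{
	/* creates /sys/kernel/demo via sysfs_create_dir_ns() above */
	demo_kobj = kobject_create_and_add("demo", kernel_kobj);
	return demo_kobj ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	kobject_put(demo_kobj);		/* tears the directory down again */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
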
/**
@@ -940,207 +95,47 @@ int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name,
*/
void sysfs_remove_dir(struct kobject *kobj)
{
- struct sysfs_dirent *sd = kobj->sd;
+ struct kernfs_node *kn = kobj->sd;
/*
* In general, kobject owner is responsible for ensuring removal
* doesn't race with other operations and sysfs doesn't provide any
* protection; however, when @kobj is used as a symlink target, the
* symlinking entity usually doesn't own @kobj and thus has no
- * control over removal. @kobj->sd may be removed anytime and
- * symlink code may end up dereferencing an already freed sd.
+ * control over removal. @kobj->sd may be removed anytime
+ * and symlink code may end up dereferencing an already freed node.
*
- * sysfs_symlink_target_lock synchronizes @kobj->sd disassociation
- * against symlink operations so that symlink code can safely
- * dereference @kobj->sd.
+ * sysfs_symlink_target_lock synchronizes @kobj->sd
+ * disassociation against symlink operations so that symlink code
+ * can safely dereference @kobj->sd.
*/
spin_lock(&sysfs_symlink_target_lock);
kobj->sd = NULL;
spin_unlock(&sysfs_symlink_target_lock);
- if (sd) {
- WARN_ON_ONCE(sysfs_type(sd) != SYSFS_DIR);
- sysfs_remove(sd);
+ if (kn) {
+ WARN_ON_ONCE(kernfs_type(kn) != KERNFS_DIR);
+ kernfs_remove(kn);
}
}
-int sysfs_rename(struct sysfs_dirent *sd, struct sysfs_dirent *new_parent_sd,
- const char *new_name, const void *new_ns)
-{
- int error;
-
- mutex_lock(&sysfs_mutex);
-
- error = 0;
- if ((sd->s_parent == new_parent_sd) && (sd->s_ns == new_ns) &&
- (strcmp(sd->s_name, new_name) == 0))
- goto out; /* nothing to rename */
-
- error = -EEXIST;
- if (sysfs_find_dirent(new_parent_sd, new_name, new_ns))
- goto out;
-
- /* rename sysfs_dirent */
- if (strcmp(sd->s_name, new_name) != 0) {
- error = -ENOMEM;
- new_name = kstrdup(new_name, GFP_KERNEL);
- if (!new_name)
- goto out;
-
- kfree(sd->s_name);
- sd->s_name = new_name;
- }
-
- /*
- * Move to the appropriate place in the appropriate directories rbtree.
- */
- sysfs_unlink_sibling(sd);
- sysfs_get(new_parent_sd);
- sysfs_put(sd->s_parent);
- sd->s_ns = new_ns;
- sd->s_hash = sysfs_name_hash(sd->s_name, sd->s_ns);
- sd->s_parent = new_parent_sd;
- sysfs_link_sibling(sd);
-
- error = 0;
- out:
- mutex_unlock(&sysfs_mutex);
- return error;
-}
-
int sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name,
const void *new_ns)
{
- struct sysfs_dirent *parent_sd = kobj->sd->s_parent;
+ struct kernfs_node *parent = kobj->sd->parent;
- return sysfs_rename(kobj->sd, parent_sd, new_name, new_ns);
+ return kernfs_rename_ns(kobj->sd, parent, new_name, new_ns);
}
int sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj,
const void *new_ns)
{
- struct sysfs_dirent *sd = kobj->sd;
- struct sysfs_dirent *new_parent_sd;
+ struct kernfs_node *kn = kobj->sd;
+ struct kernfs_node *new_parent;
- BUG_ON(!sd->s_parent);
- new_parent_sd = new_parent_kobj && new_parent_kobj->sd ?
- new_parent_kobj->sd : &sysfs_root;
+ BUG_ON(!kn->parent);
+ new_parent = new_parent_kobj && new_parent_kobj->sd ?
+ new_parent_kobj->sd : sysfs_root_kn;
- return sysfs_rename(sd, new_parent_sd, sd->s_name, new_ns);
+ return kernfs_rename_ns(kn, new_parent, kn->name, new_ns);
}
-
-/* Relationship between s_mode and the DT_xxx types */
-static inline unsigned char dt_type(struct sysfs_dirent *sd)
-{
- return (sd->s_mode >> 12) & 15;
-}
-
-static int sysfs_dir_release(struct inode *inode, struct file *filp)
-{
- sysfs_put(filp->private_data);
- return 0;
-}
-
-static struct sysfs_dirent *sysfs_dir_pos(const void *ns,
- struct sysfs_dirent *parent_sd, loff_t hash, struct sysfs_dirent *pos)
-{
- if (pos) {
- int valid = !(pos->s_flags & SYSFS_FLAG_REMOVED) &&
- pos->s_parent == parent_sd &&
- hash == pos->s_hash;
- sysfs_put(pos);
- if (!valid)
- pos = NULL;
- }
- if (!pos && (hash > 1) && (hash < INT_MAX)) {
- struct rb_node *node = parent_sd->s_dir.children.rb_node;
- while (node) {
- pos = to_sysfs_dirent(node);
-
- if (hash < pos->s_hash)
- node = node->rb_left;
- else if (hash > pos->s_hash)
- node = node->rb_right;
- else
- break;
- }
- }
- /* Skip over entries in the wrong namespace */
- while (pos && pos->s_ns != ns) {
- struct rb_node *node = rb_next(&pos->s_rb);
- if (!node)
- pos = NULL;
- else
- pos = to_sysfs_dirent(node);
- }
- return pos;
-}
-
-static struct sysfs_dirent *sysfs_dir_next_pos(const void *ns,
- struct sysfs_dirent *parent_sd, ino_t ino, struct sysfs_dirent *pos)
-{
- pos = sysfs_dir_pos(ns, parent_sd, ino, pos);
- if (pos)
- do {
- struct rb_node *node = rb_next(&pos->s_rb);
- if (!node)
- pos = NULL;
- else
- pos = to_sysfs_dirent(node);
- } while (pos && pos->s_ns != ns);
- return pos;
-}
-
-static int sysfs_readdir(struct file *file, struct dir_context *ctx)
-{
- struct dentry *dentry = file->f_path.dentry;
- struct sysfs_dirent *parent_sd = dentry->d_fsdata;
- struct sysfs_dirent *pos = file->private_data;
- enum kobj_ns_type type;
- const void *ns;
-
- type = sysfs_ns_type(parent_sd);
- ns = sysfs_info(dentry->d_sb)->ns[type];
-
- if (!dir_emit_dots(file, ctx))
- return 0;
- mutex_lock(&sysfs_mutex);
- for (pos = sysfs_dir_pos(ns, parent_sd, ctx->pos, pos);
- pos;
- pos = sysfs_dir_next_pos(ns, parent_sd, ctx->pos, pos)) {
- const char *name = pos->s_name;
- unsigned int type = dt_type(pos);
- int len = strlen(name);
- ino_t ino = pos->s_ino;
- ctx->pos = pos->s_hash;
- file->private_data = sysfs_get(pos);
-
- mutex_unlock(&sysfs_mutex);
- if (!dir_emit(ctx, name, len, ino, type))
- return 0;
- mutex_lock(&sysfs_mutex);
- }
- mutex_unlock(&sysfs_mutex);
- file->private_data = NULL;
- ctx->pos = INT_MAX;
- return 0;
-}
-
-static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence)
-{
- struct inode *inode = file_inode(file);
- loff_t ret;
-
- mutex_lock(&inode->i_mutex);
- ret = generic_file_llseek(file, offset, whence);
- mutex_unlock(&inode->i_mutex);
-
- return ret;
-}
-
-const struct file_operations sysfs_dir_operations = {
- .read = generic_read_dir,
- .iterate = sysfs_readdir,
- .release = sysfs_dir_release,
- .llseek = sysfs_dir_llseek,
-};
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 35e7d08..810cf6e 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -14,70 +14,23 @@
#include <linux/kobject.h>
#include <linux/kallsyms.h>
#include <linux/slab.h>
-#include <linux/fsnotify.h>
-#include <linux/namei.h>
-#include <linux/poll.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/limits.h>
-#include <linux/uaccess.h>
#include <linux/seq_file.h>
-#include <linux/mm.h>
#include "sysfs.h"
+#include "../kernfs/kernfs-internal.h"
/*
- * There's one sysfs_open_file for each open file and one sysfs_open_dirent
- * for each sysfs_dirent with one or more open files.
- *
- * sysfs_dirent->s_attr.open points to sysfs_open_dirent. s_attr.open is
- * protected by sysfs_open_dirent_lock.
- *
- * filp->private_data points to seq_file whose ->private points to
- * sysfs_open_file. sysfs_open_files are chained at
- * sysfs_open_dirent->files, which is protected by sysfs_open_file_mutex.
- */
-static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
-static DEFINE_MUTEX(sysfs_open_file_mutex);
-
-struct sysfs_open_dirent {
- atomic_t refcnt;
- atomic_t event;
- wait_queue_head_t poll;
- struct list_head files; /* goes through sysfs_open_file.list */
-};
-
-struct sysfs_open_file {
- struct sysfs_dirent *sd;
- struct file *file;
- struct mutex mutex;
- int event;
- struct list_head list;
-
- bool mmapped;
- const struct vm_operations_struct *vm_ops;
-};
-
-static bool sysfs_is_bin(struct sysfs_dirent *sd)
-{
- return sysfs_type(sd) == SYSFS_KOBJ_BIN_ATTR;
-}
-
-static struct sysfs_open_file *sysfs_of(struct file *file)
-{
- return ((struct seq_file *)file->private_data)->private;
-}
-
-/*
- * Determine ktype->sysfs_ops for the given sysfs_dirent. This function
+ * Determine ktype->sysfs_ops for the given kernfs_node. This function
* must be called while holding an active reference.
*/
-static const struct sysfs_ops *sysfs_file_ops(struct sysfs_dirent *sd)
+static const struct sysfs_ops *sysfs_file_ops(struct kernfs_node *kn)
{
- struct kobject *kobj = sd->s_parent->s_dir.kobj;
+ struct kobject *kobj = kn->parent->priv;
- if (!sysfs_ignore_lockdep(sd))
- lockdep_assert_held(sd);
+ if (kn->flags & KERNFS_LOCKDEP)
+ lockdep_assert_held(kn);
return kobj->ktype ? kobj->ktype->sysfs_ops : NULL;
}
@@ -86,13 +39,13 @@ static const struct sysfs_ops *sysfs_file_ops(struct sysfs_dirent *sd)
* details like buffering and seeking. The following function pipes
* sysfs_ops->show() result through seq_file.
*/
-static int sysfs_seq_show(struct seq_file *sf, void *v)
+static int sysfs_kf_seq_show(struct seq_file *sf, void *v)
{
- struct sysfs_open_file *of = sf->private;
- struct kobject *kobj = of->sd->s_parent->s_dir.kobj;
- const struct sysfs_ops *ops;
- char *buf;
+ struct kernfs_open_file *of = sf->private;
+ struct kobject *kobj = of->kn->parent->priv;
+ const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
ssize_t count;
+ char *buf;
/* acquire buffer and ensure that it's >= PAGE_SIZE */
count = seq_get_buf(sf, &buf);
@@ -102,34 +55,15 @@ static int sysfs_seq_show(struct seq_file *sf, void *v)
}
/*
- * Need @of->sd for attr and ops, its parent for kobj. @of->mutex
- * nests outside active ref and is just to ensure that the ops
- * aren't called concurrently for the same open file.
+ * Invoke show(). Control may reach here via seq file lseek even
+ * if @ops->show() isn't implemented.
*/
- mutex_lock(&of->mutex);
- if (!sysfs_get_active(of->sd)) {
- mutex_unlock(&of->mutex);
- return -ENODEV;
+ if (ops->show) {
+ count = ops->show(kobj, of->kn->priv, buf);
+ if (count < 0)
+ return count;
}
- of->event = atomic_read(&of->sd->s_attr.open->event);
-
- /*
- * Lookup @ops and invoke show(). Control may reach here via seq
- * file lseek even if @ops->show() isn't implemented.
- */
- ops = sysfs_file_ops(of->sd);
- if (ops->show)
- count = ops->show(kobj, of->sd->s_attr.attr, buf);
- else
- count = 0;
-
- sysfs_put_active(of->sd);
- mutex_unlock(&of->mutex);
-
- if (count < 0)
- return count;
-
/*
* The code works fine with PAGE_SIZE return but it's likely to
* indicate truncated result or overflow in normal use cases.
@@ -144,726 +78,194 @@ static int sysfs_seq_show(struct seq_file *sf, void *v)
return 0;
}
-/*
- * Read method for bin files. As reading a bin file can have side-effects,
- * the exact offset and bytes specified in read(2) call should be passed to
- * the read callback making it difficult to use seq_file. Implement
- * simplistic custom buffering for bin files.
- */
-static ssize_t sysfs_bin_read(struct file *file, char __user *userbuf,
- size_t bytes, loff_t *off)
+static ssize_t sysfs_kf_bin_read(struct kernfs_open_file *of, char *buf,
+ size_t count, loff_t pos)
{
- struct sysfs_open_file *of = sysfs_of(file);
- struct bin_attribute *battr = of->sd->s_attr.bin_attr;
- struct kobject *kobj = of->sd->s_parent->s_dir.kobj;
- loff_t size = file_inode(file)->i_size;
- int count = min_t(size_t, bytes, PAGE_SIZE);
- loff_t offs = *off;
- char *buf;
+ struct bin_attribute *battr = of->kn->priv;
+ struct kobject *kobj = of->kn->parent->priv;
+ loff_t size = file_inode(of->file)->i_size;
- if (!bytes)
+ if (!count)
return 0;
if (size) {
- if (offs > size)
+ if (pos > size)
return 0;
- if (offs + count > size)
- count = size - offs;
- }
-
- buf = kmalloc(count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* need of->sd for battr, its parent for kobj */
- mutex_lock(&of->mutex);
- if (!sysfs_get_active(of->sd)) {
- count = -ENODEV;
- mutex_unlock(&of->mutex);
- goto out_free;
- }
-
- if (battr->read)
- count = battr->read(file, kobj, battr, buf, offs, count);
- else
- count = -EIO;
-
- sysfs_put_active(of->sd);
- mutex_unlock(&of->mutex);
-
- if (count < 0)
- goto out_free;
-
- if (copy_to_user(userbuf, buf, count)) {
- count = -EFAULT;
- goto out_free;
+ if (pos + count > size)
+ count = size - pos;
}
- pr_debug("offs = %lld, *off = %lld, count = %d\n", offs, *off, count);
-
- *off = offs + count;
+ if (!battr->read)
+ return -EIO;
- out_free:
- kfree(buf);
- return count;
+ return battr->read(of->file, kobj, battr, buf, pos, count);
}
-/**
- * flush_write_buffer - push buffer to kobject
- * @of: open file
- * @buf: data buffer for file
- * @off: file offset to write to
- * @count: number of bytes
- *
- * Get the correct pointers for the kobject and the attribute we're dealing
- * with, then call the store() method for it with @buf.
- */
-static int flush_write_buffer(struct sysfs_open_file *of, char *buf, loff_t off,
- size_t count)
+/* kernfs write callback for regular sysfs files */
+static ssize_t sysfs_kf_write(struct kernfs_open_file *of, char *buf,
+ size_t count, loff_t pos)
{
- struct kobject *kobj = of->sd->s_parent->s_dir.kobj;
- int rc = 0;
-
- /*
- * Need @of->sd for attr and ops, its parent for kobj. @of->mutex
- * nests outside active ref and is just to ensure that the ops
- * aren't called concurrently for the same open file.
- */
- mutex_lock(&of->mutex);
- if (!sysfs_get_active(of->sd)) {
- mutex_unlock(&of->mutex);
- return -ENODEV;
- }
+ const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
+ struct kobject *kobj = of->kn->parent->priv;
- if (sysfs_is_bin(of->sd)) {
- struct bin_attribute *battr = of->sd->s_attr.bin_attr;
-
- rc = -EIO;
- if (battr->write)
- rc = battr->write(of->file, kobj, battr, buf, off,
- count);
- } else {
- const struct sysfs_ops *ops = sysfs_file_ops(of->sd);
-
- rc = ops->store(kobj, of->sd->s_attr.attr, buf, count);
- }
-
- sysfs_put_active(of->sd);
- mutex_unlock(&of->mutex);
+ if (!count)
+ return 0;
- return rc;
+ return ops->store(kobj, of->kn->priv, buf, count);
}
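
Editor's note: sysfs_kf_seq_show() and sysfs_kf_write() above simply forward to the ktype's sysfs_ops, i.e. to the per-attribute show()/store() callbacks drivers already provide. A typical pair, hypothetical but matching the signatures these wrappers expect:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static int demo_value;

static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	/* buf is the PAGE_SIZE seq_file buffer acquired above */
	return sprintf(buf, "%d\n", demo_value);
}

static ssize_t demo_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
{
	int ret = kstrtoint(buf, 0, &demo_value);

	return ret ? ret : count;
}

static struct kobj_attribute demo_attr = __ATTR(demo, 0644, demo_show, demo_store);
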
-/**
- * sysfs_write_file - write an attribute
- * @file: file pointer
- * @user_buf: data to write
- * @count: number of bytes
- * @ppos: starting offset
- *
- * Copy data in from userland and pass it to the matching
- * sysfs_ops->store() by invoking flush_write_buffer().
- *
- * There is no easy way for us to know if userspace is only doing a partial
- * write, so we don't support them. We expect the entire buffer to come on
- * the first write. Hint: if you're writing a value, first read the file,
- * modify only the value you're changing, then write the entire buffer
- * back.
- */
-static ssize_t sysfs_write_file(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
+/* kernfs write callback for bin sysfs files */
+static ssize_t sysfs_kf_bin_write(struct kernfs_open_file *of, char *buf,
+ size_t count, loff_t pos)
{
- struct sysfs_open_file *of = sysfs_of(file);
- ssize_t len = min_t(size_t, count, PAGE_SIZE);
- loff_t size = file_inode(file)->i_size;
- char *buf;
+ struct bin_attribute *battr = of->kn->priv;
+ struct kobject *kobj = of->kn->parent->priv;
+ loff_t size = file_inode(of->file)->i_size;
- if (sysfs_is_bin(of->sd) && size) {
- if (size <= *ppos)
+ if (size) {
+ if (size <= pos)
return 0;
- len = min_t(ssize_t, len, size - *ppos);
+ count = min_t(ssize_t, count, size - pos);
}
-
- if (!len)
+ if (!count)
return 0;
- buf = kmalloc(len + 1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (!battr->write)
+ return -EIO;
- if (copy_from_user(buf, user_buf, len)) {
- len = -EFAULT;
- goto out_free;
- }
- buf[len] = '\0'; /* guarantee string termination */
-
- len = flush_write_buffer(of, buf, *ppos, len);
- if (len > 0)
- *ppos += len;
-out_free:
- kfree(buf);
- return len;
-}
-
-static void sysfs_bin_vma_open(struct vm_area_struct *vma)
-{
- struct file *file = vma->vm_file;
- struct sysfs_open_file *of = sysfs_of(file);
-
- if (!of->vm_ops)
- return;
-
- if (!sysfs_get_active(of->sd))
- return;
-
- if (of->vm_ops->open)
- of->vm_ops->open(vma);
-
- sysfs_put_active(of->sd);
+ return battr->write(of->file, kobj, battr, buf, pos, count);
}
-static int sysfs_bin_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int sysfs_kf_bin_mmap(struct kernfs_open_file *of,
+ struct vm_area_struct *vma)
{
- struct file *file = vma->vm_file;
- struct sysfs_open_file *of = sysfs_of(file);
- int ret;
+ struct bin_attribute *battr = of->kn->priv;
+ struct kobject *kobj = of->kn->parent->priv;
- if (!of->vm_ops)
- return VM_FAULT_SIGBUS;
-
- if (!sysfs_get_active(of->sd))
- return VM_FAULT_SIGBUS;
-
- ret = VM_FAULT_SIGBUS;
- if (of->vm_ops->fault)
- ret = of->vm_ops->fault(vma, vmf);
-
- sysfs_put_active(of->sd);
- return ret;
+ return battr->mmap(of->file, kobj, battr, vma);
}
-static int sysfs_bin_page_mkwrite(struct vm_area_struct *vma,
- struct vm_fault *vmf)
+void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr)
{
- struct file *file = vma->vm_file;
- struct sysfs_open_file *of = sysfs_of(file);
- int ret;
-
- if (!of->vm_ops)
- return VM_FAULT_SIGBUS;
+ struct kernfs_node *kn = kobj->sd, *tmp;
- if (!sysfs_get_active(of->sd))
- return VM_FAULT_SIGBUS;
-
- ret = 0;
- if (of->vm_ops->page_mkwrite)
- ret = of->vm_ops->page_mkwrite(vma, vmf);
+ if (kn && dir)
+ kn = kernfs_find_and_get(kn, dir);
else
- file_update_time(file);
-
- sysfs_put_active(of->sd);
- return ret;
-}
-
-static int sysfs_bin_access(struct vm_area_struct *vma, unsigned long addr,
- void *buf, int len, int write)
-{
- struct file *file = vma->vm_file;
- struct sysfs_open_file *of = sysfs_of(file);
- int ret;
-
- if (!of->vm_ops)
- return -EINVAL;
-
- if (!sysfs_get_active(of->sd))
- return -EINVAL;
-
- ret = -EINVAL;
- if (of->vm_ops->access)
- ret = of->vm_ops->access(vma, addr, buf, len, write);
-
- sysfs_put_active(of->sd);
- return ret;
-}
-
-#ifdef CONFIG_NUMA
-static int sysfs_bin_set_policy(struct vm_area_struct *vma,
- struct mempolicy *new)
-{
- struct file *file = vma->vm_file;
- struct sysfs_open_file *of = sysfs_of(file);
- int ret;
-
- if (!of->vm_ops)
- return 0;
-
- if (!sysfs_get_active(of->sd))
- return -EINVAL;
-
- ret = 0;
- if (of->vm_ops->set_policy)
- ret = of->vm_ops->set_policy(vma, new);
-
- sysfs_put_active(of->sd);
- return ret;
-}
-
-static struct mempolicy *sysfs_bin_get_policy(struct vm_area_struct *vma,
- unsigned long addr)
-{
- struct file *file = vma->vm_file;
- struct sysfs_open_file *of = sysfs_of(file);
- struct mempolicy *pol;
-
- if (!of->vm_ops)
- return vma->vm_policy;
-
- if (!sysfs_get_active(of->sd))
- return vma->vm_policy;
-
- pol = vma->vm_policy;
- if (of->vm_ops->get_policy)
- pol = of->vm_ops->get_policy(vma, addr);
-
- sysfs_put_active(of->sd);
- return pol;
-}
-
-static int sysfs_bin_migrate(struct vm_area_struct *vma, const nodemask_t *from,
- const nodemask_t *to, unsigned long flags)
-{
- struct file *file = vma->vm_file;
- struct sysfs_open_file *of = sysfs_of(file);
- int ret;
-
- if (!of->vm_ops)
- return 0;
-
- if (!sysfs_get_active(of->sd))
- return 0;
-
- ret = 0;
- if (of->vm_ops->migrate)
- ret = of->vm_ops->migrate(vma, from, to, flags);
-
- sysfs_put_active(of->sd);
- return ret;
-}
-#endif
-
-static const struct vm_operations_struct sysfs_bin_vm_ops = {
- .open = sysfs_bin_vma_open,
- .fault = sysfs_bin_fault,
- .page_mkwrite = sysfs_bin_page_mkwrite,
- .access = sysfs_bin_access,
-#ifdef CONFIG_NUMA
- .set_policy = sysfs_bin_set_policy,
- .get_policy = sysfs_bin_get_policy,
- .migrate = sysfs_bin_migrate,
-#endif
-};
-
-static int sysfs_bin_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct sysfs_open_file *of = sysfs_of(file);
- struct bin_attribute *battr = of->sd->s_attr.bin_attr;
- struct kobject *kobj = of->sd->s_parent->s_dir.kobj;
- int rc;
-
- mutex_lock(&of->mutex);
-
- /* need of->sd for battr, its parent for kobj */
- rc = -ENODEV;
- if (!sysfs_get_active(of->sd))
- goto out_unlock;
-
- if (!battr->mmap)
- goto out_put;
-
- rc = battr->mmap(file, kobj, battr, vma);
- if (rc)
- goto out_put;
-
- /*
- * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
- * to satisfy versions of X which crash if the mmap fails: that
- * substitutes a new vm_file, and we don't then want bin_vm_ops.
- */
- if (vma->vm_file != file)
- goto out_put;
-
- rc = -EINVAL;
- if (of->mmapped && of->vm_ops != vma->vm_ops)
- goto out_put;
+ kernfs_get(kn);
- /*
- * It is not possible to successfully wrap close.
- * So error if someone is trying to use close.
- */
- rc = -EINVAL;
- if (vma->vm_ops && vma->vm_ops->close)
- goto out_put;
-
- rc = 0;
- of->mmapped = 1;
- of->vm_ops = vma->vm_ops;
- vma->vm_ops = &sysfs_bin_vm_ops;
-out_put:
- sysfs_put_active(of->sd);
-out_unlock:
- mutex_unlock(&of->mutex);
-
- return rc;
-}
-
-/**
- * sysfs_get_open_dirent - get or create sysfs_open_dirent
- * @sd: target sysfs_dirent
- * @of: sysfs_open_file for this instance of open
- *
- * If @sd->s_attr.open exists, increment its reference count;
- * otherwise, create one. @of is chained to the files list.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
- struct sysfs_open_file *of)
-{
- struct sysfs_open_dirent *od, *new_od = NULL;
-
- retry:
- mutex_lock(&sysfs_open_file_mutex);
- spin_lock_irq(&sysfs_open_dirent_lock);
-
- if (!sd->s_attr.open && new_od) {
- sd->s_attr.open = new_od;
- new_od = NULL;
+ if (kn && attr) {
+ tmp = kernfs_find_and_get(kn, attr);
+ kernfs_put(kn);
+ kn = tmp;
}
- od = sd->s_attr.open;
- if (od) {
- atomic_inc(&od->refcnt);
- list_add_tail(&of->list, &od->files);
- }
-
- spin_unlock_irq(&sysfs_open_dirent_lock);
- mutex_unlock(&sysfs_open_file_mutex);
-
- if (od) {
- kfree(new_od);
- return 0;
+ if (kn) {
+ kernfs_notify(kn);
+ kernfs_put(kn);
}
+}
+EXPORT_SYMBOL_GPL(sysfs_notify);
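
Editor's note: sysfs_notify() now resolves the target with kernfs_find_and_get() and ends in kernfs_notify(), which wakes pollers of the attribute. The user-space side follows the (removed) comment further down: poll for POLLPRI|POLLERR, then seek back to 0 and re-read. A sketch with a placeholder attribute path:

#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	struct pollfd pfd;

	pfd.fd = open("/sys/kernel/demo/demo", O_RDONLY);	/* hypothetical path */
	pfd.events = POLLPRI | POLLERR;
	if (pfd.fd < 0)
		return 1;

	(void)read(pfd.fd, buf, sizeof(buf));	/* prime the poll */
	while (poll(&pfd, 1, -1) > 0) {
		lseek(pfd.fd, 0, SEEK_SET);
		ssize_t n = read(pfd.fd, buf, sizeof(buf) - 1);

		if (n > 0) {
			buf[n] = '\0';
			printf("changed: %s", buf);
		}
	}
	return 0;
}
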
- /* not there, initialize a new one and retry */
- new_od = kmalloc(sizeof(*new_od), GFP_KERNEL);
- if (!new_od)
- return -ENOMEM;
+static const struct kernfs_ops sysfs_file_kfops_empty = {
+};
- atomic_set(&new_od->refcnt, 0);
- atomic_set(&new_od->event, 1);
- init_waitqueue_head(&new_od->poll);
- INIT_LIST_HEAD(&new_od->files);
- goto retry;
-}
+static const struct kernfs_ops sysfs_file_kfops_ro = {
+ .seq_show = sysfs_kf_seq_show,
+};
-/**
- * sysfs_put_open_dirent - put sysfs_open_dirent
- * @sd: target sysfs_dirent
- * @of: associated sysfs_open_file
- *
- * Put @sd->s_attr.open and unlink @of from the files list. If
- * reference count reaches zero, disassociate and free it.
- *
- * LOCKING:
- * None.
- */
-static void sysfs_put_open_dirent(struct sysfs_dirent *sd,
- struct sysfs_open_file *of)
-{
- struct sysfs_open_dirent *od = sd->s_attr.open;
- unsigned long flags;
+static const struct kernfs_ops sysfs_file_kfops_wo = {
+ .write = sysfs_kf_write,
+};
- mutex_lock(&sysfs_open_file_mutex);
- spin_lock_irqsave(&sysfs_open_dirent_lock, flags);
+static const struct kernfs_ops sysfs_file_kfops_rw = {
+ .seq_show = sysfs_kf_seq_show,
+ .write = sysfs_kf_write,
+};
- if (of)
- list_del(&of->list);
+static const struct kernfs_ops sysfs_bin_kfops_ro = {
+ .read = sysfs_kf_bin_read,
+};
- if (atomic_dec_and_test(&od->refcnt))
- sd->s_attr.open = NULL;
- else
- od = NULL;
+static const struct kernfs_ops sysfs_bin_kfops_wo = {
+ .write = sysfs_kf_bin_write,
+};
- spin_unlock_irqrestore(&sysfs_open_dirent_lock, flags);
- mutex_unlock(&sysfs_open_file_mutex);
+static const struct kernfs_ops sysfs_bin_kfops_rw = {
+ .read = sysfs_kf_bin_read,
+ .write = sysfs_kf_bin_write,
+};
- kfree(od);
-}
+static const struct kernfs_ops sysfs_bin_kfops_mmap = {
+ .read = sysfs_kf_bin_read,
+ .write = sysfs_kf_bin_write,
+ .mmap = sysfs_kf_bin_mmap,
+};
-static int sysfs_open_file(struct inode *inode, struct file *file)
+int sysfs_add_file_mode_ns(struct kernfs_node *parent,
+ const struct attribute *attr, bool is_bin,
+ umode_t mode, const void *ns)
{
- struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
- struct sysfs_open_file *of;
- bool has_read, has_write;
- int error = -EACCES;
-
- /* need attr_sd for attr and ops, its parent for kobj */
- if (!sysfs_get_active(attr_sd))
- return -ENODEV;
+ struct lock_class_key *key = NULL;
+ const struct kernfs_ops *ops;
+ struct kernfs_node *kn;
+ loff_t size;
- if (sysfs_is_bin(attr_sd)) {
- struct bin_attribute *battr = attr_sd->s_attr.bin_attr;
-
- has_read = battr->read || battr->mmap;
- has_write = battr->write || battr->mmap;
- } else {
- const struct sysfs_ops *ops = sysfs_file_ops(attr_sd);
+ if (!is_bin) {
+ struct kobject *kobj = parent->priv;
+ const struct sysfs_ops *sysfs_ops = kobj->ktype->sysfs_ops;
/* every kobject with an attribute needs a ktype assigned */
- if (WARN(!ops, KERN_ERR
+ if (WARN(!sysfs_ops, KERN_ERR
"missing sysfs attribute operations for kobject: %s\n",
kobject_name(kobj)))
- goto err_out;
-
- has_read = ops->show;
- has_write = ops->store;
- }
-
- /* check perms and supported operations */
- if ((file->f_mode & FMODE_WRITE) &&
- (!(inode->i_mode & S_IWUGO) || !has_write))
- goto err_out;
-
- if ((file->f_mode & FMODE_READ) &&
- (!(inode->i_mode & S_IRUGO) || !has_read))
- goto err_out;
-
- /* allocate a sysfs_open_file for the file */
- error = -ENOMEM;
- of = kzalloc(sizeof(struct sysfs_open_file), GFP_KERNEL);
- if (!of)
- goto err_out;
-
- /*
- * The following is done to give a different lockdep key to
- * @of->mutex for files which implement mmap. This is a rather
- * crude way to avoid false positive lockdep warning around
- * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
- * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
- * which mm->mmap_sem nests, while holding @of->mutex. As each
- * open file has a separate mutex, it's okay as long as those don't
- * happen on the same file. At this point, we can't easily give
- * each file a separate locking class. Let's differentiate on
- * whether the file is bin or not for now.
- */
- if (sysfs_is_bin(attr_sd))
- mutex_init(&of->mutex);
- else
- mutex_init(&of->mutex);
-
- of->sd = attr_sd;
- of->file = file;
-
- /*
- * Always instantiate seq_file even if read access doesn't use
- * seq_file or is not requested. This unifies private data access
- * and readable regular files are the vast majority anyway.
- */
- if (sysfs_is_bin(attr_sd))
- error = single_open(file, NULL, of);
- else
- error = single_open(file, sysfs_seq_show, of);
- if (error)
- goto err_free;
-
- /* seq_file clears PWRITE unconditionally, restore it if WRITE */
- if (file->f_mode & FMODE_WRITE)
- file->f_mode |= FMODE_PWRITE;
-
- /* make sure we have open dirent struct */
- error = sysfs_get_open_dirent(attr_sd, of);
- if (error)
- goto err_close;
-
- /* open succeeded, put active references */
- sysfs_put_active(attr_sd);
- return 0;
-
-err_close:
- single_release(inode, file);
-err_free:
- kfree(of);
-err_out:
- sysfs_put_active(attr_sd);
- return error;
-}
-
-static int sysfs_release(struct inode *inode, struct file *filp)
-{
- struct sysfs_dirent *sd = filp->f_path.dentry->d_fsdata;
- struct sysfs_open_file *of = sysfs_of(filp);
-
- sysfs_put_open_dirent(sd, of);
- single_release(inode, filp);
- kfree(of);
-
- return 0;
-}
-
-void sysfs_unmap_bin_file(struct sysfs_dirent *sd)
-{
- struct sysfs_open_dirent *od;
- struct sysfs_open_file *of;
-
- if (!sysfs_is_bin(sd))
- return;
-
- spin_lock_irq(&sysfs_open_dirent_lock);
- od = sd->s_attr.open;
- if (od)
- atomic_inc(&od->refcnt);
- spin_unlock_irq(&sysfs_open_dirent_lock);
- if (!od)
- return;
-
- mutex_lock(&sysfs_open_file_mutex);
- list_for_each_entry(of, &od->files, list) {
- struct inode *inode = file_inode(of->file);
- unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+ return -EINVAL;
+
+ if (sysfs_ops->show && sysfs_ops->store)
+ ops = &sysfs_file_kfops_rw;
+ else if (sysfs_ops->show)
+ ops = &sysfs_file_kfops_ro;
+ else if (sysfs_ops->store)
+ ops = &sysfs_file_kfops_wo;
+ else
+ ops = &sysfs_file_kfops_empty;
+
+ size = PAGE_SIZE;
+ } else {
+ struct bin_attribute *battr = (void *)attr;
+
+ if (battr->mmap)
+ ops = &sysfs_bin_kfops_mmap;
+ else if (battr->read && battr->write)
+ ops = &sysfs_bin_kfops_rw;
+ else if (battr->read)
+ ops = &sysfs_bin_kfops_ro;
+ else if (battr->write)
+ ops = &sysfs_bin_kfops_wo;
+ else
+ ops = &sysfs_file_kfops_empty;
+
+ size = battr->size;
}
- mutex_unlock(&sysfs_open_file_mutex);
-
- sysfs_put_open_dirent(sd, NULL);
-}
-
-/* Sysfs attribute files are pollable. The idea is that you read
- * the content and then you use 'poll' or 'select' to wait for
- * the content to change. When the content changes (assuming the
- * manager for the kobject supports notification), poll will
- * return POLLERR|POLLPRI, and select will return the fd whether
- * it is waiting for read, write, or exceptions.
- * Once poll/select indicates that the value has changed, you
- * need to close and re-open the file, or seek to 0 and read again.
- * Reminder: this only works for attributes which actively support
- * it, and it is not possible to test an attribute from userspace
- * to see if it supports poll (Neither 'poll' nor 'select' return
- * an appropriate error code). When in doubt, set a suitable timeout value.
- */
-static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
-{
- struct sysfs_open_file *of = sysfs_of(filp);
- struct sysfs_dirent *attr_sd = filp->f_path.dentry->d_fsdata;
- struct sysfs_open_dirent *od = attr_sd->s_attr.open;
-
- /* need parent for the kobj, grab both */
- if (!sysfs_get_active(attr_sd))
- goto trigger;
-
- poll_wait(filp, &od->poll, wait);
- sysfs_put_active(attr_sd);
-
- if (of->event != atomic_read(&od->event))
- goto trigger;
-
- return DEFAULT_POLLMASK;
-
- trigger:
- return DEFAULT_POLLMASK|POLLERR|POLLPRI;
-}
-
-void sysfs_notify_dirent(struct sysfs_dirent *sd)
-{
- struct sysfs_open_dirent *od;
- unsigned long flags;
-
- spin_lock_irqsave(&sysfs_open_dirent_lock, flags);
-
- if (!WARN_ON(sysfs_type(sd) != SYSFS_KOBJ_ATTR)) {
- od = sd->s_attr.open;
- if (od) {
- atomic_inc(&od->event);
- wake_up_interruptible(&od->poll);
- }
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ if (!attr->ignore_lockdep)
+ key = attr->key ?: (struct lock_class_key *)&attr->skey;
+#endif
+ kn = __kernfs_create_file(parent, attr->name, mode, size, ops,
+ (void *)attr, ns, true, key);
+ if (IS_ERR(kn)) {
+ if (PTR_ERR(kn) == -EEXIST)
+ sysfs_warn_dup(parent, attr->name);
+ return PTR_ERR(kn);
}
-
- spin_unlock_irqrestore(&sysfs_open_dirent_lock, flags);
-}
-EXPORT_SYMBOL_GPL(sysfs_notify_dirent);
-
-void sysfs_notify(struct kobject *k, const char *dir, const char *attr)
-{
- struct sysfs_dirent *sd = k->sd;
-
- mutex_lock(&sysfs_mutex);
-
- if (sd && dir)
- sd = sysfs_find_dirent(sd, dir, NULL);
- if (sd && attr)
- sd = sysfs_find_dirent(sd, attr, NULL);
- if (sd)
- sysfs_notify_dirent(sd);
-
- mutex_unlock(&sysfs_mutex);
-}
-EXPORT_SYMBOL_GPL(sysfs_notify);
-
-const struct file_operations sysfs_file_operations = {
- .read = seq_read,
- .write = sysfs_write_file,
- .llseek = generic_file_llseek,
- .open = sysfs_open_file,
- .release = sysfs_release,
- .poll = sysfs_poll,
-};
-
-const struct file_operations sysfs_bin_operations = {
- .read = sysfs_bin_read,
- .write = sysfs_write_file,
- .llseek = generic_file_llseek,
- .mmap = sysfs_bin_mmap,
- .open = sysfs_open_file,
- .release = sysfs_release,
- .poll = sysfs_poll,
-};
-
-int sysfs_add_file_mode_ns(struct sysfs_dirent *dir_sd,
- const struct attribute *attr, int type,
- umode_t amode, const void *ns)
-{
- umode_t mode = (amode & S_IALLUGO) | S_IFREG;
- struct sysfs_addrm_cxt acxt;
- struct sysfs_dirent *sd;
- int rc;
-
- sd = sysfs_new_dirent(attr->name, mode, type);
- if (!sd)
- return -ENOMEM;
-
- sd->s_ns = ns;
- sd->s_attr.attr = (void *)attr;
- sysfs_dirent_init_lockdep(sd);
-
- sysfs_addrm_start(&acxt);
- rc = sysfs_add_one(&acxt, sd, dir_sd);
- sysfs_addrm_finish(&acxt);
-
- if (rc)
- sysfs_put(sd);
-
- return rc;
+ return 0;
}
-
-int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
- int type)
+int sysfs_add_file(struct kernfs_node *parent, const struct attribute *attr,
+ bool is_bin)
{
- return sysfs_add_file_mode_ns(dir_sd, attr, type, attr->mode, NULL);
+ return sysfs_add_file_mode_ns(parent, attr, is_bin, attr->mode, NULL);
}
/**
@@ -877,8 +279,7 @@ int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr,
{
BUG_ON(!kobj || !kobj->sd || !attr);
- return sysfs_add_file_mode_ns(kobj->sd, attr, SYSFS_KOBJ_ATTR,
- attr->mode, ns);
+ return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode, ns);
}
EXPORT_SYMBOL_GPL(sysfs_create_file_ns);
@@ -906,19 +307,21 @@ EXPORT_SYMBOL_GPL(sysfs_create_files);
int sysfs_add_file_to_group(struct kobject *kobj,
const struct attribute *attr, const char *group)
{
- struct sysfs_dirent *dir_sd;
+ struct kernfs_node *parent;
int error;
- if (group)
- dir_sd = sysfs_get_dirent(kobj->sd, group);
- else
- dir_sd = sysfs_get(kobj->sd);
+ if (group) {
+ parent = kernfs_find_and_get(kobj->sd, group);
+ } else {
+ parent = kobj->sd;
+ kernfs_get(parent);
+ }
- if (!dir_sd)
+ if (!parent)
return -ENOENT;
- error = sysfs_add_file(dir_sd, attr, SYSFS_KOBJ_ATTR);
- sysfs_put(dir_sd);
+ error = sysfs_add_file(parent, attr, false);
+ kernfs_put(parent);
return error;
}
@@ -934,23 +337,20 @@ EXPORT_SYMBOL_GPL(sysfs_add_file_to_group);
int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
umode_t mode)
{
- struct sysfs_dirent *sd;
+ struct kernfs_node *kn;
struct iattr newattrs;
int rc;
- mutex_lock(&sysfs_mutex);
-
- rc = -ENOENT;
- sd = sysfs_find_dirent(kobj->sd, attr->name, NULL);
- if (!sd)
- goto out;
+ kn = kernfs_find_and_get(kobj->sd, attr->name);
+ if (!kn)
+ return -ENOENT;
- newattrs.ia_mode = (mode & S_IALLUGO) | (sd->s_mode & ~S_IALLUGO);
+ newattrs.ia_mode = (mode & S_IALLUGO) | (kn->mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE;
- rc = sysfs_sd_setattr(sd, &newattrs);
- out:
- mutex_unlock(&sysfs_mutex);
+ rc = kernfs_setattr(kn, &newattrs);
+
+ kernfs_put(kn);
return rc;
}
EXPORT_SYMBOL_GPL(sysfs_chmod_file);
@@ -966,9 +366,9 @@ EXPORT_SYMBOL_GPL(sysfs_chmod_file);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns)
{
- struct sysfs_dirent *dir_sd = kobj->sd;
+ struct kernfs_node *parent = kobj->sd;
- sysfs_hash_and_remove(dir_sd, attr->name, ns);
+ kernfs_remove_by_name_ns(parent, attr->name, ns);
}
EXPORT_SYMBOL_GPL(sysfs_remove_file_ns);
@@ -989,15 +389,18 @@ EXPORT_SYMBOL_GPL(sysfs_remove_files);
void sysfs_remove_file_from_group(struct kobject *kobj,
const struct attribute *attr, const char *group)
{
- struct sysfs_dirent *dir_sd;
+ struct kernfs_node *parent;
- if (group)
- dir_sd = sysfs_get_dirent(kobj->sd, group);
- else
- dir_sd = sysfs_get(kobj->sd);
- if (dir_sd) {
- sysfs_hash_and_remove(dir_sd, attr->name, NULL);
- sysfs_put(dir_sd);
+ if (group) {
+ parent = kernfs_find_and_get(kobj->sd, group);
+ } else {
+ parent = kobj->sd;
+ kernfs_get(parent);
+ }
+
+ if (parent) {
+ kernfs_remove_by_name(parent, attr->name);
+ kernfs_put(parent);
}
}
EXPORT_SYMBOL_GPL(sysfs_remove_file_from_group);
@@ -1012,7 +415,7 @@ int sysfs_create_bin_file(struct kobject *kobj,
{
BUG_ON(!kobj || !kobj->sd || !attr);
- return sysfs_add_file(kobj->sd, &attr->attr, SYSFS_KOBJ_BIN_ATTR);
+ return sysfs_add_file(kobj->sd, &attr->attr, true);
}
EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
@@ -1024,7 +427,7 @@ EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
void sysfs_remove_bin_file(struct kobject *kobj,
const struct bin_attribute *attr)
{
- sysfs_hash_and_remove(kobj->sd, attr->attr.name, NULL);
+ kernfs_remove_by_name(kobj->sd, attr->attr.name);
}
EXPORT_SYMBOL_GPL(sysfs_remove_bin_file);
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 1898a10..6b57938 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -18,7 +18,7 @@
#include "sysfs.h"
-static void remove_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
+static void remove_files(struct kernfs_node *parent, struct kobject *kobj,
const struct attribute_group *grp)
{
struct attribute *const *attr;
@@ -26,13 +26,13 @@ static void remove_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
if (grp->attrs)
for (attr = grp->attrs; *attr; attr++)
- sysfs_hash_and_remove(dir_sd, (*attr)->name, NULL);
+ kernfs_remove_by_name(parent, (*attr)->name);
if (grp->bin_attrs)
for (bin_attr = grp->bin_attrs; *bin_attr; bin_attr++)
sysfs_remove_bin_file(kobj, *bin_attr);
}
-static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
+static int create_files(struct kernfs_node *parent, struct kobject *kobj,
const struct attribute_group *grp, int update)
{
struct attribute *const *attr;
@@ -49,22 +49,20 @@ static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
* re-adding (if required) the file.
*/
if (update)
- sysfs_hash_and_remove(dir_sd, (*attr)->name,
- NULL);
+ kernfs_remove_by_name(parent, (*attr)->name);
if (grp->is_visible) {
mode = grp->is_visible(kobj, *attr, i);
if (!mode)
continue;
}
- error = sysfs_add_file_mode_ns(dir_sd, *attr,
- SYSFS_KOBJ_ATTR,
+ error = sysfs_add_file_mode_ns(parent, *attr, false,
(*attr)->mode | mode,
NULL);
if (unlikely(error))
break;
}
if (error) {
- remove_files(dir_sd, kobj, grp);
+ remove_files(parent, kobj, grp);
goto exit;
}
}
@@ -78,7 +76,7 @@ static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
break;
}
if (error)
- remove_files(dir_sd, kobj, grp);
+ remove_files(parent, kobj, grp);
}
exit:
return error;
@@ -88,7 +86,7 @@ exit:
static int internal_create_group(struct kobject *kobj, int update,
const struct attribute_group *grp)
{
- struct sysfs_dirent *sd;
+ struct kernfs_node *kn;
int error;
BUG_ON(!kobj || (!update && !kobj->sd));
@@ -102,18 +100,22 @@ static int internal_create_group(struct kobject *kobj, int update,
return -EINVAL;
}
if (grp->name) {
- error = sysfs_create_subdir(kobj, grp->name, &sd);
- if (error)
- return error;
+ kn = kernfs_create_dir(kobj->sd, grp->name,
+ S_IRWXU | S_IRUGO | S_IXUGO, kobj);
+ if (IS_ERR(kn)) {
+ if (PTR_ERR(kn) == -EEXIST)
+ sysfs_warn_dup(kobj->sd, grp->name);
+ return PTR_ERR(kn);
+ }
} else
- sd = kobj->sd;
- sysfs_get(sd);
- error = create_files(sd, kobj, grp, update);
+ kn = kobj->sd;
+ kernfs_get(kn);
+ error = create_files(kn, kobj, grp, update);
if (error) {
if (grp->name)
- sysfs_remove(sd);
+ kernfs_remove(kn);
}
- sysfs_put(sd);
+ kernfs_put(kn);
return error;
}
@@ -203,25 +205,27 @@ EXPORT_SYMBOL_GPL(sysfs_update_group);
void sysfs_remove_group(struct kobject *kobj,
const struct attribute_group *grp)
{
- struct sysfs_dirent *dir_sd = kobj->sd;
- struct sysfs_dirent *sd;
+ struct kernfs_node *parent = kobj->sd;
+ struct kernfs_node *kn;
if (grp->name) {
- sd = sysfs_get_dirent(dir_sd, grp->name);
- if (!sd) {
- WARN(!sd, KERN_WARNING
+ kn = kernfs_find_and_get(parent, grp->name);
+ if (!kn) {
+ WARN(!kn, KERN_WARNING
"sysfs group %p not found for kobject '%s'\n",
grp, kobject_name(kobj));
return;
}
- } else
- sd = sysfs_get(dir_sd);
+ } else {
+ kn = parent;
+ kernfs_get(kn);
+ }
- remove_files(sd, kobj, grp);
+ remove_files(kn, kobj, grp);
if (grp->name)
- sysfs_remove(sd);
+ kernfs_remove(kn);
- sysfs_put(sd);
+ kernfs_put(kn);
}
EXPORT_SYMBOL_GPL(sysfs_remove_group);
@@ -257,22 +261,22 @@ EXPORT_SYMBOL_GPL(sysfs_remove_groups);
int sysfs_merge_group(struct kobject *kobj,
const struct attribute_group *grp)
{
- struct sysfs_dirent *dir_sd;
+ struct kernfs_node *parent;
int error = 0;
struct attribute *const *attr;
int i;
- dir_sd = sysfs_get_dirent(kobj->sd, grp->name);
- if (!dir_sd)
+ parent = kernfs_find_and_get(kobj->sd, grp->name);
+ if (!parent)
return -ENOENT;
for ((i = 0, attr = grp->attrs); *attr && !error; (++i, ++attr))
- error = sysfs_add_file(dir_sd, *attr, SYSFS_KOBJ_ATTR);
+ error = sysfs_add_file(parent, *attr, false);
if (error) {
while (--i >= 0)
- sysfs_hash_and_remove(dir_sd, (*--attr)->name, NULL);
+ kernfs_remove_by_name(parent, (*--attr)->name);
}
- sysfs_put(dir_sd);
+ kernfs_put(parent);
return error;
}
@@ -286,14 +290,14 @@ EXPORT_SYMBOL_GPL(sysfs_merge_group);
void sysfs_unmerge_group(struct kobject *kobj,
const struct attribute_group *grp)
{
- struct sysfs_dirent *dir_sd;
+ struct kernfs_node *parent;
struct attribute *const *attr;
- dir_sd = sysfs_get_dirent(kobj->sd, grp->name);
- if (dir_sd) {
+ parent = kernfs_find_and_get(kobj->sd, grp->name);
+ if (parent) {
for (attr = grp->attrs; *attr; ++attr)
- sysfs_hash_and_remove(dir_sd, (*attr)->name, NULL);
- sysfs_put(dir_sd);
+ kernfs_remove_by_name(parent, (*attr)->name);
+ kernfs_put(parent);
}
}
EXPORT_SYMBOL_GPL(sysfs_unmerge_group);
@@ -308,15 +312,15 @@ EXPORT_SYMBOL_GPL(sysfs_unmerge_group);
int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name,
struct kobject *target, const char *link_name)
{
- struct sysfs_dirent *dir_sd;
+ struct kernfs_node *parent;
int error = 0;
- dir_sd = sysfs_get_dirent(kobj->sd, group_name);
- if (!dir_sd)
+ parent = kernfs_find_and_get(kobj->sd, group_name);
+ if (!parent)
return -ENOENT;
- error = sysfs_create_link_sd(dir_sd, target, link_name);
- sysfs_put(dir_sd);
+ error = sysfs_create_link_sd(parent, target, link_name);
+ kernfs_put(parent);
return error;
}
@@ -331,12 +335,12 @@ EXPORT_SYMBOL_GPL(sysfs_add_link_to_group);
void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
const char *link_name)
{
- struct sysfs_dirent *dir_sd;
+ struct kernfs_node *parent;
- dir_sd = sysfs_get_dirent(kobj->sd, group_name);
- if (dir_sd) {
- sysfs_hash_and_remove(dir_sd, link_name, NULL);
- sysfs_put(dir_sd);
+ parent = kernfs_find_and_get(kobj->sd, group_name);
+ if (parent) {
+ kernfs_remove_by_name(parent, link_name);
+ kernfs_put(parent);
}
}
EXPORT_SYMBOL_GPL(sysfs_remove_link_from_group);
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
deleted file mode 100644
index 1750f79..0000000
--- a/fs/sysfs/inode.c
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * fs/sysfs/inode.c - basic sysfs inode and dentry operations
- *
- * Copyright (c) 2001-3 Patrick Mochel
- * Copyright (c) 2007 SUSE Linux Products GmbH
- * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
- *
- * This file is released under the GPLv2.
- *
- * Please see Documentation/filesystems/sysfs.txt for more information.
- */
-
-#undef DEBUG
-
-#include <linux/pagemap.h>
-#include <linux/namei.h>
-#include <linux/backing-dev.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/xattr.h>
-#include <linux/security.h>
-#include "sysfs.h"
-
-static const struct address_space_operations sysfs_aops = {
- .readpage = simple_readpage,
- .write_begin = simple_write_begin,
- .write_end = simple_write_end,
-};
-
-static struct backing_dev_info sysfs_backing_dev_info = {
- .name = "sysfs",
- .ra_pages = 0, /* No readahead */
- .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
-static const struct inode_operations sysfs_inode_operations = {
- .permission = sysfs_permission,
- .setattr = sysfs_setattr,
- .getattr = sysfs_getattr,
- .setxattr = sysfs_setxattr,
-};
-
-int __init sysfs_inode_init(void)
-{
- return bdi_init(&sysfs_backing_dev_info);
-}
-
-static struct sysfs_inode_attrs *sysfs_init_inode_attrs(struct sysfs_dirent *sd)
-{
- struct sysfs_inode_attrs *attrs;
- struct iattr *iattrs;
-
- attrs = kzalloc(sizeof(struct sysfs_inode_attrs), GFP_KERNEL);
- if (!attrs)
- return NULL;
- iattrs = &attrs->ia_iattr;
-
- /* assign default attributes */
- iattrs->ia_mode = sd->s_mode;
- iattrs->ia_uid = GLOBAL_ROOT_UID;
- iattrs->ia_gid = GLOBAL_ROOT_GID;
- iattrs->ia_atime = iattrs->ia_mtime = iattrs->ia_ctime = CURRENT_TIME;
-
- return attrs;
-}
-
-int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr *iattr)
-{
- struct sysfs_inode_attrs *sd_attrs;
- struct iattr *iattrs;
- unsigned int ia_valid = iattr->ia_valid;
-
- sd_attrs = sd->s_iattr;
-
- if (!sd_attrs) {
- /* setting attributes for the first time, allocate now */
- sd_attrs = sysfs_init_inode_attrs(sd);
- if (!sd_attrs)
- return -ENOMEM;
- sd->s_iattr = sd_attrs;
- }
- /* attributes were changed at least once in past */
- iattrs = &sd_attrs->ia_iattr;
-
- if (ia_valid & ATTR_UID)
- iattrs->ia_uid = iattr->ia_uid;
- if (ia_valid & ATTR_GID)
- iattrs->ia_gid = iattr->ia_gid;
- if (ia_valid & ATTR_ATIME)
- iattrs->ia_atime = iattr->ia_atime;
- if (ia_valid & ATTR_MTIME)
- iattrs->ia_mtime = iattr->ia_mtime;
- if (ia_valid & ATTR_CTIME)
- iattrs->ia_ctime = iattr->ia_ctime;
- if (ia_valid & ATTR_MODE) {
- umode_t mode = iattr->ia_mode;
- iattrs->ia_mode = sd->s_mode = mode;
- }
- return 0;
-}
-
-int sysfs_setattr(struct dentry *dentry, struct iattr *iattr)
-{
- struct inode *inode = dentry->d_inode;
- struct sysfs_dirent *sd = dentry->d_fsdata;
- int error;
-
- if (!sd)
- return -EINVAL;
-
- mutex_lock(&sysfs_mutex);
- error = inode_change_ok(inode, iattr);
- if (error)
- goto out;
-
- error = sysfs_sd_setattr(sd, iattr);
- if (error)
- goto out;
-
- /* this ignores size changes */
- setattr_copy(inode, iattr);
-
-out:
- mutex_unlock(&sysfs_mutex);
- return error;
-}
-
-static int sysfs_sd_setsecdata(struct sysfs_dirent *sd, void **secdata,
- u32 *secdata_len)
-{
- struct sysfs_inode_attrs *iattrs;
- void *old_secdata;
- size_t old_secdata_len;
-
- if (!sd->s_iattr) {
- sd->s_iattr = sysfs_init_inode_attrs(sd);
- if (!sd->s_iattr)
- return -ENOMEM;
- }
-
- iattrs = sd->s_iattr;
- old_secdata = iattrs->ia_secdata;
- old_secdata_len = iattrs->ia_secdata_len;
-
- iattrs->ia_secdata = *secdata;
- iattrs->ia_secdata_len = *secdata_len;
-
- *secdata = old_secdata;
- *secdata_len = old_secdata_len;
- return 0;
-}
-
-int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
- size_t size, int flags)
-{
- struct sysfs_dirent *sd = dentry->d_fsdata;
- void *secdata;
- int error;
- u32 secdata_len = 0;
-
- if (!sd)
- return -EINVAL;
-
- if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) {
- const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
- error = security_inode_setsecurity(dentry->d_inode, suffix,
- value, size, flags);
- if (error)
- goto out;
- error = security_inode_getsecctx(dentry->d_inode,
- &secdata, &secdata_len);
- if (error)
- goto out;
-
- mutex_lock(&sysfs_mutex);
- error = sysfs_sd_setsecdata(sd, &secdata, &secdata_len);
- mutex_unlock(&sysfs_mutex);
-
- if (secdata)
- security_release_secctx(secdata, secdata_len);
- } else
- return -EINVAL;
-out:
- return error;
-}
-
-static inline void set_default_inode_attr(struct inode *inode, umode_t mode)
-{
- inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-}
-
-static inline void set_inode_attr(struct inode *inode, struct iattr *iattr)
-{
- inode->i_uid = iattr->ia_uid;
- inode->i_gid = iattr->ia_gid;
- inode->i_atime = iattr->ia_atime;
- inode->i_mtime = iattr->ia_mtime;
- inode->i_ctime = iattr->ia_ctime;
-}
-
-static void sysfs_refresh_inode(struct sysfs_dirent *sd, struct inode *inode)
-{
- struct sysfs_inode_attrs *iattrs = sd->s_iattr;
-
- inode->i_mode = sd->s_mode;
- if (iattrs) {
- /* sysfs_dirent has non-default attributes
- * get them from persistent copy in sysfs_dirent
- */
- set_inode_attr(inode, &iattrs->ia_iattr);
- security_inode_notifysecctx(inode,
- iattrs->ia_secdata,
- iattrs->ia_secdata_len);
- }
-
- if (sysfs_type(sd) == SYSFS_DIR)
- set_nlink(inode, sd->s_dir.subdirs + 2);
-}
-
-int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
- struct kstat *stat)
-{
- struct sysfs_dirent *sd = dentry->d_fsdata;
- struct inode *inode = dentry->d_inode;
-
- mutex_lock(&sysfs_mutex);
- sysfs_refresh_inode(sd, inode);
- mutex_unlock(&sysfs_mutex);
-
- generic_fillattr(inode, stat);
- return 0;
-}
-
-static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
-{
- struct bin_attribute *bin_attr;
-
- inode->i_private = sysfs_get(sd);
- inode->i_mapping->a_ops = &sysfs_aops;
- inode->i_mapping->backing_dev_info = &sysfs_backing_dev_info;
- inode->i_op = &sysfs_inode_operations;
-
- set_default_inode_attr(inode, sd->s_mode);
- sysfs_refresh_inode(sd, inode);
-
- /* initialize inode according to type */
- switch (sysfs_type(sd)) {
- case SYSFS_DIR:
- inode->i_op = &sysfs_dir_inode_operations;
- inode->i_fop = &sysfs_dir_operations;
- break;
- case SYSFS_KOBJ_ATTR:
- inode->i_size = PAGE_SIZE;
- inode->i_fop = &sysfs_file_operations;
- break;
- case SYSFS_KOBJ_BIN_ATTR:
- bin_attr = sd->s_attr.bin_attr;
- inode->i_size = bin_attr->size;
- inode->i_fop = &sysfs_bin_operations;
- break;
- case SYSFS_KOBJ_LINK:
- inode->i_op = &sysfs_symlink_inode_operations;
- break;
- default:
- BUG();
- }
-
- unlock_new_inode(inode);
-}
-
-/**
- * sysfs_get_inode - get inode for sysfs_dirent
- * @sb: super block
- * @sd: sysfs_dirent to allocate inode for
- *
- * Get inode for @sd. If such inode doesn't exist, a new inode
- * is allocated and basics are initialized. New inode is
- * returned locked.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * Pointer to allocated inode on success, NULL on failure.
- */
-struct inode *sysfs_get_inode(struct super_block *sb, struct sysfs_dirent *sd)
-{
- struct inode *inode;
-
- inode = iget_locked(sb, sd->s_ino);
- if (inode && (inode->i_state & I_NEW))
- sysfs_init_inode(sd, inode);
-
- return inode;
-}
-
-/*
- * The sysfs_dirent serves as both an inode and a directory entry for sysfs.
- * To prevent the sysfs inode numbers from being freed prematurely we take a
- * reference to sysfs_dirent from the sysfs inode. A
- * super_operations.evict_inode() implementation is needed to drop that
- * reference upon inode destruction.
- */
-void sysfs_evict_inode(struct inode *inode)
-{
- struct sysfs_dirent *sd = inode->i_private;
-
- truncate_inode_pages(&inode->i_data, 0);
- clear_inode(inode);
- sysfs_put(sd);
-}
-
-int sysfs_permission(struct inode *inode, int mask)
-{
- struct sysfs_dirent *sd;
-
- if (mask & MAY_NOT_BLOCK)
- return -ECHILD;
-
- sd = inode->i_private;
-
- mutex_lock(&sysfs_mutex);
- sysfs_refresh_inode(sd, inode);
- mutex_unlock(&sysfs_mutex);
-
- return generic_permission(inode, mask);
-}
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 834ec2c..6211230 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -14,146 +14,41 @@
#include <linux/fs.h>
#include <linux/mount.h>
-#include <linux/pagemap.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/magic.h>
-#include <linux/slab.h>
#include <linux/user_namespace.h>
#include "sysfs.h"
-
-static struct vfsmount *sysfs_mnt;
-struct kmem_cache *sysfs_dir_cachep;
-
-static const struct super_operations sysfs_ops = {
- .statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
- .evict_inode = sysfs_evict_inode,
-};
-
-struct sysfs_dirent sysfs_root = {
- .s_name = "",
- .s_count = ATOMIC_INIT(1),
- .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
- .s_mode = S_IFDIR | S_IRUGO | S_IXUGO,
- .s_ino = 1,
-};
-
-static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
-{
- struct inode *inode;
- struct dentry *root;
-
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
- sb->s_magic = SYSFS_MAGIC;
- sb->s_op = &sysfs_ops;
- sb->s_time_gran = 1;
-
- /* get root inode, initialize and unlock it */
- mutex_lock(&sysfs_mutex);
- inode = sysfs_get_inode(sb, &sysfs_root);
- mutex_unlock(&sysfs_mutex);
- if (!inode) {
- pr_debug("sysfs: could not get root inode\n");
- return -ENOMEM;
- }
-
- /* instantiate and link root dentry */
- root = d_make_root(inode);
- if (!root) {
- pr_debug("%s: could not get root dentry!\n", __func__);
- return -ENOMEM;
- }
- root->d_fsdata = &sysfs_root;
- sb->s_root = root;
- sb->s_d_op = &sysfs_dentry_ops;
- return 0;
-}
-
-static int sysfs_test_super(struct super_block *sb, void *data)
-{
- struct sysfs_super_info *sb_info = sysfs_info(sb);
- struct sysfs_super_info *info = data;
- enum kobj_ns_type type;
- int found = 1;
-
- for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) {
- if (sb_info->ns[type] != info->ns[type])
- found = 0;
- }
- return found;
-}
-
-static int sysfs_set_super(struct super_block *sb, void *data)
-{
- int error;
- error = set_anon_super(sb, data);
- if (!error)
- sb->s_fs_info = data;
- return error;
-}
-
-static void free_sysfs_super_info(struct sysfs_super_info *info)
-{
- int type;
- for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
- kobj_ns_drop(type, info->ns[type]);
- kfree(info);
-}
+static struct kernfs_root *sysfs_root;
+struct kernfs_node *sysfs_root_kn;
static struct dentry *sysfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- struct sysfs_super_info *info;
- enum kobj_ns_type type;
- struct super_block *sb;
- int error;
+ struct dentry *root;
+ void *ns;
if (!(flags & MS_KERNMOUNT)) {
if (!capable(CAP_SYS_ADMIN) && !fs_fully_visible(fs_type))
return ERR_PTR(-EPERM);
- for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) {
- if (!kobj_ns_current_may_mount(type))
- return ERR_PTR(-EPERM);
- }
- }
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
-
- for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
- info->ns[type] = kobj_ns_grab_current(type);
-
- sb = sget(fs_type, sysfs_test_super, sysfs_set_super, flags, info);
- if (IS_ERR(sb) || sb->s_fs_info != info)
- free_sysfs_super_info(info);
- if (IS_ERR(sb))
- return ERR_CAST(sb);
- if (!sb->s_root) {
- error = sysfs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
- if (error) {
- deactivate_locked_super(sb);
- return ERR_PTR(error);
- }
- sb->s_flags |= MS_ACTIVE;
+ if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET))
+ return ERR_PTR(-EPERM);
}
- return dget(sb->s_root);
+ ns = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
+ root = kernfs_mount_ns(fs_type, flags, sysfs_root, ns);
+ if (IS_ERR(root))
+ kobj_ns_drop(KOBJ_NS_TYPE_NET, ns);
+ return root;
}
static void sysfs_kill_sb(struct super_block *sb)
{
- struct sysfs_super_info *info = sysfs_info(sb);
- /* Remove the superblock from fs_supers/s_instances
- * so we can't find it, before freeing sysfs_super_info.
- */
- kill_anon_super(sb);
- free_sysfs_super_info(info);
+ void *ns = (void *)kernfs_super_ns(sb);
+
+ kernfs_kill_sb(sb);
+ kobj_ns_drop(KOBJ_NS_TYPE_NET, ns);
}
static struct file_system_type sysfs_fs_type = {
@@ -165,48 +60,19 @@ static struct file_system_type sysfs_fs_type = {
int __init sysfs_init(void)
{
- int err = -ENOMEM;
+ int err;
- sysfs_dir_cachep = kmem_cache_create("sysfs_dir_cache",
- sizeof(struct sysfs_dirent),
- 0, 0, NULL);
- if (!sysfs_dir_cachep)
- goto out;
+ sysfs_root = kernfs_create_root(NULL, NULL);
+ if (IS_ERR(sysfs_root))
+ return PTR_ERR(sysfs_root);
- err = sysfs_inode_init();
- if (err)
- goto out_err;
+ sysfs_root_kn = sysfs_root->kn;
err = register_filesystem(&sysfs_fs_type);
- if (!err) {
- sysfs_mnt = kern_mount(&sysfs_fs_type);
- if (IS_ERR(sysfs_mnt)) {
- printk(KERN_ERR "sysfs: could not mount!\n");
- err = PTR_ERR(sysfs_mnt);
- sysfs_mnt = NULL;
- unregister_filesystem(&sysfs_fs_type);
- goto out_err;
- }
- } else
- goto out_err;
-out:
- return err;
-out_err:
- kmem_cache_destroy(sysfs_dir_cachep);
- sysfs_dir_cachep = NULL;
- goto out;
-}
-
-#undef sysfs_get
-struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd)
-{
- return __sysfs_get(sd);
-}
-EXPORT_SYMBOL_GPL(sysfs_get);
+ if (err) {
+ kernfs_destroy_root(sysfs_root);
+ return err;
+ }
-#undef sysfs_put
-void sysfs_put(struct sysfs_dirent *sd)
-{
- __sysfs_put(sd);
+ return 0;
}
-EXPORT_SYMBOL_GPL(sysfs_put);
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 3ae3f1b..aecb15f 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -11,109 +11,73 @@
*/
#include <linux/fs.h>
-#include <linux/gfp.h>
-#include <linux/mount.h>
#include <linux/module.h>
#include <linux/kobject.h>
-#include <linux/namei.h>
#include <linux/mutex.h>
#include <linux/security.h>
#include "sysfs.h"
-static int sysfs_do_create_link_sd(struct sysfs_dirent *parent_sd,
- struct kobject *target,
+static int sysfs_do_create_link_sd(struct kernfs_node *parent,
+ struct kobject *target_kobj,
const char *name, int warn)
{
- struct sysfs_dirent *target_sd = NULL;
- struct sysfs_dirent *sd = NULL;
- struct sysfs_addrm_cxt acxt;
- enum kobj_ns_type ns_type;
- int error;
+ struct kernfs_node *kn, *target = NULL;
- BUG_ON(!name || !parent_sd);
+ BUG_ON(!name || !parent);
/*
- * We don't own @target and it may be removed at any time.
+ * We don't own @target_kobj and it may be removed at any time.
* Synchronize using sysfs_symlink_target_lock. See
* sysfs_remove_dir() for details.
*/
spin_lock(&sysfs_symlink_target_lock);
- if (target->sd)
- target_sd = sysfs_get(target->sd);
+ if (target_kobj->sd) {
+ target = target_kobj->sd;
+ kernfs_get(target);
+ }
spin_unlock(&sysfs_symlink_target_lock);
- error = -ENOENT;
- if (!target_sd)
- goto out_put;
-
- error = -ENOMEM;
- sd = sysfs_new_dirent(name, S_IFLNK|S_IRWXUGO, SYSFS_KOBJ_LINK);
- if (!sd)
- goto out_put;
+ if (!target)
+ return -ENOENT;
- ns_type = sysfs_ns_type(parent_sd);
- if (ns_type)
- sd->s_ns = target_sd->s_ns;
- sd->s_symlink.target_sd = target_sd;
- target_sd = NULL; /* reference is now owned by the symlink */
-
- sysfs_addrm_start(&acxt);
- /* Symlinks must be between directories with the same ns_type */
- if (!ns_type ||
- (ns_type == sysfs_ns_type(sd->s_symlink.target_sd->s_parent))) {
- if (warn)
- error = sysfs_add_one(&acxt, sd, parent_sd);
- else
- error = __sysfs_add_one(&acxt, sd, parent_sd);
- } else {
- error = -EINVAL;
- WARN(1, KERN_WARNING
- "sysfs: symlink across ns_types %s/%s -> %s/%s\n",
- parent_sd->s_name,
- sd->s_name,
- sd->s_symlink.target_sd->s_parent->s_name,
- sd->s_symlink.target_sd->s_name);
- }
- sysfs_addrm_finish(&acxt);
+ kn = kernfs_create_link(parent, name, target);
+ kernfs_put(target);
- if (error)
- goto out_put;
+ if (!IS_ERR(kn))
+ return 0;
- return 0;
-
- out_put:
- sysfs_put(target_sd);
- sysfs_put(sd);
- return error;
+ if (warn && PTR_ERR(kn) == -EEXIST)
+ sysfs_warn_dup(parent, name);
+ return PTR_ERR(kn);
}
/**
* sysfs_create_link_sd - create symlink to a given object.
- * @sd: directory we're creating the link in.
+ * @kn: directory we're creating the link in.
* @target: object we're pointing to.
* @name: name of the symlink.
*/
-int sysfs_create_link_sd(struct sysfs_dirent *sd, struct kobject *target,
+int sysfs_create_link_sd(struct kernfs_node *kn, struct kobject *target,
const char *name)
{
- return sysfs_do_create_link_sd(sd, target, name, 1);
+ return sysfs_do_create_link_sd(kn, target, name, 1);
}
static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target,
const char *name, int warn)
{
- struct sysfs_dirent *parent_sd = NULL;
+ struct kernfs_node *parent = NULL;
if (!kobj)
- parent_sd = &sysfs_root;
+ parent = sysfs_root_kn;
else
- parent_sd = kobj->sd;
+ parent = kobj->sd;
- if (!parent_sd)
+ if (!parent)
return -EFAULT;
- return sysfs_do_create_link_sd(parent_sd, target, name, warn);
+ return sysfs_do_create_link_sd(parent, target, name, warn);
}
/**
@@ -164,10 +128,10 @@ void sysfs_delete_link(struct kobject *kobj, struct kobject *targ,
* sysfs_remove_dir() for details.
*/
spin_lock(&sysfs_symlink_target_lock);
- if (targ->sd && sysfs_ns_type(kobj->sd))
- ns = targ->sd->s_ns;
+ if (targ->sd && kernfs_ns_enabled(kobj->sd))
+ ns = targ->sd->ns;
spin_unlock(&sysfs_symlink_target_lock);
- sysfs_hash_and_remove(kobj->sd, name, ns);
+ kernfs_remove_by_name_ns(kobj->sd, name, ns);
}
/**
@@ -177,14 +141,14 @@ void sysfs_delete_link(struct kobject *kobj, struct kobject *targ,
*/
void sysfs_remove_link(struct kobject *kobj, const char *name)
{
- struct sysfs_dirent *parent_sd = NULL;
+ struct kernfs_node *parent = NULL;
if (!kobj)
- parent_sd = &sysfs_root;
+ parent = sysfs_root_kn;
else
- parent_sd = kobj->sd;
+ parent = kobj->sd;
- sysfs_hash_and_remove(parent_sd, name, NULL);
+ kernfs_remove_by_name(parent, name);
}
EXPORT_SYMBOL_GPL(sysfs_remove_link);
@@ -201,130 +165,33 @@ EXPORT_SYMBOL_GPL(sysfs_remove_link);
int sysfs_rename_link_ns(struct kobject *kobj, struct kobject *targ,
const char *old, const char *new, const void *new_ns)
{
- struct sysfs_dirent *parent_sd, *sd = NULL;
+ struct kernfs_node *parent, *kn = NULL;
const void *old_ns = NULL;
int result;
if (!kobj)
- parent_sd = &sysfs_root;
+ parent = sysfs_root_kn;
else
- parent_sd = kobj->sd;
+ parent = kobj->sd;
if (targ->sd)
- old_ns = targ->sd->s_ns;
+ old_ns = targ->sd->ns;
result = -ENOENT;
- sd = sysfs_get_dirent_ns(parent_sd, old, old_ns);
- if (!sd)
+ kn = kernfs_find_and_get_ns(parent, old, old_ns);
+ if (!kn)
goto out;
result = -EINVAL;
- if (sysfs_type(sd) != SYSFS_KOBJ_LINK)
+ if (kernfs_type(kn) != KERNFS_LINK)
goto out;
- if (sd->s_symlink.target_sd->s_dir.kobj != targ)
+ if (kn->symlink.target_kn->priv != targ)
goto out;
- result = sysfs_rename(sd, parent_sd, new, new_ns);
+ result = kernfs_rename_ns(kn, parent, new, new_ns);
out:
- sysfs_put(sd);
+ kernfs_put(kn);
return result;
}
EXPORT_SYMBOL_GPL(sysfs_rename_link_ns);
-
-static int sysfs_get_target_path(struct sysfs_dirent *parent_sd,
- struct sysfs_dirent *target_sd, char *path)
-{
- struct sysfs_dirent *base, *sd;
- char *s = path;
- int len = 0;
-
- /* go up to the root, stop at the base */
- base = parent_sd;
- while (base->s_parent) {
- sd = target_sd->s_parent;
- while (sd->s_parent && base != sd)
- sd = sd->s_parent;
-
- if (base == sd)
- break;
-
- strcpy(s, "../");
- s += 3;
- base = base->s_parent;
- }
-
- /* determine end of target string for reverse fillup */
- sd = target_sd;
- while (sd->s_parent && sd != base) {
- len += strlen(sd->s_name) + 1;
- sd = sd->s_parent;
- }
-
- /* check limits */
- if (len < 2)
- return -EINVAL;
- len--;
- if ((s - path) + len > PATH_MAX)
- return -ENAMETOOLONG;
-
- /* reverse fillup of target string from target to base */
- sd = target_sd;
- while (sd->s_parent && sd != base) {
- int slen = strlen(sd->s_name);
-
- len -= slen;
- strncpy(s + len, sd->s_name, slen);
- if (len)
- s[--len] = '/';
-
- sd = sd->s_parent;
- }
-
- return 0;
-}
-
-static int sysfs_getlink(struct dentry *dentry, char *path)
-{
- struct sysfs_dirent *sd = dentry->d_fsdata;
- struct sysfs_dirent *parent_sd = sd->s_parent;
- struct sysfs_dirent *target_sd = sd->s_symlink.target_sd;
- int error;
-
- mutex_lock(&sysfs_mutex);
- error = sysfs_get_target_path(parent_sd, target_sd, path);
- mutex_unlock(&sysfs_mutex);
-
- return error;
-}
-
-static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
- int error = -ENOMEM;
- unsigned long page = get_zeroed_page(GFP_KERNEL);
- if (page) {
- error = sysfs_getlink(dentry, (char *) page);
- if (error < 0)
- free_page((unsigned long)page);
- }
- nd_set_link(nd, error ? ERR_PTR(error) : (char *)page);
- return NULL;
-}
-
-static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd,
- void *cookie)
-{
- char *page = nd_get_link(nd);
- if (!IS_ERR(page))
- free_page((unsigned long)page);
-}
-
-const struct inode_operations sysfs_symlink_inode_operations = {
- .setxattr = sysfs_setxattr,
- .readlink = generic_readlink,
- .follow_link = sysfs_follow_link,
- .put_link = sysfs_put_link,
- .setattr = sysfs_setattr,
- .getattr = sysfs_getattr,
- .permission = sysfs_permission,
-};
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 0af09fb..0e2f1cc 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -8,248 +8,36 @@
* This file is released under the GPLv2.
*/
-#include <linux/lockdep.h>
-#include <linux/kobject_ns.h>
-#include <linux/fs.h>
-#include <linux/rbtree.h>
+#ifndef __SYSFS_INTERNAL_H
+#define __SYSFS_INTERNAL_H
-struct sysfs_open_dirent;
-
-/* type-specific structures for sysfs_dirent->s_* union members */
-struct sysfs_elem_dir {
- struct kobject *kobj;
-
- unsigned long subdirs;
- /* children rbtree starts here and goes through sd->s_rb */
- struct rb_root children;
-};
-
-struct sysfs_elem_symlink {
- struct sysfs_dirent *target_sd;
-};
-
-struct sysfs_elem_attr {
- union {
- struct attribute *attr;
- struct bin_attribute *bin_attr;
- };
- struct sysfs_open_dirent *open;
-};
-
-struct sysfs_inode_attrs {
- struct iattr ia_iattr;
- void *ia_secdata;
- u32 ia_secdata_len;
-};
-
-/*
- * sysfs_dirent - the building block of sysfs hierarchy. Each and
- * every sysfs node is represented by single sysfs_dirent.
- *
- * As long as s_count reference is held, the sysfs_dirent itself is
- * accessible. Dereferencing s_elem or any other outer entity
- * requires s_active reference.
- */
-struct sysfs_dirent {
- atomic_t s_count;
- atomic_t s_active;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
- struct sysfs_dirent *s_parent;
- const char *s_name;
-
- struct rb_node s_rb;
-
- union {
- struct completion *completion;
- struct sysfs_dirent *removed_list;
- } u;
-
- const void *s_ns; /* namespace tag */
- unsigned int s_hash; /* ns + name hash */
- union {
- struct sysfs_elem_dir s_dir;
- struct sysfs_elem_symlink s_symlink;
- struct sysfs_elem_attr s_attr;
- };
-
- unsigned short s_flags;
- umode_t s_mode;
- unsigned int s_ino;
- struct sysfs_inode_attrs *s_iattr;
-};
-
-#define SD_DEACTIVATED_BIAS INT_MIN
-
-#define SYSFS_TYPE_MASK 0x00ff
-#define SYSFS_DIR 0x0001
-#define SYSFS_KOBJ_ATTR 0x0002
-#define SYSFS_KOBJ_BIN_ATTR 0x0004
-#define SYSFS_KOBJ_LINK 0x0008
-#define SYSFS_COPY_NAME (SYSFS_DIR | SYSFS_KOBJ_LINK)
-#define SYSFS_ACTIVE_REF (SYSFS_KOBJ_ATTR | SYSFS_KOBJ_BIN_ATTR)
-
-/* identify any namespace tag on sysfs_dirents */
-#define SYSFS_NS_TYPE_MASK 0xf00
-#define SYSFS_NS_TYPE_SHIFT 8
-
-#define SYSFS_FLAG_MASK ~(SYSFS_NS_TYPE_MASK|SYSFS_TYPE_MASK)
-#define SYSFS_FLAG_REMOVED 0x02000
-
-static inline unsigned int sysfs_type(struct sysfs_dirent *sd)
-{
- return sd->s_flags & SYSFS_TYPE_MASK;
-}
-
-/*
- * Return any namespace tags on this dirent.
- * enum kobj_ns_type is defined in linux/kobject.h
- */
-static inline enum kobj_ns_type sysfs_ns_type(struct sysfs_dirent *sd)
-{
- return (sd->s_flags & SYSFS_NS_TYPE_MASK) >> SYSFS_NS_TYPE_SHIFT;
-}
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-
-#define sysfs_dirent_init_lockdep(sd) \
-do { \
- struct attribute *attr = sd->s_attr.attr; \
- struct lock_class_key *key = attr->key; \
- if (!key) \
- key = &attr->skey; \
- \
- lockdep_init_map(&sd->dep_map, "s_active", key, 0); \
-} while (0)
-
-/* Test for attributes that want to ignore lockdep for read-locking */
-static inline bool sysfs_ignore_lockdep(struct sysfs_dirent *sd)
-{
- int type = sysfs_type(sd);
-
- return (type == SYSFS_KOBJ_ATTR || type == SYSFS_KOBJ_BIN_ATTR) &&
- sd->s_attr.attr->ignore_lockdep;
-}
-
-#else
-
-#define sysfs_dirent_init_lockdep(sd) do {} while (0)
-
-static inline bool sysfs_ignore_lockdep(struct sysfs_dirent *sd)
-{
- return true;
-}
-
-#endif
-
-/*
- * Context structure to be used while adding/removing nodes.
- */
-struct sysfs_addrm_cxt {
- struct sysfs_dirent *removed;
-};
+#include <linux/sysfs.h>
/*
* mount.c
*/
-
-/*
- * Each sb is associated with a set of namespace tags (i.e.
- * the network namespace of the task which mounted this sysfs
- * instance).
- */
-struct sysfs_super_info {
- void *ns[KOBJ_NS_TYPES];
-};
-#define sysfs_info(SB) ((struct sysfs_super_info *)(SB->s_fs_info))
-extern struct sysfs_dirent sysfs_root;
-extern struct kmem_cache *sysfs_dir_cachep;
+extern struct kernfs_node *sysfs_root_kn;
/*
* dir.c
*/
-extern struct mutex sysfs_mutex;
extern spinlock_t sysfs_symlink_target_lock;
-extern const struct dentry_operations sysfs_dentry_ops;
-
-extern const struct file_operations sysfs_dir_operations;
-extern const struct inode_operations sysfs_dir_inode_operations;
-struct sysfs_dirent *sysfs_get_active(struct sysfs_dirent *sd);
-void sysfs_put_active(struct sysfs_dirent *sd);
-void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt);
-void sysfs_warn_dup(struct sysfs_dirent *parent, const char *name);
-int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd,
- struct sysfs_dirent *parent_sd);
-int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd,
- struct sysfs_dirent *parent_sd);
-void sysfs_remove(struct sysfs_dirent *sd);
-int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name,
- const void *ns);
-void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt);
-
-struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
- const unsigned char *name,
- const void *ns);
-struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type);
-
-void release_sysfs_dirent(struct sysfs_dirent *sd);
-
-int sysfs_create_subdir(struct kobject *kobj, const char *name,
- struct sysfs_dirent **p_sd);
-
-int sysfs_rename(struct sysfs_dirent *sd, struct sysfs_dirent *new_parent_sd,
- const char *new_name, const void *new_ns);
-
-static inline struct sysfs_dirent *__sysfs_get(struct sysfs_dirent *sd)
-{
- if (sd) {
- WARN_ON(!atomic_read(&sd->s_count));
- atomic_inc(&sd->s_count);
- }
- return sd;
-}
-#define sysfs_get(sd) __sysfs_get(sd)
-
-static inline void __sysfs_put(struct sysfs_dirent *sd)
-{
- if (sd && atomic_dec_and_test(&sd->s_count))
- release_sysfs_dirent(sd);
-}
-#define sysfs_put(sd) __sysfs_put(sd)
-
-/*
- * inode.c
- */
-struct inode *sysfs_get_inode(struct super_block *sb, struct sysfs_dirent *sd);
-void sysfs_evict_inode(struct inode *inode);
-int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr *iattr);
-int sysfs_permission(struct inode *inode, int mask);
-int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
-int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
- struct kstat *stat);
-int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
- size_t size, int flags);
-int sysfs_inode_init(void);
+void sysfs_warn_dup(struct kernfs_node *parent, const char *name);
/*
* file.c
*/
-extern const struct file_operations sysfs_file_operations;
-extern const struct file_operations sysfs_bin_operations;
-
-int sysfs_add_file(struct sysfs_dirent *dir_sd,
- const struct attribute *attr, int type);
-
-int sysfs_add_file_mode_ns(struct sysfs_dirent *dir_sd,
- const struct attribute *attr, int type,
+int sysfs_add_file(struct kernfs_node *parent,
+ const struct attribute *attr, bool is_bin);
+int sysfs_add_file_mode_ns(struct kernfs_node *parent,
+ const struct attribute *attr, bool is_bin,
umode_t amode, const void *ns);
-void sysfs_unmap_bin_file(struct sysfs_dirent *sd);
/*
* symlink.c
*/
-extern const struct inode_operations sysfs_symlink_inode_operations;
-int sysfs_create_link_sd(struct sysfs_dirent *sd, struct kobject *target,
+int sysfs_create_link_sd(struct kernfs_node *kn, struct kobject *target,
const char *name);
+
+#endif /* __SYSFS_INTERNAL_H */
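
The sysfs hunks above all follow one conversion pattern: struct sysfs_dirent together with sysfs_get_dirent()/sysfs_hash_and_remove()/sysfs_put() becomes struct kernfs_node together with kernfs_find_and_get()/kernfs_remove_by_name()/kernfs_put(). A minimal sketch of that pattern, using only calls that appear in the diff itself; the "group" and "example_attr" names are hypothetical:

#include <linux/kernfs.h>
#include <linux/kobject.h>
#include <linux/errno.h>

static int example_remove_from_group(struct kobject *kobj)
{
	struct kernfs_node *parent;

	/* was: sysfs_get_dirent(kobj->sd, "group") */
	parent = kernfs_find_and_get(kobj->sd, "group");
	if (!parent)
		return -ENOENT;

	/* was: sysfs_hash_and_remove(parent, "example_attr", NULL) */
	kernfs_remove_by_name(parent, "example_attr");

	/* was: sysfs_put(parent) */
	kernfs_put(parent);
	return 0;
}
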
diff --git a/fs/xfs/xfs_attr_remote.c b/fs/xfs/xfs_attr_remote.c
index 739e0a52..5549d69 100644
--- a/fs/xfs/xfs_attr_remote.c
+++ b/fs/xfs/xfs_attr_remote.c
@@ -110,7 +110,7 @@ xfs_attr3_rmt_verify(
if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
return false;
if (be32_to_cpu(rmt->rm_offset) +
- be32_to_cpu(rmt->rm_bytes) >= XATTR_SIZE_MAX)
+ be32_to_cpu(rmt->rm_bytes) > XATTR_SIZE_MAX)
return false;
if (rmt->rm_owner == 0)
return false;
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 1394106..82e0dab 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -287,6 +287,7 @@ xfs_bmapi_allocate(
INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
queue_work(xfs_alloc_wq, &args->work);
wait_for_completion(&done);
+ destroy_work_on_stack(&args->work);
return args->result;
}
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index c602c77..ddabed1 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -169,7 +169,8 @@ struct acpi_device_flags {
u32 ejectable:1;
u32 power_manageable:1;
u32 match_driver:1;
- u32 reserved:27;
+ u32 no_hotplug:1;
+ u32 reserved:26;
};
/* File System */
@@ -344,6 +345,7 @@ extern struct kobject *acpi_kobj;
extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int);
void acpi_bus_private_data_handler(acpi_handle, void *);
int acpi_bus_get_private_data(acpi_handle, void **);
+void acpi_bus_no_hotplug(acpi_handle handle);
extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
extern int register_acpi_notifier(struct notifier_block *);
extern int unregister_acpi_notifier(struct notifier_block *);
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 639d7a4..6f692f8 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -1,4 +1,5 @@
-/* Generic barrier definitions, based on MN10300 definitions.
+/*
+ * Generic barrier definitions, originally based on MN10300 definitions.
*
* It should be possible to use these on really simple architectures,
* but it serves more as a starting point for new ports.
@@ -16,35 +17,65 @@
#ifndef __ASSEMBLY__
-#define nop() asm volatile ("nop")
+#include <linux/compiler.h>
+
+#ifndef nop
+#define nop() asm volatile ("nop")
+#endif
/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
+ * Force strict CPU ordering. And yes, this is required on UP too when we're
+ * talking to devices.
*
- * This implementation only contains a compiler barrier.
+ * Fall back to compiler barriers if nothing better is provided.
*/
-#define mb() asm volatile ("": : :"memory")
+#ifndef mb
+#define mb() barrier()
+#endif
+
+#ifndef rmb
#define rmb() mb()
-#define wmb() asm volatile ("": : :"memory")
+#endif
+
+#ifndef wmb
+#define wmb() mb()
+#endif
+
+#ifndef read_barrier_depends
+#define read_barrier_depends() do { } while (0)
+#endif
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
+#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+#endif
+
+#ifndef set_mb
+#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
#endif
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
-#define read_barrier_depends() do {} while (0)
-#define smp_read_barrier_depends() do {} while (0)
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
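
The smp_store_release()/smp_load_acquire() pair added above issues a full smp_mb() before the ACCESS_ONCE() store and after the ACCESS_ONCE() load, so a payload written before the release store is visible once the acquire load observes the flag. A minimal producer/consumer sketch, assuming hypothetical globals data and flag (not part of the patch):

#include <asm/barrier.h>
#include <linux/errno.h>

static int data;
static int flag;

static void producer(void)
{
	data = 42;			/* payload written first */
	smp_store_release(&flag, 1);	/* barrier, then flag store: publishes data */
}

static int consumer(void)
{
	if (smp_load_acquire(&flag))	/* flag load, then barrier: orders data read after it */
		return data;
	return -EAGAIN;
}
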
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 87578c1..49376ae 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -600,7 +600,7 @@
{0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
- {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index 669fef5..3e0fbe4 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -3,6 +3,6 @@
#include <uapi/linux/auxvec.h>
-#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
/* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 27b1bcf..86c12c9 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -1,9 +1,35 @@
#ifndef _LINUX_BH_H
#define _LINUX_BH_H
-extern void local_bh_disable(void);
+#include <linux/preempt.h>
+#include <linux/preempt_mask.h>
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+#else
+static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+{
+ preempt_count_add(cnt);
+ barrier();
+}
+#endif
+
+static inline void local_bh_disable(void)
+{
+ __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+}
+
extern void _local_bh_enable(void);
-extern void local_bh_enable(void);
-extern void local_bh_enable_ip(unsigned long ip);
+extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
+
+static inline void local_bh_enable_ip(unsigned long ip)
+{
+ __local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET);
+}
+
+static inline void local_bh_enable(void)
+{
+ __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+}
#endif /* _LINUX_BH_H */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 92669cd..fe7a686 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -298,6 +298,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif
+/* Is this type a native word size -- useful for atomic operations */
+#ifndef __native_word
+# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+#endif
+
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
@@ -337,6 +342,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define compiletime_assert(condition, msg) \
_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
+#define compiletime_assert_atomic_type(t) \
+ compiletime_assert(__native_word(t), \
+ "Need native word sized stores/loads for atomicity.")
+
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
diff --git a/include/linux/component.h b/include/linux/component.h
new file mode 100644
index 0000000..6887018
--- /dev/null
+++ b/include/linux/component.h
@@ -0,0 +1,32 @@
+#ifndef COMPONENT_H
+#define COMPONENT_H
+
+struct device;
+
+struct component_ops {
+ int (*bind)(struct device *, struct device *, void *);
+ void (*unbind)(struct device *, struct device *, void *);
+};
+
+int component_add(struct device *, const struct component_ops *);
+void component_del(struct device *, const struct component_ops *);
+
+int component_bind_all(struct device *, void *);
+void component_unbind_all(struct device *, void *);
+
+struct master;
+
+struct component_master_ops {
+ int (*add_components)(struct device *, struct master *);
+ int (*bind)(struct device *);
+ void (*unbind)(struct device *);
+};
+
+int component_master_add(struct device *, const struct component_master_ops *);
+void component_master_del(struct device *,
+ const struct component_master_ops *);
+
+int component_master_add_child(struct master *master,
+ int (*compare)(struct device *, void *), void *compare_data);
+
+#endif
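
The new component.h above lets sub-devices register a struct component_ops against their struct device so a master can bind and unbind them as a group. A minimal sketch of the sub-device side under stated assumptions: my_bind/my_unbind, my_component_ops and the platform driver callbacks are illustrative names, not from the patch; only component_add()/component_del() and the ops signatures come from the header above.

#include <linux/component.h>
#include <linux/platform_device.h>

static int my_bind(struct device *dev, struct device *master, void *data)
{
	/* hook this sub-device into the composite device assembled by the master */
	return 0;
}

static void my_unbind(struct device *dev, struct device *master, void *data)
{
	/* undo whatever my_bind() set up */
}

static const struct component_ops my_component_ops = {
	.bind	= my_bind,
	.unbind	= my_unbind,
};

static int my_probe(struct platform_device *pdev)
{
	/* register with the component framework; master binds us later */
	return component_add(&pdev->dev, &my_component_ops);
}

static int my_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &my_component_ops);
	return 0;
}
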
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 1581587..37b81bd 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -17,13 +17,13 @@ extern void __context_tracking_task_switch(struct task_struct *prev,
static inline void user_enter(void)
{
- if (static_key_false(&context_tracking_enabled))
+ if (context_tracking_is_enabled())
context_tracking_user_enter();
}
static inline void user_exit(void)
{
- if (static_key_false(&context_tracking_enabled))
+ if (context_tracking_is_enabled())
context_tracking_user_exit();
}
@@ -31,7 +31,7 @@ static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
- if (!static_key_false(&context_tracking_enabled))
+ if (!context_tracking_is_enabled())
return 0;
prev_ctx = this_cpu_read(context_tracking.state);
@@ -42,7 +42,7 @@ static inline enum ctx_state exception_enter(void)
static inline void exception_exit(enum ctx_state prev_ctx)
{
- if (static_key_false(&context_tracking_enabled)) {
+ if (context_tracking_is_enabled()) {
if (prev_ctx == IN_USER)
context_tracking_user_enter();
}
@@ -51,7 +51,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
static inline void context_tracking_task_switch(struct task_struct *prev,
struct task_struct *next)
{
- if (static_key_false(&context_tracking_enabled))
+ if (context_tracking_is_enabled())
__context_tracking_task_switch(prev, next);
}
#else
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 0f1979d..97a8122 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -22,15 +22,20 @@ struct context_tracking {
extern struct static_key context_tracking_enabled;
DECLARE_PER_CPU(struct context_tracking, context_tracking);
-static inline bool context_tracking_in_user(void)
+static inline bool context_tracking_is_enabled(void)
{
- return __this_cpu_read(context_tracking.state) == IN_USER;
+ return static_key_false(&context_tracking_enabled);
}
-static inline bool context_tracking_active(void)
+static inline bool context_tracking_cpu_is_enabled(void)
{
return __this_cpu_read(context_tracking.active);
}
+
+static inline bool context_tracking_in_user(void)
+{
+ return __this_cpu_read(context_tracking.state) == IN_USER;
+}
#else
static inline bool context_tracking_in_user(void) { return false; }
static inline bool context_tracking_active(void) { return false; }
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index fe68a5a..7032518 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -6,6 +6,8 @@
#include <linux/proc_fs.h>
#include <linux/elf.h>
+#include <asm/pgtable.h> /* for pgprot_t */
+
#define ELFCORE_ADDR_MAX (-1ULL)
#define ELFCORE_ADDR_ERR (-2ULL)
diff --git a/include/linux/edac.h b/include/linux/edac.h
index dbdffe8..8e6c20a 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -35,6 +35,34 @@ extern void edac_atomic_assert_error(void);
extern struct bus_type *edac_get_sysfs_subsys(void);
extern void edac_put_sysfs_subsys(void);
+enum {
+ EDAC_REPORTING_ENABLED,
+ EDAC_REPORTING_DISABLED,
+ EDAC_REPORTING_FORCE
+};
+
+extern int edac_report_status;
+#ifdef CONFIG_EDAC
+static inline int get_edac_report_status(void)
+{
+ return edac_report_status;
+}
+
+static inline void set_edac_report_status(int new)
+{
+ edac_report_status = new;
+}
+#else
+static inline int get_edac_report_status(void)
+{
+ return EDAC_REPORTING_DISABLED;
+}
+
+static inline void set_edac_report_status(int new)
+{
+}
+#endif
+
static inline void opstate_init(void)
{
switch (edac_op_state) {
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 11ce678..0a819e7 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -556,6 +556,9 @@ extern struct efi {
unsigned long hcdp; /* HCDP table */
unsigned long uga; /* UGA table */
unsigned long uv_systab; /* UV system table */
+ unsigned long fw_vendor; /* fw_vendor */
+ unsigned long runtime; /* runtime table */
+ unsigned long config_table; /* config tables */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -653,6 +656,7 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */
#define EFI_MEMMAP 4 /* Can we use EFI memory map? */
#define EFI_64BIT 5 /* Is the firmware 64-bit? */
+#define EFI_ARCH_1 6 /* First arch-specific bit */
#ifdef CONFIG_EFI
# ifdef CONFIG_X86
@@ -872,4 +876,17 @@ int efivars_sysfs_init(void);
#endif /* CONFIG_EFI_VARS */
+#ifdef CONFIG_EFI_RUNTIME_MAP
+int efi_runtime_map_init(struct kobject *);
+void efi_runtime_map_setup(void *, int, u32);
+#else
+static inline int efi_runtime_map_init(struct kobject *kobj)
+{
+ return 0;
+}
+
+static inline void
+efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {}
+#endif
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h
index 4195810..8900fdf 100644
--- a/include/linux/extcon/extcon-gpio.h
+++ b/include/linux/extcon/extcon-gpio.h
@@ -51,6 +51,7 @@ struct gpio_extcon_platform_data {
/* if NULL, "0" or "1" will be printed */
const char *state_on;
const char *state_off;
+ bool check_on_resume;
};
#endif /* __EXTCON_GPIO_H__ */
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index e154c10..5952933 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -68,4 +68,11 @@ static inline void release_firmware(const struct firmware *fw)
#endif
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+int request_firmware_direct(const struct firmware **fw, const char *name,
+ struct device *device);
+#else
+#define request_firmware_direct request_firmware
+#endif
+
#endif
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index d9cf963..12d5f97 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -5,6 +5,7 @@
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
+#include <asm/hardirq.h>
extern void synchronize_irq(unsigned int irq);
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 8323775..beaf965 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -23,22 +23,26 @@
/* Accel 3D (200073) */
#define HID_USAGE_SENSOR_ACCEL_3D 0x200073
+#define HID_USAGE_SENSOR_DATA_ACCELERATION 0x200452
#define HID_USAGE_SENSOR_ACCEL_X_AXIS 0x200453
#define HID_USAGE_SENSOR_ACCEL_Y_AXIS 0x200454
#define HID_USAGE_SENSOR_ACCEL_Z_AXIS 0x200455
/* ALS (200041) */
#define HID_USAGE_SENSOR_ALS 0x200041
+#define HID_USAGE_SENSOR_DATA_LIGHT 0x2004d0
#define HID_USAGE_SENSOR_LIGHT_ILLUM 0x2004d1
/* Gyro 3D: (200076) */
#define HID_USAGE_SENSOR_GYRO_3D 0x200076
+#define HID_USAGE_SENSOR_DATA_ANGL_VELOCITY 0x200456
#define HID_USAGE_SENSOR_ANGL_VELOCITY_X_AXIS 0x200457
#define HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS 0x200458
#define HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS 0x200459
/* ORIENTATION: Compass 3D: (200083) */
#define HID_USAGE_SENSOR_COMPASS_3D 0x200083
+#define HID_USAGE_SENSOR_DATA_ORIENTATION 0x200470
#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING 0x200471
#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_X 0x200472
#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_Y 0x200473
@@ -54,10 +58,14 @@
#define HID_USAGE_SENSOR_ORIENT_DISTANCE_Y 0x20047B
#define HID_USAGE_SENSOR_ORIENT_DISTANCE_Z 0x20047C
#define HID_USAGE_SENSOR_ORIENT_DISTANCE_OUT_OF_RANGE 0x20047D
+
+/* ORIENTATION: Inclinometer 3D: (200086) */
+#define HID_USAGE_SENSOR_INCLINOMETER_3D 0x200086
#define HID_USAGE_SENSOR_ORIENT_TILT 0x20047E
#define HID_USAGE_SENSOR_ORIENT_TILT_X 0x20047F
#define HID_USAGE_SENSOR_ORIENT_TILT_Y 0x200480
#define HID_USAGE_SENSOR_ORIENT_TILT_Z 0x200481
+
#define HID_USAGE_SENSOR_ORIENT_ROTATION_MATRIX 0x200482
#define HID_USAGE_SENSOR_ORIENT_QUATERNION 0x200483
#define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX 0x200484
@@ -117,6 +125,10 @@
#define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316
#define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319
+/* Per data field properties */
+#define HID_USAGE_SENSOR_DATA_MOD_NONE 0x00
+#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS 0x1000
+
/* Power state enumerations */
#define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x00
#define HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM 0x01
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index eff50e0..d9c8dbd3 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -445,7 +445,7 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
static inline struct i2c_adapter *
i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
{
-#if IS_ENABLED(I2C_MUX)
+#if IS_ENABLED(CONFIG_I2C_MUX)
struct device *parent = adapter->dev.parent;
if (parent != NULL && parent->type == &i2c_adapter_type)
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 15607b4..5193927 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -21,6 +21,8 @@ struct iio_buffer;
* struct iio_buffer_access_funcs - access functions for buffers.
* @store_to: actually store stuff to the buffer
* @read_first_n: try to get a specified number of bytes (must exist)
+ * @data_available: indicates whether data for reading from the buffer is
+ * available.
* @request_update: if a parameter change has been marked, update underlying
* storage.
* @get_bytes_per_datum:get current bytes per datum
@@ -43,6 +45,7 @@ struct iio_buffer_access_funcs {
int (*read_first_n)(struct iio_buffer *buffer,
size_t n,
char __user *buf);
+ bool (*data_available)(struct iio_buffer *buffer);
int (*request_update)(struct iio_buffer *buffer);
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
index 5dab2c4..8bbd7bc 100644
--- a/include/linux/iio/events.h
+++ b/include/linux/iio/events.h
@@ -46,10 +46,6 @@ struct iio_event_data {
((u16)chan))
-#define IIO_EV_DIR_MAX 4
-#define IIO_EV_BIT(type, direction) \
- (1 << (type*IIO_EV_DIR_MAX + direction))
-
/**
* IIO_MOD_EVENT_CODE() - create event identifier for modified channels
* @chan_type: Type of the channel. Should be one of enum iio_chan_type.
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 256a90a..75a8a20 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -185,7 +185,6 @@ struct iio_event_spec {
* by all channels of the same direction.
* @info_mask_shared_by_all: What information is to be exported that is shared
* by all channels.
- * @event_mask: What events can this channel produce.
* @event_spec: Array of events which should be registered for this
* channel.
* @num_event_specs: Size of the event_spec array.
@@ -226,7 +225,6 @@ struct iio_chan_spec {
long info_mask_shared_by_type;
long info_mask_shared_by_dir;
long info_mask_shared_by_all;
- long event_mask;
const struct iio_event_spec *event_spec;
unsigned int num_event_specs;
const struct iio_chan_spec_ext_info *ext_info;
@@ -307,16 +305,8 @@ struct iio_dev;
* returns IIO_VAL_INT_PLUS_MICRO.
* @read_event_config: find out if the event is enabled.
* @write_event_config: set if the event is enabled.
- * @read_event_value: read a value associated with the event. Meaning
- * is event dependant. event_code specifies which event.
- * @write_event_value: write the value associated with the event.
- * Meaning is event dependent.
- * @read_event_config_new: find out if the event is enabled. New style interface.
- * @write_event_config_new: set if the event is enabled. New style interface.
- * @read_event_value_new: read a configuration value associated with the event.
- * New style interface.
- * @write_event_value_new: write a configuration value for the event. New style
- * interface.
+ * @read_event_value: read a configuration value associated with the event.
+ * @write_event_value: write a configuration value for the event.
* @validate_trigger: function to validate the trigger when the
* current trigger gets changed.
* @update_scan_mode: function to configure device and scan buffer when
@@ -345,37 +335,23 @@ struct iio_info {
long mask);
int (*read_event_config)(struct iio_dev *indio_dev,
- u64 event_code);
-
- int (*write_event_config)(struct iio_dev *indio_dev,
- u64 event_code,
- int state);
-
- int (*read_event_value)(struct iio_dev *indio_dev,
- u64 event_code,
- int *val);
- int (*write_event_value)(struct iio_dev *indio_dev,
- u64 event_code,
- int val);
-
- int (*read_event_config_new)(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir);
- int (*write_event_config_new)(struct iio_dev *indio_dev,
+ int (*write_event_config)(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
int state);
- int (*read_event_value_new)(struct iio_dev *indio_dev,
+ int (*read_event_value)(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
enum iio_event_info info, int *val, int *val2);
- int (*write_event_value_new)(struct iio_dev *indio_dev,
+ int (*write_event_value)(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
@@ -490,32 +466,12 @@ struct iio_dev {
#endif
};
-/**
- * iio_find_channel_from_si() - get channel from its scan index
- * @indio_dev: device
- * @si: scan index to match
- */
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
-
-/**
- * iio_device_register() - register a device with the IIO subsystem
- * @indio_dev: Device structure filled by the device driver
- **/
int iio_device_register(struct iio_dev *indio_dev);
-
-/**
- * iio_device_unregister() - unregister a device from the IIO subsystem
- * @indio_dev: Device structure representing the device.
- **/
void iio_device_unregister(struct iio_dev *indio_dev);
-
-/**
- * iio_push_event() - try to add event to the list for userspace reading
- * @indio_dev: IIO device structure
- * @ev_code: What event
- * @timestamp: When the event occurred
- **/
+int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev);
+void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
extern struct bus_type iio_bus_type;
@@ -579,10 +535,6 @@ static inline void *iio_device_get_drvdata(struct iio_dev *indio_dev)
/* Can we make this smaller? */
#define IIO_ALIGN L1_CACHE_BYTES
-/**
- * iio_device_alloc() - allocate an iio_dev from a driver
- * @sizeof_priv: Space to allocate for private structure.
- **/
struct iio_dev *iio_device_alloc(int sizeof_priv);
static inline void *iio_priv(const struct iio_dev *indio_dev)
@@ -596,64 +548,11 @@ static inline struct iio_dev *iio_priv_to_dev(void *priv)
ALIGN(sizeof(struct iio_dev), IIO_ALIGN));
}
-/**
- * iio_device_free() - free an iio_dev from a driver
- * @indio_dev: the iio_dev associated with the device
- **/
void iio_device_free(struct iio_dev *indio_dev);
-
-/**
- * devm_iio_device_alloc - Resource-managed iio_device_alloc()
- * @dev: Device to allocate iio_dev for
- * @sizeof_priv: Space to allocate for private structure.
- *
- * Managed iio_device_alloc. iio_dev allocated with this function is
- * automatically freed on driver detach.
- *
- * If an iio_dev allocated with this function needs to be freed separately,
- * devm_iio_device_free() must be used.
- *
- * RETURNS:
- * Pointer to allocated iio_dev on success, NULL on failure.
- */
struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv);
-
-/**
- * devm_iio_device_free - Resource-managed iio_device_free()
- * @dev: Device this iio_dev belongs to
- * @indio_dev: the iio_dev associated with the device
- *
- * Free iio_dev allocated with devm_iio_device_alloc().
- */
void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev);
-
-/**
- * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
- * @dev: Device to allocate iio_trigger for
- * @fmt: trigger name format. If it includes format
- * specifiers, the additional arguments following
- * format are formatted and inserted in the resulting
- * string replacing their respective specifiers.
- *
- * Managed iio_trigger_alloc. iio_trigger allocated with this function is
- * automatically freed on driver detach.
- *
- * If an iio_trigger allocated with this function needs to be freed separately,
- * devm_iio_trigger_free() must be used.
- *
- * RETURNS:
- * Pointer to allocated iio_trigger on success, NULL on failure.
- */
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
const char *fmt, ...);
-
-/**
- * devm_iio_trigger_free - Resource-managed iio_trigger_free()
- * @dev: Device this iio_dev belongs to
- * @iio_trig: the iio_trigger associated with the device
- *
- * Free iio_trigger allocated with devm_iio_trigger_alloc().
- */
void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig);
/**
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 4ac928e..084d882 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -29,6 +29,7 @@ enum iio_chan_type {
IIO_ALTVOLTAGE,
IIO_CCT,
IIO_PRESSURE,
+ IIO_HUMIDITYRELATIVE,
};
enum iio_modifier {
diff --git a/include/linux/init.h b/include/linux/init.h
index 8e68a64..e168880 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -286,9 +286,11 @@ void __init parse_early_options(char *cmdline);
#define arch_initcall(fn) module_init(fn)
#define subsys_initcall(fn) module_init(fn)
#define fs_initcall(fn) module_init(fn)
+#define rootfs_initcall(fn) module_init(fn)
#define device_initcall(fn) module_init(fn)
#define late_initcall(fn) module_init(fn)
+#define console_initcall(fn) module_init(fn)
#define security_initcall(fn) module_init(fn)
/* Each module must use one module_init(). */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index b0ed422..f0e5238 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -11,6 +11,7 @@
#include <linux/user_namespace.h>
#include <linux/securebits.h>
#include <linux/seqlock.h>
+#include <linux/rbtree.h>
#include <net/net_namespace.h>
#include <linux/sched/rt.h>
@@ -154,6 +155,14 @@ extern struct task_group root_task_group;
#define INIT_TASK_COMM "swapper"
+#ifdef CONFIG_RT_MUTEXES
+# define INIT_RT_MUTEXES(tsk) \
+ .pi_waiters = RB_ROOT, \
+ .pi_waiters_leftmost = NULL,
+#else
+# define INIT_RT_MUTEXES(tsk)
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -221,6 +230,7 @@ extern struct task_group root_task_group;
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
INIT_CPUSET_SEQ(tsk) \
+ INIT_RT_MUTEXES(tsk) \
INIT_VTIME(tsk) \
}
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 56fb646..26e2661 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -152,6 +152,14 @@ static inline int irq_balancing_disabled(unsigned int irq)
return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
}
+static inline int irq_is_percpu(unsigned int irq)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ return desc->status_use_accessors & IRQ_PER_CPU;
+}
+
static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
{
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 3999977..5c1dfb2 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -81,18 +81,21 @@ struct module;
#include <linux/atomic.h>
#ifdef HAVE_JUMP_LABEL
-#define JUMP_LABEL_TRUE_BRANCH 1UL
+#define JUMP_LABEL_TYPE_FALSE_BRANCH 0UL
+#define JUMP_LABEL_TYPE_TRUE_BRANCH 1UL
+#define JUMP_LABEL_TYPE_MASK 1UL
static
inline struct jump_entry *jump_label_get_entries(struct static_key *key)
{
return (struct jump_entry *)((unsigned long)key->entries
- & ~JUMP_LABEL_TRUE_BRANCH);
+ & ~JUMP_LABEL_TYPE_MASK);
}
static inline bool jump_label_get_branch_default(struct static_key *key)
{
- if ((unsigned long)key->entries & JUMP_LABEL_TRUE_BRANCH)
+ if (((unsigned long)key->entries & JUMP_LABEL_TYPE_MASK) ==
+ JUMP_LABEL_TYPE_TRUE_BRANCH)
return true;
return false;
}
@@ -122,10 +125,12 @@ extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
-#define STATIC_KEY_INIT_TRUE ((struct static_key) \
- { .enabled = ATOMIC_INIT(1), .entries = (void *)1 })
-#define STATIC_KEY_INIT_FALSE ((struct static_key) \
- { .enabled = ATOMIC_INIT(0), .entries = (void *)0 })
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+ { .enabled = ATOMIC_INIT(1), \
+ .entries = (void *)JUMP_LABEL_TYPE_TRUE_BRANCH })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+ { .enabled = ATOMIC_INIT(0), \
+ .entries = (void *)JUMP_LABEL_TYPE_FALSE_BRANCH })
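The only change above is how the default branch direction is encoded in the low bit of key->entries; usage is unchanged. A hedged sketch, assuming the static_key_false() helper declared elsewhere in this header and using an invented key and feature name:

#include <linux/jump_label.h>
#include <linux/printk.h>
#include <linux/types.h>

static struct static_key example_key = STATIC_KEY_INIT_FALSE;

static void example_hot_path(void)
{
	/* Compiled as a straight-line no-op until the key is enabled. */
	if (static_key_false(&example_key))
		pr_debug("rarely enabled feature is active\n");
}

static void example_set_feature(bool on)
{
	if (on)
		static_key_slow_inc(&example_key);
	else
		static_key_slow_dec(&example_key);
}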
#else /* !HAVE_JUMP_LABEL */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index ecb8754..2aa3d4b0 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -394,6 +394,15 @@ extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern int sysctl_panic_on_stackoverflow;
+/*
+ * Only to be used by arch init code. If the user overrode the default
+ * CONFIG_PANIC_TIMEOUT, honor it.
+ */
+static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
+{
+ if (panic_timeout == arch_default_timeout)
+ panic_timeout = timeout;
+}
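A minimal sketch of the intended call site; ARCH_EXAMPLE_PANIC_TIMEOUT is a made-up per-architecture value used only for illustration:

#include <linux/kernel.h>
#include <linux/init.h>

#define ARCH_EXAMPLE_PANIC_TIMEOUT	30	/* seconds; illustrative only */

static void __init example_arch_setup(void)
{
	/*
	 * No-op if the user already changed panic_timeout away from the
	 * generic CONFIG_PANIC_TIMEOUT default.
	 */
	set_arch_panic_timeout(ARCH_EXAMPLE_PANIC_TIMEOUT, CONFIG_PANIC_TIMEOUT);
}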
extern const char *print_tainted(void);
enum lockdep_ok {
LOCKDEP_STILL_OK,
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
new file mode 100644
index 0000000..5be9f02
--- /dev/null
+++ b/include/linux/kernfs.h
@@ -0,0 +1,376 @@
+/*
+ * kernfs.h - pseudo filesystem decoupled from vfs locking
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef __LINUX_KERNFS_H
+#define __LINUX_KERNFS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/idr.h>
+#include <linux/lockdep.h>
+#include <linux/rbtree.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+
+struct file;
+struct dentry;
+struct iattr;
+struct seq_file;
+struct vm_area_struct;
+struct super_block;
+struct file_system_type;
+
+struct kernfs_open_node;
+struct kernfs_iattrs;
+
+enum kernfs_node_type {
+ KERNFS_DIR = 0x0001,
+ KERNFS_FILE = 0x0002,
+ KERNFS_LINK = 0x0004,
+};
+
+#define KERNFS_TYPE_MASK 0x000f
+#define KERNFS_ACTIVE_REF KERNFS_FILE
+#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK
+
+enum kernfs_node_flag {
+ KERNFS_REMOVED = 0x0010,
+ KERNFS_NS = 0x0020,
+ KERNFS_HAS_SEQ_SHOW = 0x0040,
+ KERNFS_HAS_MMAP = 0x0080,
+ KERNFS_LOCKDEP = 0x0100,
+ KERNFS_STATIC_NAME = 0x0200,
+};
+
+/* type-specific structures for kernfs_node union members */
+struct kernfs_elem_dir {
+ unsigned long subdirs;
+ /* children rbtree starts here and goes through kn->rb */
+ struct rb_root children;
+
+ /*
+ * The kernfs hierarchy this directory belongs to. This fits
+ * better directly in kernfs_node but is here to save space.
+ */
+ struct kernfs_root *root;
+};
+
+struct kernfs_elem_symlink {
+ struct kernfs_node *target_kn;
+};
+
+struct kernfs_elem_attr {
+ const struct kernfs_ops *ops;
+ struct kernfs_open_node *open;
+ loff_t size;
+};
+
+/*
+ * kernfs_node - the building block of the kernfs hierarchy. Each and every
+ * kernfs node is represented by a single kernfs_node. Most fields are
+ * private to kernfs and shouldn't be accessed directly by kernfs users.
+ *
+ * As long as an s_count reference is held, the kernfs_node itself is
+ * accessible. Dereferencing elem or any other outer entity requires an
+ * active reference.
+ */
+struct kernfs_node {
+ atomic_t count;
+ atomic_t active;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+ /* the following two fields are published */
+ struct kernfs_node *parent;
+ const char *name;
+
+ struct rb_node rb;
+
+ union {
+ struct completion *completion;
+ struct kernfs_node *removed_list;
+ } u;
+
+ const void *ns; /* namespace tag */
+ unsigned int hash; /* ns + name hash */
+ union {
+ struct kernfs_elem_dir dir;
+ struct kernfs_elem_symlink symlink;
+ struct kernfs_elem_attr attr;
+ };
+
+ void *priv;
+
+ unsigned short flags;
+ umode_t mode;
+ unsigned int ino;
+ struct kernfs_iattrs *iattr;
+};
+
+/*
+ * kernfs_dir_ops may be specified on kernfs_create_root() to support
+ * directory manipulation syscalls. These optional callbacks are invoked
+ * on the matching syscalls and can perform any kernfs operations which
+ * don't necessarily have to be the exact operation requested.
+ */
+struct kernfs_dir_ops {
+ int (*mkdir)(struct kernfs_node *parent, const char *name,
+ umode_t mode);
+ int (*rmdir)(struct kernfs_node *kn);
+ int (*rename)(struct kernfs_node *kn, struct kernfs_node *new_parent,
+ const char *new_name);
+};
+
+struct kernfs_root {
+ /* published fields */
+ struct kernfs_node *kn;
+
+ /* private fields, do not use outside kernfs proper */
+ struct ida ino_ida;
+ struct kernfs_dir_ops *dir_ops;
+};
+
+struct kernfs_open_file {
+ /* published fields */
+ struct kernfs_node *kn;
+ struct file *file;
+
+ /* private fields, do not use outside kernfs proper */
+ struct mutex mutex;
+ int event;
+ struct list_head list;
+
+ bool mmapped;
+ const struct vm_operations_struct *vm_ops;
+};
+
+struct kernfs_ops {
+ /*
+ * Read is handled by either seq_file or raw_read().
+ *
+ * If seq_show() is present, seq_file path is active. Other seq
+ * operations are optional and if not implemented, the behavior is
+ * equivalent to single_open(). @sf->private points to the
+ * associated kernfs_open_file.
+ *
+ * read() is bounced through a kernel buffer and a read larger than
+ * PAGE_SIZE results in a partial operation of PAGE_SIZE.
+ */
+ int (*seq_show)(struct seq_file *sf, void *v);
+
+ void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
+ void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
+ void (*seq_stop)(struct seq_file *sf, void *v);
+
+ ssize_t (*read)(struct kernfs_open_file *of, char *buf, size_t bytes,
+ loff_t off);
+
+ /*
+ * write() is bounced through a kernel buffer and a write larger than
+ * PAGE_SIZE results in a partial operation of PAGE_SIZE.
+ */
+ ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes,
+ loff_t off);
+
+ int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lock_class_key lockdep_key;
+#endif
+};
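Since reads are normally served through seq_file as described in the comment above, a minimal read-only kernfs_ops needs little more than seq_show(). A hedged sketch with invented names, relying only on the declarations in this header plus <linux/seq_file.h>:

#include <linux/kernfs.h>
#include <linux/seq_file.h>

/* Sketch only: a read-only attribute whose contents come from kn->priv. */
static int example_seq_show(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;	/* set up by kernfs */

	seq_printf(sf, "%s\n", (const char *)of->kn->priv);
	return 0;
}

static const struct kernfs_ops example_ro_ops = {
	.seq_show = example_seq_show,
};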
+
+#ifdef CONFIG_SYSFS
+
+static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
+{
+ return kn->flags & KERNFS_TYPE_MASK;
+}
+
+/**
+ * kernfs_enable_ns - enable namespace under a directory
+ * @kn: directory of interest, should be empty
+ *
+ * This is to be called right after @kn is created to enable namespace
+ * under it. All children of @kn must have non-NULL namespace tags and
+ * only the ones which match the super_block's tag will be visible.
+ */
+static inline void kernfs_enable_ns(struct kernfs_node *kn)
+{
+ WARN_ON_ONCE(kernfs_type(kn) != KERNFS_DIR);
+ WARN_ON_ONCE(!RB_EMPTY_ROOT(&kn->dir.children));
+ kn->flags |= KERNFS_NS;
+}
+
+/**
+ * kernfs_ns_enabled - test whether namespace is enabled
+ * @kn: the node to test
+ *
+ * Test whether namespace filtering is enabled for the children of @kn.
+ */
+static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
+{
+ return kn->flags & KERNFS_NS;
+}
+
+struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
+ const char *name, const void *ns);
+void kernfs_get(struct kernfs_node *kn);
+void kernfs_put(struct kernfs_node *kn);
+
+struct kernfs_root *kernfs_create_root(struct kernfs_dir_ops *kdops,
+ void *priv);
+void kernfs_destroy_root(struct kernfs_root *root);
+
+struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
+ const char *name, umode_t mode,
+ void *priv, const void *ns);
+struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
+ const char *name,
+ umode_t mode, loff_t size,
+ const struct kernfs_ops *ops,
+ void *priv, const void *ns,
+ bool name_is_static,
+ struct lock_class_key *key);
+struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
+ const char *name,
+ struct kernfs_node *target);
+void kernfs_remove(struct kernfs_node *kn);
+int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
+ const void *ns);
+int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
+ const char *new_name, const void *new_ns);
+int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
+void kernfs_notify(struct kernfs_node *kn);
+
+const void *kernfs_super_ns(struct super_block *sb);
+struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
+ struct kernfs_root *root, const void *ns);
+void kernfs_kill_sb(struct super_block *sb);
+
+void kernfs_init(void);
+
+#else /* CONFIG_SYSFS */
+
+static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
+{ return 0; } /* whatever */
+
+static inline void kernfs_enable_ns(struct kernfs_node *kn) { }
+
+static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
+{ return false; }
+
+static inline struct kernfs_node *
+kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name,
+ const void *ns)
+{ return NULL; }
+
+static inline void kernfs_get(struct kernfs_node *kn) { }
+static inline void kernfs_put(struct kernfs_node *kn) { }
+
+static inline struct kernfs_root *
+kernfs_create_root(struct kernfs_dir_ops *kdops, void *priv)
+{ return ERR_PTR(-ENOSYS); }
+
+static inline void kernfs_destroy_root(struct kernfs_root *root) { }
+
+static inline struct kernfs_node *
+kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
+ umode_t mode, void *priv, const void *ns)
+{ return ERR_PTR(-ENOSYS); }
+
+static inline struct kernfs_node *
+__kernfs_create_file(struct kernfs_node *parent, const char *name,
+ umode_t mode, loff_t size, const struct kernfs_ops *ops,
+ void *priv, const void *ns, bool name_is_static,
+ struct lock_class_key *key)
+{ return ERR_PTR(-ENOSYS); }
+
+static inline struct kernfs_node *
+kernfs_create_link(struct kernfs_node *parent, const char *name,
+ struct kernfs_node *target)
+{ return ERR_PTR(-ENOSYS); }
+
+static inline void kernfs_remove(struct kernfs_node *kn) { }
+
+static inline int kernfs_remove_by_name_ns(struct kernfs_node *kn,
+ const char *name, const void *ns)
+{ return -ENOSYS; }
+
+static inline int kernfs_rename_ns(struct kernfs_node *kn,
+ struct kernfs_node *new_parent,
+ const char *new_name, const void *new_ns)
+{ return -ENOSYS; }
+
+static inline int kernfs_setattr(struct kernfs_node *kn,
+ const struct iattr *iattr)
+{ return -ENOSYS; }
+
+static inline void kernfs_notify(struct kernfs_node *kn) { }
+
+static inline const void *kernfs_super_ns(struct super_block *sb)
+{ return NULL; }
+
+static inline struct dentry *
+kernfs_mount_ns(struct file_system_type *fs_type, int flags,
+ struct kernfs_root *root, const void *ns)
+{ return ERR_PTR(-ENOSYS); }
+
+static inline void kernfs_kill_sb(struct super_block *sb) { }
+
+static inline void kernfs_init(void) { }
+
+#endif /* CONFIG_SYSFS */
+
+static inline struct kernfs_node *
+kernfs_find_and_get(struct kernfs_node *kn, const char *name)
+{
+ return kernfs_find_and_get_ns(kn, name, NULL);
+}
+
+static inline struct kernfs_node *
+kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
+ void *priv)
+{
+ return kernfs_create_dir_ns(parent, name, mode, priv, NULL);
+}
+
+static inline struct kernfs_node *
+kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
+ umode_t mode, loff_t size, const struct kernfs_ops *ops,
+ void *priv, const void *ns)
+{
+ struct lock_class_key *key = NULL;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ key = (struct lock_class_key *)&ops->lockdep_key;
+#endif
+ return __kernfs_create_file(parent, name, mode, size, ops, priv, ns,
+ false, key);
+}
+
+static inline struct kernfs_node *
+kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode,
+ loff_t size, const struct kernfs_ops *ops, void *priv)
+{
+ return kernfs_create_file_ns(parent, name, mode, size, ops, priv, NULL);
+}
+
+static inline int kernfs_remove_by_name(struct kernfs_node *parent,
+ const char *name)
+{
+ return kernfs_remove_by_name_ns(parent, name, NULL);
+}
+
+static inline struct dentry *
+kernfs_mount(struct file_system_type *fs_type, int flags,
+ struct kernfs_root *root)
+{
+ return kernfs_mount_ns(fs_type, flags, root, NULL);
+}
+
+#endif /* __LINUX_KERNFS_H */
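Taken together, the creation helpers above compose as follows. This is a hedged sketch: all names are invented, error handling is abbreviated, the example_ro_ops from the earlier sketch is reused, and mounting would additionally wire kernfs_mount()/kernfs_kill_sb() into a file_system_type.

#include <linux/kernfs.h>
#include <linux/init.h>
#include <linux/err.h>

static struct kernfs_root *example_root;

static int __init example_kernfs_init(void)
{
	struct kernfs_node *dir, *file;

	/* NULL kernfs_dir_ops: mkdir(2)/rmdir(2)/rename(2) stay unsupported. */
	example_root = kernfs_create_root(NULL, NULL);
	if (IS_ERR(example_root))
		return PTR_ERR(example_root);

	dir = kernfs_create_dir(example_root->kn, "status", 0755, NULL);
	if (IS_ERR(dir)) {
		kernfs_destroy_root(example_root);
		return PTR_ERR(dir);
	}

	/* Exposes a read-only "state" file backed by example_ro_ops. */
	file = kernfs_create_file(dir, "state", 0444, 0, &example_ro_ops,
				  (void *)"idle");
	if (IS_ERR(file)) {
		kernfs_destroy_root(example_root);
		return PTR_ERR(file);
	}
	return 0;
}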
diff --git a/include/linux/kobj_completion.h b/include/linux/kobj_completion.h
deleted file mode 100644
index a428f64..0000000
--- a/include/linux/kobj_completion.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _KOBJ_COMPLETION_H_
-#define _KOBJ_COMPLETION_H_
-
-#include <linux/kobject.h>
-#include <linux/completion.h>
-
-struct kobj_completion {
- struct kobject kc_kobj;
- struct completion kc_unregister;
-};
-
-#define kobj_to_kobj_completion(kobj) \
- container_of(kobj, struct kobj_completion, kc_kobj)
-
-void kobj_completion_init(struct kobj_completion *kc, struct kobj_type *ktype);
-void kobj_completion_release(struct kobject *kobj);
-void kobj_completion_del_and_wait(struct kobj_completion *kc);
-#endif /* _KOBJ_COMPLETION_H_ */
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index e7ba650..926afb6 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -64,7 +64,7 @@ struct kobject {
struct kobject *parent;
struct kset *kset;
struct kobj_type *ktype;
- struct sysfs_dirent *sd;
+ struct kernfs_node *sd;
struct kref kref;
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
struct delayed_work release;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0e23c26..9b50337 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -418,6 +418,7 @@ enum {
ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
+ ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 9a6bbf7..bb7384e 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -35,6 +35,7 @@ struct memory_block {
};
int arch_get_memory_phys_device(unsigned long start_pfn);
+unsigned long __weak memory_block_size_bytes(void);
/* These states are exposed to userspace as text strings in sysfs */
#define MEM_ONLINE (1<<0) /* exposed to userspace */
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index cb49417..b319765 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -2196,6 +2196,15 @@
/*
* R677 (0x2A5) - Mic Detect 3
*/
+#define ARIZONA_MICD_LVL_0 0x0004 /* MICD_LVL - [2] */
+#define ARIZONA_MICD_LVL_1 0x0008 /* MICD_LVL - [3] */
+#define ARIZONA_MICD_LVL_2 0x0010 /* MICD_LVL - [4] */
+#define ARIZONA_MICD_LVL_3 0x0020 /* MICD_LVL - [5] */
+#define ARIZONA_MICD_LVL_4 0x0040 /* MICD_LVL - [6] */
+#define ARIZONA_MICD_LVL_5 0x0080 /* MICD_LVL - [7] */
+#define ARIZONA_MICD_LVL_6 0x0100 /* MICD_LVL - [8] */
+#define ARIZONA_MICD_LVL_7 0x0200 /* MICD_LVL - [9] */
+#define ARIZONA_MICD_LVL_8 0x0400 /* MICD_LVL - [10] */
#define ARIZONA_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */
#define ARIZONA_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */
#define ARIZONA_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index b6d36b3..866e355 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -212,6 +212,7 @@
#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU1_DI1 (0x1 << 4)
#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU2_DI0 (0x2 << 4)
#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU2_DI1 (0x3 << 4)
+#define IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT 2
#define IMX6Q_GPR3_HDMI_MUX_CTL_MASK (0x3 << 2)
#define IMX6Q_GPR3_HDMI_MUX_CTL_IPU1_DI0 (0x0 << 2)
#define IMX6Q_GPR3_HDMI_MUX_CTL_IPU1_DI1 (0x1 << 2)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d9a550b..ce2a1f5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -769,7 +769,8 @@ struct netdev_phys_port_id {
* (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
* Required can not be NULL.
*
- * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
+ * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
+ * void *accel_priv);
* Called to decide which queue to use when device supports multiple
* transmit queues.
*
@@ -990,7 +991,8 @@ struct net_device_ops {
netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
struct net_device *dev);
u16 (*ndo_select_queue)(struct net_device *dev,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ void *accel_priv);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
void (*ndo_set_rx_mode)(struct net_device *dev);
@@ -1529,7 +1531,8 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
}
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ void *accel_priv);
u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
/*
@@ -1819,6 +1822,7 @@ int dev_close(struct net_device *dev);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
@@ -1912,6 +1916,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
return dev->header_ops->parse(skb, haddr);
}
+static inline int dev_rebuild_header(struct sk_buff *skb)
+{
+ const struct net_device *dev = skb->dev;
+
+ if (!dev->header_ops || !dev->header_ops->rebuild)
+ return 0;
+ return dev->header_ops->rebuild(skb);
+}
+
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
@@ -2417,7 +2430,7 @@ int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_port_id *ppid);
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
- struct netdev_queue *txq, void *accel_priv);
+ struct netdev_queue *txq);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
extern int netdev_budget;
@@ -3008,6 +3021,19 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
dev->gso_max_size = size;
}
+static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
+ int pulled_hlen, u16 mac_offset,
+ int mac_len)
+{
+ skb->protocol = protocol;
+ skb->encapsulation = 1;
+ skb_push(skb, pulled_hlen);
+ skb_reset_transport_header(skb);
+ skb->mac_header = mac_offset;
+ skb->network_header = skb->mac_header + mac_len;
+ skb->mac_len = mac_len;
+}
+
static inline bool netif_is_macvlan(struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN;
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 57e890a..a5fc7d0 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -69,6 +69,7 @@
__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
+ extern __PCPU_ATTRS(sec) __typeof__(type) name; \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
__typeof__(type) name
#else
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2e069d1..e56b07f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -320,6 +320,7 @@ struct perf_event {
struct list_head migrate_entry;
struct hlist_node hlist_entry;
+ struct list_head active_entry;
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h
index c167e44..0e3cce1 100644
--- a/include/linux/platform_data/hwmon-s3c.h
+++ b/include/linux/platform_data/hwmon-s3c.h
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s3c/include/plat/hwmon.h
- *
+/*
* Copyright 2005 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
@@ -11,8 +10,8 @@
* published by the Free Software Foundation.
*/
-#ifndef __ASM_ARCH_ADC_HWMON_H
-#define __ASM_ARCH_ADC_HWMON_H __FILE__
+#ifndef __HWMON_S3C_H__
+#define __HWMON_S3C_H__
/**
* s3c_hwmon_chcfg - channel configuration
@@ -47,5 +46,4 @@ struct s3c_hwmon_pdata {
*/
extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd);
-#endif /* __ASM_ARCH_ADC_HWMON_H */
-
+#endif /* __HWMON_S3C_H__ */
diff --git a/include/linux/platform_data/max197.h b/include/linux/platform_data/max197.h
index e2a41dd..8da8f94 100644
--- a/include/linux/platform_data/max197.h
+++ b/include/linux/platform_data/max197.h
@@ -11,6 +11,9 @@
* For further information, see the Documentation/hwmon/max197 file.
*/
+#ifndef _PDATA_MAX197_H
+#define _PDATA_MAX197_H
+
/**
* struct max197_platform_data - MAX197 connectivity info
* @convert: Function used to start a conversion with control byte ctrl.
@@ -19,3 +22,5 @@
struct max197_platform_data {
int (*convert)(u8 ctrl);
};
+
+#endif /* _PDATA_MAX197_H */
diff --git a/include/linux/platform_data/sht15.h b/include/linux/platform_data/sht15.h
index 33e0fd2..12289c1 100644
--- a/include/linux/platform_data/sht15.h
+++ b/include/linux/platform_data/sht15.h
@@ -12,6 +12,9 @@
* For further information, see the Documentation/hwmon/sht15 file.
*/
+#ifndef _PDATA_SHT15_H
+#define _PDATA_SHT15_H
+
/**
* struct sht15_platform_data - sht15 connectivity info
* @gpio_data: no. of gpio to which bidirectional data line is
@@ -31,3 +34,5 @@ struct sht15_platform_data {
bool no_otp_reload;
bool low_resolution;
};
+
+#endif /* _PDATA_SHT15_H */
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index a3d9dc8..59749fc 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -64,7 +64,11 @@ do { \
} while (0)
#else
-#define preempt_enable() preempt_enable_no_resched()
+#define preempt_enable() \
+do { \
+ barrier(); \
+ preempt_count_dec(); \
+} while (0)
#define preempt_check_resched() do { } while (0)
#endif
@@ -93,7 +97,11 @@ do { \
__preempt_schedule_context(); \
} while (0)
#else
-#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
+#define preempt_enable_notrace() \
+do { \
+ barrier(); \
+ __preempt_count_dec(); \
+} while (0)
#endif
#else /* !CONFIG_PREEMPT_COUNT */
@@ -116,6 +124,31 @@ do { \
#endif /* CONFIG_PREEMPT_COUNT */
+#ifdef MODULE
+/*
+ * Modules have no business playing preemption tricks.
+ */
+#undef sched_preempt_enable_no_resched
+#undef preempt_enable_no_resched
+#undef preempt_enable_no_resched_notrace
+#undef preempt_check_resched
+#endif
+
+#ifdef CONFIG_PREEMPT
+#define preempt_set_need_resched() \
+do { \
+ set_preempt_need_resched(); \
+} while (0)
+#define preempt_fold_need_resched() \
+do { \
+ if (tif_need_resched()) \
+ set_preempt_need_resched(); \
+} while (0)
+#else
+#define preempt_set_need_resched() do { } while (0)
+#define preempt_fold_need_resched() do { } while (0)
+#endif
+
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
index d169820..dbeec4d 100644
--- a/include/linux/preempt_mask.h
+++ b/include/linux/preempt_mask.h
@@ -2,7 +2,6 @@
#define LINUX_PREEMPT_MASK_H
#include <linux/preempt.h>
-#include <asm/hardirq.h>
/*
* We put the hardirq and softirq counter into the preemption
@@ -79,6 +78,21 @@
#endif
/*
+ * The preempt_count offset needed for things like:
+ *
+ * spin_lock_bh()
+ *
+ * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
+ * softirqs, such that unlock sequences of:
+ *
+ * spin_unlock();
+ * local_bh_enable();
+ *
+ * Work as expected.
+ */
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
+
+/*
* Are we running in atomic context? WARNING: this macro cannot
* always detect atomic context; in particular, it cannot know about
* held spinlocks in non-preemptible kernels. Thus it should not be
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 45a0a9e..dbaf990 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -55,8 +55,8 @@ static inline void __list_add_rcu(struct list_head *new,
next->prev = new;
}
#else
-extern void __list_add_rcu(struct list_head *new,
- struct list_head *prev, struct list_head *next);
+void __list_add_rcu(struct list_head *new,
+ struct list_head *prev, struct list_head *next);
#endif
/**
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 39cbb88..3e355c6 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -50,13 +50,13 @@ extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
-extern void rcutorture_record_test_transition(void);
-extern void rcutorture_record_progress(unsigned long vernum);
-extern void do_trace_rcu_torture_read(const char *rcutorturename,
- struct rcu_head *rhp,
- unsigned long secs,
- unsigned long c_old,
- unsigned long c);
+void rcutorture_record_test_transition(void);
+void rcutorture_record_progress(unsigned long vernum);
+void do_trace_rcu_torture_read(const char *rcutorturename,
+ struct rcu_head *rhp,
+ unsigned long secs,
+ unsigned long c_old,
+ unsigned long c);
#else
static inline void rcutorture_record_test_transition(void)
{
@@ -65,11 +65,11 @@ static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
-extern void do_trace_rcu_torture_read(const char *rcutorturename,
- struct rcu_head *rhp,
- unsigned long secs,
- unsigned long c_old,
- unsigned long c);
+void do_trace_rcu_torture_read(const char *rcutorturename,
+ struct rcu_head *rhp,
+ unsigned long secs,
+ unsigned long c_old,
+ unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
do { } while (0)
@@ -118,8 +118,8 @@ extern void do_trace_rcu_torture_read(const char *rcutorturename,
* if CPU A and CPU B are the same CPU (but again only if the system has
* more than one CPU).
*/
-extern void call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
+void call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
#else /* #ifdef CONFIG_PREEMPT_RCU */
@@ -149,8 +149,8 @@ extern void call_rcu(struct rcu_head *head,
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
*/
-extern void call_rcu_bh(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
+void call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
@@ -171,16 +171,16 @@ extern void call_rcu_bh(struct rcu_head *head,
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
*/
-extern void call_rcu_sched(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu));
+void call_rcu_sched(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu));
-extern void synchronize_sched(void);
+void synchronize_sched(void);
#ifdef CONFIG_PREEMPT_RCU
-extern void __rcu_read_lock(void);
-extern void __rcu_read_unlock(void);
-extern void rcu_read_unlock_special(struct task_struct *t);
+void __rcu_read_lock(void);
+void __rcu_read_unlock(void);
+void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);
/*
@@ -216,19 +216,19 @@ static inline int rcu_preempt_depth(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/* Internal to kernel */
-extern void rcu_init(void);
-extern void rcu_sched_qs(int cpu);
-extern void rcu_bh_qs(int cpu);
-extern void rcu_check_callbacks(int cpu, int user);
+void rcu_init(void);
+void rcu_sched_qs(int cpu);
+void rcu_bh_qs(int cpu);
+void rcu_check_callbacks(int cpu, int user);
struct notifier_block;
-extern void rcu_idle_enter(void);
-extern void rcu_idle_exit(void);
-extern void rcu_irq_enter(void);
-extern void rcu_irq_exit(void);
+void rcu_idle_enter(void);
+void rcu_idle_exit(void);
+void rcu_irq_enter(void);
+void rcu_irq_exit(void);
#ifdef CONFIG_RCU_USER_QS
-extern void rcu_user_enter(void);
-extern void rcu_user_exit(void);
+void rcu_user_enter(void);
+void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
@@ -262,7 +262,7 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
} while (0)
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
-extern bool __rcu_is_watching(void);
+bool __rcu_is_watching(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
/*
@@ -289,8 +289,8 @@ void wait_rcu_gp(call_rcu_func_t crf);
* initialization.
*/
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-extern void init_rcu_head_on_stack(struct rcu_head *head);
-extern void destroy_rcu_head_on_stack(struct rcu_head *head);
+void init_rcu_head_on_stack(struct rcu_head *head);
+void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
@@ -325,6 +325,7 @@ static inline void rcu_lock_release(struct lockdep_map *map)
extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
+extern struct lockdep_map rcu_callback_map;
extern int debug_lockdep_rcu_enabled(void);
/**
@@ -362,7 +363,7 @@ static inline int rcu_read_lock_held(void)
* rcu_read_lock_bh_held() is defined out of line to avoid #include-file
* hell.
*/
-extern int rcu_read_lock_bh_held(void);
+int rcu_read_lock_bh_held(void);
/**
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
@@ -448,7 +449,7 @@ static inline int rcu_read_lock_sched_held(void)
#ifdef CONFIG_PROVE_RCU
-extern int rcu_my_thread_group_empty(void);
+int rcu_my_thread_group_empty(void);
/**
* rcu_lockdep_assert - emit lockdep splat if specified condition not met
@@ -548,10 +549,48 @@ static inline void rcu_preempt_sleep_check(void)
smp_read_barrier_depends(); \
(_________p1); \
})
-#define __rcu_assign_pointer(p, v, space) \
+
+/**
+ * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
+ * @v: The value to statically initialize with.
+ */
+#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
+
+/**
+ * rcu_assign_pointer() - assign to RCU-protected pointer
+ * @p: pointer to assign to
+ * @v: value to assign (publish)
+ *
+ * Assigns the specified value to the specified RCU-protected
+ * pointer, ensuring that any concurrent RCU readers will see
+ * any prior initialization.
+ *
+ * Inserts memory barriers on architectures that require them
+ * (which is most of them), and also prevents the compiler from
+ * reordering the code that initializes the structure after the pointer
+ * assignment. More importantly, this call documents which pointers
+ * will be dereferenced by RCU read-side code.
+ *
+ * In some special cases, you may use RCU_INIT_POINTER() instead
+ * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
+ * to the fact that it does not constrain either the CPU or the compiler.
+ * That said, using RCU_INIT_POINTER() when you should have used
+ * rcu_assign_pointer() is a very bad thing that results in
+ * impossible-to-diagnose memory corruption. So please be careful.
+ * See the RCU_INIT_POINTER() comment header for details.
+ *
+ * Note that rcu_assign_pointer() evaluates each of its arguments only
+ * once, appearances notwithstanding. One of the "extra" evaluations
+ * is in typeof() and the other is visible only to sparse (__CHECKER__),
+ * neither of which actually executes the argument. As with most cpp
+ * macros, this execute-arguments-only-once property is important, so
+ * please be careful when making changes to rcu_assign_pointer() and the
+ * other macros that it invokes.
+ */
+#define rcu_assign_pointer(p, v) \
do { \
smp_wmb(); \
- (p) = (typeof(*v) __force space *)(v); \
+ ACCESS_ONCE(p) = RCU_INITIALIZER(v); \
} while (0)
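To make the publish/read pairing above concrete, here is a hedged sketch with invented names; it assumes the usual rcu_read_lock()/rcu_dereference() helpers from this header and kmalloc() from <linux/slab.h>, and omits freeing of the replaced object (normally done with kfree_rcu() or call_rcu()):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_conf {
	int threshold;
};

static struct example_conf __rcu *active_conf;

/* Update side: fully initialize the object, then publish it. */
static int example_update(int threshold)
{
	struct example_conf *new = kmalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return -ENOMEM;
	new->threshold = threshold;		/* initialized before publication */
	rcu_assign_pointer(active_conf, new);	/* readers may see it from now on */
	return 0;
}

/* Read side: dereference only inside an RCU read-side critical section. */
static int example_read_threshold(void)
{
	struct example_conf *conf;
	int val = 0;

	rcu_read_lock();
	conf = rcu_dereference(active_conf);
	if (conf)
		val = conf->threshold;
	rcu_read_unlock();
	return val;
}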
@@ -890,32 +929,6 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
}
/**
- * rcu_assign_pointer() - assign to RCU-protected pointer
- * @p: pointer to assign to
- * @v: value to assign (publish)
- *
- * Assigns the specified value to the specified RCU-protected
- * pointer, ensuring that any concurrent RCU readers will see
- * any prior initialization.
- *
- * Inserts memory barriers on architectures that require them
- * (which is most of them), and also prevents the compiler from
- * reordering the code that initializes the structure after the pointer
- * assignment. More importantly, this call documents which pointers
- * will be dereferenced by RCU read-side code.
- *
- * In some special cases, you may use RCU_INIT_POINTER() instead
- * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
- * to the fact that it does not constrain either the CPU or the compiler.
- * That said, using RCU_INIT_POINTER() when you should have used
- * rcu_assign_pointer() is a very bad thing that results in
- * impossible-to-diagnose memory corruption. So please be careful.
- * See the RCU_INIT_POINTER() comment header for details.
- */
-#define rcu_assign_pointer(p, v) \
- __rcu_assign_pointer((p), (v), __rcu)
-
-/**
* RCU_INIT_POINTER() - initialize an RCU protected pointer
*
* Initialize an RCU-protected pointer in special cases where readers
@@ -949,7 +962,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
*/
#define RCU_INIT_POINTER(p, v) \
do { \
- p = (typeof(*v) __force __rcu *)(v); \
+ p = RCU_INITIALIZER(v); \
} while (0)
/**
@@ -958,7 +971,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* GCC-style initialization for an RCU-protected pointer in a structure field.
*/
#define RCU_POINTER_INITIALIZER(p, v) \
- .p = (typeof(*v) __force __rcu *)(v)
+ .p = RCU_INITIALIZER(v)
/*
* Does the specified offset indicate that the corresponding rcu_head
@@ -1005,7 +1018,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
#ifdef CONFIG_RCU_NOCB_CPU
-extern bool rcu_is_nocb_cpu(int cpu);
+bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
@@ -1013,8 +1026,8 @@ static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
/* Only for use by adaptive-ticks code. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
-extern bool rcu_sys_is_idle(void);
-extern void rcu_sysidle_force_exit(void);
+bool rcu_sys_is_idle(void);
+void rcu_sysidle_force_exit(void);
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
static inline bool rcu_sys_is_idle(void)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 09ebcbe..6f01771 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -125,7 +125,7 @@ static inline void exit_rcu(void)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
-extern void rcu_scheduler_starting(void);
+void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 4b9c815..72137ee 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,9 +30,9 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
-extern void rcu_note_context_switch(int cpu);
-extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
-extern void rcu_cpu_stall_reset(void);
+void rcu_note_context_switch(int cpu);
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
+void rcu_cpu_stall_reset(void);
/*
* Note a virtualization-based context switch. This is simply a
@@ -44,9 +44,9 @@ static inline void rcu_virt_note_context_switch(int cpu)
rcu_note_context_switch(cpu);
}
-extern void synchronize_rcu_bh(void);
-extern void synchronize_sched_expedited(void);
-extern void synchronize_rcu_expedited(void);
+void synchronize_rcu_bh(void);
+void synchronize_sched_expedited(void);
+void synchronize_rcu_expedited(void);
void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
@@ -71,25 +71,25 @@ static inline void synchronize_rcu_bh_expedited(void)
synchronize_sched_expedited();
}
-extern void rcu_barrier(void);
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
+void rcu_barrier(void);
+void rcu_barrier_bh(void);
+void rcu_barrier_sched(void);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
-extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
-extern long rcu_batches_completed_sched(void);
+long rcu_batches_completed(void);
+long rcu_batches_completed_bh(void);
+long rcu_batches_completed_sched(void);
-extern void rcu_force_quiescent_state(void);
-extern void rcu_bh_force_quiescent_state(void);
-extern void rcu_sched_force_quiescent_state(void);
+void rcu_force_quiescent_state(void);
+void rcu_bh_force_quiescent_state(void);
+void rcu_sched_force_quiescent_state(void);
-extern void exit_rcu(void);
+void exit_rcu(void);
-extern void rcu_scheduler_starting(void);
+void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
-extern bool rcu_is_watching(void);
+bool rcu_is_watching(void);
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index de17134..3aed8d7 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -13,7 +13,7 @@
#define __LINUX_RT_MUTEX_H
#include <linux/linkage.h>
-#include <linux/plist.h>
+#include <linux/rbtree.h>
#include <linux/spinlock_types.h>
extern int max_lock_depth; /* for sysctl */
@@ -22,12 +22,14 @@ extern int max_lock_depth; /* for sysctl */
* The rt_mutex structure
*
* @wait_lock: spinlock to protect the structure
- * @wait_list: pilist head to enqueue waiters in priority order
+ * @waiters: rbtree root to enqueue waiters in priority order
+ * @waiters_leftmost: top waiter
* @owner: the mutex owner
*/
struct rt_mutex {
raw_spinlock_t wait_lock;
- struct plist_head wait_list;
+ struct rb_root waiters;
+ struct rb_node *waiters_leftmost;
struct task_struct *owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
int save_state;
@@ -66,7 +68,7 @@ struct hrtimer_sleeper;
#define __RT_MUTEX_INITIALIZER(mutexname) \
{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \
+ , .waiters = RB_ROOT \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
@@ -98,12 +100,4 @@ extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock);
-#ifdef CONFIG_RT_MUTEXES
-# define INIT_RT_MUTEXES(tsk) \
- .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters), \
- INIT_RT_MUTEX_DEBUG(tsk)
-#else
-# define INIT_RT_MUTEXES(tsk)
-#endif
-
#endif
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 939428a..8e3e66a 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -24,6 +24,11 @@ extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
#ifdef CONFIG_PROVE_LOCKING
extern int lockdep_rtnl_is_held(void);
+#else
+static inline int lockdep_rtnl_is_held(void)
+{
+ return 1;
+}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
/**
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 9c9f049..5b9b84b 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -172,8 +172,7 @@ static inline void __raw_read_lock_irq(rwlock_t *lock)
static inline void __raw_read_lock_bh(rwlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
@@ -200,8 +199,7 @@ static inline void __raw_write_lock_irq(rwlock_t *lock)
static inline void __raw_write_lock_bh(rwlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
@@ -250,8 +248,7 @@ static inline void __raw_read_unlock_bh(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_read_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
@@ -275,8 +272,7 @@ static inline void __raw_write_unlock_bh(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_write_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
#endif /* __LINUX_RWLOCK_API_SMP_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 53f97eb..ffccdad 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -16,6 +16,7 @@ struct sched_param {
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
+#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
@@ -56,6 +57,70 @@ struct sched_param {
#include <asm/processor.h>
+#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */
+
+/*
+ * Extended scheduling parameters data structure.
+ *
+ * This is needed because the original struct sched_param cannot be
+ * altered without introducing ABI issues with legacy applications
+ * (e.g., in sched_getparam()).
+ *
+ * However, the possibility of specifying more than just a priority for
+ * the tasks may be useful for a wide variety of application fields, e.g.,
+ * multimedia, streaming, automation and control, and many others.
+ *
+ * This variant (sched_attr) is meant to describe a so-called
+ * sporadic time-constrained task. In such a model a task is specified by:
+ * - the activation period or minimum instance inter-arrival time;
+ * - the maximum (or average, depending on the actual scheduling
+ * discipline) computation time of all instances, a.k.a. runtime;
+ * - the deadline (relative to the actual activation time) of each
+ * instance.
+ * Very briefly, a periodic (sporadic) task asks for the execution of
+ * some specific computation --which is typically called an instance--
+ * (at most) every period. Moreover, each instance typically lasts no more
+ * than the runtime and must be completed by time instant t equal to
+ * the instance activation time + the deadline.
+ *
+ * This is reflected by the actual fields of the sched_attr structure:
+ *
+ * @size size of the structure, for fwd/bwd compat.
+ *
+ * @sched_policy task's scheduling policy
+ * @sched_flags for customizing the scheduler behaviour
+ * @sched_nice task's nice value (SCHED_NORMAL/BATCH)
+ * @sched_priority task's static priority (SCHED_FIFO/RR)
+ * @sched_deadline representative of the task's deadline
+ * @sched_runtime representative of the task's runtime
+ * @sched_period representative of the task's period
+ *
+ * Given this task model, there is a multiplicity of scheduling algorithms
+ * and policies that can be used to ensure all the tasks will meet their
+ * timing constraints.
+ *
+ * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
+ * only user of this new interface. More information about the algorithm is
+ * available in the scheduling class file or in Documentation/.
+ */
+struct sched_attr {
+ u32 size;
+
+ u32 sched_policy;
+ u64 sched_flags;
+
+ /* SCHED_NORMAL, SCHED_BATCH */
+ s32 sched_nice;
+
+ /* SCHED_FIFO, SCHED_RR */
+ u32 sched_priority;
+
+ /* SCHED_DEADLINE */
+ u64 sched_runtime;
+ u64 sched_deadline;
+ u64 sched_period;
+};
+
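A hedged sketch of how these fields map onto a deadline task from inside the kernel, using the sched_setattr() prototype added further down in this header; SCHED_DEADLINE and the 10/30/100 ms numbers are illustrative assumptions, with the three time fields expressed in nanoseconds:

static int example_make_deadline(struct task_struct *p)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,	/* assumed policy constant */
		.sched_runtime	=  10 * 1000 * 1000,	/* 10 ms worst-case runtime */
		.sched_deadline	=  30 * 1000 * 1000,	/* 30 ms relative deadline */
		.sched_period	= 100 * 1000 * 1000,	/* one instance per 100 ms */
	};

	return sched_setattr(p, &attr);
}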
struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
@@ -168,7 +233,6 @@ extern char ___assert_task_state[1 - 2*!!(
#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
-#define task_is_dead(task) ((task)->exit_state != 0)
#define task_is_stopped_or_traced(task) \
((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
@@ -1029,6 +1093,51 @@ struct sched_rt_entity {
#endif
};
+struct sched_dl_entity {
+ struct rb_node rb_node;
+
+ /*
+ * Original scheduling parameters. Copied here from sched_attr
+ * during sched_setscheduler2(), they will remain the same until
+ * the next sched_setscheduler2().
+ */
+ u64 dl_runtime; /* maximum runtime for each instance */
+ u64 dl_deadline; /* relative deadline of each instance */
+ u64 dl_period; /* separation of two instances (period) */
+ u64 dl_bw; /* dl_runtime / dl_deadline */
+
+ /*
+ * Actual scheduling parameters. Initialized with the values above,
+ * they are continuously updated during task execution. Note that
+ * the remaining runtime could be < 0 in case we are in overrun.
+ */
+ s64 runtime; /* remaining runtime for this instance */
+ u64 deadline; /* absolute deadline for this instance */
+ unsigned int flags; /* specifying the scheduler behaviour */
+
+ /*
+ * Some bool flags:
+ *
+ * @dl_throttled tells if we exhausted the runtime. If so, the
+ * task has to wait for a replenishment to be performed at the
+ * next firing of dl_timer.
+ *
+ * @dl_new tells if a new instance arrived. If so we must
+ * start executing it with full runtime and reset its absolute
+ * deadline;
+ *
+ * @dl_boosted tells if we are boosted due to DI. If so we are
+ * outside bandwidth enforcement mechanism (but only until we
+ * exit the critical section).
+ */
+ int dl_throttled, dl_new, dl_boosted;
+
+ /*
+ * Bandwidth enforcement timer. Each -deadline task has its
+ * own bandwidth to be enforced, thus we need one timer per task.
+ */
+ struct hrtimer dl_timer;
+};
struct rcu_node;
@@ -1065,6 +1174,7 @@ struct task_struct {
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
+ struct sched_dl_entity dl;
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
@@ -1098,6 +1208,7 @@ struct task_struct {
struct list_head tasks;
#ifdef CONFIG_SMP
struct plist_node pushable_tasks;
+ struct rb_node pushable_dl_tasks;
#endif
struct mm_struct *mm, *active_mm;
@@ -1249,9 +1360,12 @@ struct task_struct {
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
- struct plist_head pi_waiters;
+ struct rb_root pi_waiters;
+ struct rb_node *pi_waiters_leftmost;
/* Deadlock detection and priority inheritance handling */
struct rt_mutex_waiter *pi_blocked_on;
+ /* Top pi_waiters task */
+ struct task_struct *pi_top_task;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
@@ -1880,7 +1994,9 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
* but then during bootup it turns out that sched_clock()
* is reliable after all:
*/
-extern int sched_clock_stable;
+extern int sched_clock_stable(void);
+extern void set_sched_clock_stable(void);
+extern void clear_sched_clock_stable(void);
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
@@ -1959,6 +2075,8 @@ extern int sched_setscheduler(struct task_struct *, int,
const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
const struct sched_param *);
+extern int sched_setattr(struct task_struct *,
+ const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
* is_idle_task - is the specified task an idle task?
@@ -2038,7 +2156,7 @@ extern void wake_up_new_task(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
-extern void sched_fork(unsigned long clone_flags, struct task_struct *p);
+extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
@@ -2627,6 +2745,21 @@ static inline bool __must_check current_clr_polling_and_test(void)
}
#endif
+static inline void current_clr_polling(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
+ * Once the bit is cleared, we'll get IPIs with every new
+ * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
+ * fold.
+ */
+ smp_mb(); /* paired with resched_task() */
+
+ preempt_fold_need_resched();
+}
+
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
new file mode 100644
index 0000000..9d303b8
--- /dev/null
+++ b/include/linux/sched/deadline.h
@@ -0,0 +1,24 @@
+#ifndef _SCHED_DEADLINE_H
+#define _SCHED_DEADLINE_H
+
+/*
+ * SCHED_DEADLINE tasks have negative priorities, reflecting
+ * the fact that any of them has a higher prio than RT and
+ * NORMAL/BATCH tasks.
+ */
+
+#define MAX_DL_PRIO 0
+
+static inline int dl_prio(int prio)
+{
+ if (unlikely(prio < MAX_DL_PRIO))
+ return 1;
+ return 0;
+}
+
+static inline int dl_task(struct task_struct *p)
+{
+ return dl_prio(p->prio);
+}
+
+#endif /* _SCHED_DEADLINE_H */
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 440434d..34e4ebe 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -35,6 +35,7 @@ static inline int rt_task(struct task_struct *p)
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
@@ -45,6 +46,10 @@ static inline int rt_mutex_getprio(struct task_struct *p)
{
return p->normal_prio;
}
+static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
+{
+ return NULL;
+}
# define rt_mutex_adjust_pi(p) do { } while (0)
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 41467f8..31e0193 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -48,7 +48,6 @@ extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;
-extern unsigned int sysctl_numa_balancing_settle_count;
#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_migration_cost;
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index cf87a24..535f158 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -117,15 +117,15 @@ repeat:
}
/**
- * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep
+ * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
* @s: pointer to seqcount_t
* Returns: count to be passed to read_seqcount_retry
*
- * read_seqcount_begin_no_lockdep opens a read critical section of the given
+ * raw_read_seqcount_begin opens a read critical section of the given
* seqcount, but without any lockdep checking. Validity of the critical
* section is tested by checking read_seqcount_retry function.
*/
-static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
+static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
unsigned ret = __read_seqcount_begin(s);
smp_rmb();
@@ -144,7 +144,7 @@ static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
seqcount_lockdep_reader_access(s);
- return read_seqcount_begin_no_lockdep(s);
+ return raw_read_seqcount_begin(s);
}
/**
@@ -206,14 +206,26 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
}
+
+static inline void raw_write_seqcount_begin(seqcount_t *s)
+{
+ s->sequence++;
+ smp_wmb();
+}
+
+static inline void raw_write_seqcount_end(seqcount_t *s)
+{
+ smp_wmb();
+ s->sequence++;
+}
+
/*
* Sequence counter only version assumes that callers are using their
* own mutexing.
*/
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
- s->sequence++;
- smp_wmb();
+ raw_write_seqcount_begin(s);
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}
@@ -225,8 +237,7 @@ static inline void write_seqcount_begin(seqcount_t *s)
static inline void write_seqcount_end(seqcount_t *s)
{
seqcount_release(&s->dep_map, 1, _RET_IP_);
- smp_wmb();
- s->sequence++;
+ raw_write_seqcount_end(s);
}
/**
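The raw_write_seqcount_begin()/raw_write_seqcount_end() pair introduced above bumps the sequence without any lockdep annotation, so the caller must provide its own serialization. A minimal sketch of a writer/reader pair under that assumption; all names below are hypothetical and only illustrate the API shape:

        static seqcount_t stats_seq = SEQCNT_ZERO(stats_seq);
        static u64 stats_value;

        static void stats_update(u64 v)         /* writer, serialized externally */
        {
                raw_write_seqcount_begin(&stats_seq);   /* sequence becomes odd */
                stats_value = v;
                raw_write_seqcount_end(&stats_seq);     /* sequence even again */
        }

        static u64 stats_read(void)             /* lockless reader */
        {
                unsigned int seq;
                u64 v;

                do {
                        seq = raw_read_seqcount_begin(&stats_seq);
                        v = stats_value;
                } while (read_seqcount_retry(&stats_seq, seq));
                return v;
        }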
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 215b5ea..6f69b3f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1638,6 +1638,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
skb->mac_header += offset;
}
+static inline void skb_pop_mac_header(struct sk_buff *skb)
+{
+ skb->mac_header = skb->network_header;
+}
+
static inline void skb_probe_transport_header(struct sk_buff *skb,
const int offset_hint)
{
@@ -2526,6 +2531,10 @@ static inline void sw_tx_timestamp(struct sk_buff *skb)
* Ethernet MAC Drivers should call this function in their hard_xmit()
* function immediately before giving the sk_buff to the MAC hardware.
*
+ * Specifically, one should make absolutely sure that this function is
+ * called before TX completion of this packet can trigger. Otherwise
+ * the packet could potentially already be freed.
+ *
* @skb: A socket buffer.
*/
static inline void skb_tx_timestamp(struct sk_buff *skb)
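A hedged sketch of the ordering the added comment asks for, in a hypothetical driver's ndo_start_xmit; the foo_* names are invented for illustration:

        static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
                                          struct net_device *dev)
        {
                struct foo_priv *priv = netdev_priv(dev);       /* hypothetical */

                /*
                 * Timestamp before the descriptor reaches the hardware;
                 * once the doorbell is rung, TX completion may free the skb.
                 */
                skb_tx_timestamp(skb);

                foo_post_tx_descriptor(priv, skb);              /* hypothetical */
                return NETDEV_TX_OK;
        }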
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 75f3494..3f2867f 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -130,6 +130,16 @@ do { \
#define smp_mb__before_spinlock() smp_wmb()
#endif
+/*
+ * Place this after a lock-acquisition primitive to guarantee that
+ * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
+ * if the UNLOCK and LOCK are executed by the same CPU or if the
+ * UNLOCK and LOCK operate on the same lock variable.
+ */
+#ifndef smp_mb__after_unlock_lock
+#define smp_mb__after_unlock_lock() do { } while (0)
+#endif
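A sketch of the intended call pattern: the helper is placed right after the second lock acquisition so that the UNLOCK of one lock plus the LOCK of the next acts as a full barrier. Lock and data names here are illustrative only:

        spin_lock(&a_lock);
        a_data = 1;
        spin_unlock(&a_lock);

        spin_lock(&b_lock);
        smp_mb__after_unlock_lock();    /* UNLOCK(a) + LOCK(b) => full barrier */
        b_data = 1;
        spin_unlock(&b_lock);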
+
/**
* raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index bdb9993..42dfab8 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -131,8 +131,7 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
@@ -174,20 +173,17 @@ static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
do_raw_spin_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
return 0;
}
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index af1f472..d0d1888 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -24,11 +24,14 @@
* flags straight, to suppress compiler warnings of unused lock
* variables, and to add the proper checker annotations:
*/
+#define ___LOCK(lock) \
+ do { __acquire(lock); (void)(lock); } while (0)
+
#define __LOCK(lock) \
- do { preempt_disable(); __acquire(lock); (void)(lock); } while (0)
+ do { preempt_disable(); ___LOCK(lock); } while (0)
#define __LOCK_BH(lock) \
- do { local_bh_disable(); __LOCK(lock); } while (0)
+ do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
#define __LOCK_IRQ(lock) \
do { local_irq_disable(); __LOCK(lock); } while (0)
@@ -36,12 +39,15 @@
#define __LOCK_IRQSAVE(lock, flags) \
do { local_irq_save(flags); __LOCK(lock); } while (0)
+#define ___UNLOCK(lock) \
+ do { __release(lock); (void)(lock); } while (0)
+
#define __UNLOCK(lock) \
- do { preempt_enable(); __release(lock); (void)(lock); } while (0)
+ do { preempt_enable(); ___UNLOCK(lock); } while (0)
#define __UNLOCK_BH(lock) \
- do { preempt_enable_no_resched(); local_bh_enable(); \
- __release(lock); (void)(lock); } while (0)
+ do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \
+ ___UNLOCK(lock); } while (0)
#define __UNLOCK_IRQ(lock) \
do { local_irq_enable(); __UNLOCK(lock); } while (0)
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 94273bb..40ed9e9 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -38,6 +38,7 @@ struct rlimit;
struct rlimit64;
struct rusage;
struct sched_param;
+struct sched_attr;
struct sel_arg_struct;
struct semaphore;
struct sembuf;
@@ -279,9 +280,14 @@ asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
struct sched_param __user *param);
asmlinkage long sys_sched_setparam(pid_t pid,
struct sched_param __user *param);
+asmlinkage long sys_sched_setattr(pid_t pid,
+ struct sched_attr __user *attr);
asmlinkage long sys_sched_getscheduler(pid_t pid);
asmlinkage long sys_sched_getparam(pid_t pid,
struct sched_param __user *param);
+asmlinkage long sys_sched_getattr(pid_t pid,
+ struct sched_attr __user *attr,
+ unsigned int size);
asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr);
asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 6695040..30b2ebe 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -12,6 +12,7 @@
#ifndef _SYSFS_H_
#define _SYSFS_H_
+#include <linux/kernfs.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/list.h>
@@ -175,8 +176,6 @@ struct sysfs_ops {
ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
};
-struct sysfs_dirent;
-
#ifdef CONFIG_SYSFS
int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
@@ -244,12 +243,6 @@ void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
const char *link_name);
void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
-void sysfs_notify_dirent(struct sysfs_dirent *sd);
-struct sysfs_dirent *sysfs_get_dirent_ns(struct sysfs_dirent *parent_sd,
- const unsigned char *name,
- const void *ns);
-struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd);
-void sysfs_put(struct sysfs_dirent *sd);
int __must_check sysfs_init(void);
@@ -419,22 +412,6 @@ static inline void sysfs_notify(struct kobject *kobj, const char *dir,
const char *attr)
{
}
-static inline void sysfs_notify_dirent(struct sysfs_dirent *sd)
-{
-}
-static inline struct sysfs_dirent *
-sysfs_get_dirent_ns(struct sysfs_dirent *parent_sd, const unsigned char *name,
- const void *ns)
-{
- return NULL;
-}
-static inline struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd)
-{
- return NULL;
-}
-static inline void sysfs_put(struct sysfs_dirent *sd)
-{
-}
static inline int __must_check sysfs_init(void)
{
@@ -461,10 +438,26 @@ static inline int sysfs_rename_link(struct kobject *kobj, struct kobject *target
return sysfs_rename_link_ns(kobj, target, old_name, new_name, NULL);
}
-static inline struct sysfs_dirent *
-sysfs_get_dirent(struct sysfs_dirent *parent_sd, const unsigned char *name)
+static inline void sysfs_notify_dirent(struct kernfs_node *kn)
+{
+ kernfs_notify(kn);
+}
+
+static inline struct kernfs_node *sysfs_get_dirent(struct kernfs_node *parent,
+ const unsigned char *name)
+{
+ return kernfs_find_and_get(parent, name);
+}
+
+static inline struct kernfs_node *sysfs_get(struct kernfs_node *kn)
+{
+ kernfs_get(kn);
+ return kn;
+}
+
+static inline void sysfs_put(struct kernfs_node *kn)
{
- return sysfs_get_dirent_ns(parent_sd, name, NULL);
+ kernfs_put(kn);
}
#endif /* _SYSFS_H_ */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 5128d33..0175d86 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -104,7 +104,7 @@ extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
extern void tick_clock_notify(void);
extern int tick_check_oneshot_change(int allow_nohz);
extern struct tick_sched *tick_get_tick_sched(int cpu);
-extern void tick_check_idle(int cpu);
+extern void tick_check_idle(void);
extern int tick_oneshot_mode_active(void);
# ifndef arch_needs_cpu
# define arch_needs_cpu(cpu) (0)
@@ -112,7 +112,7 @@ extern int tick_oneshot_mode_active(void);
# else
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(int cpu) { }
+static inline void tick_check_idle(void) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
# endif
@@ -121,7 +121,7 @@ static inline void tick_init(void) { }
static inline void tick_cancel_sched_timer(int cpu) { }
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(int cpu) { }
+static inline void tick_check_idle(void) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
@@ -165,7 +165,7 @@ extern cpumask_var_t tick_nohz_full_mask;
static inline bool tick_nohz_full_enabled(void)
{
- if (!static_key_false(&context_tracking_enabled))
+ if (!context_tracking_is_enabled())
return false;
return tick_nohz_full_running;
diff --git a/include/linux/tty.h b/include/linux/tty.h
index e53e90e..90b4fdc 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -39,10 +39,14 @@ struct tty_buffer {
int size;
int commit;
int read;
+ int flags;
/* Data points here */
unsigned long data[0];
};
+/* Values for .flags field of tty_buffer */
+#define TTYB_NORMAL 1 /* buffer has no flags buffer */
+
static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs)
{
return ((unsigned char *)b->data) + ofs;
@@ -60,7 +64,8 @@ struct tty_bufhead {
atomic_t priority;
struct tty_buffer sentinel;
struct llist_head free; /* Free queue head */
- atomic_t memory_used; /* In-use buffers excluding free list */
+ atomic_t mem_used; /* In-use buffers excluding free list */
+ int mem_limit;
struct tty_buffer *tail; /* Active buffer */
};
/*
@@ -423,7 +428,6 @@ extern int is_ignored(int sig);
extern int tty_signal(int sig, struct tty_struct *tty);
extern void tty_hangup(struct tty_struct *tty);
extern void tty_vhangup(struct tty_struct *tty);
-extern void tty_vhangup_locked(struct tty_struct *tty);
extern void tty_unhangup(struct file *filp);
extern int tty_hung_up_p(struct file *filp);
extern void do_SAK(struct tty_struct *tty);
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index 21ddd7d..c28dd52 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_TTY_FLIP_H
#define _LINUX_TTY_FLIP_H
+extern int tty_buffer_set_limit(struct tty_port *port, int limit);
extern int tty_buffer_space_avail(struct tty_port *port);
extern int tty_buffer_request_room(struct tty_port *port, size_t size);
extern int tty_insert_flip_string_flags(struct tty_port *port,
@@ -9,8 +10,6 @@ extern int tty_insert_flip_string_fixed_flag(struct tty_port *port,
const unsigned char *chars, char flag, size_t size);
extern int tty_prepare_flip_string(struct tty_port *port,
unsigned char **chars, size_t size);
-extern int tty_prepare_flip_string_flags(struct tty_port *port,
- unsigned char **chars, char **flags, size_t size);
extern void tty_flip_buffer_push(struct tty_port *port);
void tty_schedule_flip(struct tty_port *port);
@@ -18,8 +17,12 @@ static inline int tty_insert_flip_char(struct tty_port *port,
unsigned char ch, char flag)
{
struct tty_buffer *tb = port->buf.tail;
- if (tb && tb->used < tb->size) {
- *flag_buf_ptr(tb, tb->used) = flag;
+ int change;
+
+ change = (tb->flags & TTYB_NORMAL) && (flag != TTY_NORMAL);
+ if (!change && tb->used < tb->size) {
+ if (~tb->flags & TTYB_NORMAL)
+ *flag_buf_ptr(tb, tb->used) = flag;
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index f15c898..b8347c20 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -84,7 +84,8 @@
* processing. <cp> is a pointer to the buffer of input
* character received by the device. <fp> is a pointer to a
* pointer of flag bytes which indicate whether a character was
- * received with a parity error, etc.
+ * received with a parity error, etc. <fp> may be NULL to indicate
+ * all data received is TTY_NORMAL.
*
* void (*write_wakeup)(struct tty_struct *);
*
@@ -118,7 +119,8 @@
* processing. <cp> is a pointer to the buffer of input
* character received by the device. <fp> is a pointer to a
* pointer of flag bytes which indicate whether a character was
- * received with a parity error, etc.
+ * received with a parity error, etc. <fp> may be NULL to indicate
+ * all data received is TTY_NORMAL.
* If assigned, prefer this function for automatic flow control.
*/
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 9d8cf05..ecd3319 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -25,13 +25,16 @@ static inline void pagefault_disable(void)
static inline void pagefault_enable(void)
{
+#ifndef CONFIG_PREEMPT
/*
* make sure to issue those last loads/stores before enabling
* the pagefault handler again.
*/
barrier();
preempt_count_dec();
- preempt_check_resched();
+#else
+ preempt_enable();
+#endif
}
#ifndef ARCH_HAS_NOCACHE_UACCESS
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 319eae7..e32251e 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -26,16 +26,13 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
+#include <linux/types.h>
struct vm_area_struct;
struct mm_struct;
struct inode;
struct notifier_block;
-#ifdef CONFIG_ARCH_SUPPORTS_UPROBES
-# include <asm/uprobes.h>
-#endif
-
#define UPROBE_HANDLER_REMOVE 1
#define UPROBE_HANDLER_MASK 1
@@ -60,6 +57,8 @@ struct uprobe_consumer {
};
#ifdef CONFIG_UPROBES
+#include <asm/uprobes.h>
+
enum uprobe_task_state {
UTASK_RUNNING,
UTASK_SSTEP,
@@ -72,35 +71,28 @@ enum uprobe_task_state {
*/
struct uprobe_task {
enum uprobe_task_state state;
- struct arch_uprobe_task autask;
- struct return_instance *return_instances;
- unsigned int depth;
- struct uprobe *active_uprobe;
+ union {
+ struct {
+ struct arch_uprobe_task autask;
+ unsigned long vaddr;
+ };
+ struct {
+ struct callback_head dup_xol_work;
+ unsigned long dup_xol_addr;
+ };
+ };
+
+ struct uprobe *active_uprobe;
unsigned long xol_vaddr;
- unsigned long vaddr;
-};
-/*
- * On a breakpoint hit, thread contests for a slot. It frees the
- * slot after singlestep. Currently a fixed number of slots are
- * allocated.
- */
-struct xol_area {
- wait_queue_head_t wq; /* if all slots are busy */
- atomic_t slot_count; /* number of in-use slots */
- unsigned long *bitmap; /* 0 = free slot */
- struct page *page;
-
- /*
- * We keep the vma's vm_start rather than a pointer to the vma
- * itself. The probed process or a naughty kernel module could make
- * the vma go away, and we must handle that reasonably gracefully.
- */
- unsigned long vaddr; /* Page(s) of instruction slots */
+ struct return_instance *return_instances;
+ unsigned int depth;
};
+struct xol_area;
+
struct uprobes_state {
struct xol_area *xol_area;
};
@@ -109,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign
extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
+extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
@@ -120,7 +113,6 @@ extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
-extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
@@ -176,10 +168,6 @@ static inline bool uprobe_deny_signal(void)
{
return false;
}
-static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
-{
- return 0;
-}
static inline void uprobe_free_utask(struct task_struct *t)
{
}
diff --git a/include/linux/vme.h b/include/linux/vme.h
index c9d65bf..8cd6f19 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -164,7 +164,8 @@ int vme_lm_attach(struct vme_resource *, int, void (*callback)(int));
int vme_lm_detach(struct vme_resource *, int);
void vme_lm_free(struct vme_resource *);
-int vme_slot_get(struct vme_dev *);
+int vme_slot_num(struct vme_dev *);
+int vme_bus_num(struct vme_dev *);
int vme_register_driver(struct vme_driver *, unsigned int);
void vme_unregister_driver(struct vme_driver *);
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index f5b72b3..c5165fd 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -19,8 +19,8 @@ static inline bool vtime_accounting_enabled(void) { return true; }
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static inline bool vtime_accounting_enabled(void)
{
- if (static_key_false(&context_tracking_enabled)) {
- if (context_tracking_active())
+ if (context_tracking_is_enabled()) {
+ if (context_tracking_cpu_is_enabled())
return true;
}
diff --git a/include/linux/zorro.h b/include/linux/zorro.h
index dff4202..63fbba0 100644
--- a/include/linux/zorro.h
+++ b/include/linux/zorro.h
@@ -11,107 +11,10 @@
#ifndef _LINUX_ZORRO_H
#define _LINUX_ZORRO_H
-#include <linux/device.h>
-
-
- /*
- * Each Zorro board has a 32-bit ID of the form
- *
- * mmmmmmmmmmmmmmmmppppppppeeeeeeee
- *
- * with
- *
- * mmmmmmmmmmmmmmmm 16-bit Manufacturer ID (assigned by CBM (sigh))
- * pppppppp 8-bit Product ID (assigned by manufacturer)
- * eeeeeeee 8-bit Extended Product ID (currently only used
- * for some GVP boards)
- */
-
-
-#define ZORRO_MANUF(id) ((id) >> 16)
-#define ZORRO_PROD(id) (((id) >> 8) & 0xff)
-#define ZORRO_EPC(id) ((id) & 0xff)
-
-#define ZORRO_ID(manuf, prod, epc) \
- ((ZORRO_MANUF_##manuf << 16) | ((prod) << 8) | (epc))
-
-typedef __u32 zorro_id;
-
-
-/* Include the ID list */
-#include <linux/zorro_ids.h>
-
- /*
- * GVP identifies most of its products through the 'extended product code'
- * (epc). The epc has to be ANDed with the GVP_PRODMASK before the
- * identification.
- */
-
-#define GVP_PRODMASK (0xf8)
-#define GVP_SCSICLKMASK (0x01)
-
-enum GVP_flags {
- GVP_IO = 0x01,
- GVP_ACCEL = 0x02,
- GVP_SCSI = 0x04,
- GVP_24BITDMA = 0x08,
- GVP_25BITDMA = 0x10,
- GVP_NOBANK = 0x20,
- GVP_14MHZ = 0x40,
-};
-
-
-struct Node {
- struct Node *ln_Succ; /* Pointer to next (successor) */
- struct Node *ln_Pred; /* Pointer to previous (predecessor) */
- __u8 ln_Type;
- __s8 ln_Pri; /* Priority, for sorting */
- __s8 *ln_Name; /* ID string, null terminated */
-} __attribute__ ((packed));
-
-struct ExpansionRom {
- /* -First 16 bytes of the expansion ROM */
- __u8 er_Type; /* Board type, size and flags */
- __u8 er_Product; /* Product number, assigned by manufacturer */
- __u8 er_Flags; /* Flags */
- __u8 er_Reserved03; /* Must be zero ($ff inverted) */
- __u16 er_Manufacturer; /* Unique ID, ASSIGNED BY COMMODORE-AMIGA! */
- __u32 er_SerialNumber; /* Available for use by manufacturer */
- __u16 er_InitDiagVec; /* Offset to optional "DiagArea" structure */
- __u8 er_Reserved0c;
- __u8 er_Reserved0d;
- __u8 er_Reserved0e;
- __u8 er_Reserved0f;
-} __attribute__ ((packed));
-
-/* er_Type board type bits */
-#define ERT_TYPEMASK 0xc0
-#define ERT_ZORROII 0xc0
-#define ERT_ZORROIII 0x80
-
-/* other bits defined in er_Type */
-#define ERTB_MEMLIST 5 /* Link RAM into free memory list */
-#define ERTF_MEMLIST (1<<5)
-
-struct ConfigDev {
- struct Node cd_Node;
- __u8 cd_Flags; /* (read/write) */
- __u8 cd_Pad; /* reserved */
- struct ExpansionRom cd_Rom; /* copy of board's expansion ROM */
- void *cd_BoardAddr; /* where in memory the board was placed */
- __u32 cd_BoardSize; /* size of board in bytes */
- __u16 cd_SlotAddr; /* which slot number (PRIVATE) */
- __u16 cd_SlotSize; /* number of slots (PRIVATE) */
- void *cd_Driver; /* pointer to node of driver */
- struct ConfigDev *cd_NextCD; /* linked list of drivers to config */
- __u32 cd_Unused[4]; /* for whatever the driver wants */
-} __attribute__ ((packed));
-
-#define ZORRO_NUM_AUTO 16
-
-#ifdef __KERNEL__
+#include <uapi/linux/zorro.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/mod_devicetable.h>
@@ -175,7 +78,23 @@ static inline struct zorro_driver *zorro_dev_driver(const struct zorro_dev *z)
extern unsigned int zorro_num_autocon; /* # of autoconfig devices found */
-extern struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
+extern struct zorro_dev *zorro_autocon;
+
+
+ /*
+ * Minimal information about a Zorro device, passed from bootinfo
+ * Only available temporarily, i.e. until initmem has been freed!
+ */
+
+struct zorro_dev_init {
+ struct ExpansionRom rom;
+ u16 slotaddr;
+ u16 slotsize;
+ u32 boardaddr;
+ u32 boardsize;
+};
+
+extern struct zorro_dev_init zorro_autocon_init[ZORRO_NUM_AUTO] __initdata;
/*
@@ -229,6 +148,4 @@ extern DECLARE_BITMAP(zorro_unused_z2ram, 128);
#define Z2RAM_CHUNKSHIFT (16)
-#endif /* __KERNEL__ */
-
#endif /* _LINUX_ZORRO_H */
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 829627d..1d67fb6 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -42,27 +42,10 @@ static inline bool net_busy_loop_on(void)
return sysctl_net_busy_poll;
}
-/* a wrapper to make debug_smp_processor_id() happy
- * we can use sched_clock() because we don't care much about precision
- * we only care that the average is bounded
- */
-#ifdef CONFIG_DEBUG_PREEMPT
-static inline u64 busy_loop_us_clock(void)
-{
- u64 rc;
-
- preempt_disable_notrace();
- rc = sched_clock();
- preempt_enable_no_resched_notrace();
-
- return rc >> 10;
-}
-#else /* CONFIG_DEBUG_PREEMPT */
static inline u64 busy_loop_us_clock(void)
{
- return sched_clock() >> 10;
+ return local_clock() >> 10;
}
-#endif /* CONFIG_DEBUG_PREEMPT */
static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
{
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 76d5427..65bb130 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -165,7 +165,6 @@ struct inet6_dev {
struct net_device *dev;
struct list_head addr_list;
- int valid_ll_addr_cnt;
struct ifmcaddr6 *mc_list;
struct ifmcaddr6 *mc_tomb;
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 31e2de7..c0f0a13 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -142,7 +142,7 @@
#define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0)
#define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1)
-#define PDU_GET_NEXT_Vr(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO)
+#define PDU_GET_NEXT_Vr(sn) (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO)
/* FRMR information field macros */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 67b5d00..0a248b3 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1046,9 +1046,6 @@ struct sctp_outq {
/* Corked? */
char cork;
-
- /* Is this structure empty? */
- char empty;
};
void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 979874c..61e1935 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -978,7 +978,7 @@ struct ib_uobject {
};
struct ib_udata {
- void __user *inbuf;
+ const void __user *inbuf;
void __user *outbuf;
size_t inlen;
size_t outlen;
diff --git a/include/trace/events/ras.h b/include/trace/events/ras.h
index 88b8783..1c875ad 100644
--- a/include/trace/events/ras.h
+++ b/include/trace/events/ras.h
@@ -5,7 +5,7 @@
#define _TRACE_AER_H
#include <linux/tracepoint.h>
-#include <linux/edac.h>
+#include <linux/aer.h>
/*
@@ -63,10 +63,10 @@ TRACE_EVENT(aer_event,
TP_printk("%s PCIe Bus Error: severity=%s, %s\n",
__get_str(dev_name),
- __entry->severity == HW_EVENT_ERR_CORRECTED ? "Corrected" :
- __entry->severity == HW_EVENT_ERR_FATAL ?
- "Fatal" : "Uncorrected",
- __entry->severity == HW_EVENT_ERR_CORRECTED ?
+ __entry->severity == AER_CORRECTABLE ? "Corrected" :
+ __entry->severity == AER_FATAL ?
+ "Fatal" : "Uncorrected, non-fatal",
+ __entry->severity == AER_CORRECTABLE ?
__print_flags(__entry->status, "|", aer_correctable_errors) :
__print_flags(__entry->status, "|", aer_uncorrectable_errors))
);
diff --git a/include/uapi/asm-generic/statfs.h b/include/uapi/asm-generic/statfs.h
index 0999647..cb89cc7 100644
--- a/include/uapi/asm-generic/statfs.h
+++ b/include/uapi/asm-generic/statfs.h
@@ -13,7 +13,7 @@
*/
#ifndef __statfs_word
#if __BITS_PER_LONG == 64
-#define __statfs_word long
+#define __statfs_word __kernel_long_t
#else
#define __statfs_word __u32
#endif
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 2f3f7ea..fe421e8 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -983,6 +983,8 @@ struct drm_radeon_cs {
#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
/* CIK macrotile mode array */
#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18
+/* query the number of render backends */
+#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19
struct drm_radeon_info {
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 33d2b8f..3ce25b5 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -426,3 +426,5 @@ header-y += x25.h
header-y += xattr.h
header-y += xfrm.h
header-y += hw_breakpoint.h
+header-y += zorro.h
+header-y += zorro_ids.h
diff --git a/include/uapi/linux/genwqe/genwqe_card.h b/include/uapi/linux/genwqe/genwqe_card.h
new file mode 100644
index 0000000..795e957
--- /dev/null
+++ b/include/uapi/linux/genwqe/genwqe_card.h
@@ -0,0 +1,500 @@
+#ifndef __GENWQE_CARD_H__
+#define __GENWQE_CARD_H__
+
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * User-space API for the GenWQE card. For debugging and test purposes
+ * the register addresses are included here too.
+ */
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* Basename of sysfs, debugfs and /dev interfaces */
+#define GENWQE_DEVNAME "genwqe"
+
+#define GENWQE_TYPE_ALTERA_230 0x00 /* GenWQE4 Stratix-IV-230 */
+#define GENWQE_TYPE_ALTERA_530 0x01 /* GenWQE4 Stratix-IV-530 */
+#define GENWQE_TYPE_ALTERA_A4 0x02 /* GenWQE5 A4 Stratix-V-A4 */
+#define GENWQE_TYPE_ALTERA_A7 0x03 /* GenWQE5 A7 Stratix-V-A7 */
+
+/* MMIO Unit offsets: Each UnitID occupies a defined address range */
+#define GENWQE_UID_OFFS(uid) ((uid) << 24)
+#define GENWQE_SLU_OFFS GENWQE_UID_OFFS(0)
+#define GENWQE_HSU_OFFS GENWQE_UID_OFFS(1)
+#define GENWQE_APP_OFFS GENWQE_UID_OFFS(2)
+#define GENWQE_MAX_UNITS 3
+
+/* Common offsets per UnitID */
+#define IO_EXTENDED_ERROR_POINTER 0x00000048
+#define IO_ERROR_INJECT_SELECTOR 0x00000060
+#define IO_EXTENDED_DIAG_SELECTOR 0x00000070
+#define IO_EXTENDED_DIAG_READ_MBX 0x00000078
+#define IO_EXTENDED_DIAG_MAP(ring) (0x00000500 | ((ring) << 3))
+
+#define GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace) (((ring) << 8) | (trace))
+
+/* UnitID 0: Service Layer Unit (SLU) */
+
+/* SLU: Unit Configuration Register */
+#define IO_SLU_UNITCFG 0x00000000
+#define IO_SLU_UNITCFG_TYPE_MASK 0x000000000ff00000 /* 27:20 */
+
+/* SLU: Fault Isolation Register (FIR) (ac_slu_fir) */
+#define IO_SLU_FIR 0x00000008 /* read only, wr direct */
+#define IO_SLU_FIR_CLR 0x00000010 /* read and clear */
+
+/* SLU: First Error Capture Register (FEC/WOF) */
+#define IO_SLU_FEC 0x00000018
+
+#define IO_SLU_ERR_ACT_MASK 0x00000020
+#define IO_SLU_ERR_ATTN_MASK 0x00000028
+#define IO_SLU_FIRX1_ACT_MASK 0x00000030
+#define IO_SLU_FIRX0_ACT_MASK 0x00000038
+#define IO_SLU_SEC_LEM_DEBUG_OVR 0x00000040
+#define IO_SLU_EXTENDED_ERR_PTR 0x00000048
+#define IO_SLU_COMMON_CONFIG 0x00000060
+
+#define IO_SLU_FLASH_FIR 0x00000108
+#define IO_SLU_SLC_FIR 0x00000110
+#define IO_SLU_RIU_TRAP 0x00000280
+#define IO_SLU_FLASH_FEC 0x00000308
+#define IO_SLU_SLC_FEC 0x00000310
+
+/*
+ * The Virtual Function's Access is from offset 0x00010000
+ * The Physical Function's Access is from offset 0x00050000
+ * Single Shared Registers exists only at offset 0x00060000
+ *
+ * SLC: Queue Virtual Window: window for accessing a specific VF
+ * queue. When accessing the 0x10000 space through the 0x50000 address
+ * segment, the value indicated here specifies which VF's registers
+ * are decoded. This register, and the 0x50000 register space, can
+ * only be accessed by the PF. For example, if this register is set to
+ * 0x2, then a read from 0x50000 is the same as a read from 0x10000
+ * on VF=2.
+ */
+
+/* SLC: Queue Segment */
+#define IO_SLC_QUEUE_SEGMENT 0x00010000
+#define IO_SLC_VF_QUEUE_SEGMENT 0x00050000
+
+/* SLC: Queue Offset */
+#define IO_SLC_QUEUE_OFFSET 0x00010008
+#define IO_SLC_VF_QUEUE_OFFSET 0x00050008
+
+/* SLC: Queue Configuration */
+#define IO_SLC_QUEUE_CONFIG 0x00010010
+#define IO_SLC_VF_QUEUE_CONFIG 0x00050010
+
+/* SLC: Job Timeout / only accessible for the PF */
+#define IO_SLC_APPJOB_TIMEOUT 0x00010018
+#define IO_SLC_VF_APPJOB_TIMEOUT 0x00050018
+#define TIMEOUT_250MS 0x0000000f
+#define HEARTBEAT_DISABLE 0x0000ff00
+
+/* SLC: Queue InitSequence Register */
+#define IO_SLC_QUEUE_INITSQN 0x00010020
+#define IO_SLC_VF_QUEUE_INITSQN 0x00050020
+
+/* SLC: Queue Wrap */
+#define IO_SLC_QUEUE_WRAP 0x00010028
+#define IO_SLC_VF_QUEUE_WRAP 0x00050028
+
+/* SLC: Queue Status */
+#define IO_SLC_QUEUE_STATUS 0x00010100
+#define IO_SLC_VF_QUEUE_STATUS 0x00050100
+
+/* SLC: Queue Working Time */
+#define IO_SLC_QUEUE_WTIME 0x00010030
+#define IO_SLC_VF_QUEUE_WTIME 0x00050030
+
+/* SLC: Queue Error Counts */
+#define IO_SLC_QUEUE_ERRCNTS 0x00010038
+#define IO_SLC_VF_QUEUE_ERRCNTS 0x00050038
+
+/* SLC: Queue Last Response Word */
+#define IO_SLC_QUEUE_LRW 0x00010040
+#define IO_SLC_VF_QUEUE_LRW 0x00050040
+
+/* SLC: Freerunning Timer */
+#define IO_SLC_FREE_RUNNING_TIMER 0x00010108
+#define IO_SLC_VF_FREE_RUNNING_TIMER 0x00050108
+
+/* SLC: Queue Virtual Access Region */
+#define IO_PF_SLC_VIRTUAL_REGION 0x00050000
+
+/* SLC: Queue Virtual Window */
+#define IO_PF_SLC_VIRTUAL_WINDOW 0x00060000
+
+/* SLC: DDCB Application Job Pending [n] (n=0:63) */
+#define IO_PF_SLC_JOBPEND(n) (0x00061000 + 8*(n))
+#define IO_SLC_JOBPEND(n) IO_PF_SLC_JOBPEND(n)
+
+/* SLC: Parser Trap RAM [n] (n=0:31) */
+#define IO_SLU_SLC_PARSE_TRAP(n) (0x00011000 + 8*(n))
+
+/* SLC: Dispatcher Trap RAM [n] (n=0:31) */
+#define IO_SLU_SLC_DISP_TRAP(n) (0x00011200 + 8*(n))
+
+/* Global Fault Isolation Register (GFIR) */
+#define IO_SLC_CFGREG_GFIR 0x00020000
+#define GFIR_ERR_TRIGGER 0x0000ffff
+
+/* SLU: Soft Reset Register */
+#define IO_SLC_CFGREG_SOFTRESET 0x00020018
+
+/* SLU: Misc Debug Register */
+#define IO_SLC_MISC_DEBUG 0x00020060
+#define IO_SLC_MISC_DEBUG_CLR 0x00020068
+#define IO_SLC_MISC_DEBUG_SET 0x00020070
+
+/* Temperature Sensor Reading */
+#define IO_SLU_TEMPERATURE_SENSOR 0x00030000
+#define IO_SLU_TEMPERATURE_CONFIG 0x00030008
+
+/* Voltage Margining Control */
+#define IO_SLU_VOLTAGE_CONTROL 0x00030080
+#define IO_SLU_VOLTAGE_NOMINAL 0x00000000
+#define IO_SLU_VOLTAGE_DOWN5 0x00000006
+#define IO_SLU_VOLTAGE_UP5 0x00000007
+
+/* Direct LED Control Register */
+#define IO_SLU_LEDCONTROL 0x00030100
+
+/* SLU: Flashbus Direct Access -A5 */
+#define IO_SLU_FLASH_DIRECTACCESS 0x00040010
+
+/* SLU: Flashbus Direct Access2 -A5 */
+#define IO_SLU_FLASH_DIRECTACCESS2 0x00040020
+
+/* SLU: Flashbus Command Interface -A5 */
+#define IO_SLU_FLASH_CMDINTF 0x00040030
+
+/* SLU: BitStream Loaded */
+#define IO_SLU_BITSTREAM 0x00040040
+
+/* This Register has a switch which will change the CAs to UR */
+#define IO_HSU_ERR_BEHAVIOR 0x01001010
+
+#define IO_SLC2_SQB_TRAP 0x00062000
+#define IO_SLC2_QUEUE_MANAGER_TRAP 0x00062008
+#define IO_SLC2_FLS_MASTER_TRAP 0x00062010
+
+/* UnitID 1: HSU Registers */
+#define IO_HSU_UNITCFG 0x01000000
+#define IO_HSU_FIR 0x01000008
+#define IO_HSU_FIR_CLR 0x01000010
+#define IO_HSU_FEC 0x01000018
+#define IO_HSU_ERR_ACT_MASK 0x01000020
+#define IO_HSU_ERR_ATTN_MASK 0x01000028
+#define IO_HSU_FIRX1_ACT_MASK 0x01000030
+#define IO_HSU_FIRX0_ACT_MASK 0x01000038
+#define IO_HSU_SEC_LEM_DEBUG_OVR 0x01000040
+#define IO_HSU_EXTENDED_ERR_PTR 0x01000048
+#define IO_HSU_COMMON_CONFIG 0x01000060
+
+/* UnitID 2: Application Unit (APP) */
+#define IO_APP_UNITCFG 0x02000000
+#define IO_APP_FIR 0x02000008
+#define IO_APP_FIR_CLR 0x02000010
+#define IO_APP_FEC 0x02000018
+#define IO_APP_ERR_ACT_MASK 0x02000020
+#define IO_APP_ERR_ATTN_MASK 0x02000028
+#define IO_APP_FIRX1_ACT_MASK 0x02000030
+#define IO_APP_FIRX0_ACT_MASK 0x02000038
+#define IO_APP_SEC_LEM_DEBUG_OVR 0x02000040
+#define IO_APP_EXTENDED_ERR_PTR 0x02000048
+#define IO_APP_COMMON_CONFIG 0x02000060
+
+#define IO_APP_DEBUG_REG_01 0x02010000
+#define IO_APP_DEBUG_REG_02 0x02010008
+#define IO_APP_DEBUG_REG_03 0x02010010
+#define IO_APP_DEBUG_REG_04 0x02010018
+#define IO_APP_DEBUG_REG_05 0x02010020
+#define IO_APP_DEBUG_REG_06 0x02010028
+#define IO_APP_DEBUG_REG_07 0x02010030
+#define IO_APP_DEBUG_REG_08 0x02010038
+#define IO_APP_DEBUG_REG_09 0x02010040
+#define IO_APP_DEBUG_REG_10 0x02010048
+#define IO_APP_DEBUG_REG_11 0x02010050
+#define IO_APP_DEBUG_REG_12 0x02010058
+#define IO_APP_DEBUG_REG_13 0x02010060
+#define IO_APP_DEBUG_REG_14 0x02010068
+#define IO_APP_DEBUG_REG_15 0x02010070
+#define IO_APP_DEBUG_REG_16 0x02010078
+#define IO_APP_DEBUG_REG_17 0x02010080
+#define IO_APP_DEBUG_REG_18 0x02010088
+
+/* Read/write from/to registers */
+struct genwqe_reg_io {
+ __u64 num; /* register offset/address */
+ __u64 val64;
+};
+
+/*
+ * All registers of our card will return values not equal to this value.
+ * If we see IO_ILLEGAL_VALUE on any of our MMIO register reads, the
+ * card can be considered as unusable. It will need recovery.
+ */
+#define IO_ILLEGAL_VALUE 0xffffffffffffffffull
+
+/*
+ * Generic DDCB execution interface.
+ *
+ * This interface is a first prototype resulting from discussions we
+ * had with other teams which wanted to use the Genwqe card. It allows
+ * to issue a DDCB request in a generic way. The request will block
+ * until it finishes or time out with error.
+ *
+ * Some DDCBs require DMA addresses to be specified in the ASIV
+ * block. The interface provides the capability to let the kernel
+ * driver know where those addresses are by specifying the ATS field,
+ * such that it can replace the user-space addresses with appropriate
+ * DMA addresses or DMA addresses of a scatter gather list which is
+ * dynamically created.
+ *
+ * Our hardware will refuse DDCB execution if the ATS field is not as
+ * expected. That means the DDCB execution engine in the chip knows
+ * where it expects DMA addresses within the ASIV part of the DDCB and
+ * will check that against the ATS field definition. Any invalid or
+ * unknown ATS content will lead to DDCB refusal.
+ */
+
+/* Genwqe chip Units */
+#define DDCB_ACFUNC_SLU 0x00 /* chip service layer unit */
+#define DDCB_ACFUNC_APP 0x01 /* chip application */
+
+/* DDCB return codes (RETC) */
+#define DDCB_RETC_IDLE 0x0000 /* Unexecuted/DDCB created */
+#define DDCB_RETC_PENDING 0x0101 /* Pending Execution */
+#define DDCB_RETC_COMPLETE 0x0102 /* Cmd complete. No error */
+#define DDCB_RETC_FAULT 0x0104 /* App Err, recoverable */
+#define DDCB_RETC_ERROR 0x0108 /* App Err, non-recoverable */
+#define DDCB_RETC_FORCED_ERROR 0x01ff /* overwritten by driver */
+
+#define DDCB_RETC_UNEXEC 0x0110 /* Unexe/Removed from queue */
+#define DDCB_RETC_TERM 0x0120 /* Terminated */
+#define DDCB_RETC_RES0 0x0140 /* Reserved */
+#define DDCB_RETC_RES1 0x0180 /* Reserved */
+
+/* DDCB Command Options (CMDOPT) */
+#define DDCB_OPT_ECHO_FORCE_NO 0x0000 /* ECHO DDCB */
+#define DDCB_OPT_ECHO_FORCE_102 0x0001 /* force return code */
+#define DDCB_OPT_ECHO_FORCE_104 0x0002
+#define DDCB_OPT_ECHO_FORCE_108 0x0003
+
+#define DDCB_OPT_ECHO_FORCE_110 0x0004 /* only on PF ! */
+#define DDCB_OPT_ECHO_FORCE_120 0x0005
+#define DDCB_OPT_ECHO_FORCE_140 0x0006
+#define DDCB_OPT_ECHO_FORCE_180 0x0007
+
+#define DDCB_OPT_ECHO_COPY_NONE (0 << 5)
+#define DDCB_OPT_ECHO_COPY_ALL (1 << 5)
+
+/* Definitions of Service Layer Commands */
+#define SLCMD_ECHO_SYNC 0x00 /* PF/VF */
+#define SLCMD_MOVE_FLASH 0x06 /* PF only */
+#define SLCMD_MOVE_FLASH_FLAGS_MODE 0x03 /* bit 0 and 1 used for mode */
+#define SLCMD_MOVE_FLASH_FLAGS_DLOAD 0 /* mode: download */
+#define SLCMD_MOVE_FLASH_FLAGS_EMUL 1 /* mode: emulation */
+#define SLCMD_MOVE_FLASH_FLAGS_UPLOAD 2 /* mode: upload */
+#define SLCMD_MOVE_FLASH_FLAGS_VERIFY 3 /* mode: verify */
+#define SLCMD_MOVE_FLASH_FLAG_NOTAP (1 << 2)/* just dump DDCB and exit */
+#define SLCMD_MOVE_FLASH_FLAG_POLL (1 << 3)/* wait for RETC >= 0102 */
+#define SLCMD_MOVE_FLASH_FLAG_PARTITION (1 << 4)
+#define SLCMD_MOVE_FLASH_FLAG_ERASE (1 << 5)
+
+enum genwqe_card_state {
+ GENWQE_CARD_UNUSED = 0,
+ GENWQE_CARD_USED = 1,
+ GENWQE_CARD_FATAL_ERROR = 2,
+ GENWQE_CARD_STATE_MAX,
+};
+
+/* common struct for chip image exchange */
+struct genwqe_bitstream {
+ __u64 data_addr; /* pointer to image data */
+ __u32 size; /* size of image file */
+ __u32 crc; /* crc of this image */
+ __u64 target_addr; /* starting address in Flash */
+ __u32 partition; /* '0', '1', or 'v' */
+ __u32 uid; /* 1=host/x=dram */
+
+ __u64 slu_id; /* informational/sim: SluID */
+ __u64 app_id; /* informational/sim: AppID */
+
+ __u16 retc; /* returned from processing */
+ __u16 attn; /* attention code from processing */
+ __u32 progress; /* progress code from processing */
+};
+
+/* Issuing a specific DDCB command */
+#define DDCB_LENGTH 256 /* for debug data */
+#define DDCB_ASIV_LENGTH 104 /* len of the DDCB ASIV array */
+#define DDCB_ASIV_LENGTH_ATS 96 /* ASIV in ATS architecture */
+#define DDCB_ASV_LENGTH 64 /* len of the DDCB ASV array */
+#define DDCB_FIXUPS 12 /* maximum number of fixups */
+
+struct genwqe_debug_data {
+ char driver_version[64];
+ __u64 slu_unitcfg;
+ __u64 app_unitcfg;
+
+ __u8 ddcb_before[DDCB_LENGTH];
+ __u8 ddcb_prev[DDCB_LENGTH];
+ __u8 ddcb_finished[DDCB_LENGTH];
+};
+
+/*
+ * Address Translation Specification (ATS) definitions
+ *
+ * Each 4 bit within the ATS 64-bit word specify the required address
+ * translation at the defined offset.
+ *
+ * 63 LSB
+ * 6666.5555.5555.5544.4444.4443.3333.3333 ... 11
+ * 3210.9876.5432.1098.7654.3210.9876.5432 ... 1098.7654.3210
+ *
+ * offset: 0x00 0x08 0x10 0x18 0x20 0x28 0x30 0x38 ... 0x68 0x70 0x78
+ * res res res res ASIV ...
+ * The first 4 entries in the ATS word are reserved. The following nibbles
+ * each describe at an 8 byte offset the format of the required data.
+ */
+#define ATS_TYPE_DATA 0x0ull /* data */
+#define ATS_TYPE_FLAT_RD 0x4ull /* flat buffer read only */
+#define ATS_TYPE_FLAT_RDWR 0x5ull /* flat buffer read/write */
+#define ATS_TYPE_SGL_RD 0x6ull /* sgl read only */
+#define ATS_TYPE_SGL_RDWR 0x7ull /* sgl read/write */
+
+#define ATS_SET_FLAGS(_struct, _field, _flags) \
+ (((_flags) & 0xf) << (44 - (4 * (offsetof(_struct, _field) / 8))))
+
+#define ATS_GET_FLAGS(_ats, _byte_offs) \
+ (((_ats) >> (44 - (4 * ((_byte_offs) / 8)))) & 0xf)
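A sketch of how a caller might describe its ASIV layout with these helpers; the structure and field names below are invented for illustration, only the macro arithmetic comes from the definitions above:

        /* Hypothetical ASIV layout: two buffer addresses in the first slots. */
        struct my_asiv {
                __u64 in_addr;          /* offset 0x00: flat input buffer */
                __u64 out_addr;         /* offset 0x08: output sglist */
                __u64 in_len;
                __u64 out_len;
        };

        __u64 ats = ATS_SET_FLAGS(struct my_asiv, in_addr,  ATS_TYPE_FLAT_RD) |
                    ATS_SET_FLAGS(struct my_asiv, out_addr, ATS_TYPE_SGL_RDWR);

        /* ATS_GET_FLAGS(ats, 0x08) now yields ATS_TYPE_SGL_RDWR. */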
+
+/**
+ * struct genwqe_ddcb_cmd - User parameter for generic DDCB commands
+ *
+ * On the way into the kernel the driver will read the whole data
+ * structure. On the way out the driver will not copy the ASIV data
+ * back to user-space.
+ */
+struct genwqe_ddcb_cmd {
+ /* START of data copied to/from driver */
+ __u64 next_addr; /* chaining genwqe_ddcb_cmd */
+ __u64 flags; /* reserved */
+
+ __u8 acfunc; /* accelerators functional unit */
+ __u8 cmd; /* command to execute */
+ __u8 asiv_length; /* used parameter length */
+ __u8 asv_length; /* length of valid return values */
+ __u16 cmdopts; /* command options */
+ __u16 retc; /* return code from processing */
+
+ __u16 attn; /* attention code from processing */
+ __u16 vcrc; /* variant crc16 */
+ __u32 progress; /* progress code from processing */
+
+ __u64 deque_ts; /* dequeue time stamp */
+ __u64 cmplt_ts; /* completion time stamp */
+ __u64 disp_ts; /* SW processing start */
+
+ /* move to end and avoid copy-back */
+ __u64 ddata_addr; /* collect debug data */
+
+ /* command specific values */
+ __u8 asv[DDCB_ASV_LENGTH];
+
+ /* END of data copied from driver */
+ union {
+ struct {
+ __u64 ats;
+ __u8 asiv[DDCB_ASIV_LENGTH_ATS];
+ };
+ /* used for flash update to keep it backward compatible */
+ __u8 __asiv[DDCB_ASIV_LENGTH];
+ };
+ /* END of data copied to driver */
+};
+
+#define GENWQE_IOC_CODE 0xa5
+
+/* Access functions */
+#define GENWQE_READ_REG64 _IOR(GENWQE_IOC_CODE, 30, struct genwqe_reg_io)
+#define GENWQE_WRITE_REG64 _IOW(GENWQE_IOC_CODE, 31, struct genwqe_reg_io)
+#define GENWQE_READ_REG32 _IOR(GENWQE_IOC_CODE, 32, struct genwqe_reg_io)
+#define GENWQE_WRITE_REG32 _IOW(GENWQE_IOC_CODE, 33, struct genwqe_reg_io)
+#define GENWQE_READ_REG16 _IOR(GENWQE_IOC_CODE, 34, struct genwqe_reg_io)
+#define GENWQE_WRITE_REG16 _IOW(GENWQE_IOC_CODE, 35, struct genwqe_reg_io)
+
+#define GENWQE_GET_CARD_STATE _IOR(GENWQE_IOC_CODE, 36, enum genwqe_card_state)
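A hedged user-space sketch of the register-access ioctls; the device node path and error handling are assumptions, not something defined by this header:

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <unistd.h>
        /* plus this header for the ioctl numbers and struct genwqe_reg_io */

        static int read_slu_unitcfg(void)
        {
                struct genwqe_reg_io io = { .num = IO_SLU_UNITCFG };
                int fd = open("/dev/genwqe0_card", O_RDWR);     /* path: assumption */
                int rc;

                if (fd < 0)
                        return -1;
                rc = ioctl(fd, GENWQE_READ_REG64, &io);
                if (rc == 0)
                        printf("SLU unitcfg: 0x%llx\n",
                               (unsigned long long)io.val64);
                close(fd);
                return rc;
        }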
+
+/**
+ * struct genwqe_mem - Memory pinning/unpinning information
+ * @addr: virtual user space address
+ * @size: size of the area pin/dma-map/unmap
+ * @direction: 0: read/1: read and write
+ *
+ * Avoid pinning and unpinning of memory pages dynamically. Instead
+ * the idea is to pin the whole buffer space required for DDCB
+ * operations in advance. The driver will reuse this pinning and the
+ * memory associated with it to setup the sglists for the DDCB
+ * requests without the need to allocate and free memory or map and
+ * unmap to get the DMA addresses.
+ *
+ * The inverse operation needs to be called once the pinning is no
+ * longer needed; otherwise the pinnings will be removed when the
+ * device is closed. Note that pinnings require memory.
+ */
+struct genwqe_mem {
+ __u64 addr;
+ __u64 size;
+ __u64 direction;
+ __u64 flags;
+};
+
+#define GENWQE_PIN_MEM _IOWR(GENWQE_IOC_CODE, 40, struct genwqe_mem)
+#define GENWQE_UNPIN_MEM _IOWR(GENWQE_IOC_CODE, 41, struct genwqe_mem)
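Following the comment above, a hedged sketch of pinning a user buffer once up front and releasing it when done; fd, buf and buf_size are assumed to come from the caller (for instance the open() sketch earlier):

        struct genwqe_mem pin = {
                .addr      = (__u64)(unsigned long)buf, /* user buffer, hypothetical */
                .size      = buf_size,
                .direction = 1,                         /* read and write */
        };

        if (ioctl(fd, GENWQE_PIN_MEM, &pin) == 0) {
                /* ... issue DDCBs that reference buf ... */
                ioctl(fd, GENWQE_UNPIN_MEM, &pin);
        }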
+
+/*
+ * Generic synchronous DDCB execution interface.
+ * Synchronously execute a DDCB.
+ *
+ * Return: 0 on success or negative error code.
+ * -EINVAL: Invalid parameters (ASIV_LEN, ASV_LEN, illegal fixups,
+ * no mappings found/could not create mappings)
+ * -EFAULT: illegal addresses in fixups, purging failed
+ * -EBADMSG: enqueueing failed, retc != DDCB_RETC_COMPLETE
+ */
+#define GENWQE_EXECUTE_DDCB \
+ _IOWR(GENWQE_IOC_CODE, 50, struct genwqe_ddcb_cmd)
+
+#define GENWQE_EXECUTE_RAW_DDCB \
+ _IOWR(GENWQE_IOC_CODE, 51, struct genwqe_ddcb_cmd)
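A hedged sketch of a synchronous ECHO request built from the structures above, reusing an fd opened as in the earlier sketch; the field choices are illustrative, not a verified recipe:

        struct genwqe_ddcb_cmd cmd = {
                .acfunc     = DDCB_ACFUNC_SLU,          /* service layer unit */
                .cmd        = SLCMD_ECHO_SYNC,
                .cmdopts    = DDCB_OPT_ECHO_FORCE_NO,
                .asv_length = DDCB_ASV_LENGTH,          /* room for return values */
        };

        if (ioctl(fd, GENWQE_EXECUTE_DDCB, &cmd) == 0 &&
            cmd.retc == DDCB_RETC_COMPLETE) {
                /* echo completed without error */
        }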
+
+/* Service Layer functions (PF only) */
+#define GENWQE_SLU_UPDATE _IOWR(GENWQE_IOC_CODE, 80, struct genwqe_bitstream)
+#define GENWQE_SLU_READ _IOWR(GENWQE_IOC_CODE, 81, struct genwqe_bitstream)
+
+#endif /* __GENWQE_CARD_H__ */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index ecc8859..bd24470 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -464,7 +464,8 @@ struct input_keymap_entry {
#define KEY_BRIGHTNESS_ZERO 244 /* brightness off, use ambient */
#define KEY_DISPLAY_OFF 245 /* display device to off state */
-#define KEY_WIMAX 246
+#define KEY_WWAN 246 /* Wireless WAN (LTE, UMTS, GSM, etc.) */
+#define KEY_WIMAX KEY_WWAN
#define KEY_RFKILL 247 /* Key that controls all radios */
#define KEY_MICMUTE 248 /* Mute / unmute the microphone */
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 104838f..d6629d4 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -18,6 +18,7 @@
*/
#define KEXEC_ARCH_DEFAULT ( 0 << 16)
#define KEXEC_ARCH_386 ( 3 << 16)
+#define KEXEC_ARCH_68K ( 4 << 16)
#define KEXEC_ARCH_X86_64 (62 << 16)
#define KEXEC_ARCH_PPC (20 << 16)
#define KEXEC_ARCH_PPC64 (21 << 16)
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 959d454..e244ed4 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -725,6 +725,7 @@ enum perf_callchain_context {
#define PERF_FLAG_FD_NO_GROUP (1U << 0)
#define PERF_FLAG_FD_OUTPUT (1U << 1)
#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */
+#define PERF_FLAG_FD_CLOEXEC (1U << 3) /* O_CLOEXEC */
union perf_mem_data_src {
__u64 val;
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 5a0f945..34f9d73 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -39,8 +39,14 @@
#define SCHED_BATCH 3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE 5
+#define SCHED_DEADLINE 6
+
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK 0x40000000
+/*
+ * For the sched_{set,get}attr() calls
+ */
+#define SCHED_FLAG_RESET_ON_FORK 0x01
#endif /* _UAPI_LINUX_SCHED_H */
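A hedged user-space sketch of switching the current task to the new SCHED_DEADLINE class through sched_setattr(). There is no glibc wrapper at this point and struct sched_attr is defined elsewhere in this series, so the layout and the syscall invocation below are assumptions:

        #include <stdint.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        struct sched_attr {             /* layout assumed from this series */
                uint32_t size;
                uint32_t sched_policy;
                uint64_t sched_flags;
                int32_t  sched_nice;            /* SCHED_OTHER/BATCH */
                uint32_t sched_priority;        /* SCHED_FIFO/RR */
                uint64_t sched_runtime;         /* SCHED_DEADLINE, nanoseconds */
                uint64_t sched_deadline;
                uint64_t sched_period;
        };

        static int set_deadline_self(void)
        {
                struct sched_attr attr = {
                        .size           = sizeof(attr),
                        .sched_policy   = SCHED_DEADLINE,
                        .sched_runtime  =  10 * 1000 * 1000,    /*  10 ms */
                        .sched_deadline =  30 * 1000 * 1000,    /*  30 ms */
                        .sched_period   = 100 * 1000 * 1000,    /* 100 ms */
                };

                /* pid 0 == current task; __NR_sched_setattr is arch-specific */
                return syscall(__NR_sched_setattr, 0, &attr);
        }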
diff --git a/include/uapi/linux/zorro.h b/include/uapi/linux/zorro.h
new file mode 100644
index 0000000..59d021b
--- /dev/null
+++ b/include/uapi/linux/zorro.h
@@ -0,0 +1,113 @@
+/*
+ * linux/zorro.h -- Amiga AutoConfig (Zorro) Bus Definitions
+ *
+ * Copyright (C) 1995--2003 Geert Uytterhoeven
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _UAPI_LINUX_ZORRO_H
+#define _UAPI_LINUX_ZORRO_H
+
+#include <linux/types.h>
+
+
+ /*
+ * Each Zorro board has a 32-bit ID of the form
+ *
+ * mmmmmmmmmmmmmmmmppppppppeeeeeeee
+ *
+ * with
+ *
+ * mmmmmmmmmmmmmmmm 16-bit Manufacturer ID (assigned by CBM (sigh))
+ * pppppppp 8-bit Product ID (assigned by manufacturer)
+ * eeeeeeee 8-bit Extended Product ID (currently only used
+ * for some GVP boards)
+ */
+
+
+#define ZORRO_MANUF(id) ((id) >> 16)
+#define ZORRO_PROD(id) (((id) >> 8) & 0xff)
+#define ZORRO_EPC(id) ((id) & 0xff)
+
+#define ZORRO_ID(manuf, prod, epc) \
+ ((ZORRO_MANUF_##manuf << 16) | ((prod) << 8) | (epc))
+
+typedef __u32 zorro_id;
+
+
+/* Include the ID list */
+#include <linux/zorro_ids.h>
+
+
+ /*
+ * GVP identifies most of its products through the 'extended product code'
+ * (epc). The epc has to be ANDed with the GVP_PRODMASK before the
+ * identification.
+ */
+
+#define GVP_PRODMASK (0xf8)
+#define GVP_SCSICLKMASK (0x01)
+
+enum GVP_flags {
+ GVP_IO = 0x01,
+ GVP_ACCEL = 0x02,
+ GVP_SCSI = 0x04,
+ GVP_24BITDMA = 0x08,
+ GVP_25BITDMA = 0x10,
+ GVP_NOBANK = 0x20,
+ GVP_14MHZ = 0x40,
+};
+
+
+struct Node {
+ __be32 ln_Succ; /* Pointer to next (successor) */
+ __be32 ln_Pred; /* Pointer to previous (predecessor) */
+ __u8 ln_Type;
+ __s8 ln_Pri; /* Priority, for sorting */
+ __be32 ln_Name; /* ID string, null terminated */
+} __packed;
+
+struct ExpansionRom {
+ /* -First 16 bytes of the expansion ROM */
+ __u8 er_Type; /* Board type, size and flags */
+ __u8 er_Product; /* Product number, assigned by manufacturer */
+ __u8 er_Flags; /* Flags */
+ __u8 er_Reserved03; /* Must be zero ($ff inverted) */
+ __be16 er_Manufacturer; /* Unique ID, ASSIGNED BY COMMODORE-AMIGA! */
+ __be32 er_SerialNumber; /* Available for use by manufacturer */
+ __be16 er_InitDiagVec; /* Offset to optional "DiagArea" structure */
+ __u8 er_Reserved0c;
+ __u8 er_Reserved0d;
+ __u8 er_Reserved0e;
+ __u8 er_Reserved0f;
+} __packed;
+
+/* er_Type board type bits */
+#define ERT_TYPEMASK 0xc0
+#define ERT_ZORROII 0xc0
+#define ERT_ZORROIII 0x80
+
+/* other bits defined in er_Type */
+#define ERTB_MEMLIST 5 /* Link RAM into free memory list */
+#define ERTF_MEMLIST (1<<5)
+
+struct ConfigDev {
+ struct Node cd_Node;
+ __u8 cd_Flags; /* (read/write) */
+ __u8 cd_Pad; /* reserved */
+ struct ExpansionRom cd_Rom; /* copy of board's expansion ROM */
+ __be32 cd_BoardAddr; /* where in memory the board was placed */
+ __be32 cd_BoardSize; /* size of board in bytes */
+ __be16 cd_SlotAddr; /* which slot number (PRIVATE) */
+ __be16 cd_SlotSize; /* number of slots (PRIVATE) */
+ __be32 cd_Driver; /* pointer to node of driver */
+ __be32 cd_NextCD; /* linked list of drivers to config */
+ __be32 cd_Unused[4]; /* for whatever the driver wants */
+} __packed;
+
+#define ZORRO_NUM_AUTO 16
+
+#endif /* _UAPI_LINUX_ZORRO_H */
diff --git a/include/linux/zorro_ids.h b/include/uapi/linux/zorro_ids.h
index 74bc53b..74bc53b 100644
--- a/include/linux/zorro_ids.h
+++ b/include/uapi/linux/zorro_ids.h
diff --git a/include/xen/interface/callback.h b/include/xen/interface/callback.h
index 8c5fa0e..dc3193f 100644
--- a/include/xen/interface/callback.h
+++ b/include/xen/interface/callback.h
@@ -36,7 +36,7 @@
* @extra_args == Operation-specific extra arguments (NULL if none).
*/
-/* ia64, x86: Callback for event delivery. */
+/* x86: Callback for event delivery. */
#define CALLBACKTYPE_event 0
/* x86: Failsafe callback when guest state cannot be restored by Xen. */
diff --git a/include/xen/interface/io/protocols.h b/include/xen/interface/io/protocols.h
index 056744b..545a14b 100644
--- a/include/xen/interface/io/protocols.h
+++ b/include/xen/interface/io/protocols.h
@@ -3,7 +3,6 @@
#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
-#define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
#define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi"
#define XEN_IO_PROTO_ABI_ARM "arm-abi"
@@ -11,8 +10,6 @@
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
#elif defined(__x86_64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
-#elif defined(__ia64__)
-# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
#elif defined(__powerpc64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
#elif defined(__arm__) || defined(__aarch64__)
diff --git a/init/Kconfig b/init/Kconfig
index 4e5d96a..5236dc5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -532,7 +532,7 @@ config CONTEXT_TRACKING_FORCE
dynticks subsystem by forcing the context tracking on all
CPUs in the system.
- Say Y only if you're working on the developpement of an
+ Say Y only if you're working on the development of an
architecture backend for the context tracking.
Say N otherwise, this option brings an overhead that you
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 8b729c2..bc1dcab 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -890,6 +890,16 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
struct cgroup *cgrp = dentry->d_fsdata;
BUG_ON(!(cgroup_is_dead(cgrp)));
+
+ /*
+ * XXX: cgrp->id is only used to look up css's. As cgroup
+ * and css's lifetimes will be decoupled, it should be made
+ * per-subsystem and moved to css->id so that lookups are
+ * successful until the target css is released.
+ */
+ idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+ cgrp->id = -1;
+
call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
} else {
struct cfent *cfe = __d_cfe(dentry);
@@ -4268,6 +4278,7 @@ static void css_release(struct percpu_ref *ref)
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
+ rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL);
call_rcu(&css->rcu_head, css_free_rcu_fn);
}
@@ -4426,14 +4437,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
root->number_of_cgroups++;
- /* each css holds a ref to the cgroup's dentry and the parent css */
- for_each_root_subsys(root, ss) {
- struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
-
- dget(dentry);
- css_get(css->parent);
- }
-
/* hold a ref to the parent's dentry */
dget(parent->dentry);
@@ -4445,6 +4448,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
if (err)
goto err_destroy;
+ /* each css holds a ref to the cgroup's dentry and parent css */
+ dget(dentry);
+ css_get(css->parent);
+
+ /* mark it consumed for error path */
+ css_ar[ss->subsys_id] = NULL;
+
if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
parent->parent) {
pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
@@ -4491,6 +4501,14 @@ err_free_cgrp:
return err;
err_destroy:
+ for_each_root_subsys(root, ss) {
+ struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
+
+ if (css) {
+ percpu_ref_cancel_init(&css->refcnt);
+ ss->css_free(css);
+ }
+ }
cgroup_destroy_locked(cgrp);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -4652,8 +4670,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
* will be invoked to perform the rest of destruction once the
* percpu refs of all css's are confirmed to be killed.
*/
- for_each_root_subsys(cgrp->root, ss)
- kill_css(cgroup_css(cgrp, ss));
+ for_each_root_subsys(cgrp->root, ss) {
+ struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
+
+ if (css)
+ kill_css(css);
+ }
/*
* Mark @cgrp dead. This prevents further task migration and child
@@ -4722,14 +4744,6 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp)
/* delete this cgroup from parent->children */
list_del_rcu(&cgrp->sibling);
- /*
- * We should remove the cgroup object from idr before its grace
- * period starts, so we won't be looking up a cgroup while the
- * cgroup is being freed.
- */
- idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
- cgrp->id = -1;
-
dput(d);
set_bit(CGRP_RELEASABLE, &parent->flags);
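The reworked error path above relies on a common percpu_ref rule: a reference count that was initialised but never published must be torn down with percpu_ref_cancel_init() (it has no users yet), not killed and released like a live one. A minimal sketch of that pattern, assuming the 3.13-era <linux/percpu-refcount.h> API; the my_obj type and its release callback are invented for illustration:

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_obj {
	struct percpu_ref refcnt;
};

static void my_obj_release(struct percpu_ref *ref)
{
	kfree(container_of(ref, struct my_obj, refcnt));
}

static struct my_obj *my_obj_create(bool later_step_fails)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	if (percpu_ref_init(&obj->refcnt, my_obj_release)) {
		kfree(obj);
		return NULL;
	}

	if (later_step_fails) {
		/* never published: cancel the ref instead of killing it */
		percpu_ref_cancel_init(&obj->refcnt);
		kfree(obj);
		return NULL;
	}
	return obj;
}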
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index e5f3917..6cb20d2 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -53,10 +53,10 @@ void context_tracking_user_enter(void)
/*
* Repeat the user_enter() check here because some archs may be calling
* this from asm and if no CPU needs context tracking, they shouldn't
- * go further. Repeat the check here until they support the static key
- * check.
+ * go further. Repeat the check here until they support the inline static
+ * key check.
*/
- if (!static_key_false(&context_tracking_enabled))
+ if (!context_tracking_is_enabled())
return;
/*
@@ -160,7 +160,7 @@ void context_tracking_user_exit(void)
{
unsigned long flags;
- if (!static_key_false(&context_tracking_enabled))
+ if (!context_tracking_is_enabled())
return;
if (in_interrupt())
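context_tracking_is_enabled() wraps the same static key that was previously tested open-coded, so the call sites read as a plain predicate while still compiling down to the static-key fast path. Roughly, and only as an illustration of what the helper boils down to in this series:

/* sketch of the helper introduced by this series; see
 * include/linux/context_tracking_state.h for the real definition */
static inline bool context_tracking_is_enabled(void)
{
	return static_key_false(&context_tracking_enabled);
}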
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
index 988573a..277f494 100644
--- a/kernel/cpu/idle.c
+++ b/kernel/cpu/idle.c
@@ -105,14 +105,17 @@ static void cpu_idle_loop(void)
__current_set_polling();
}
arch_cpu_idle_exit();
- /*
- * We need to test and propagate the TIF_NEED_RESCHED
- * bit here because we might not have send the
- * reschedule IPI to idle tasks.
- */
- if (tif_need_resched())
- set_preempt_need_resched();
}
+
+ /*
+ * Since we fell out of the loop above, we know
+ * TIF_NEED_RESCHED must be set, propagate it into
+ * PREEMPT_NEED_RESCHED.
+ *
+ * This is required because for polling idle loops we will
+ * not have had an IPI to fold the state for us.
+ */
+ preempt_set_need_resched();
tick_nohz_idle_exit();
schedule_preempt_disabled();
}
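The comment block above concerns polling idle loops: such a loop leaves idle because it observed TIF_NEED_RESCHED directly, so no reschedule IPI ever ran to fold that thread flag into the per-CPU preempt state, and the fold has to be done by hand on the way out. A shape sketch of that flow (the helper names follow the hunk above; this is not the literal file contents):

#include <linux/sched.h>
#include <linux/thread_info.h>

/* polling idle: no IPI arrives, so fold NEED_RESCHED manually on exit */
static void polling_idle_loop(void)
{
	while (1) {
		while (!tif_need_resched())
			cpu_relax();		/* poll for the flag */

		/*
		 * TIF_NEED_RESCHED is known to be set here; propagate it
		 * into PREEMPT_NEED_RESCHED as the hunk above does, then
		 * reschedule.
		 */
		preempt_set_need_resched();
		schedule_preempt_disabled();
	}
}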
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f574401..56003c6 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -119,7 +119,8 @@ static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
PERF_FLAG_FD_OUTPUT |\
- PERF_FLAG_PID_CGROUP)
+ PERF_FLAG_PID_CGROUP |\
+ PERF_FLAG_FD_CLOEXEC)
/*
* branch priv levels that need permission checks
@@ -3542,7 +3543,7 @@ static void perf_event_for_each(struct perf_event *event,
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
struct perf_event_context *ctx = event->ctx;
- int ret = 0;
+ int ret = 0, active;
u64 value;
if (!is_sampling_event(event))
@@ -3566,6 +3567,20 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
event->attr.sample_period = value;
event->hw.sample_period = value;
}
+
+ active = (event->state == PERF_EVENT_STATE_ACTIVE);
+ if (active) {
+ perf_pmu_disable(ctx->pmu);
+ event->pmu->stop(event, PERF_EF_UPDATE);
+ }
+
+ local64_set(&event->hw.period_left, 0);
+
+ if (active) {
+ event->pmu->start(event, PERF_EF_RELOAD);
+ perf_pmu_enable(ctx->pmu);
+ }
+
unlock:
raw_spin_unlock_irq(&ctx->lock);
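The effect of the hunk above is that changing the period of a running sampling event now takes hold immediately: the pending period_left is cleared and the event is stopped and restarted under the PMU, instead of the old period having to run out first. From user space this is driven by the existing PERF_EVENT_IOC_PERIOD ioctl; a minimal sketch, where fd is assumed to be a sampling perf event file descriptor opened elsewhere:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Switch a running sampling event to a new period; with the change
 * above the new period is applied right away rather than after the
 * currently pending period expires. */
static int set_sample_period(int fd, uint64_t period)
{
	return ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
}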
@@ -6670,6 +6685,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
INIT_LIST_HEAD(&event->event_entry);
INIT_LIST_HEAD(&event->sibling_list);
INIT_LIST_HEAD(&event->rb_entry);
+ INIT_LIST_HEAD(&event->active_entry);
+ INIT_HLIST_NODE(&event->hlist_entry);
+
init_waitqueue_head(&event->waitq);
init_irq_work(&event->pending, perf_pending_event);
@@ -6980,6 +6998,7 @@ SYSCALL_DEFINE5(perf_event_open,
int event_fd;
int move_group = 0;
int err;
+ int f_flags = O_RDWR;
/* for future expandability... */
if (flags & ~PERF_FLAG_ALL)
@@ -7008,7 +7027,10 @@ SYSCALL_DEFINE5(perf_event_open,
if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
return -EINVAL;
- event_fd = get_unused_fd();
+ if (flags & PERF_FLAG_FD_CLOEXEC)
+ f_flags |= O_CLOEXEC;
+
+ event_fd = get_unused_fd_flags(f_flags);
if (event_fd < 0)
return event_fd;
@@ -7130,7 +7152,8 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_context;
}
- event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
+ event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
+ f_flags);
if (IS_ERR(event_file)) {
err = PTR_ERR(event_file);
goto err_context;
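With PERF_FLAG_FD_CLOEXEC the event descriptor is created close-on-exec atomically, which closes the classic race against a concurrent fork() plus exec() that a later fcntl(F_SETFD, FD_CLOEXEC) would leave open. A hedged user-space sketch; it assumes the installed <linux/perf_event.h> already defines the new flag, which older headers will not:

#define _GNU_SOURCE
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int open_cycles_counter_cloexec(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* O_CLOEXEC is applied by the kernel before the fd is installed */
	return perf_event_open(&attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
}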
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index e8b168a..146a579 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -61,19 +61,20 @@ again:
*
* kernel user
*
- * READ ->data_tail READ ->data_head
- * smp_mb() (A) smp_rmb() (C)
- * WRITE $data READ $data
- * smp_wmb() (B) smp_mb() (D)
- * STORE ->data_head WRITE ->data_tail
+ * if (LOAD ->data_tail) { LOAD ->data_head
+ * (A) smp_rmb() (C)
+ * STORE $data LOAD $data
+ * smp_wmb() (B) smp_mb() (D)
+ * STORE ->data_head STORE ->data_tail
+ * }
*
* Where A pairs with D, and B pairs with C.
*
- * I don't think A needs to be a full barrier because we won't in fact
- * write data until we see the store from userspace. So we simply don't
- * issue the data WRITE until we observe it. Be conservative for now.
+ * In our case (A) is a control dependency that separates the load of
+ * ->data_tail from the stores of $data: when ->data_tail indicates
+ * there is no room in the buffer, we do not issue those stores.
*
- * OTOH, D needs to be a full barrier since it separates the data READ
+ * D needs to be a full barrier since it separates the data READ
* from the tail WRITE.
*
* For B a WMB is sufficient since it separates two WRITEs, and for C
@@ -81,7 +82,7 @@ again:
*
* See perf_output_begin().
*/
- smp_wmb();
+ smp_wmb(); /* B, matches C */
rb->user_page->data_head = head;
/*
@@ -144,17 +145,26 @@ int perf_output_begin(struct perf_output_handle *handle,
if (!rb->overwrite &&
unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
goto fail;
+
+ /*
+ * The above forms a control dependency barrier separating the
+ * @tail load above from the data stores below, since the @tail
+ * load is required to compute the branch to the fail label below.
+ *
+ * A, matches D; the full memory barrier userspace SHOULD issue
+ * after reading the data and before storing the new tail
+ * position.
+ *
+ * See perf_output_put_handle().
+ */
+
head += size;
} while (local_cmpxchg(&rb->head, offset, head) != offset);
/*
- * Separate the userpage->tail read from the data stores below.
- * Matches the MB userspace SHOULD issue after reading the data
- * and before storing the new tail position.
- *
- * See perf_output_put_handle().
+ * We rely on the implied barrier() by local_cmpxchg() to ensure
+ * none of the data stores below can be lifted up by the compiler.
*/
- smp_mb();
if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
local_add(rb->watermark, &rb->wakeup);
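For reference, the user-space half of the A/B/C/D pairing described above usually looks like the sketch below: barrier (C) is needed between loading data_head and reading the payload, and barrier (D) between consuming the payload and publishing the new data_tail. Acquire and release atomics stand in for those barriers here; this is an illustrative reader against struct perf_event_mmap_page, not the canonical tooling code, and real readers parse size-prefixed records rather than single bytes:

#include <stdint.h>
#include <linux/perf_event.h>

/* pc: first page of the mmap'ed ring (struct perf_event_mmap_page),
 * data: start of the data area, data_size: its size (a power of two). */
static void drain_ring(struct perf_event_mmap_page *pc,
		       unsigned char *data, uint64_t data_size,
		       void (*consume)(const void *buf, uint64_t size))
{
	uint64_t head, tail;

	/* (C): do not read the payload before data_head is observed */
	head = __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
	tail = pc->data_tail;

	while (tail != head) {
		uint64_t off = tail & (data_size - 1);

		consume(data + off, 1);	/* simplified: byte at a time */
		tail++;
	}

	/* (D): all payload loads complete before the tail is published */
	__atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
}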
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 24b7d6c..b886a5e 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -73,6 +73,17 @@ struct uprobe {
struct inode *inode; /* Also hold a ref to inode */
loff_t offset;
unsigned long flags;
+
+ /*
+ * The generic code assumes that it has two members of unknown type
+ * owned by the arch-specific code:
+ *
+ * insn - copy_insn() saves the original instruction here for
+ * arch_uprobe_analyze_insn().
+ *
+ * ixol - potentially modified instruction to execute out of
+ * line, copied to xol_area by xol_get_insn_slot().
+ */
struct arch_uprobe arch;
};
@@ -86,6 +97,29 @@ struct return_instance {
};
/*
+ * Execute out of line area: anonymous executable mapping installed
+ * by the probed task to execute the copy of the original instruction
+ * mangled by set_swbp().
+ *
+ * On a breakpoint hit, thread contests for a slot. It frees the
+ * slot after singlestep. Currently a fixed number of slots are
+ * allocated.
+ */
+struct xol_area {
+ wait_queue_head_t wq; /* if all slots are busy */
+ atomic_t slot_count; /* number of in-use slots */
+ unsigned long *bitmap; /* 0 = free slot */
+ struct page *page;
+
+ /*
+ * We keep the vma's vm_start rather than a pointer to the vma
+ * itself. The probed process or a naughty kernel module could make
+ * the vma go away, and we must handle that reasonably gracefully.
+ */
+ unsigned long vaddr; /* Page(s) of instruction slots */
+};
+
+/*
* valid_vma: Verify if the specified vma is an executable vma
* Relax restrictions while unregistering: vm_flags might have
* changed after breakpoint was inserted.
@@ -330,7 +364,7 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
- return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
+ return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
}
static int match_uprobe(struct uprobe *l, struct uprobe *r)
@@ -529,8 +563,8 @@ static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
struct address_space *mapping = uprobe->inode->i_mapping;
loff_t offs = uprobe->offset;
- void *insn = uprobe->arch.insn;
- int size = MAX_UINSN_BYTES;
+ void *insn = &uprobe->arch.insn;
+ int size = sizeof(uprobe->arch.insn);
int len, err = -EIO;
/* Copy only available bytes, -EIO if nothing was read */
@@ -569,7 +603,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
goto out;
ret = -ENOTSUPP;
- if (is_trap_insn((uprobe_opcode_t *)uprobe->arch.insn))
+ if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
goto out;
ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
@@ -1264,7 +1298,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
/* Initialize the slot */
copy_to_page(area->page, xol_vaddr,
- uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
+ &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
/*
* We probably need flush_icache_user_range() but it needs vma.
* This should work on supported architectures too.
@@ -1403,12 +1437,10 @@ static void uprobe_warn(struct task_struct *t, const char *msg)
static void dup_xol_work(struct callback_head *work)
{
- kfree(work);
-
if (current->flags & PF_EXITING)
return;
- if (!__create_xol_area(current->utask->vaddr))
+ if (!__create_xol_area(current->utask->dup_xol_addr))
uprobe_warn(current, "dup xol area");
}
@@ -1419,7 +1451,6 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
struct uprobe_task *utask = current->utask;
struct mm_struct *mm = current->mm;
- struct callback_head *work;
struct xol_area *area;
t->utask = NULL;
@@ -1441,14 +1472,9 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
if (mm == t->mm)
return;
- /* TODO: move it into the union in uprobe_task */
- work = kmalloc(sizeof(*work), GFP_KERNEL);
- if (!work)
- return uprobe_warn(t, "dup xol area");
-
- t->utask->vaddr = area->vaddr;
- init_task_work(work, dup_xol_work);
- task_work_add(t, work, true);
+ t->utask->dup_xol_addr = area->vaddr;
+ init_task_work(&t->utask->dup_xol_work, dup_xol_work);
+ task_work_add(t, &t->utask->dup_xol_work, true);
}
/*
diff --git a/kernel/fork.c b/kernel/fork.c
index 5721f0e..294189f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1087,8 +1087,10 @@ static void rt_mutex_init_task(struct task_struct *p)
{
raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
- plist_head_init(&p->pi_waiters);
+ p->pi_waiters = RB_ROOT;
+ p->pi_waiters_leftmost = NULL;
p->pi_blocked_on = NULL;
+ p->pi_top_task = NULL;
#endif
}
@@ -1172,7 +1174,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
* do not allow it to share a thread group or signal handlers or
* parent with the forking task.
*/
- if (clone_flags & (CLONE_SIGHAND | CLONE_PARENT)) {
+ if (clone_flags & CLONE_SIGHAND) {
if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
(task_active_pid_ns(current) !=
current->nsproxy->pid_ns_for_children))
@@ -1311,7 +1313,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#endif
/* Perform scheduler related setup. Assign this task to a CPU. */
- sched_fork(clone_flags, p);
+ retval = sched_fork(clone_flags, p);
+ if (retval)
+ goto bad_fork_cleanup_policy;
retval = perf_event_init_task(p);
if (retval)
@@ -1403,13 +1407,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->tgid = p->pid;
}
- p->pdeath_signal = 0;
- p->exit_state = 0;
-
p->nr_dirtied = 0;
p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
p->dirty_paused_when = 0;
+ p->pdeath_signal = 0;
INIT_LIST_HEAD(&p->thread_group);
p->task_works = NULL;
diff --git a/kernel/freezer.c b/kernel/freezer.c
index b462fa1..aa6a8aa 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -19,6 +19,12 @@ EXPORT_SYMBOL(system_freezing_cnt);
bool pm_freezing;
bool pm_nosig_freezing;
+/*
+ * Temporary export for the deadlock workaround in ata_scsi_hotplug().
+ * Remove once the hack becomes unnecessary.
+ */
+EXPORT_SYMBOL_GPL(pm_freezing);
+
/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);
diff --git a/kernel/futex.c b/kernel/futex.c
index f6ff019..44a1261 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -63,14 +63,101 @@
#include <linux/sched/rt.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
+#include <linux/bootmem.h>
#include <asm/futex.h>
#include "locking/rtmutex_common.h"
-int __read_mostly futex_cmpxchg_enabled;
+/*
+ * Basic futex operation and ordering guarantees:
+ *
+ * The waiter reads the futex value in user space and calls
+ * futex_wait(). This function computes the hash bucket and acquires
+ * the hash bucket lock. After that it reads the futex user space value
+ * again and verifies that the data has not changed. If it has not changed
+ * it enqueues itself into the hash bucket, releases the hash bucket lock
+ * and schedules.
+ *
+ * The waker side modifies the user space value of the futex and calls
+ * futex_wake(). This function computes the hash bucket and acquires the
+ * hash bucket lock. Then it looks for waiters on that futex in the hash
+ * bucket and wakes them.
+ *
+ * In futex wake up scenarios where no tasks are blocked on a futex, taking
+ * the hb spinlock can be avoided and the waker can simply return. In order for this
+ * optimization to work, ordering guarantees must exist so that the waiter
+ * being added to the list is acknowledged when the list is concurrently being
+ * checked by the waker, avoiding scenarios like the following:
+ *
+ * CPU 0 CPU 1
+ * val = *futex;
+ * sys_futex(WAIT, futex, val);
+ * futex_wait(futex, val);
+ * uval = *futex;
+ * *futex = newval;
+ * sys_futex(WAKE, futex);
+ * futex_wake(futex);
+ * if (queue_empty())
+ * return;
+ * if (uval == val)
+ * lock(hash_bucket(futex));
+ * queue();
+ * unlock(hash_bucket(futex));
+ * schedule();
+ *
+ * This would cause the waiter on CPU 0 to wait forever because it
+ * missed the transition of the user space value from val to newval
+ * and the waker did not find the waiter in the hash bucket queue.
+ *
+ * The correct serialization ensures that a waiter either observes
+ * the changed user space value before blocking or is woken by a
+ * concurrent waker:
+ *
+ * CPU 0 CPU 1
+ * val = *futex;
+ * sys_futex(WAIT, futex, val);
+ * futex_wait(futex, val);
+ *
+ * waiters++;
+ * mb(); (A) <-- paired with -.
+ * |
+ * lock(hash_bucket(futex)); |
+ * |
+ * uval = *futex; |
+ * | *futex = newval;
+ * | sys_futex(WAKE, futex);
+ * | futex_wake(futex);
+ * |
+ * `-------> mb(); (B)
+ * if (uval == val)
+ * queue();
+ * unlock(hash_bucket(futex));
+ * schedule(); if (waiters)
+ * lock(hash_bucket(futex));
+ * wake_waiters(futex);
+ * unlock(hash_bucket(futex));
+ *
+ * Where (A) orders the waiters increment and the futex value read -- this
+ * is guaranteed by the head counter in the hb spinlock; and where (B)
+ * orders the write to futex and the waiters read -- this is done by the
+ * barriers in get_futex_key_refs(), through either ihold or atomic_inc,
+ * depending on the futex type.
+ *
+ * This yields the following case (where X:=waiters, Y:=futex):
+ *
+ * X = Y = 0
+ *
+ * w[X]=1 w[Y]=1
+ * MB MB
+ * r[Y]=y r[X]=x
+ *
+ * Which guarantees that x==0 && y==0 is impossible; which translates back into
+ * the guarantee that we cannot both miss the futex variable change and the
+ * enqueue.
+ */
-#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
+int __read_mostly futex_cmpxchg_enabled;
/*
* Futex flags used to encode options to functions and preserve them across
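The waiter/waker diagrams above are the kernel half of the usual user-space protocol: the waiter re-reads the futex word and calls FUTEX_WAIT with the value it saw, while the waker stores the new value and only then calls FUTEX_WAKE. A minimal non-PI, non-robust sketch of that protocol; timeouts, error handling and spurious-wakeup details are left out:

#include <stdatomic.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

static long futex(atomic_int *uaddr, int op, int val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* waiter: block while *f still holds 'expected' */
static void wait_on(atomic_int *f, int expected)
{
	while (atomic_load(f) == expected)
		futex(f, FUTEX_WAIT, expected);	/* returns on change or wake */
}

/* waker: publish the new value, then wake one waiter */
static void post(atomic_int *f, int newval)
{
	atomic_store(f, newval);		/* the "*futex = newval" above */
	futex(f, FUTEX_WAKE, 1);
}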
@@ -149,9 +236,41 @@ static const struct futex_q futex_q_init = {
struct futex_hash_bucket {
spinlock_t lock;
struct plist_head chain;
-};
+} ____cacheline_aligned_in_smp;
-static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
+static unsigned long __read_mostly futex_hashsize;
+
+static struct futex_hash_bucket *futex_queues;
+
+static inline void futex_get_mm(union futex_key *key)
+{
+ atomic_inc(&key->private.mm->mm_count);
+ /*
+ * Ensure futex_get_mm() implies a full barrier such that
+ * get_futex_key() implies a full barrier. This is relied upon
+ * as full barrier (B), see the ordering comment above.
+ */
+ smp_mb__after_atomic_inc();
+}
+
+static inline bool hb_waiters_pending(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+ /*
+ * Tasks trying to enter the critical region are most likely
+ * potential waiters that will be added to the plist. Ensure
+ * that wakers won't miss to-be-slept tasks in the window between
+ * the wait call and the actual plist_add.
+ */
+ if (spin_is_locked(&hb->lock))
+ return true;
+ smp_rmb(); /* Make sure we check the lock state first */
+
+ return !plist_head_empty(&hb->chain);
+#else
+ return true;
+#endif
+}
/*
* We hash on the keys returned from get_futex_key (see below).
@@ -161,7 +280,7 @@ static struct futex_hash_bucket *hash_futex(union futex_key *key)
u32 hash = jhash2((u32*)&key->both.word,
(sizeof(key->both.word)+sizeof(key->both.ptr))/4,
key->both.offset);
- return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
+ return &futex_queues[hash & (futex_hashsize - 1)];
}
/*
@@ -187,10 +306,10 @@ static void get_futex_key_refs(union futex_key *key)
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
- ihold(key->shared.inode);
+ ihold(key->shared.inode); /* implies MB (B) */
break;
case FUT_OFF_MMSHARED:
- atomic_inc(&key->private.mm->mm_count);
+ futex_get_mm(key); /* implies MB (B) */
break;
}
}
@@ -264,7 +383,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
if (!fshared) {
key->private.mm = mm;
key->private.address = address;
- get_futex_key_refs(key);
+ get_futex_key_refs(key); /* implies MB (B) */
return 0;
}
@@ -371,7 +490,7 @@ again:
key->shared.pgoff = basepage_index(page);
}
- get_futex_key_refs(key);
+ get_futex_key_refs(key); /* implies MB (B) */
out:
unlock_page(page_head);
@@ -598,13 +717,10 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
{
struct futex_pi_state *pi_state = NULL;
struct futex_q *this, *next;
- struct plist_head *head;
struct task_struct *p;
pid_t pid = uval & FUTEX_TID_MASK;
- head = &hb->chain;
-
- plist_for_each_entry_safe(this, next, head, list) {
+ plist_for_each_entry_safe(this, next, &hb->chain, list) {
if (match_futex(&this->key, key)) {
/*
* Another waiter already exists - bump up
@@ -986,7 +1102,6 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
- struct plist_head *head;
union futex_key key = FUTEX_KEY_INIT;
int ret;
@@ -998,10 +1113,14 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
goto out;
hb = hash_futex(&key);
+
+ /* Make sure we really have tasks to wakeup */
+ if (!hb_waiters_pending(hb))
+ goto out_put_key;
+
spin_lock(&hb->lock);
- head = &hb->chain;
- plist_for_each_entry_safe(this, next, head, list) {
+ plist_for_each_entry_safe(this, next, &hb->chain, list) {
if (match_futex (&this->key, &key)) {
if (this->pi_state || this->rt_waiter) {
ret = -EINVAL;
@@ -1019,6 +1138,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
}
spin_unlock(&hb->lock);
+out_put_key:
put_futex_key(&key);
out:
return ret;
@@ -1034,7 +1154,6 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
{
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
struct futex_hash_bucket *hb1, *hb2;
- struct plist_head *head;
struct futex_q *this, *next;
int ret, op_ret;
@@ -1082,9 +1201,7 @@ retry_private:
goto retry;
}
- head = &hb1->chain;
-
- plist_for_each_entry_safe(this, next, head, list) {
+ plist_for_each_entry_safe(this, next, &hb1->chain, list) {
if (match_futex (&this->key, &key1)) {
if (this->pi_state || this->rt_waiter) {
ret = -EINVAL;
@@ -1097,10 +1214,8 @@ retry_private:
}
if (op_ret > 0) {
- head = &hb2->chain;
-
op_ret = 0;
- plist_for_each_entry_safe(this, next, head, list) {
+ plist_for_each_entry_safe(this, next, &hb2->chain, list) {
if (match_futex (&this->key, &key2)) {
if (this->pi_state || this->rt_waiter) {
ret = -EINVAL;
@@ -1270,7 +1385,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
int drop_count = 0, task_count = 0, ret;
struct futex_pi_state *pi_state = NULL;
struct futex_hash_bucket *hb1, *hb2;
- struct plist_head *head1;
struct futex_q *this, *next;
u32 curval2;
@@ -1393,8 +1507,7 @@ retry_private:
}
}
- head1 = &hb1->chain;
- plist_for_each_entry_safe(this, next, head1, list) {
+ plist_for_each_entry_safe(this, next, &hb1->chain, list) {
if (task_count - nr_wake >= nr_requeue)
break;
@@ -1489,12 +1602,12 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
hb = hash_futex(&q->key);
q->lock_ptr = &hb->lock;
- spin_lock(&hb->lock);
+ spin_lock(&hb->lock); /* implies MB (A) */
return hb;
}
static inline void
-queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
+queue_unlock(struct futex_hash_bucket *hb)
__releases(&hb->lock)
{
spin_unlock(&hb->lock);
@@ -1867,7 +1980,7 @@ retry_private:
ret = get_futex_value_locked(&uval, uaddr);
if (ret) {
- queue_unlock(q, *hb);
+ queue_unlock(*hb);
ret = get_user(uval, uaddr);
if (ret)
@@ -1881,7 +1994,7 @@ retry_private:
}
if (uval != val) {
- queue_unlock(q, *hb);
+ queue_unlock(*hb);
ret = -EWOULDBLOCK;
}
@@ -2029,7 +2142,7 @@ retry_private:
* Task is exiting and we just wait for the
* exit to complete.
*/
- queue_unlock(&q, hb);
+ queue_unlock(hb);
put_futex_key(&q.key);
cond_resched();
goto retry;
@@ -2081,7 +2194,7 @@ retry_private:
goto out_put_key;
out_unlock_put_key:
- queue_unlock(&q, hb);
+ queue_unlock(hb);
out_put_key:
put_futex_key(&q.key);
@@ -2091,7 +2204,7 @@ out:
return ret != -EINTR ? ret : -ERESTARTNOINTR;
uaddr_faulted:
- queue_unlock(&q, hb);
+ queue_unlock(hb);
ret = fault_in_user_writeable(uaddr);
if (ret)
@@ -2113,7 +2226,6 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
- struct plist_head *head;
union futex_key key = FUTEX_KEY_INIT;
u32 uval, vpid = task_pid_vnr(current);
int ret;
@@ -2153,9 +2265,7 @@ retry:
* Ok, other tasks may need to be woken up - check waiters
* and do the wakeup if necessary:
*/
- head = &hb->chain;
-
- plist_for_each_entry_safe(this, next, head, list) {
+ plist_for_each_entry_safe(this, next, &hb->chain, list) {
if (!match_futex (&this->key, &key))
continue;
ret = wake_futex_pi(uaddr, uval, this);
@@ -2316,6 +2426,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* code while we sleep on uaddr.
*/
debug_rt_mutex_init_waiter(&rt_waiter);
+ RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
+ RB_CLEAR_NODE(&rt_waiter.tree_entry);
rt_waiter.task = NULL;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
@@ -2734,8 +2846,21 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
static int __init futex_init(void)
{
u32 curval;
- int i;
+ unsigned int futex_shift;
+ unsigned long i;
+
+#if CONFIG_BASE_SMALL
+ futex_hashsize = 16;
+#else
+ futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
+#endif
+ futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
+ futex_hashsize, 0,
+ futex_hashsize < 256 ? HASH_SMALL : 0,
+ &futex_shift, NULL,
+ futex_hashsize, futex_hashsize);
+ futex_hashsize = 1UL << futex_shift;
/*
* This will fail and we want it. Some arch implementations do
* runtime detection of the futex_atomic_cmpxchg_inatomic()
@@ -2749,7 +2874,7 @@ static int __init futex_init(void)
if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
futex_cmpxchg_enabled = 1;
- for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
+ for (i = 0; i < futex_hashsize; i++) {
plist_head_init(&futex_queues[i].chain);
spin_lock_init(&futex_queues[i].lock);
}
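For scale: the hash table is now sized at boot to 256 buckets per possible CPU, rounded up to a power of two (so a 16-CPU machine gets roundup_pow_of_two(256 * 16) = 4096 cache-line-aligned buckets), instead of a fixed 256, or 16 with CONFIG_BASE_SMALL. Because the size is a power of two, hash_futex() can mask rather than divide. A tiny stand-alone illustration of that indexing, with the real jhash2() replaced by an opaque precomputed hash:

#include <stdint.h>

/* toy model of hash_futex(): a power-of-two table lets the modulo be a mask */
struct bucket {
	int dummy;	/* stands in for the lock + wait list */
};

static struct bucket *pick_bucket(struct bucket *table,
				  unsigned long table_size,
				  uint32_t key_hash)
{
	/* table_size must be a power of two, e.g. 4096 on a 16-CPU box */
	return &table[key_hash & (table_size - 1)];
}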
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 383319b..0909436 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -46,6 +46,7 @@
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
+#include <linux/sched/deadline.h>
#include <linux/timer.h>
#include <linux/freezer.h>
@@ -1610,7 +1611,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
unsigned long slack;
slack = current->timer_slack_ns;
- if (rt_task(current))
+ if (dl_task(current) || rt_task(current))
slack = 0;
hrtimer_init_on_stack(&t.timer, clockid, mode);
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 576ba75..eb8a547 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -590,6 +590,7 @@ static int very_verbose(struct lock_class *class)
/*
* Is this the address of a static object:
*/
+#ifdef __KERNEL__
static int static_obj(void *obj)
{
unsigned long start = (unsigned long) &_stext,
@@ -616,6 +617,7 @@ static int static_obj(void *obj)
*/
return is_module_address(addr) || is_module_percpu_address(addr);
}
+#endif
/*
* To make lock name printouts unique, we calculate a unique
@@ -4115,6 +4117,7 @@ void debug_check_no_locks_held(void)
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
+#ifdef __KERNEL__
void debug_show_all_locks(void)
{
struct task_struct *g, *p;
@@ -4172,6 +4175,7 @@ retry:
read_unlock(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(debug_show_all_locks);
+#endif
/*
* Careful: only use this function if you are sure that
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 7e3443f..faf6f5b 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -75,7 +75,12 @@ void debug_mutex_unlock(struct mutex *lock)
return;
DEBUG_LOCKS_WARN_ON(lock->magic != lock);
- DEBUG_LOCKS_WARN_ON(lock->owner != current);
+
+ if (!lock->owner)
+ DEBUG_LOCKS_WARN_ON(!lock->owner);
+ else
+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
+
DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
mutex_clear_owner(lock);
}
diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
index 13b243a..49b2ed3 100644
--- a/kernel/locking/rtmutex-debug.c
+++ b/kernel/locking/rtmutex-debug.c
@@ -24,7 +24,7 @@
#include <linux/kallsyms.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
-#include <linux/plist.h>
+#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debug_locks.h>
@@ -57,7 +57,7 @@ static void printk_lock(struct rt_mutex *lock, int print_owner)
void rt_mutex_debug_task_free(struct task_struct *task)
{
- DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters));
+ DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters));
DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
@@ -154,16 +154,12 @@ void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
memset(waiter, 0x11, sizeof(*waiter));
- plist_node_init(&waiter->list_entry, MAX_PRIO);
- plist_node_init(&waiter->pi_list_entry, MAX_PRIO);
waiter->deadlock_task_pid = NULL;
}
void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
{
put_pid(waiter->deadlock_task_pid);
- DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry));
- DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
memset(waiter, 0x22, sizeof(*waiter));
}
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 0dd6aec..2e960a2 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -14,6 +14,7 @@
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
+#include <linux/sched/deadline.h>
#include <linux/timer.h>
#include "rtmutex_common.h"
@@ -91,10 +92,107 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
}
#endif
+static inline int
+rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ struct rt_mutex_waiter *right)
+{
+ if (left->prio < right->prio)
+ return 1;
+
+ /*
+ * If both waiters have dl_prio(), we check the deadlines of the
+ * associated tasks.
+ * If left waiter has a dl_prio(), and we didn't return 1 above,
+ * then right waiter has a dl_prio() too.
+ */
+ if (dl_prio(left->prio))
+ return (left->task->dl.deadline < right->task->dl.deadline);
+
+ return 0;
+}
+
+static void
+rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+{
+ struct rb_node **link = &lock->waiters.rb_node;
+ struct rb_node *parent = NULL;
+ struct rt_mutex_waiter *entry;
+ int leftmost = 1;
+
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
+ if (rt_mutex_waiter_less(waiter, entry)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+ leftmost = 0;
+ }
+ }
+
+ if (leftmost)
+ lock->waiters_leftmost = &waiter->tree_entry;
+
+ rb_link_node(&waiter->tree_entry, parent, link);
+ rb_insert_color(&waiter->tree_entry, &lock->waiters);
+}
+
+static void
+rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+{
+ if (RB_EMPTY_NODE(&waiter->tree_entry))
+ return;
+
+ if (lock->waiters_leftmost == &waiter->tree_entry)
+ lock->waiters_leftmost = rb_next(&waiter->tree_entry);
+
+ rb_erase(&waiter->tree_entry, &lock->waiters);
+ RB_CLEAR_NODE(&waiter->tree_entry);
+}
+
+static void
+rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
+{
+ struct rb_node **link = &task->pi_waiters.rb_node;
+ struct rb_node *parent = NULL;
+ struct rt_mutex_waiter *entry;
+ int leftmost = 1;
+
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
+ if (rt_mutex_waiter_less(waiter, entry)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+ leftmost = 0;
+ }
+ }
+
+ if (leftmost)
+ task->pi_waiters_leftmost = &waiter->pi_tree_entry;
+
+ rb_link_node(&waiter->pi_tree_entry, parent, link);
+ rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
+}
+
+static void
+rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
+{
+ if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
+ return;
+
+ if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
+ task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);
+
+ rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
+ RB_CLEAR_NODE(&waiter->pi_tree_entry);
+}
+
/*
- * Calculate task priority from the waiter list priority
+ * Calculate task priority from the waiter tree priority
*
- * Return task->normal_prio when the waiter list is empty or when
+ * Return task->normal_prio when the waiter tree is empty or when
* the waiter is not allowed to do priority boosting
*/
int rt_mutex_getprio(struct task_struct *task)
@@ -102,10 +200,18 @@ int rt_mutex_getprio(struct task_struct *task)
if (likely(!task_has_pi_waiters(task)))
return task->normal_prio;
- return min(task_top_pi_waiter(task)->pi_list_entry.prio,
+ return min(task_top_pi_waiter(task)->prio,
task->normal_prio);
}
+struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
+{
+ if (likely(!task_has_pi_waiters(task)))
+ return NULL;
+
+ return task_top_pi_waiter(task)->task;
+}
+
/*
* Adjust the priority of a task, after its pi_waiters got modified.
*
@@ -115,7 +221,7 @@ static void __rt_mutex_adjust_prio(struct task_struct *task)
{
int prio = rt_mutex_getprio(task);
- if (task->prio != prio)
+ if (task->prio != prio || dl_prio(prio))
rt_mutex_setprio(task, prio);
}
@@ -233,7 +339,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* When deadlock detection is off then we check, if further
* priority adjustment is necessary.
*/
- if (!detect_deadlock && waiter->list_entry.prio == task->prio)
+ if (!detect_deadlock && waiter->prio == task->prio)
goto out_unlock_pi;
lock = waiter->lock;
@@ -254,9 +360,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
top_waiter = rt_mutex_top_waiter(lock);
/* Requeue the waiter */
- plist_del(&waiter->list_entry, &lock->wait_list);
- waiter->list_entry.prio = task->prio;
- plist_add(&waiter->list_entry, &lock->wait_list);
+ rt_mutex_dequeue(lock, waiter);
+ waiter->prio = task->prio;
+ rt_mutex_enqueue(lock, waiter);
/* Release the task */
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
@@ -280,17 +386,15 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
if (waiter == rt_mutex_top_waiter(lock)) {
/* Boost the owner */
- plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
- waiter->pi_list_entry.prio = waiter->list_entry.prio;
- plist_add(&waiter->pi_list_entry, &task->pi_waiters);
+ rt_mutex_dequeue_pi(task, top_waiter);
+ rt_mutex_enqueue_pi(task, waiter);
__rt_mutex_adjust_prio(task);
} else if (top_waiter == waiter) {
/* Deboost the owner */
- plist_del(&waiter->pi_list_entry, &task->pi_waiters);
+ rt_mutex_dequeue_pi(task, waiter);
waiter = rt_mutex_top_waiter(lock);
- waiter->pi_list_entry.prio = waiter->list_entry.prio;
- plist_add(&waiter->pi_list_entry, &task->pi_waiters);
+ rt_mutex_enqueue_pi(task, waiter);
__rt_mutex_adjust_prio(task);
}
@@ -355,7 +459,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
* 3) it is top waiter
*/
if (rt_mutex_has_waiters(lock)) {
- if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
+ if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
if (!waiter || waiter != rt_mutex_top_waiter(lock))
return 0;
}
@@ -369,7 +473,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
/* remove the queued waiter. */
if (waiter) {
- plist_del(&waiter->list_entry, &lock->wait_list);
+ rt_mutex_dequeue(lock, waiter);
task->pi_blocked_on = NULL;
}
@@ -379,8 +483,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
*/
if (rt_mutex_has_waiters(lock)) {
top = rt_mutex_top_waiter(lock);
- top->pi_list_entry.prio = top->list_entry.prio;
- plist_add(&top->pi_list_entry, &task->pi_waiters);
+ rt_mutex_enqueue_pi(task, top);
}
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
@@ -416,13 +519,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
__rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
- plist_node_init(&waiter->list_entry, task->prio);
- plist_node_init(&waiter->pi_list_entry, task->prio);
+ waiter->prio = task->prio;
/* Get the top priority waiter on the lock */
if (rt_mutex_has_waiters(lock))
top_waiter = rt_mutex_top_waiter(lock);
- plist_add(&waiter->list_entry, &lock->wait_list);
+ rt_mutex_enqueue(lock, waiter);
task->pi_blocked_on = waiter;
@@ -433,8 +535,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
if (waiter == rt_mutex_top_waiter(lock)) {
raw_spin_lock_irqsave(&owner->pi_lock, flags);
- plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
- plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
+ rt_mutex_dequeue_pi(owner, top_waiter);
+ rt_mutex_enqueue_pi(owner, waiter);
__rt_mutex_adjust_prio(owner);
if (owner->pi_blocked_on)
@@ -486,7 +588,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
* boosted mode and go back to normal after releasing
* lock->wait_lock.
*/
- plist_del(&waiter->pi_list_entry, &current->pi_waiters);
+ rt_mutex_dequeue_pi(current, waiter);
rt_mutex_set_owner(lock, NULL);
@@ -510,7 +612,7 @@ static void remove_waiter(struct rt_mutex *lock,
int chain_walk = 0;
raw_spin_lock_irqsave(&current->pi_lock, flags);
- plist_del(&waiter->list_entry, &lock->wait_list);
+ rt_mutex_dequeue(lock, waiter);
current->pi_blocked_on = NULL;
raw_spin_unlock_irqrestore(&current->pi_lock, flags);
@@ -521,13 +623,13 @@ static void remove_waiter(struct rt_mutex *lock,
raw_spin_lock_irqsave(&owner->pi_lock, flags);
- plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
+ rt_mutex_dequeue_pi(owner, waiter);
if (rt_mutex_has_waiters(lock)) {
struct rt_mutex_waiter *next;
next = rt_mutex_top_waiter(lock);
- plist_add(&next->pi_list_entry, &owner->pi_waiters);
+ rt_mutex_enqueue_pi(owner, next);
}
__rt_mutex_adjust_prio(owner);
@@ -537,8 +639,6 @@ static void remove_waiter(struct rt_mutex *lock,
raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
- WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
-
if (!chain_walk)
return;
@@ -565,7 +665,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
- if (!waiter || waiter->list_entry.prio == task->prio) {
+ if (!waiter || (waiter->prio == task->prio &&
+ !dl_prio(task->prio))) {
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
@@ -638,6 +739,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
int ret = 0;
debug_rt_mutex_init_waiter(&waiter);
+ RB_CLEAR_NODE(&waiter.pi_tree_entry);
+ RB_CLEAR_NODE(&waiter.tree_entry);
raw_spin_lock(&lock->wait_lock);
@@ -904,7 +1007,8 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
raw_spin_lock_init(&lock->wait_lock);
- plist_head_init(&lock->wait_list);
+ lock->waiters = RB_ROOT;
+ lock->waiters_leftmost = NULL;
debug_rt_mutex_init(lock, name);
}
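The plist-to-rbtree conversion above follows the "cached leftmost" pattern: waiters are kept in an rbtree ordered by rt_mutex_waiter_less(), and a separate pointer tracks the leftmost node so the top waiter is still found in O(1). A generic sketch of that pattern with an invented item type and key; later kernels grew rb_root_cached for exactly this, but the open-coded form below mirrors what the patch does:

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
	struct rb_node node;
	u64 key;			/* smaller key == "more urgent" */
};

struct queue {
	struct rb_root root;
	struct rb_node *leftmost;	/* cached minimum, NULL if empty */
};

static void queue_insert(struct queue *q, struct item *it)
{
	struct rb_node **link = &q->root.rb_node, *parent = NULL;
	int leftmost = 1;

	while (*link) {
		struct item *entry = rb_entry(*link, struct item, node);

		parent = *link;
		if (it->key < entry->key) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		q->leftmost = &it->node;

	rb_link_node(&it->node, parent, link);
	rb_insert_color(&it->node, &q->root);
}

static void queue_remove(struct queue *q, struct item *it)
{
	if (q->leftmost == &it->node)
		q->leftmost = rb_next(&it->node);

	rb_erase(&it->node, &q->root);
	RB_CLEAR_NODE(&it->node);
}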
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 53a66c8..7431a9c 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -40,13 +40,13 @@ extern void schedule_rt_mutex_test(struct rt_mutex *lock);
* This is the control structure for tasks blocked on a rt_mutex,
* which is allocated on the kernel stack on of the blocked task.
*
- * @list_entry: pi node to enqueue into the mutex waiters list
- * @pi_list_entry: pi node to enqueue into the mutex owner waiters list
+ * @tree_entry: pi node to enqueue into the mutex waiters tree
+ * @pi_tree_entry: pi node to enqueue into the mutex owner waiters tree
* @task: task reference to the blocked task
*/
struct rt_mutex_waiter {
- struct plist_node list_entry;
- struct plist_node pi_list_entry;
+ struct rb_node tree_entry;
+ struct rb_node pi_tree_entry;
struct task_struct *task;
struct rt_mutex *lock;
#ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -54,14 +54,15 @@ struct rt_mutex_waiter {
struct pid *deadlock_task_pid;
struct rt_mutex *deadlock_lock;
#endif
+ int prio;
};
/*
- * Various helpers to access the waiters-plist:
+ * Various helpers to access the waiters-tree:
*/
static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
{
- return !plist_head_empty(&lock->wait_list);
+ return !RB_EMPTY_ROOT(&lock->waiters);
}
static inline struct rt_mutex_waiter *
@@ -69,8 +70,8 @@ rt_mutex_top_waiter(struct rt_mutex *lock)
{
struct rt_mutex_waiter *w;
- w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter,
- list_entry);
+ w = rb_entry(lock->waiters_leftmost, struct rt_mutex_waiter,
+ tree_entry);
BUG_ON(w->lock != lock);
return w;
@@ -78,14 +79,14 @@ rt_mutex_top_waiter(struct rt_mutex *lock)
static inline int task_has_pi_waiters(struct task_struct *p)
{
- return !plist_head_empty(&p->pi_waiters);
+ return !RB_EMPTY_ROOT(&p->pi_waiters);
}
static inline struct rt_mutex_waiter *
task_top_pi_waiter(struct task_struct *p)
{
- return plist_first_entry(&p->pi_waiters, struct rt_mutex_waiter,
- pi_list_entry);
+ return rb_entry(p->pi_waiters_leftmost, struct rt_mutex_waiter,
+ pi_tree_entry);
}
/*
diff --git a/kernel/panic.c b/kernel/panic.c
index c00b4ce..6d63003 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -33,7 +33,7 @@ static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
-int panic_timeout;
+int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c7f31aa..3b89464 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -233,7 +233,8 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
/*
* Sample a process (thread group) clock for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
*/
static int cpu_clock_sample_group(const clockid_t which_clock,
struct task_struct *p,
@@ -260,30 +261,53 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
return 0;
}
+static int posix_cpu_clock_get_task(struct task_struct *tsk,
+ const clockid_t which_clock,
+ struct timespec *tp)
+{
+ int err = -EINVAL;
+ unsigned long long rtn;
+
+ if (CPUCLOCK_PERTHREAD(which_clock)) {
+ if (same_thread_group(tsk, current))
+ err = cpu_clock_sample(which_clock, tsk, &rtn);
+ } else {
+ unsigned long flags;
+ struct sighand_struct *sighand;
+
+ /*
+ * while_each_thread() is not yet entirely RCU safe,
+ * keep locking the group while sampling process
+ * clock for now.
+ */
+ sighand = lock_task_sighand(tsk, &flags);
+ if (!sighand)
+ return err;
+
+ if (tsk == current || thread_group_leader(tsk))
+ err = cpu_clock_sample_group(which_clock, tsk, &rtn);
+
+ unlock_task_sighand(tsk, &flags);
+ }
+
+ if (!err)
+ sample_to_timespec(which_clock, rtn, tp);
+
+ return err;
+}
+
static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
const pid_t pid = CPUCLOCK_PID(which_clock);
- int error = -EINVAL;
- unsigned long long rtn;
+ int err = -EINVAL;
if (pid == 0) {
/*
* Special case constant value for our own clocks.
* We don't have to do any lookup to find ourselves.
*/
- if (CPUCLOCK_PERTHREAD(which_clock)) {
- /*
- * Sampling just ourselves we can do with no locking.
- */
- error = cpu_clock_sample(which_clock,
- current, &rtn);
- } else {
- read_lock(&tasklist_lock);
- error = cpu_clock_sample_group(which_clock,
- current, &rtn);
- read_unlock(&tasklist_lock);
- }
+ err = posix_cpu_clock_get_task(current, which_clock, tp);
} else {
/*
* Find the given PID, and validate that the caller
@@ -292,29 +316,12 @@ static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
struct task_struct *p;
rcu_read_lock();
p = find_task_by_vpid(pid);
- if (p) {
- if (CPUCLOCK_PERTHREAD(which_clock)) {
- if (same_thread_group(p, current)) {
- error = cpu_clock_sample(which_clock,
- p, &rtn);
- }
- } else {
- read_lock(&tasklist_lock);
- if (thread_group_leader(p) && p->sighand) {
- error =
- cpu_clock_sample_group(which_clock,
- p, &rtn);
- }
- read_unlock(&tasklist_lock);
- }
- }
+ if (p)
+ err = posix_cpu_clock_get_task(p, which_clock, tp);
rcu_read_unlock();
}
- if (error)
- return error;
- sample_to_timespec(which_clock, rtn, tp);
- return 0;
+ return err;
}
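Every conversion in this file follows the same recipe: the old read_lock(&tasklist_lock) plus p->sighand NULL check becomes a single lock_task_sighand(), which takes the sighand lock only if the task has not been reaped and returns NULL otherwise. A condensed sketch of that pattern, with the actual timer work abstracted into a callback:

#include <linux/sched.h>
#include <linux/errno.h>

/* returns -ESRCH if @p has already been reaped, 0 otherwise */
static int with_sighand_locked(struct task_struct *p,
			       void (*op)(struct task_struct *p))
{
	struct sighand_struct *sighand;
	unsigned long flags;

	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL))
		return -ESRCH;		/* raced with task reaping */

	op(p);				/* timer list / clock work goes here */

	unlock_task_sighand(p, &flags);
	return 0;
}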
@@ -371,36 +378,40 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
*/
static int posix_cpu_timer_del(struct k_itimer *timer)
{
- struct task_struct *p = timer->it.cpu.task;
int ret = 0;
+ unsigned long flags;
+ struct sighand_struct *sighand;
+ struct task_struct *p = timer->it.cpu.task;
- if (likely(p != NULL)) {
- read_lock(&tasklist_lock);
- if (unlikely(p->sighand == NULL)) {
- /*
- * We raced with the reaping of the task.
- * The deletion should have cleared us off the list.
- */
- BUG_ON(!list_empty(&timer->it.cpu.entry));
- } else {
- spin_lock(&p->sighand->siglock);
- if (timer->it.cpu.firing)
- ret = TIMER_RETRY;
- else
- list_del(&timer->it.cpu.entry);
- spin_unlock(&p->sighand->siglock);
- }
- read_unlock(&tasklist_lock);
+ WARN_ON_ONCE(p == NULL);
- if (!ret)
- put_task_struct(p);
+ /*
+ * Protect against sighand release/switch in exit/exec and process/
+ * thread timer list entry concurrent read/writes.
+ */
+ sighand = lock_task_sighand(p, &flags);
+ if (unlikely(sighand == NULL)) {
+ /*
+ * We raced with the reaping of the task.
+ * The deletion should have cleared us off the list.
+ */
+ WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
+ } else {
+ if (timer->it.cpu.firing)
+ ret = TIMER_RETRY;
+ else
+ list_del(&timer->it.cpu.entry);
+
+ unlock_task_sighand(p, &flags);
}
+ if (!ret)
+ put_task_struct(p);
+
return ret;
}
-static void cleanup_timers_list(struct list_head *head,
- unsigned long long curr)
+static void cleanup_timers_list(struct list_head *head)
{
struct cpu_timer_list *timer, *next;
@@ -414,16 +425,11 @@ static void cleanup_timers_list(struct list_head *head,
* time for later timer_gettime calls to return.
* This must be called with the siglock held.
*/
-static void cleanup_timers(struct list_head *head,
- cputime_t utime, cputime_t stime,
- unsigned long long sum_exec_runtime)
+static void cleanup_timers(struct list_head *head)
{
-
- cputime_t ptime = utime + stime;
-
- cleanup_timers_list(head, cputime_to_expires(ptime));
- cleanup_timers_list(++head, cputime_to_expires(utime));
- cleanup_timers_list(++head, sum_exec_runtime);
+ cleanup_timers_list(head);
+ cleanup_timers_list(++head);
+ cleanup_timers_list(++head);
}
/*
@@ -433,41 +439,14 @@ static void cleanup_timers(struct list_head *head,
*/
void posix_cpu_timers_exit(struct task_struct *tsk)
{
- cputime_t utime, stime;
-
add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
sizeof(unsigned long long));
- task_cputime(tsk, &utime, &stime);
- cleanup_timers(tsk->cpu_timers,
- utime, stime, tsk->se.sum_exec_runtime);
+ cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
- struct signal_struct *const sig = tsk->signal;
- cputime_t utime, stime;
-
- task_cputime(tsk, &utime, &stime);
- cleanup_timers(tsk->signal->cpu_timers,
- utime + sig->utime, stime + sig->stime,
- tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
-}
-
-static void clear_dead_task(struct k_itimer *itimer, unsigned long long now)
-{
- struct cpu_timer_list *timer = &itimer->it.cpu;
-
- /*
- * That's all for this thread or process.
- * We leave our residual in expires to be reported.
- */
- put_task_struct(timer->task);
- timer->task = NULL;
- if (timer->expires < now) {
- timer->expires = 0;
- } else {
- timer->expires -= now;
- }
+ cleanup_timers(tsk->signal->cpu_timers);
}
static inline int expires_gt(cputime_t expires, cputime_t new_exp)
@@ -477,8 +456,7 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
/*
* Insert the timer on the appropriate list before any timers that
- * expire later. This must be called with the tasklist_lock held
- * for reading, interrupts disabled and p->sighand->siglock taken.
+ * expire later. This must be called with the sighand lock held.
*/
static void arm_timer(struct k_itimer *timer)
{
@@ -569,7 +547,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
/*
* Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
*/
static int cpu_timer_sample_group(const clockid_t which_clock,
struct task_struct *p,
@@ -608,7 +587,8 @@ static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
*/
static void posix_cpu_timer_kick_nohz(void)
{
- schedule_work(&nohz_kick_work);
+ if (context_tracking_is_enabled())
+ schedule_work(&nohz_kick_work);
}
bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
@@ -631,43 +611,39 @@ static inline void posix_cpu_timer_kick_nohz(void) { }
* If we return TIMER_RETRY, it's necessary to release the timer's lock
* and try again. (This happens when the timer is in the middle of firing.)
*/
-static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
+static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
struct itimerspec *new, struct itimerspec *old)
{
+ unsigned long flags;
+ struct sighand_struct *sighand;
struct task_struct *p = timer->it.cpu.task;
unsigned long long old_expires, new_expires, old_incr, val;
int ret;
- if (unlikely(p == NULL)) {
- /*
- * Timer refers to a dead task's clock.
- */
- return -ESRCH;
- }
+ WARN_ON_ONCE(p == NULL);
new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
- read_lock(&tasklist_lock);
/*
- * We need the tasklist_lock to protect against reaping that
- * clears p->sighand. If p has just been reaped, we can no
+ * Protect against sighand release/switch in exit/exec and p->cpu_timers
+ * and p->signal->cpu_timers read/write in arm_timer()
+ */
+ sighand = lock_task_sighand(p, &flags);
+ /*
+ * If p has just been reaped, we can no
* longer get any information about it at all.
*/
- if (unlikely(p->sighand == NULL)) {
- read_unlock(&tasklist_lock);
- put_task_struct(p);
- timer->it.cpu.task = NULL;
+ if (unlikely(sighand == NULL)) {
return -ESRCH;
}
/*
* Disarm any old timer after extracting its expiry time.
*/
- BUG_ON(!irqs_disabled());
+ WARN_ON_ONCE(!irqs_disabled());
ret = 0;
old_incr = timer->it.cpu.incr;
- spin_lock(&p->sighand->siglock);
old_expires = timer->it.cpu.expires;
if (unlikely(timer->it.cpu.firing)) {
timer->it.cpu.firing = -1;
@@ -724,12 +700,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
* disable this firing since we are already reporting
* it as an overrun (thanks to bump_cpu_timer above).
*/
- spin_unlock(&p->sighand->siglock);
- read_unlock(&tasklist_lock);
+ unlock_task_sighand(p, &flags);
goto out;
}
- if (new_expires != 0 && !(flags & TIMER_ABSTIME)) {
+ if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
new_expires += val;
}
@@ -743,9 +718,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
arm_timer(timer);
}
- spin_unlock(&p->sighand->siglock);
- read_unlock(&tasklist_lock);
-
+ unlock_task_sighand(p, &flags);
/*
* Install the new reload setting, and
* set up the signal and overrun bookkeeping.
@@ -787,7 +760,8 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
unsigned long long now;
struct task_struct *p = timer->it.cpu.task;
- int clear_dead;
+
+ WARN_ON_ONCE(p == NULL);
/*
* Easy part: convert the reload time.
@@ -800,52 +774,34 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
return;
}
- if (unlikely(p == NULL)) {
- /*
- * This task already died and the timer will never fire.
- * In this case, expires is actually the dead value.
- */
- dead:
- sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
- &itp->it_value);
- return;
- }
-
/*
* Sample the clock to take the difference with the expiry time.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &now);
- clear_dead = p->exit_state;
} else {
- read_lock(&tasklist_lock);
- if (unlikely(p->sighand == NULL)) {
+ struct sighand_struct *sighand;
+ unsigned long flags;
+
+ /*
+ * Protect against sighand release/switch in exit/exec and
+ * also make timer sampling safe if it ends up calling
+ * thread_group_cputime().
+ */
+ sighand = lock_task_sighand(p, &flags);
+ if (unlikely(sighand == NULL)) {
/*
* The process has been reaped.
* We can't even collect a sample any more.
* Call the timer disarmed, nothing else to do.
*/
- put_task_struct(p);
- timer->it.cpu.task = NULL;
timer->it.cpu.expires = 0;
- read_unlock(&tasklist_lock);
- goto dead;
+ sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
+ &itp->it_value);
} else {
cpu_timer_sample_group(timer->it_clock, p, &now);
- clear_dead = (unlikely(p->exit_state) &&
- thread_group_empty(p));
+ unlock_task_sighand(p, &flags);
}
- read_unlock(&tasklist_lock);
- }
-
- if (unlikely(clear_dead)) {
- /*
- * We've noticed that the thread is dead, but
- * not yet reaped. Take this opportunity to
- * drop our task ref.
- */
- clear_dead_task(timer, now);
- goto dead;
}
if (now < timer->it.cpu.expires) {
@@ -1059,14 +1015,12 @@ static void check_process_timers(struct task_struct *tsk,
*/
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
+ struct sighand_struct *sighand;
+ unsigned long flags;
struct task_struct *p = timer->it.cpu.task;
unsigned long long now;
- if (unlikely(p == NULL))
- /*
- * The task was cleaned up already, no future firings.
- */
- goto out;
+ WARN_ON_ONCE(p == NULL);
/*
* Fetch the current sample and update the timer's expiry time.
@@ -1074,49 +1028,45 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &now);
bump_cpu_timer(timer, now);
- if (unlikely(p->exit_state)) {
- clear_dead_task(timer, now);
+ if (unlikely(p->exit_state))
+ goto out;
+
+ /* Protect timer list r/w in arm_timer() */
+ sighand = lock_task_sighand(p, &flags);
+ if (!sighand)
goto out;
- }
- read_lock(&tasklist_lock); /* arm_timer needs it. */
- spin_lock(&p->sighand->siglock);
} else {
- read_lock(&tasklist_lock);
- if (unlikely(p->sighand == NULL)) {
+ /*
+ * Protect arm_timer() and timer sampling in case of call to
+ * thread_group_cputime().
+ */
+ sighand = lock_task_sighand(p, &flags);
+ if (unlikely(sighand == NULL)) {
/*
* The process has been reaped.
* We can't even collect a sample any more.
*/
- put_task_struct(p);
- timer->it.cpu.task = p = NULL;
timer->it.cpu.expires = 0;
- goto out_unlock;
+ goto out;
} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
- /*
- * We've noticed that the thread is dead, but
- * not yet reaped. Take this opportunity to
- * drop our task ref.
- */
- cpu_timer_sample_group(timer->it_clock, p, &now);
- clear_dead_task(timer, now);
- goto out_unlock;
+ unlock_task_sighand(p, &flags);
+ /* Optimization: if the process is dying, no need to rearm */
+ goto out;
}
- spin_lock(&p->sighand->siglock);
cpu_timer_sample_group(timer->it_clock, p, &now);
bump_cpu_timer(timer, now);
- /* Leave the tasklist_lock locked for the call below. */
+ /* Leave the sighand locked for the call below. */
}
/*
* Now re-arm for the new expiry time.
*/
- BUG_ON(!irqs_disabled());
+ WARN_ON_ONCE(!irqs_disabled());
arm_timer(timer);
- spin_unlock(&p->sighand->siglock);
-
-out_unlock:
- read_unlock(&tasklist_lock);
+ unlock_task_sighand(p, &flags);
+ /* Kick full dynticks CPUs in case they need to tick on the new timer */
+ posix_cpu_timer_kick_nohz();
out:
timer->it_overrun_last = timer->it_overrun;
timer->it_overrun = -1;
@@ -1200,7 +1150,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
struct k_itimer *timer, *next;
unsigned long flags;
- BUG_ON(!irqs_disabled());
+ WARN_ON_ONCE(!irqs_disabled());
/*
* The fast path checks that there are no expired thread or thread
@@ -1256,13 +1206,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
cpu_timer_fire(timer);
spin_unlock(&timer->it_lock);
}
-
- /*
- * In case some timers were rescheduled after the queue got emptied,
- * wake up full dynticks CPUs.
- */
- if (tsk->signal->cputimer.running)
- posix_cpu_timer_kick_nohz();
}
/*
@@ -1274,7 +1217,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
{
unsigned long long now;
- BUG_ON(clock_idx == CPUCLOCK_SCHED);
+ WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
cpu_timer_sample_group(clock_idx, tsk, &now);
if (oldval) {
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 463aa673..eacb8bd 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -81,6 +81,7 @@ void pm_vt_switch_unregister(struct device *dev)
list_for_each_entry(tmp, &pm_vt_switch_list, head) {
if (tmp->dev == dev) {
list_del(&tmp->head);
+ kfree(tmp);
break;
}
}
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 7859a0a..79c3877 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -96,19 +96,22 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-extern void kfree(const void *);
+void kfree(const void *);
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
unsigned long offset = (unsigned long)head->func;
+ rcu_lock_acquire(&rcu_callback_map);
if (__is_kfree_rcu_offset(offset)) {
RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset));
kfree((void *)head - offset);
+ rcu_lock_release(&rcu_callback_map);
return 1;
} else {
RCU_TRACE(trace_rcu_invoke_callback(rn, head));
head->func(head);
+ rcu_lock_release(&rcu_callback_map);
return 0;
}
}
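__rcu_reclaim() above distinguishes an ordinary callback from a kfree_rcu() request by whether ->func holds a small offset rather than a function pointer. A standalone userspace analogue of that encoding (struct foo, reclaim() and KFREE_OFFSET_MAX are illustrative names, not kernel APIs):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct rcu_head { struct rcu_head *next; void (*func)(struct rcu_head *); };
	struct foo { int payload; struct rcu_head rcu; };

	/* Values below this bound are offsets of the rcu_head, not callbacks. */
	#define KFREE_OFFSET_MAX 4096

	static void reclaim(struct rcu_head *head)
	{
		uintptr_t offset = (uintptr_t)head->func;

		if (offset < KFREE_OFFSET_MAX)
			free((char *)head - offset);	/* kfree_rcu()-style path */
		else
			head->func(head);		/* ordinary callback path */
	}

	int main(void)
	{
		struct foo *p = malloc(sizeof(*p));

		/* Emulate kfree_rcu(p, rcu): encode the rcu_head's offset. */
		p->rcu.func = (void (*)(struct rcu_head *))(uintptr_t)offsetof(struct foo, rcu);
		reclaim(&p->rcu);
		puts("object freed via encoded offset");
		return 0;
	}

The new rcu_lock_acquire()/rcu_lock_release() calls in the hunk simply wrap both branches so lockdep can attribute time spent inside RCU callbacks.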
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index 01d5ccb..3318d82 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -363,6 +363,29 @@ static void srcu_flip(struct srcu_struct *sp)
/*
* Enqueue an SRCU callback on the specified srcu_struct structure,
* initiating grace-period processing if it is not already running.
+ *
+ * Note that all CPUs must agree that the grace period extended beyond
+ * all pre-existing SRCU read-side critical sections. On systems with
+ * more than one CPU, this means that when "func()" is invoked, each CPU
+ * is guaranteed to have executed a full memory barrier since the end of
+ * its last corresponding SRCU read-side critical section whose beginning
+ * preceded the call to call_srcu(). It also means that each CPU executing
+ * an SRCU read-side critical section that continues beyond the start of
+ * "func()" must have executed a memory barrier after the call_rcu()
+ * but before the beginning of that SRCU read-side critical section.
+ * Note that these guarantees include CPUs that are offline, idle, or
+ * executing in user mode, as well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
+ * resulting SRCU callback function "func()", then both CPU A and CPU
+ * B are guaranteed to execute a full memory barrier during the time
+ * interval between the call to call_srcu() and the invocation of "func()".
+ * This guarantee applies even if CPU A and CPU B are the same CPU (but
+ * again only if the system has more than one CPU).
+ *
+ * Of course, these guarantees apply only for invocations of call_srcu(),
+ * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
+ * srcu_struct structure.
*/
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
void (*func)(struct rcu_head *head))
@@ -459,7 +482,30 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
* Note that it is illegal to call synchronize_srcu() from the corresponding
* SRCU read-side critical section; doing so will result in deadlock.
* However, it is perfectly legal to call synchronize_srcu() on one
- * srcu_struct from some other srcu_struct's read-side critical section.
+ * srcu_struct from some other srcu_struct's read-side critical section,
+ * as long as the resulting graph of srcu_structs is acyclic.
+ *
+ * There are memory-ordering constraints implied by synchronize_srcu().
+ * On systems with more than one CPU, when synchronize_srcu() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since
+ * the end of its last corresponding SRCU read-side critical section
+ * whose beginning preceded the call to synchronize_srcu(). In addition,
+ * each CPU having an SRCU read-side critical section that extends beyond
+ * the return from synchronize_srcu() is guaranteed to have executed a
+ * full memory barrier after the beginning of synchronize_srcu() and before
+ * the beginning of that SRCU read-side critical section. Note that these
+ * guarantees include CPUs that are offline, idle, or executing in user mode,
+ * as well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked synchronize_srcu(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_srcu(). This guarantee applies even if CPU A and CPU B
+ * are the same CPU, but again only if the system has more than one CPU.
+ *
+ * Of course, these memory-ordering guarantees apply only when
+ * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
+ * passed the same srcu_struct structure.
*/
void synchronize_srcu(struct srcu_struct *sp)
{
@@ -476,12 +522,8 @@ EXPORT_SYMBOL_GPL(synchronize_srcu);
* Wait for an SRCU grace period to elapse, but be more aggressive about
* spinning rather than blocking when waiting.
*
- * Note that it is also illegal to call synchronize_srcu_expedited()
- * from the corresponding SRCU read-side critical section;
- * doing so will result in deadlock. However, it is perfectly legal
- * to call synchronize_srcu_expedited() on one srcu_struct from some
- * other srcu_struct's read-side critical section, as long as
- * the resulting graph of srcu_structs is acyclic.
+ * Note that synchronize_srcu_expedited() has the same deadlock and
+ * memory-ordering properties as does synchronize_srcu().
*/
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
@@ -491,6 +533,7 @@ EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
/**
* srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
+ * @sp: srcu_struct on which to wait for in-flight callbacks.
*/
void srcu_barrier(struct srcu_struct *sp)
{
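The expanded call_srcu() kernel-doc above is easiest to read next to a usage sketch. The names below (my_srcu, struct widget, widget_reclaim, w) are illustrative; only the SRCU API calls themselves are real:

	DEFINE_STATIC_SRCU(my_srcu);

	struct widget {
		int data;
		struct rcu_head rh;
	};

	static void widget_reclaim(struct rcu_head *rh)
	{
		kfree(container_of(rh, struct widget, rh));
	}

	/* Reader side. */
	int idx = srcu_read_lock(&my_srcu);
	/* ... dereference widgets published under my_srcu ... */
	srcu_read_unlock(&my_srcu, idx);

	/* Updater side, after unlinking w from reader-visible structures. */
	call_srcu(&my_srcu, &w->rh, widget_reclaim);

As the comment stresses, the ordering guarantees hold only when the readers, the call_srcu() invocation, and the callback all use the same srcu_struct.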
diff --git a/kernel/rcu/torture.c b/kernel/rcu/torture.c
index 3929cd4..732f8ae 100644
--- a/kernel/rcu/torture.c
+++ b/kernel/rcu/torture.c
@@ -139,8 +139,6 @@ MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
#define VERBOSE_PRINTK_ERRSTRING(s) \
do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
-static char printk_buf[4096];
-
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
@@ -376,7 +374,7 @@ struct rcu_torture_ops {
void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
void (*cb_barrier)(void);
void (*fqs)(void);
- int (*stats)(char *page);
+ void (*stats)(char *page);
int irq_capable;
int can_boost;
const char *name;
@@ -578,21 +576,19 @@ static void srcu_torture_barrier(void)
srcu_barrier(&srcu_ctl);
}
-static int srcu_torture_stats(char *page)
+static void srcu_torture_stats(char *page)
{
- int cnt = 0;
int cpu;
int idx = srcu_ctl.completed & 0x1;
- cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
+ page += sprintf(page, "%s%s per-CPU(idx=%d):",
torture_type, TORTURE_FLAG, idx);
for_each_possible_cpu(cpu) {
- cnt += sprintf(&page[cnt], " %d(%lu,%lu)", cpu,
+ page += sprintf(page, " %d(%lu,%lu)", cpu,
per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
}
- cnt += sprintf(&page[cnt], "\n");
- return cnt;
+ sprintf(page, "\n");
}
static void srcu_torture_synchronize_expedited(void)
@@ -1052,10 +1048,9 @@ rcu_torture_reader(void *arg)
/*
* Create an RCU-torture statistics message in the specified buffer.
*/
-static int
+static void
rcu_torture_printk(char *page)
{
- int cnt = 0;
int cpu;
int i;
long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
@@ -1071,8 +1066,8 @@ rcu_torture_printk(char *page)
if (pipesummary[i] != 0)
break;
}
- cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
- cnt += sprintf(&page[cnt],
+ page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
+ page += sprintf(page,
"rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
rcu_torture_current,
rcu_torture_current_version,
@@ -1080,53 +1075,52 @@ rcu_torture_printk(char *page)
atomic_read(&n_rcu_torture_alloc),
atomic_read(&n_rcu_torture_alloc_fail),
atomic_read(&n_rcu_torture_free));
- cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
+ page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
atomic_read(&n_rcu_torture_mberror),
n_rcu_torture_boost_ktrerror,
n_rcu_torture_boost_rterror);
- cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
+ page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
n_rcu_torture_boost_failure,
n_rcu_torture_boosts,
n_rcu_torture_timers);
- cnt += sprintf(&page[cnt],
+ page += sprintf(page,
"onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
n_online_successes, n_online_attempts,
n_offline_successes, n_offline_attempts,
min_online, max_online,
min_offline, max_offline,
sum_online, sum_offline, HZ);
- cnt += sprintf(&page[cnt], "barrier: %ld/%ld:%ld",
+ page += sprintf(page, "barrier: %ld/%ld:%ld",
n_barrier_successes,
n_barrier_attempts,
n_rcu_torture_barrier_error);
- cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
+ page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
if (atomic_read(&n_rcu_torture_mberror) != 0 ||
n_rcu_torture_barrier_error != 0 ||
n_rcu_torture_boost_ktrerror != 0 ||
n_rcu_torture_boost_rterror != 0 ||
n_rcu_torture_boost_failure != 0 ||
i > 1) {
- cnt += sprintf(&page[cnt], "!!! ");
+ page += sprintf(page, "!!! ");
atomic_inc(&n_rcu_torture_error);
WARN_ON_ONCE(1);
}
- cnt += sprintf(&page[cnt], "Reader Pipe: ");
+ page += sprintf(page, "Reader Pipe: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
- cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
- cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
- cnt += sprintf(&page[cnt], "Reader Batch: ");
+ page += sprintf(page, " %ld", pipesummary[i]);
+ page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
+ page += sprintf(page, "Reader Batch: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
- cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
- cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
- cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
+ page += sprintf(page, " %ld", batchsummary[i]);
+ page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
+ page += sprintf(page, "Free-Block Circulation: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
- cnt += sprintf(&page[cnt], " %d",
+ page += sprintf(page, " %d",
atomic_read(&rcu_torture_wcount[i]));
}
- cnt += sprintf(&page[cnt], "\n");
+ page += sprintf(page, "\n");
if (cur_ops->stats)
- cnt += cur_ops->stats(&page[cnt]);
- return cnt;
+ cur_ops->stats(page);
}
/*
@@ -1140,10 +1134,17 @@ rcu_torture_printk(char *page)
static void
rcu_torture_stats_print(void)
{
- int cnt;
+ int size = nr_cpu_ids * 200 + 8192;
+ char *buf;
- cnt = rcu_torture_printk(printk_buf);
- pr_alert("%s", printk_buf);
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ pr_err("rcu-torture: Out of memory, need: %d", size);
+ return;
+ }
+ rcu_torture_printk(buf);
+ pr_alert("%s", buf);
+ kfree(buf);
}
/*
@@ -1578,6 +1579,7 @@ static int rcu_torture_barrier_cbs(void *arg)
{
long myid = (long)arg;
bool lastphase = 0;
+ bool newphase;
struct rcu_head rcu;
init_rcu_head_on_stack(&rcu);
@@ -1585,10 +1587,11 @@ static int rcu_torture_barrier_cbs(void *arg)
set_user_nice(current, 19);
do {
wait_event(barrier_cbs_wq[myid],
- barrier_phase != lastphase ||
+ (newphase =
+ ACCESS_ONCE(barrier_phase)) != lastphase ||
kthread_should_stop() ||
fullstop != FULLSTOP_DONTSTOP);
- lastphase = barrier_phase;
+ lastphase = newphase;
smp_mb(); /* ensure barrier_phase load before ->call(). */
if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
break;
@@ -1625,7 +1628,7 @@ static int rcu_torture_barrier(void *arg)
if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
break;
n_barrier_attempts++;
- cur_ops->cb_barrier();
+ cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
n_rcu_torture_barrier_error++;
WARN_ON_ONCE(1);
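The torture-stats conversion above replaces the cnt += sprintf(&page[cnt], ...) idiom with advancing the destination pointer by sprintf()'s return value. A standalone userspace illustration showing that the two styles produce identical output (buffer sizes and values are made up):

	#include <stdio.h>

	int main(void)
	{
		char a[64], b[64];
		int cnt = 0;
		char *p = b;

		/* Old style: keep a running offset into the buffer. */
		cnt += sprintf(&a[cnt], "readers: %d", 4);
		cnt += sprintf(&a[cnt], " errors: %d\n", 0);

		/* New style: advance the pointer itself. */
		p += sprintf(p, "readers: %d", 4);
		p += sprintf(p, " errors: %d\n", 0);

		printf("%s%s", a, b);
		return 0;
	}

Dropping the counter also lets rcu_torture_stats_print() switch from the fixed printk_buf[] to a kmalloc()ed buffer sized from nr_cpu_ids, as seen earlier in this file's diff.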
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index dd08198..b3d116c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -369,6 +369,9 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
bool user)
{
+ struct rcu_state *rsp;
+ struct rcu_data *rdp;
+
trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
if (!user && !is_idle_task(current)) {
struct task_struct *idle __maybe_unused =
@@ -380,6 +383,10 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
current->pid, current->comm,
idle->pid, idle->comm); /* must be idle task! */
}
+ for_each_rcu_flavor(rsp) {
+ rdp = this_cpu_ptr(rsp->rda);
+ do_nocb_deferred_wakeup(rdp);
+ }
rcu_prepare_for_idle(smp_processor_id());
/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
smp_mb__before_atomic_inc(); /* See above. */
@@ -411,11 +418,12 @@ static void rcu_eqs_enter(bool user)
rdtp = this_cpu_ptr(&rcu_dynticks);
oldval = rdtp->dynticks_nesting;
WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
- if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+ if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
rdtp->dynticks_nesting = 0;
- else
+ rcu_eqs_enter_common(rdtp, oldval, user);
+ } else {
rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
- rcu_eqs_enter_common(rdtp, oldval, user);
+ }
}
/**
@@ -533,11 +541,12 @@ static void rcu_eqs_exit(bool user)
rdtp = this_cpu_ptr(&rcu_dynticks);
oldval = rdtp->dynticks_nesting;
WARN_ON_ONCE(oldval < 0);
- if (oldval & DYNTICK_TASK_NEST_MASK)
+ if (oldval & DYNTICK_TASK_NEST_MASK) {
rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
- else
+ } else {
rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
- rcu_eqs_exit_common(rdtp, oldval, user);
+ rcu_eqs_exit_common(rdtp, oldval, user);
+ }
}
/**
@@ -716,7 +725,7 @@ bool rcu_lockdep_current_cpu_online(void)
bool ret;
if (in_nmi())
- return 1;
+ return true;
preempt_disable();
rdp = this_cpu_ptr(&rcu_sched_data);
rnp = rdp->mynode;
@@ -755,6 +764,12 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
}
/*
+ * This function really isn't for public consumption, but RCU is special in
+ * that context switches can allow the state machine to make progress.
+ */
+extern void resched_cpu(int cpu);
+
+/*
* Return true if the specified CPU has passed through a quiescent
* state by virtue of being in or having passed through a dynticks
* idle state since the last call to dyntick_save_progress_counter()
@@ -812,16 +827,34 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
*/
rcu_kick_nohz_cpu(rdp->cpu);
+ /*
+ * Alternatively, the CPU might be running in the kernel
+ * for an extended period of time without a quiescent state.
+ * Attempt to force the CPU through the scheduler to gain the
+ * needed quiescent state, but only if the grace period has gone
+ * on for an uncommonly long time. If there are many stuck CPUs,
+ * we will beat on the first one until it gets unstuck, then move
+ * to the next. Only do this for the primary flavor of RCU.
+ */
+ if (rdp->rsp == rcu_state &&
+ ULONG_CMP_GE(ACCESS_ONCE(jiffies), rdp->rsp->jiffies_resched)) {
+ rdp->rsp->jiffies_resched += 5;
+ resched_cpu(rdp->cpu);
+ }
+
return 0;
}
static void record_gp_stall_check_time(struct rcu_state *rsp)
{
unsigned long j = ACCESS_ONCE(jiffies);
+ unsigned long j1;
rsp->gp_start = j;
smp_wmb(); /* Record start time before stall time. */
- rsp->jiffies_stall = j + rcu_jiffies_till_stall_check();
+ j1 = rcu_jiffies_till_stall_check();
+ rsp->jiffies_stall = j + j1;
+ rsp->jiffies_resched = j + j1 / 2;
}
/*
@@ -1133,8 +1166,10 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
* hold it, acquire the root rcu_node structure's lock in order to
* start one (if needed).
*/
- if (rnp != rnp_root)
+ if (rnp != rnp_root) {
raw_spin_lock(&rnp_root->lock);
+ smp_mb__after_unlock_lock();
+ }
/*
* Get a new grace-period number. If there really is no grace
@@ -1354,6 +1389,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
local_irq_restore(flags);
return;
}
+ smp_mb__after_unlock_lock();
__note_gp_changes(rsp, rnp, rdp);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
@@ -1368,6 +1404,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
rcu_bind_gp_kthread();
raw_spin_lock_irq(&rnp->lock);
+ smp_mb__after_unlock_lock();
if (rsp->gp_flags == 0) {
/* Spurious wakeup, tell caller to go back to sleep. */
raw_spin_unlock_irq(&rnp->lock);
@@ -1409,6 +1446,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
*/
rcu_for_each_node_breadth_first(rsp, rnp) {
raw_spin_lock_irq(&rnp->lock);
+ smp_mb__after_unlock_lock();
rdp = this_cpu_ptr(rsp->rda);
rcu_preempt_check_blocked_tasks(rnp);
rnp->qsmask = rnp->qsmaskinit;
@@ -1463,6 +1501,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
/* Clear flag to prevent immediate re-entry. */
if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
raw_spin_lock_irq(&rnp->lock);
+ smp_mb__after_unlock_lock();
rsp->gp_flags &= ~RCU_GP_FLAG_FQS;
raw_spin_unlock_irq(&rnp->lock);
}
@@ -1480,6 +1519,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
struct rcu_node *rnp = rcu_get_root(rsp);
raw_spin_lock_irq(&rnp->lock);
+ smp_mb__after_unlock_lock();
gp_duration = jiffies - rsp->gp_start;
if (gp_duration > rsp->gp_max)
rsp->gp_max = gp_duration;
@@ -1505,16 +1545,19 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
*/
rcu_for_each_node_breadth_first(rsp, rnp) {
raw_spin_lock_irq(&rnp->lock);
+ smp_mb__after_unlock_lock();
ACCESS_ONCE(rnp->completed) = rsp->gpnum;
rdp = this_cpu_ptr(rsp->rda);
if (rnp == rdp->mynode)
__note_gp_changes(rsp, rnp, rdp);
+ /* smp_mb() provided by prior unlock-lock pair. */
nocb += rcu_future_gp_cleanup(rsp, rnp);
raw_spin_unlock_irq(&rnp->lock);
cond_resched();
}
rnp = rcu_get_root(rsp);
raw_spin_lock_irq(&rnp->lock);
+ smp_mb__after_unlock_lock();
rcu_nocb_gp_set(rnp, nocb);
rsp->completed = rsp->gpnum; /* Declare grace period done. */
@@ -1553,6 +1596,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
wait_event_interruptible(rsp->gp_wq,
ACCESS_ONCE(rsp->gp_flags) &
RCU_GP_FLAG_INIT);
+ /* Locking provides needed memory barrier. */
if (rcu_gp_init(rsp))
break;
cond_resched();
@@ -1582,6 +1626,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
(!ACCESS_ONCE(rnp->qsmask) &&
!rcu_preempt_blocked_readers_cgp(rnp)),
j);
+ /* Locking provides needed memory barriers. */
/* If grace period done, leave loop. */
if (!ACCESS_ONCE(rnp->qsmask) &&
!rcu_preempt_blocked_readers_cgp(rnp))
@@ -1749,6 +1794,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
rnp_c = rnp;
rnp = rnp->parent;
raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb__after_unlock_lock();
WARN_ON_ONCE(rnp_c->qsmask);
}
@@ -1778,6 +1824,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
rnp = rdp->mynode;
raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb__after_unlock_lock();
if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
rnp->completed == rnp->gpnum) {
@@ -1901,13 +1948,13 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
* Adopt the RCU callbacks from the specified rcu_state structure's
* orphanage. The caller must hold the ->orphan_lock.
*/
-static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
{
int i;
struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
/* No-CBs CPUs are handled specially. */
- if (rcu_nocb_adopt_orphan_cbs(rsp, rdp))
+ if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
return;
/* Do the accounting first. */
@@ -1986,12 +2033,13 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
- rcu_adopt_orphan_cbs(rsp);
+ rcu_adopt_orphan_cbs(rsp, flags);
/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
mask = rdp->grpmask; /* rnp->grplo is constant. */
do {
raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+ smp_mb__after_unlock_lock();
rnp->qsmaskinit &= ~mask;
if (rnp->qsmaskinit != 0) {
if (rnp != rdp->mynode)
@@ -2202,6 +2250,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
cond_resched();
mask = 0;
raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb__after_unlock_lock();
if (!rcu_gp_in_progress(rsp)) {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
return;
@@ -2231,6 +2280,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
rnp = rcu_get_root(rsp);
if (rnp->qsmask == 0) {
raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb__after_unlock_lock();
rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
}
}
@@ -2263,6 +2313,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
/* Reached the root of the rcu_node tree, acquire lock. */
raw_spin_lock_irqsave(&rnp_old->lock, flags);
+ smp_mb__after_unlock_lock();
raw_spin_unlock(&rnp_old->fqslock);
if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
rsp->n_force_qs_lh++;
@@ -2303,6 +2354,9 @@ __rcu_process_callbacks(struct rcu_state *rsp)
/* If there are callbacks ready, invoke them. */
if (cpu_has_callbacks_ready_to_invoke(rdp))
invoke_rcu_callbacks(rsp, rdp);
+
+ /* Do any needed deferred wakeups of rcuo kthreads. */
+ do_nocb_deferred_wakeup(rdp);
}
/*
@@ -2378,6 +2432,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
struct rcu_node *rnp_root = rcu_get_root(rsp);
raw_spin_lock(&rnp_root->lock);
+ smp_mb__after_unlock_lock();
rcu_start_gp(rsp);
raw_spin_unlock(&rnp_root->lock);
} else {
@@ -2437,7 +2492,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
if (cpu != -1)
rdp = per_cpu_ptr(rsp->rda, cpu);
- offline = !__call_rcu_nocb(rdp, head, lazy);
+ offline = !__call_rcu_nocb(rdp, head, lazy, flags);
WARN_ON_ONCE(offline);
/* _call_rcu() is illegal on offline CPU; leak the callback. */
local_irq_restore(flags);
@@ -2757,6 +2812,10 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
/* Check for CPU stalls, if enabled. */
check_cpu_stall(rsp, rdp);
+ /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
+ if (rcu_nohz_full_cpu(rsp))
+ return 0;
+
/* Is the RCU core waiting for a quiescent state from this CPU? */
if (rcu_scheduler_fully_active &&
rdp->qs_pending && !rdp->passed_quiesce) {
@@ -2790,6 +2849,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
return 1;
}
+ /* Does this CPU need a deferred NOCB wakeup? */
+ if (rcu_nocb_need_deferred_wakeup(rdp)) {
+ rdp->n_rp_nocb_defer_wakeup++;
+ return 1;
+ }
+
/* nothing to do */
rdp->n_rp_need_nothing++;
return 0;
@@ -3214,9 +3279,9 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
int i;
- for (i = rcu_num_lvls - 1; i > 0; i--)
+ rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
+ for (i = rcu_num_lvls - 2; i >= 0; i--)
rsp->levelspread[i] = CONFIG_RCU_FANOUT;
- rsp->levelspread[0] = rcu_fanout_leaf;
}
#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
static void __init rcu_init_levelspread(struct rcu_state *rsp)
@@ -3346,6 +3411,8 @@ static void __init rcu_init_geometry(void)
if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
nr_cpu_ids == NR_CPUS)
return;
+ pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
+ rcu_fanout_leaf, nr_cpu_ids);
/*
* Compute number of nodes that can be handled by an rcu_node tree
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 52be957..8c19873 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -317,6 +317,7 @@ struct rcu_data {
unsigned long n_rp_cpu_needs_gp;
unsigned long n_rp_gp_completed;
unsigned long n_rp_gp_started;
+ unsigned long n_rp_nocb_defer_wakeup;
unsigned long n_rp_need_nothing;
/* 6) _rcu_barrier() and OOM callbacks. */
@@ -335,6 +336,7 @@ struct rcu_data {
int nocb_p_count_lazy; /* (approximate). */
wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */
struct task_struct *nocb_kthread;
+ bool nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
/* 8) RCU CPU stall data. */
@@ -453,6 +455,8 @@ struct rcu_state {
/* but in jiffies. */
unsigned long jiffies_stall; /* Time at which to check */
/* for CPU stalls. */
+ unsigned long jiffies_resched; /* Time at which to resched */
+ /* a reluctant CPU. */
unsigned long gp_max; /* Maximum GP duration in */
/* jiffies. */
const char *name; /* Name of structure. */
@@ -548,9 +552,12 @@ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
- bool lazy);
+ bool lazy, unsigned long flags);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
- struct rcu_data *rdp);
+ struct rcu_data *rdp,
+ unsigned long flags);
+static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
+static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
static void rcu_kick_nohz_cpu(int cpu);
@@ -564,6 +571,7 @@ static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
unsigned long maxj);
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
+static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
#endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 08a7652..6e2ef4b 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -204,6 +204,7 @@ static void rcu_preempt_note_context_switch(int cpu)
rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
rnp = rdp->mynode;
raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb__after_unlock_lock();
t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
t->rcu_blocked_node = rnp;
@@ -312,6 +313,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
mask = rnp->grpmask;
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
+ smp_mb__after_unlock_lock();
rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}
@@ -361,10 +363,14 @@ void rcu_read_unlock_special(struct task_struct *t)
special = t->rcu_read_unlock_special;
if (special & RCU_READ_UNLOCK_NEED_QS) {
rcu_preempt_qs(smp_processor_id());
+ if (!t->rcu_read_unlock_special) {
+ local_irq_restore(flags);
+ return;
+ }
}
- /* Hardware IRQ handlers cannot block. */
- if (in_irq() || in_serving_softirq()) {
+ /* Hardware IRQ handlers cannot block, complain if they get here. */
+ if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) {
local_irq_restore(flags);
return;
}
@@ -381,6 +387,7 @@ void rcu_read_unlock_special(struct task_struct *t)
for (;;) {
rnp = t->rcu_blocked_node;
raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+ smp_mb__after_unlock_lock();
if (rnp == t->rcu_blocked_node)
break;
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
@@ -605,6 +612,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
while (!list_empty(lp)) {
t = list_entry(lp->next, typeof(*t), rcu_node_entry);
raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
+ smp_mb__after_unlock_lock();
list_del(&t->rcu_node_entry);
t->rcu_blocked_node = rnp_root;
list_add(&t->rcu_node_entry, lp_root);
@@ -629,6 +637,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
* in this case.
*/
raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
+ smp_mb__after_unlock_lock();
if (rnp_root->boost_tasks != NULL &&
rnp_root->boost_tasks != rnp_root->gp_tasks &&
rnp_root->boost_tasks != rnp_root->exp_tasks)
@@ -772,6 +781,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
unsigned long mask;
raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb__after_unlock_lock();
for (;;) {
if (!sync_rcu_preempt_exp_done(rnp)) {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -779,14 +789,17 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
}
if (rnp->parent == NULL) {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- if (wake)
+ if (wake) {
+ smp_mb(); /* EGP done before wake_up(). */
wake_up(&sync_rcu_preempt_exp_wq);
+ }
break;
}
mask = rnp->grpmask;
raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
rnp = rnp->parent;
raw_spin_lock(&rnp->lock); /* irqs already disabled */
+ smp_mb__after_unlock_lock();
rnp->expmask &= ~mask;
}
}
@@ -806,6 +819,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
int must_wait = 0;
raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb__after_unlock_lock();
if (list_empty(&rnp->blkd_tasks)) {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
} else {
@@ -886,6 +900,7 @@ void synchronize_rcu_expedited(void)
/* Initialize ->expmask for all non-leaf rcu_node structures. */
rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb__after_unlock_lock();
rnp->expmask = rnp->qsmaskinit;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
@@ -1191,6 +1206,7 @@ static int rcu_boost(struct rcu_node *rnp)
return 0; /* Nothing left to boost. */
raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb__after_unlock_lock();
/*
* Recheck under the lock: all tasks in need of boosting
@@ -1377,6 +1393,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
if (IS_ERR(t))
return PTR_ERR(t);
raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb__after_unlock_lock();
rnp->boost_kthread_task = t;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
sp.sched_priority = RCU_BOOST_PRIO;
@@ -1769,6 +1786,7 @@ static void rcu_prepare_for_idle(int cpu)
continue;
rnp = rdp->mynode;
raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+ smp_mb__after_unlock_lock();
rcu_accelerate_cbs(rsp, rnp, rdp);
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
}
@@ -1852,6 +1870,7 @@ static int rcu_oom_notify(struct notifier_block *self,
/* Wait for callbacks from earlier instance to complete. */
wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
+ smp_mb(); /* Ensure callback reuse happens after callback invocation. */
/*
* Prevent premature wakeup: ensure that all increments happen
@@ -2101,7 +2120,8 @@ bool rcu_is_nocb_cpu(int cpu)
static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
struct rcu_head *rhp,
struct rcu_head **rhtp,
- int rhcount, int rhcount_lazy)
+ int rhcount, int rhcount_lazy,
+ unsigned long flags)
{
int len;
struct rcu_head **old_rhpp;
@@ -2122,9 +2142,16 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
}
len = atomic_long_read(&rdp->nocb_q_count);
if (old_rhpp == &rdp->nocb_head) {
- wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
+ if (!irqs_disabled_flags(flags)) {
+ wake_up(&rdp->nocb_wq); /* ... if queue was empty ... */
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ TPS("WakeEmpty"));
+ } else {
+ rdp->nocb_defer_wakeup = true;
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ TPS("WakeEmptyIsDeferred"));
+ }
rdp->qlen_last_fqs_check = 0;
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeEmpty"));
} else if (len > rdp->qlen_last_fqs_check + qhimark) {
wake_up_process(t); /* ... or if many callbacks queued. */
rdp->qlen_last_fqs_check = LONG_MAX / 2;
@@ -2145,12 +2172,12 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
* "rcuo" kthread can find it.
*/
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
- bool lazy)
+ bool lazy, unsigned long flags)
{
if (!rcu_is_nocb_cpu(rdp->cpu))
return 0;
- __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
+ __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
if (__is_kfree_rcu_offset((unsigned long)rhp->func))
trace_rcu_kfree_callback(rdp->rsp->name, rhp,
(unsigned long)rhp->func,
@@ -2168,7 +2195,8 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
* not a no-CBs CPU.
*/
static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
- struct rcu_data *rdp)
+ struct rcu_data *rdp,
+ unsigned long flags)
{
long ql = rsp->qlen;
long qll = rsp->qlen_lazy;
@@ -2182,14 +2210,14 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
/* First, enqueue the donelist, if any. This preserves CB ordering. */
if (rsp->orphan_donelist != NULL) {
__call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
- rsp->orphan_donetail, ql, qll);
+ rsp->orphan_donetail, ql, qll, flags);
ql = qll = 0;
rsp->orphan_donelist = NULL;
rsp->orphan_donetail = &rsp->orphan_donelist;
}
if (rsp->orphan_nxtlist != NULL) {
__call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
- rsp->orphan_nxttail, ql, qll);
+ rsp->orphan_nxttail, ql, qll, flags);
ql = qll = 0;
rsp->orphan_nxtlist = NULL;
rsp->orphan_nxttail = &rsp->orphan_nxtlist;
@@ -2209,6 +2237,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
struct rcu_node *rnp = rdp->mynode;
raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb__after_unlock_lock();
c = rcu_start_future_gp(rnp, rdp);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -2250,6 +2279,7 @@ static int rcu_nocb_kthread(void *arg)
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
TPS("Sleep"));
wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
+ /* Memory barrier provided by xchg() below. */
} else if (firsttime) {
firsttime = 0;
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
@@ -2310,6 +2340,22 @@ static int rcu_nocb_kthread(void *arg)
return 0;
}
+/* Is a deferred wakeup of rcu_nocb_kthread() required? */
+static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+{
+ return ACCESS_ONCE(rdp->nocb_defer_wakeup);
+}
+
+/* Do a deferred wakeup of rcu_nocb_kthread(). */
+static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+ if (!rcu_nocb_need_deferred_wakeup(rdp))
+ return;
+ ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
+ wake_up(&rdp->nocb_wq);
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
+}
+
/* Initialize per-rcu_data variables for no-CBs CPUs. */
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
@@ -2365,13 +2411,14 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
}
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
- bool lazy)
+ bool lazy, unsigned long flags)
{
return 0;
}
static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
- struct rcu_data *rdp)
+ struct rcu_data *rdp,
+ unsigned long flags)
{
return 0;
}
@@ -2380,6 +2427,15 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}
+static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+{
+ return false;
+}
+
+static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+}
+
static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
{
}
@@ -2829,3 +2885,23 @@ static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
}
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+
+/*
+ * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
+ * grace-period kthread will do force_quiescent_state() processing?
+ * The idea is to avoid waking up RCU core processing on such a
+ * CPU unless the grace period has extended for too long.
+ *
+ * This code relies on the fact that all NO_HZ_FULL CPUs are also
+ * CONFIG_RCU_NOCB_CPUs.
+ */
+static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
+{
+#ifdef CONFIG_NO_HZ_FULL
+ if (tick_nohz_full_cpu(smp_processor_id()) &&
+ (!rcu_gp_in_progress(rsp) ||
+ ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ)))
+ return 1;
+#endif /* #ifdef CONFIG_NO_HZ_FULL */
+ return 0;
+}
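The no-CBs changes above avoid calling wake_up() from contexts with interrupts disabled by recording a deferred-wakeup flag and honoring it later from __rcu_process_callbacks() or on idle entry. A rough userspace analogue of that deferral pattern (all names are illustrative; pthreads stand in for the rcuo kthread machinery):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static atomic_bool defer_wakeup;	/* cf. rdp->nocb_defer_wakeup */
	static atomic_bool work_ready;

	static void *worker(void *arg)
	{
		pthread_mutex_lock(&lock);
		while (!atomic_load(&work_ready))
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
		puts("worker: processing deferred work");
		return NULL;
	}

	/* Enqueue path running where an immediate wakeup is not allowed. */
	static void enqueue_no_wake(void)
	{
		atomic_store(&work_ready, true);
		atomic_store(&defer_wakeup, true);
	}

	/* Later safe point, cf. do_nocb_deferred_wakeup(). */
	static void safe_point(void)
	{
		if (!atomic_exchange(&defer_wakeup, false))
			return;
		pthread_mutex_lock(&lock);
		pthread_cond_signal(&cond);
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, worker, NULL);
		sleep(1);		/* let the worker block */
		enqueue_no_wake();
		safe_point();		/* deferred wakeup happens here */
		pthread_join(t, NULL);
		return 0;
	}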
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 3596797..4def475 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -364,9 +364,10 @@ static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
rdp->n_rp_report_qs,
rdp->n_rp_cb_ready,
rdp->n_rp_cpu_needs_gp);
- seq_printf(m, "gpc=%ld gps=%ld nn=%ld\n",
+ seq_printf(m, "gpc=%ld gps=%ld nn=%ld ndw%ld\n",
rdp->n_rp_gp_completed,
rdp->n_rp_gp_started,
+ rdp->n_rp_nocb_defer_wakeup,
rdp->n_rp_need_nothing);
}
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 6cb3dff..802365c 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -128,6 +128,11 @@ struct lockdep_map rcu_sched_lock_map =
STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
+static struct lock_class_key rcu_callback_key;
+struct lockdep_map rcu_callback_map =
+ STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
+EXPORT_SYMBOL_GPL(rcu_callback_map);
+
int notrace debug_lockdep_rcu_enabled(void)
{
return rcu_scheduler_active && debug_locks &&
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 7b62140..9a95c8c 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -11,9 +11,10 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
endif
-obj-y += core.o proc.o clock.o cputime.o idle_task.o fair.o rt.o stop_task.o
+obj-y += core.o proc.o clock.o cputime.o
+obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
obj-y += wait.o completion.o
-obj-$(CONFIG_SMP) += cpupri.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c3ae144..6bd6a67 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -26,9 +26,10 @@
* at 0 on boot (but people really shouldn't rely on that).
*
* cpu_clock(i) -- can be used from any context, including NMI.
- * sched_clock_cpu(i) -- must be used with local IRQs disabled (implied by NMI)
* local_clock() -- is cpu_clock() on the current cpu.
*
+ * sched_clock_cpu(i)
+ *
* How:
*
* The implementation either uses sched_clock() when
@@ -50,15 +51,6 @@
* Furthermore, explicit sleep and wakeup hooks allow us to account for time
* that is otherwise invisible (TSC gets stopped).
*
- *
- * Notes:
- *
- * The !IRQ-safetly of sched_clock() and sched_clock_cpu() comes from things
- * like cpufreq interrupts that can change the base clock (TSC) multiplier
- * and cause funny jumps in time -- although the filtering provided by
- * sched_clock_cpu() should mitigate serious artifacts we cannot rely on it
- * in general since for !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK we fully rely on
- * sched_clock().
*/
#include <linux/spinlock.h>
#include <linux/hardirq.h>
@@ -66,6 +58,8 @@
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
+#include <linux/static_key.h>
+#include <linux/workqueue.h>
/*
* Scheduler clock - returns current time in nanosec units.
@@ -82,7 +76,37 @@ EXPORT_SYMBOL_GPL(sched_clock);
__read_mostly int sched_clock_running;
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-__read_mostly int sched_clock_stable;
+static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
+
+int sched_clock_stable(void)
+{
+ if (static_key_false(&__sched_clock_stable))
+ return false;
+ return true;
+}
+
+void set_sched_clock_stable(void)
+{
+ if (!sched_clock_stable())
+ static_key_slow_dec(&__sched_clock_stable);
+}
+
+static void __clear_sched_clock_stable(struct work_struct *work)
+{
+ /* XXX worry about clock continuity */
+ if (sched_clock_stable())
+ static_key_slow_inc(&__sched_clock_stable);
+}
+
+static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
+
+void clear_sched_clock_stable(void)
+{
+ if (keventd_up())
+ schedule_work(&sched_clock_work);
+ else
+ __clear_sched_clock_stable(&sched_clock_work);
+}
struct sched_clock_data {
u64 tick_raw;
@@ -242,20 +266,20 @@ u64 sched_clock_cpu(int cpu)
struct sched_clock_data *scd;
u64 clock;
- WARN_ON_ONCE(!irqs_disabled());
-
- if (sched_clock_stable)
+ if (sched_clock_stable())
return sched_clock();
if (unlikely(!sched_clock_running))
return 0ull;
+ preempt_disable();
scd = cpu_sdc(cpu);
if (cpu != smp_processor_id())
clock = sched_clock_remote(scd);
else
clock = sched_clock_local(scd);
+ preempt_enable();
return clock;
}
@@ -265,7 +289,7 @@ void sched_clock_tick(void)
struct sched_clock_data *scd;
u64 now, now_gtod;
- if (sched_clock_stable)
+ if (sched_clock_stable())
return;
if (unlikely(!sched_clock_running))
@@ -316,14 +340,10 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
*/
u64 cpu_clock(int cpu)
{
- u64 clock;
- unsigned long flags;
-
- local_irq_save(flags);
- clock = sched_clock_cpu(cpu);
- local_irq_restore(flags);
+ if (static_key_false(&__sched_clock_stable))
+ return sched_clock_cpu(cpu);
- return clock;
+ return sched_clock();
}
/*
@@ -335,14 +355,10 @@ u64 cpu_clock(int cpu)
*/
u64 local_clock(void)
{
- u64 clock;
- unsigned long flags;
+ if (static_key_false(&__sched_clock_stable))
+ return sched_clock_cpu(raw_smp_processor_id());
- local_irq_save(flags);
- clock = sched_clock_cpu(smp_processor_id());
- local_irq_restore(flags);
-
- return clock;
+ return sched_clock();
}
#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
@@ -362,12 +378,12 @@ u64 sched_clock_cpu(int cpu)
u64 cpu_clock(int cpu)
{
- return sched_clock_cpu(cpu);
+ return sched_clock();
}
u64 local_clock(void)
{
- return sched_clock_cpu(0);
+ return sched_clock();
}
#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
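The sched_clock rework above swaps the __read_mostly integer for a static key so the stable-clock test becomes a patched branch; note that in this version the key counts *instability*, which is why sched_clock_stable() returns true while the key is unset. A minimal kernel-style sketch of the static_key API as used here (my_key, fast_path() and slow_path() are hypothetical):

	static struct static_key my_key = STATIC_KEY_INIT;	/* starts false */

	if (static_key_false(&my_key))	/* out of line until the key is enabled */
		slow_path();
	else
		fast_path();

	static_key_slow_inc(&my_key);	/* patch call sites toward slow_path() */
	static_key_slow_dec(&my_key);	/* and back again */

Because flipping a key rewrites code and static_key_slow_inc() can sleep, clear_sched_clock_stable() defers the flip to a workqueue once keventd is available.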
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a88f4a4..36c951b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -296,8 +296,6 @@ __read_mostly int scheduler_running;
*/
int sysctl_sched_rt_runtime = 950000;
-
-
/*
* __task_rq_lock - lock the rq @p resides on.
*/
@@ -899,7 +897,9 @@ static inline int normal_prio(struct task_struct *p)
{
int prio;
- if (task_has_rt_policy(p))
+ if (task_has_dl_policy(p))
+ prio = MAX_DL_PRIO-1;
+ else if (task_has_rt_policy(p))
prio = MAX_RT_PRIO-1 - p->rt_priority;
else
prio = __normal_prio(p);
@@ -945,7 +945,7 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
if (prev_class->switched_from)
prev_class->switched_from(rq, p);
p->sched_class->switched_to(rq, p);
- } else if (oldprio != p->prio)
+ } else if (oldprio != p->prio || dl_task(p))
p->sched_class->prio_changed(rq, p, oldprio);
}
@@ -1499,8 +1499,7 @@ void scheduler_ipi(void)
* TIF_NEED_RESCHED remotely (for the first time) will also send
* this IPI.
*/
- if (tif_need_resched())
- set_preempt_need_resched();
+ preempt_fold_need_resched();
if (llist_empty(&this_rq()->wake_list)
&& !tick_nohz_full_cpu(smp_processor_id())
@@ -1717,6 +1716,13 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
+ RB_CLEAR_NODE(&p->dl.rb_node);
+ hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ p->dl.dl_runtime = p->dl.runtime = 0;
+ p->dl.dl_deadline = p->dl.deadline = 0;
+ p->dl.dl_period = 0;
+ p->dl.flags = 0;
+
INIT_LIST_HEAD(&p->rt.run_list);
#ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -1768,7 +1774,7 @@ void set_numabalancing_state(bool enabled)
/*
* fork()/clone()-time setup:
*/
-void sched_fork(unsigned long clone_flags, struct task_struct *p)
+int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
unsigned long flags;
int cpu = get_cpu();
@@ -1790,7 +1796,7 @@ void sched_fork(unsigned long clone_flags, struct task_struct *p)
* Revert to default priority/policy on fork if requested.
*/
if (unlikely(p->sched_reset_on_fork)) {
- if (task_has_rt_policy(p)) {
+ if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
p->policy = SCHED_NORMAL;
p->static_prio = NICE_TO_PRIO(0);
p->rt_priority = 0;
@@ -1807,8 +1813,14 @@ void sched_fork(unsigned long clone_flags, struct task_struct *p)
p->sched_reset_on_fork = 0;
}
- if (!rt_prio(p->prio))
+ if (dl_prio(p->prio)) {
+ put_cpu();
+ return -EAGAIN;
+ } else if (rt_prio(p->prio)) {
+ p->sched_class = &rt_sched_class;
+ } else {
p->sched_class = &fair_sched_class;
+ }
if (p->sched_class->task_fork)
p->sched_class->task_fork(p);
@@ -1834,11 +1846,124 @@ void sched_fork(unsigned long clone_flags, struct task_struct *p)
init_task_preempt_count(p);
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
+ RB_CLEAR_NODE(&p->pushable_dl_tasks);
#endif
put_cpu();
+ return 0;
+}
+
+unsigned long to_ratio(u64 period, u64 runtime)
+{
+ if (runtime == RUNTIME_INF)
+ return 1ULL << 20;
+
+ /*
+ * Doing this here saves a lot of checks in all
+ * the calling paths, and returning zero seems
+ * safe for them anyway.
+ */
+ if (period == 0)
+ return 0;
+
+ return div64_u64(runtime << 20, period);
+}
+
+#ifdef CONFIG_SMP
+inline struct dl_bw *dl_bw_of(int i)
+{
+ return &cpu_rq(i)->rd->dl_bw;
}
+static inline int dl_bw_cpus(int i)
+{
+ struct root_domain *rd = cpu_rq(i)->rd;
+ int cpus = 0;
+
+ for_each_cpu_and(i, rd->span, cpu_active_mask)
+ cpus++;
+
+ return cpus;
+}
+#else
+inline struct dl_bw *dl_bw_of(int i)
+{
+ return &cpu_rq(i)->dl.dl_bw;
+}
+
+static inline int dl_bw_cpus(int i)
+{
+ return 1;
+}
+#endif
+
+static inline
+void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
+{
+ dl_b->total_bw -= tsk_bw;
+}
+
+static inline
+void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
+{
+ dl_b->total_bw += tsk_bw;
+}
+
+static inline
+bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
+{
+ return dl_b->bw != -1 &&
+ dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
+}
+
+/*
+ * We must be sure that accepting a new task (or allowing changing the
+ * parameters of an existing one) is consistent with the bandwidth
+ * constraints. If yes, this function also accordingly updates the currently
+ * allocated bandwidth to reflect the new situation.
+ *
+ * This function is called while holding p's rq->lock.
+ */
+static int dl_overflow(struct task_struct *p, int policy,
+ const struct sched_attr *attr)
+{
+
+ struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+ u64 period = attr->sched_period;
+ u64 runtime = attr->sched_runtime;
+ u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
+ int cpus, err = -1;
+
+ if (new_bw == p->dl.dl_bw)
+ return 0;
+
+ /*
+ * If a task enters, leaves, or stays -deadline but changes its
+ * parameters, we may need to update the total allocated bandwidth
+ * of the container accordingly.
+ */
+ raw_spin_lock(&dl_b->lock);
+ cpus = dl_bw_cpus(task_cpu(p));
+ if (dl_policy(policy) && !task_has_dl_policy(p) &&
+ !__dl_overflow(dl_b, cpus, 0, new_bw)) {
+ __dl_add(dl_b, new_bw);
+ err = 0;
+ } else if (dl_policy(policy) && task_has_dl_policy(p) &&
+ !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
+ __dl_clear(dl_b, p->dl.dl_bw);
+ __dl_add(dl_b, new_bw);
+ err = 0;
+ } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
+ __dl_clear(dl_b, p->dl.dl_bw);
+ err = 0;
+ }
+ raw_spin_unlock(&dl_b->lock);
+
+ return err;
+}
+
+extern void init_dl_bw(struct dl_bw *dl_b);
+
/*
* wake_up_new_task - wake up a newly created task for the first time.
*
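The bandwidth-control helpers in the hunk above track -deadline utilization as runtime/period in 1/2^20 fixed point and admit a task only if the summed utilization stays below bw * cpus. A standalone userspace sketch of the same arithmetic (the function names mirror the kernel helpers, but this is an illustration, not the kernel code):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t to_ratio(uint64_t period, uint64_t runtime)
	{
		if (period == 0)
			return 0;
		return (runtime << 20) / period;
	}

	/* Would admitting new_bw (replacing old_bw) exceed bw per CPU * cpus? */
	static int dl_overflow(int64_t bw, int cpus, uint64_t total,
			       uint64_t old_bw, uint64_t new_bw)
	{
		return bw != -1 && (uint64_t)bw * cpus < total - old_bw + new_bw;
	}

	int main(void)
	{
		/* 10 ms runtime every 100 ms period: utilization ~0.1 CPU. */
		uint64_t u = to_ratio(100000000ULL, 10000000ULL);

		printf("task bw = %llu (%.3f CPUs)\n",
		       (unsigned long long)u, u / (double)(1 << 20));
		printf("11th such task on one 95%%-capped CPU overflows? %d\n",
		       dl_overflow((int64_t)(0.95 * (1 << 20)), 1, 10 * u, 0, u));
		return 0;
	}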
@@ -2003,6 +2128,9 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
if (unlikely(prev_state == TASK_DEAD)) {
task_numa_free(prev);
+ if (prev->sched_class->task_dead)
+ prev->sched_class->task_dead(prev);
+
/*
* Remove function-return probe instances associated with this
* task and put them back on the free list.
@@ -2296,7 +2424,7 @@ void scheduler_tick(void)
#ifdef CONFIG_SMP
rq->idle_balance = idle_cpu(cpu);
- trigger_load_balance(rq, cpu);
+ trigger_load_balance(rq);
#endif
rq_last_tick_reset(rq);
}
@@ -2414,10 +2542,10 @@ static inline void schedule_debug(struct task_struct *prev)
{
/*
* Test if we are atomic. Since do_exit() needs to call into
- * schedule() atomically, we ignore that path for now.
- * Otherwise, whine if we are scheduling when we should not be.
+ * schedule() atomically, we ignore that path. Otherwise whine
+ * if we are scheduling when we should not.
*/
- if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
+ if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD))
__schedule_bug(prev);
rcu_sleep_check();
@@ -2761,11 +2889,11 @@ EXPORT_SYMBOL(sleep_on_timeout);
*/
void rt_mutex_setprio(struct task_struct *p, int prio)
{
- int oldprio, on_rq, running;
+ int oldprio, on_rq, running, enqueue_flag = 0;
struct rq *rq;
const struct sched_class *prev_class;
- BUG_ON(prio < 0 || prio > MAX_PRIO);
+ BUG_ON(prio > MAX_PRIO);
rq = __task_rq_lock(p);
@@ -2788,6 +2916,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
}
trace_sched_pi_setprio(p, prio);
+ p->pi_top_task = rt_mutex_get_top_task(p);
oldprio = p->prio;
prev_class = p->sched_class;
on_rq = p->on_rq;
@@ -2797,23 +2926,49 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
if (running)
p->sched_class->put_prev_task(rq, p);
- if (rt_prio(prio))
+ /*
+ * Boosting conditions are:
+ * 1. -rt task is running and holds mutex A
+ * --> -dl task blocks on mutex A
+ *
+ * 2. -dl task is running and holds mutex A
+ * --> -dl task blocks on mutex A and could preempt the
+ * running task
+ */
+ if (dl_prio(prio)) {
+ if (!dl_prio(p->normal_prio) || (p->pi_top_task &&
+ dl_entity_preempt(&p->pi_top_task->dl, &p->dl))) {
+ p->dl.dl_boosted = 1;
+ p->dl.dl_throttled = 0;
+ enqueue_flag = ENQUEUE_REPLENISH;
+ } else
+ p->dl.dl_boosted = 0;
+ p->sched_class = &dl_sched_class;
+ } else if (rt_prio(prio)) {
+ if (dl_prio(oldprio))
+ p->dl.dl_boosted = 0;
+ if (oldprio < prio)
+ enqueue_flag = ENQUEUE_HEAD;
p->sched_class = &rt_sched_class;
- else
+ } else {
+ if (dl_prio(oldprio))
+ p->dl.dl_boosted = 0;
p->sched_class = &fair_sched_class;
+ }
p->prio = prio;
if (running)
p->sched_class->set_curr_task(rq);
if (on_rq)
- enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
+ enqueue_task(rq, p, enqueue_flag);
check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
__task_rq_unlock(rq);
}
#endif
+
void set_user_nice(struct task_struct *p, long nice)
{
int old_prio, delta, on_rq;
@@ -2831,9 +2986,9 @@ void set_user_nice(struct task_struct *p, long nice)
* The RT priorities are set via sched_setscheduler(), but we still
* allow the 'normal' nice value to be set - but as expected
* it wont have any effect on scheduling until the task is
- * SCHED_FIFO/SCHED_RR:
+ * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
*/
- if (task_has_rt_policy(p)) {
+ if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
p->static_prio = NICE_TO_PRIO(nice);
goto out_unlock;
}
@@ -2988,22 +3143,95 @@ static struct task_struct *find_process_by_pid(pid_t pid)
return pid ? find_task_by_vpid(pid) : current;
}
-/* Actually do priority change: must hold rq lock. */
+/*
+ * This function initializes the sched_dl_entity of a newly becoming
+ * SCHED_DEADLINE task.
+ *
+ * Only the static values are considered here, the actual runtime and the
+ * absolute deadline will be properly calculated when the task is enqueued
+ * for the first time with its new policy.
+ */
static void
-__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
+__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
+{
+ struct sched_dl_entity *dl_se = &p->dl;
+
+ init_dl_task_timer(dl_se);
+ dl_se->dl_runtime = attr->sched_runtime;
+ dl_se->dl_deadline = attr->sched_deadline;
+ dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
+ dl_se->flags = attr->sched_flags;
+ dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
+ dl_se->dl_throttled = 0;
+ dl_se->dl_new = 1;
+}
+
+/* Actually do priority change: must hold pi & rq lock. */
+static void __setscheduler(struct rq *rq, struct task_struct *p,
+ const struct sched_attr *attr)
{
+ int policy = attr->sched_policy;
+
+ if (policy == -1) /* setparam */
+ policy = p->policy;
+
p->policy = policy;
- p->rt_priority = prio;
+
+ if (dl_policy(policy))
+ __setparam_dl(p, attr);
+ else if (fair_policy(policy))
+ p->static_prio = NICE_TO_PRIO(attr->sched_nice);
+
+ /*
+ * __sched_setscheduler() ensures attr->sched_priority == 0 when
+ * !rt_policy. Always setting this ensures that things like
+ * getparam()/getattr() don't report silly values for !rt tasks.
+ */
+ p->rt_priority = attr->sched_priority;
+
p->normal_prio = normal_prio(p);
- /* we are holding p->pi_lock already */
p->prio = rt_mutex_getprio(p);
- if (rt_prio(p->prio))
+
+ if (dl_prio(p->prio))
+ p->sched_class = &dl_sched_class;
+ else if (rt_prio(p->prio))
p->sched_class = &rt_sched_class;
else
p->sched_class = &fair_sched_class;
+
set_load_weight(p);
}
+static void
+__getparam_dl(struct task_struct *p, struct sched_attr *attr)
+{
+ struct sched_dl_entity *dl_se = &p->dl;
+
+ attr->sched_priority = p->rt_priority;
+ attr->sched_runtime = dl_se->dl_runtime;
+ attr->sched_deadline = dl_se->dl_deadline;
+ attr->sched_period = dl_se->dl_period;
+ attr->sched_flags = dl_se->flags;
+}
+
+/*
+ * This function validates the new parameters of a -deadline task.
+ * We require the deadline to be non-zero and no smaller than the
+ * runtime, and the period to be either zero or no smaller than
+ * the deadline. Furthermore, we have to be sure that
+ * user parameters are above the internal resolution (1us); we
+ * check sched_runtime only since it is always the smaller one.
+ */
+static bool
+__checkparam_dl(const struct sched_attr *attr)
+{
+ return attr && attr->sched_deadline != 0 &&
+ (attr->sched_period == 0 ||
+ (s64)(attr->sched_period - attr->sched_deadline) >= 0) &&
+ (s64)(attr->sched_deadline - attr->sched_runtime) >= 0 &&
+ attr->sched_runtime >= (2 << (DL_SCALE - 1));
+}
+
/*
* check the target process has a UID that matches the current process's
*/
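__checkparam_dl() above encodes the usual sporadic-task sanity rules: runtime <= deadline <= period (a zero period later defaults to the deadline) plus a floor tied to DL_SCALE. A small userspace check with the same shape (DL_SCALE = 10, i.e. a ~1 us floor, is an assumption here):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DL_SCALE 10	/* assumed; gives 2 << 9 = 1024 ns minimum runtime */

	static bool checkparam_dl(uint64_t runtime, uint64_t deadline, uint64_t period)
	{
		return deadline != 0 &&
		       (period == 0 || period >= deadline) &&
		       deadline >= runtime &&
		       runtime >= (2ULL << (DL_SCALE - 1));
	}

	int main(void)
	{
		/* 10 ms runtime, 30 ms deadline, 100 ms period: accepted. */
		printf("%d\n", checkparam_dl(10000000, 30000000, 100000000));
		/* Runtime larger than the deadline: rejected. */
		printf("%d\n", checkparam_dl(50000000, 30000000, 100000000));
		return 0;
	}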
@@ -3020,10 +3248,12 @@ static bool check_same_owner(struct task_struct *p)
return match;
}
-static int __sched_setscheduler(struct task_struct *p, int policy,
- const struct sched_param *param, bool user)
+static int __sched_setscheduler(struct task_struct *p,
+ const struct sched_attr *attr,
+ bool user)
{
int retval, oldprio, oldpolicy = -1, on_rq, running;
+ int policy = attr->sched_policy;
unsigned long flags;
const struct sched_class *prev_class;
struct rq *rq;
@@ -3037,31 +3267,40 @@ recheck:
reset_on_fork = p->sched_reset_on_fork;
policy = oldpolicy = p->policy;
} else {
- reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
- policy &= ~SCHED_RESET_ON_FORK;
+ reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
- if (policy != SCHED_FIFO && policy != SCHED_RR &&
+ if (policy != SCHED_DEADLINE &&
+ policy != SCHED_FIFO && policy != SCHED_RR &&
policy != SCHED_NORMAL && policy != SCHED_BATCH &&
policy != SCHED_IDLE)
return -EINVAL;
}
+ if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
+ return -EINVAL;
+
/*
* Valid priorities for SCHED_FIFO and SCHED_RR are
* 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
* SCHED_BATCH and SCHED_IDLE is 0.
*/
- if (param->sched_priority < 0 ||
- (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
- (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
+ if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
+ (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
return -EINVAL;
- if (rt_policy(policy) != (param->sched_priority != 0))
+ if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
+ (rt_policy(policy) != (attr->sched_priority != 0)))
return -EINVAL;
/*
* Allow unprivileged RT tasks to decrease priority:
*/
if (user && !capable(CAP_SYS_NICE)) {
+ if (fair_policy(policy)) {
+ if (attr->sched_nice < TASK_NICE(p) &&
+ !can_nice(p, attr->sched_nice))
+ return -EPERM;
+ }
+
if (rt_policy(policy)) {
unsigned long rlim_rtprio =
task_rlimit(p, RLIMIT_RTPRIO);
@@ -3071,8 +3310,8 @@ recheck:
return -EPERM;
/* can't increase priority */
- if (param->sched_priority > p->rt_priority &&
- param->sched_priority > rlim_rtprio)
+ if (attr->sched_priority > p->rt_priority &&
+ attr->sched_priority > rlim_rtprio)
return -EPERM;
}
@@ -3120,14 +3359,21 @@ recheck:
/*
* If not changing anything there's no need to proceed further:
*/
- if (unlikely(policy == p->policy && (!rt_policy(policy) ||
- param->sched_priority == p->rt_priority))) {
+ if (unlikely(policy == p->policy)) {
+ if (fair_policy(policy) && attr->sched_nice != TASK_NICE(p))
+ goto change;
+ if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
+ goto change;
+ if (dl_policy(policy))
+ goto change;
+
task_rq_unlock(rq, p, &flags);
return 0;
}
+change:
-#ifdef CONFIG_RT_GROUP_SCHED
if (user) {
+#ifdef CONFIG_RT_GROUP_SCHED
/*
* Do not allow realtime tasks into groups that have no runtime
* assigned.
@@ -3138,8 +3384,24 @@ recheck:
task_rq_unlock(rq, p, &flags);
return -EPERM;
}
- }
#endif
+#ifdef CONFIG_SMP
+ if (dl_bandwidth_enabled() && dl_policy(policy)) {
+ cpumask_t *span = rq->rd->span;
+
+ /*
+ * Don't allow tasks with an affinity mask smaller than
+ * the entire root_domain to become SCHED_DEADLINE. We
+ * will also fail if there's no bandwidth available.
+ */
+ if (!cpumask_subset(span, &p->cpus_allowed) ||
+ rq->rd->dl_bw.bw == 0) {
+ task_rq_unlock(rq, p, &flags);
+ return -EPERM;
+ }
+ }
+#endif
+ }
/* recheck policy now with rq lock held */
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
@@ -3147,6 +3409,17 @@ recheck:
task_rq_unlock(rq, p, &flags);
goto recheck;
}
+
+ /*
+ * If setscheduling to SCHED_DEADLINE (or changing the parameters
+ * of a SCHED_DEADLINE task) we need to check if enough bandwidth
+ * is available.
+ */
+ if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
+ task_rq_unlock(rq, p, &flags);
+ return -EBUSY;
+ }
+
on_rq = p->on_rq;
running = task_current(rq, p);
if (on_rq)
@@ -3158,7 +3431,7 @@ recheck:
oldprio = p->prio;
prev_class = p->sched_class;
- __setscheduler(rq, p, policy, param->sched_priority);
+ __setscheduler(rq, p, attr);
if (running)
p->sched_class->set_curr_task(rq);
@@ -3173,6 +3446,26 @@ recheck:
return 0;
}
+static int _sched_setscheduler(struct task_struct *p, int policy,
+ const struct sched_param *param, bool check)
+{
+ struct sched_attr attr = {
+ .sched_policy = policy,
+ .sched_priority = param->sched_priority,
+ .sched_nice = PRIO_TO_NICE(p->static_prio),
+ };
+
+ /*
+ * Fixup the legacy SCHED_RESET_ON_FORK hack
+ */
+ if (policy & SCHED_RESET_ON_FORK) {
+ attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
+ policy &= ~SCHED_RESET_ON_FORK;
+ attr.sched_policy = policy;
+ }
+
+ return __sched_setscheduler(p, &attr, check);
+}
/**
* sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
* @p: the task in question.
@@ -3186,10 +3479,16 @@ recheck:
int sched_setscheduler(struct task_struct *p, int policy,
const struct sched_param *param)
{
- return __sched_setscheduler(p, policy, param, true);
+ return _sched_setscheduler(p, policy, param, true);
}
EXPORT_SYMBOL_GPL(sched_setscheduler);
+int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
+{
+ return __sched_setscheduler(p, attr, true);
+}
+EXPORT_SYMBOL_GPL(sched_setattr);
+
/**
* sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
* @p: the task in question.
@@ -3206,7 +3505,7 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
const struct sched_param *param)
{
- return __sched_setscheduler(p, policy, param, false);
+ return _sched_setscheduler(p, policy, param, false);
}
static int
@@ -3231,6 +3530,79 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
return retval;
}
+/*
+ * Mimics kernel/events/core.c perf_copy_attr().
+ */
+static int sched_copy_attr(struct sched_attr __user *uattr,
+ struct sched_attr *attr)
+{
+ u32 size;
+ int ret;
+
+ if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
+ return -EFAULT;
+
+ /*
+ * zero the full structure, so that a short copy leaves the rest zeroed.
+ */
+ memset(attr, 0, sizeof(*attr));
+
+ ret = get_user(size, &uattr->size);
+ if (ret)
+ return ret;
+
+ if (size > PAGE_SIZE) /* silly large */
+ goto err_size;
+
+ if (!size) /* abi compat */
+ size = SCHED_ATTR_SIZE_VER0;
+
+ if (size < SCHED_ATTR_SIZE_VER0)
+ goto err_size;
+
+ /*
+ * If we're handed a bigger struct than we know of,
+ * ensure all the unknown bits are 0 - i.e. new
+ * user-space does not rely on any kernel feature
+ * extensions we don't know about yet.
+ */
+ if (size > sizeof(*attr)) {
+ unsigned char __user *addr;
+ unsigned char __user *end;
+ unsigned char val;
+
+ addr = (void __user *)uattr + sizeof(*attr);
+ end = (void __user *)uattr + size;
+
+ for (; addr < end; addr++) {
+ ret = get_user(val, addr);
+ if (ret)
+ return ret;
+ if (val)
+ goto err_size;
+ }
+ size = sizeof(*attr);
+ }
+
+ ret = copy_from_user(attr, uattr, size);
+ if (ret)
+ return -EFAULT;
+
+ /*
+ * XXX: do we want to be lenient like existing syscalls; or do we want
+ * to be strict and return an error on out-of-bounds values?
+ */
+ attr->sched_nice = clamp(attr->sched_nice, -20, 19);
+
+out:
+ return ret;
+
+err_size:
+ put_user(sizeof(*attr), &uattr->size);
+ ret = -E2BIG;
+ goto out;
+}
+
/**
* sys_sched_setscheduler - set/change the scheduler policy and RT priority
* @pid: the pid in question.
@@ -3262,6 +3634,33 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
}
/**
+ * sys_sched_setattr - same as above, but with extended sched_attr
+ * @pid: the pid in question.
+ * @uattr: structure containing the extended parameters.
+ */
+SYSCALL_DEFINE2(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr)
+{
+ struct sched_attr attr;
+ struct task_struct *p;
+ int retval;
+
+ if (!uattr || pid < 0)
+ return -EINVAL;
+
+ if (sched_copy_attr(uattr, &attr))
+ return -EFAULT;
+
+ rcu_read_lock();
+ retval = -ESRCH;
+ p = find_process_by_pid(pid);
+ if (p != NULL)
+ retval = sched_setattr(p, &attr);
+ rcu_read_unlock();
+
+ return retval;
+}
+
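
A minimal user-space sketch of how the new syscall is meant to be driven (not part of the patch itself): the sched_attr layout and the SCHED_DEADLINE policy value are assumed from this series, the syscall takes only a pid and the attribute pointer as defined above, and __NR_sched_setattr is a placeholder for the arch-specific syscall number, since no glibc wrapper exists yet.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE		6
#endif

#ifndef __NR_sched_setattr
#define __NR_sched_setattr	314	/* x86-64; placeholder, check your arch */
#endif

struct sched_attr {			/* layout assumed from this series */
	uint32_t size;			/* sizeof(struct sched_attr) */
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;		/* SCHED_NORMAL, SCHED_BATCH */
	uint32_t sched_priority;	/* SCHED_FIFO, SCHED_RR */
	uint64_t sched_runtime;		/* SCHED_DEADLINE, in nanoseconds */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size           = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  =  10 * 1000 * 1000;	/* 10 ms of budget...   */
	attr.sched_deadline = 100 * 1000 * 1000;	/* ...by a 100 ms       */
	attr.sched_period   = 100 * 1000 * 1000;	/* deadline, per period */

	/* pid 0 means the calling thread; typically needs CAP_SYS_NICE */
	if (syscall(__NR_sched_setattr, 0, &attr) < 0) {
		perror("sched_setattr");
		return 1;
	}

	/* periodic real-time work would run here */
	return 0;
}
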
+/**
* sys_sched_getscheduler - get the policy (scheduling class) of a thread
* @pid: the pid in question.
*
@@ -3316,6 +3715,10 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
if (retval)
goto out_unlock;
+ if (task_has_dl_policy(p)) {
+ retval = -EINVAL;
+ goto out_unlock;
+ }
lp.sched_priority = p->rt_priority;
rcu_read_unlock();
@@ -3331,6 +3734,96 @@ out_unlock:
return retval;
}
+static int sched_read_attr(struct sched_attr __user *uattr,
+ struct sched_attr *attr,
+ unsigned int usize)
+{
+ int ret;
+
+ if (!access_ok(VERIFY_WRITE, uattr, usize))
+ return -EFAULT;
+
+ /*
+ * If we're handed a smaller struct than we know of,
+ * ensure all the unknown bits are 0 - i.e. old
+ * user-space does not get incomplete information.
+ */
+ if (usize < sizeof(*attr)) {
+ unsigned char *addr;
+ unsigned char *end;
+
+ addr = (void *)attr + usize;
+ end = (void *)attr + sizeof(*attr);
+
+ for (; addr < end; addr++) {
+ if (*addr)
+ goto err_size;
+ }
+
+ attr->size = usize;
+ }
+
+ ret = copy_to_user(uattr, attr, usize);
+ if (ret)
+ return -EFAULT;
+
+out:
+ return ret;
+
+err_size:
+ ret = -E2BIG;
+ goto out;
+}
+
+/**
+ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
+ * @pid: the pid in question.
+ * @uattr: structure containing the extended parameters.
+ * @size: sizeof(attr) for fwd/bwd comp.
+ */
+SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+ unsigned int, size)
+{
+ struct sched_attr attr = {
+ .size = sizeof(struct sched_attr),
+ };
+ struct task_struct *p;
+ int retval;
+
+ if (!uattr || pid < 0 || size > PAGE_SIZE ||
+ size < SCHED_ATTR_SIZE_VER0)
+ return -EINVAL;
+
+ rcu_read_lock();
+ p = find_process_by_pid(pid);
+ retval = -ESRCH;
+ if (!p)
+ goto out_unlock;
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ attr.sched_policy = p->policy;
+ if (p->sched_reset_on_fork)
+ attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
+ if (task_has_dl_policy(p))
+ __getparam_dl(p, &attr);
+ else if (task_has_rt_policy(p))
+ attr.sched_priority = p->rt_priority;
+ else
+ attr.sched_nice = TASK_NICE(p);
+
+ rcu_read_unlock();
+
+ retval = sched_read_attr(uattr, &attr, size);
+ return retval;
+
+out_unlock:
+ rcu_read_unlock();
+ return retval;
+}
+
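
The read side, again as a hedged sketch rather than anything in the patch: sched_getattr() takes the size of the caller's buffer, so an old binary keeps working against a newer kernel (and vice versa, per sched_read_attr() above). __NR_sched_getattr is another arch-specific placeholder.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_sched_getattr
#define __NR_sched_getattr	315	/* x86-64; placeholder, check your arch */
#endif

struct sched_attr {			/* same assumed layout as the sketch above */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr;

	/* the third argument tells the kernel how big our buffer is */
	if (syscall(__NR_sched_getattr, 0, &attr, sizeof(attr)) < 0) {
		perror("sched_getattr");
		return 1;
	}

	printf("policy %u nice %d rt_prio %u dl_runtime %llu ns\n",
	       attr.sched_policy, attr.sched_nice, attr.sched_priority,
	       (unsigned long long)attr.sched_runtime);
	return 0;
}
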
long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
cpumask_var_t cpus_allowed, new_mask;
@@ -3375,8 +3868,26 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
if (retval)
goto out_unlock;
+
cpuset_cpus_allowed(p, cpus_allowed);
cpumask_and(new_mask, in_mask, cpus_allowed);
+
+ /*
+ * Since bandwidth control happens on root_domain basis,
+ * if admission test is enabled, we only admit -deadline
+ * tasks allowed to run on all the CPUs in the task's
+ * root_domain.
+ */
+#ifdef CONFIG_SMP
+ if (task_has_dl_policy(p)) {
+ const struct cpumask *span = task_rq(p)->rd->span;
+
+ if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) {
+ retval = -EBUSY;
+ goto out_unlock;
+ }
+ }
+#endif
again:
retval = set_cpus_allowed_ptr(p, new_mask);
@@ -3653,7 +4164,7 @@ again:
}
double_rq_lock(rq, p_rq);
- while (task_rq(p) != p_rq) {
+ if (task_rq(p) != p_rq) {
double_rq_unlock(rq, p_rq);
goto again;
}
@@ -3742,6 +4253,7 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
case SCHED_RR:
ret = MAX_USER_RT_PRIO-1;
break;
+ case SCHED_DEADLINE:
case SCHED_NORMAL:
case SCHED_BATCH:
case SCHED_IDLE:
@@ -3768,6 +4280,7 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
case SCHED_RR:
ret = 1;
break;
+ case SCHED_DEADLINE:
case SCHED_NORMAL:
case SCHED_BATCH:
case SCHED_IDLE:
@@ -4514,13 +5027,31 @@ static int sched_cpu_active(struct notifier_block *nfb,
static int sched_cpu_inactive(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
+ unsigned long flags;
+ long cpu = (long)hcpu;
+
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_PREPARE:
- set_cpu_active((long)hcpu, false);
+ set_cpu_active(cpu, false);
+
+ /* explicitly allow suspend */
+ if (!(action & CPU_TASKS_FROZEN)) {
+ struct dl_bw *dl_b = dl_bw_of(cpu);
+ bool overflow;
+ int cpus;
+
+ raw_spin_lock_irqsave(&dl_b->lock, flags);
+ cpus = dl_bw_cpus(cpu);
+ overflow = __dl_overflow(dl_b, cpus, 0, 0);
+ raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+ if (overflow)
+ return notifier_from_errno(-EBUSY);
+ }
return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
}
+
+ return NOTIFY_DONE;
}
static int __init migration_init(void)
@@ -4739,6 +5270,8 @@ static void free_rootdomain(struct rcu_head *rcu)
struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
cpupri_cleanup(&rd->cpupri);
+ cpudl_cleanup(&rd->cpudl);
+ free_cpumask_var(rd->dlo_mask);
free_cpumask_var(rd->rto_mask);
free_cpumask_var(rd->online);
free_cpumask_var(rd->span);
@@ -4790,8 +5323,14 @@ static int init_rootdomain(struct root_domain *rd)
goto out;
if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
goto free_span;
- if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+ if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
goto free_online;
+ if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+ goto free_dlo_mask;
+
+ init_dl_bw(&rd->dl_bw);
+ if (cpudl_init(&rd->cpudl) != 0)
+ goto free_dlo_mask;
if (cpupri_init(&rd->cpupri) != 0)
goto free_rto_mask;
@@ -4799,6 +5338,8 @@ static int init_rootdomain(struct root_domain *rd)
free_rto_mask:
free_cpumask_var(rd->rto_mask);
+free_dlo_mask:
+ free_cpumask_var(rd->dlo_mask);
free_online:
free_cpumask_var(rd->online);
free_span:
@@ -6150,6 +6691,7 @@ void __init sched_init_smp(void)
free_cpumask_var(non_isolated_cpus);
init_sched_rt_class();
+ init_sched_dl_class();
}
#else
void __init sched_init_smp(void)
@@ -6219,13 +6761,15 @@ void __init sched_init(void)
#endif /* CONFIG_CPUMASK_OFFSTACK */
}
+ init_rt_bandwidth(&def_rt_bandwidth,
+ global_rt_period(), global_rt_runtime());
+ init_dl_bandwidth(&def_dl_bandwidth,
+ global_rt_period(), global_rt_runtime());
+
#ifdef CONFIG_SMP
init_defrootdomain();
#endif
- init_rt_bandwidth(&def_rt_bandwidth,
- global_rt_period(), global_rt_runtime());
-
#ifdef CONFIG_RT_GROUP_SCHED
init_rt_bandwidth(&root_task_group.rt_bandwidth,
global_rt_period(), global_rt_runtime());
@@ -6249,6 +6793,7 @@ void __init sched_init(void)
rq->calc_load_update = jiffies + LOAD_FREQ;
init_cfs_rq(&rq->cfs);
init_rt_rq(&rq->rt, rq);
+ init_dl_rq(&rq->dl, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
root_task_group.shares = ROOT_TASK_GROUP_LOAD;
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
@@ -6320,10 +6865,6 @@ void __init sched_init(void)
INIT_HLIST_HEAD(&init_task.preempt_notifiers);
#endif
-#ifdef CONFIG_RT_MUTEXES
- plist_head_init(&init_task.pi_waiters);
-#endif
-
/*
* The boot idle thread does lazy MMU switching as well:
*/
@@ -6397,13 +6938,16 @@ EXPORT_SYMBOL(__might_sleep);
static void normalize_task(struct rq *rq, struct task_struct *p)
{
const struct sched_class *prev_class = p->sched_class;
+ struct sched_attr attr = {
+ .sched_policy = SCHED_NORMAL,
+ };
int old_prio = p->prio;
int on_rq;
on_rq = p->on_rq;
if (on_rq)
dequeue_task(rq, p, 0);
- __setscheduler(rq, p, SCHED_NORMAL, 0);
+ __setscheduler(rq, p, &attr);
if (on_rq) {
enqueue_task(rq, p, 0);
resched_task(rq->curr);
@@ -6433,7 +6977,7 @@ void normalize_rt_tasks(void)
p->se.statistics.block_start = 0;
#endif
- if (!rt_task(p)) {
+ if (!dl_task(p) && !rt_task(p)) {
/*
* Renice negative nice level userspace
* tasks back to 0:
@@ -6628,16 +7172,6 @@ void sched_move_task(struct task_struct *tsk)
}
#endif /* CONFIG_CGROUP_SCHED */
-#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
-static unsigned long to_ratio(u64 period, u64 runtime)
-{
- if (runtime == RUNTIME_INF)
- return 1ULL << 20;
-
- return div64_u64(runtime << 20, period);
-}
-#endif
-
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Ensure that the real time constraints are schedulable.
@@ -6811,24 +7345,13 @@ static long sched_group_rt_period(struct task_group *tg)
do_div(rt_period_us, NSEC_PER_USEC);
return rt_period_us;
}
+#endif /* CONFIG_RT_GROUP_SCHED */
+#ifdef CONFIG_RT_GROUP_SCHED
static int sched_rt_global_constraints(void)
{
- u64 runtime, period;
int ret = 0;
- if (sysctl_sched_rt_period <= 0)
- return -EINVAL;
-
- runtime = global_rt_runtime();
- period = global_rt_period();
-
- /*
- * Sanity check on the sysctl variables.
- */
- if (runtime > period && runtime != RUNTIME_INF)
- return -EINVAL;
-
mutex_lock(&rt_constraints_mutex);
read_lock(&tasklist_lock);
ret = __rt_schedulable(NULL, 0, 0);
@@ -6851,17 +7374,7 @@ static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
static int sched_rt_global_constraints(void)
{
unsigned long flags;
- int i;
-
- if (sysctl_sched_rt_period <= 0)
- return -EINVAL;
-
- /*
- * There's always some RT tasks in the root group
- * -- migration, kstopmachine etc..
- */
- if (sysctl_sched_rt_runtime == 0)
- return -EBUSY;
+ int i, ret = 0;
raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
for_each_possible_cpu(i) {
@@ -6873,36 +7386,88 @@ static int sched_rt_global_constraints(void)
}
raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
- return 0;
+ return ret;
}
#endif /* CONFIG_RT_GROUP_SCHED */
-int sched_rr_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
+static int sched_dl_global_constraints(void)
{
- int ret;
- static DEFINE_MUTEX(mutex);
+ u64 runtime = global_rt_runtime();
+ u64 period = global_rt_period();
+ u64 new_bw = to_ratio(period, runtime);
+ int cpu, ret = 0;
- mutex_lock(&mutex);
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
- /* make sure that internally we keep jiffies */
- /* also, writing zero resets timeslice to default */
- if (!ret && write) {
- sched_rr_timeslice = sched_rr_timeslice <= 0 ?
- RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+ /*
+ * Here we want to check that the bandwidth is not being set to a
+ * value smaller than the bandwidth currently allocated in any of
+ * the root_domains.
+ *
+ * FIXME: Cycling on all the CPUs is overkill, but simpler than
+ * cycling on the root_domains... Discussion on different/better
+ * solutions is welcome!
+ */
+ for_each_possible_cpu(cpu) {
+ struct dl_bw *dl_b = dl_bw_of(cpu);
+
+ raw_spin_lock(&dl_b->lock);
+ if (new_bw < dl_b->total_bw)
+ ret = -EBUSY;
+ raw_spin_unlock(&dl_b->lock);
+
+ if (ret)
+ break;
}
- mutex_unlock(&mutex);
+
return ret;
}
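
To make the admission numbers concrete: to_ratio() (whose #ifdef'd copy is removed in a hunk above; the series keeps it as a shared helper, cf. the extern declaration in deadline.c below) expresses bandwidth as a 20-bit fixed-point fraction, runtime << 20 / period. A standalone sketch of that arithmetic with the default rt sysctls, which the -deadline admission control now shares:

#include <stdint.h>
#include <stdio.h>

/* mirrors the kernel's to_ratio(), minus the RUNTIME_INF special case */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << 20) / period;
}

int main(void)
{
	/* defaults: sched_rt_runtime_us = 950000, sched_rt_period_us = 1000000 */
	uint64_t new_bw = to_ratio(1000000ULL * 1000, 950000ULL * 1000);

	/*
	 * 996147 / 2^20 ~= 0.95, i.e. with the default sysctls -deadline
	 * tasks may reserve up to ~95% of each CPU in a root_domain.
	 */
	printf("new_bw = %llu (%.2f)\n",
	       (unsigned long long)new_bw, new_bw / (double)(1 << 20));
	return 0;
}
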
+static void sched_dl_do_global(void)
+{
+ u64 new_bw = -1;
+ int cpu;
+
+ def_dl_bandwidth.dl_period = global_rt_period();
+ def_dl_bandwidth.dl_runtime = global_rt_runtime();
+
+ if (global_rt_runtime() != RUNTIME_INF)
+ new_bw = to_ratio(global_rt_period(), global_rt_runtime());
+
+ /*
+ * FIXME: As above...
+ */
+ for_each_possible_cpu(cpu) {
+ struct dl_bw *dl_b = dl_bw_of(cpu);
+
+ raw_spin_lock(&dl_b->lock);
+ dl_b->bw = new_bw;
+ raw_spin_unlock(&dl_b->lock);
+ }
+}
+
+static int sched_rt_global_validate(void)
+{
+ if (sysctl_sched_rt_period <= 0)
+ return -EINVAL;
+
+ if (sysctl_sched_rt_runtime > sysctl_sched_rt_period)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void sched_rt_do_global(void)
+{
+ def_rt_bandwidth.rt_runtime = global_rt_runtime();
+ def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
+}
+
int sched_rt_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
- int ret;
int old_period, old_runtime;
static DEFINE_MUTEX(mutex);
+ int ret;
mutex_lock(&mutex);
old_period = sysctl_sched_rt_period;
@@ -6911,21 +7476,50 @@ int sched_rt_handler(struct ctl_table *table, int write,
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (!ret && write) {
+ ret = sched_rt_global_validate();
+ if (ret)
+ goto undo;
+
ret = sched_rt_global_constraints();
- if (ret) {
- sysctl_sched_rt_period = old_period;
- sysctl_sched_rt_runtime = old_runtime;
- } else {
- def_rt_bandwidth.rt_runtime = global_rt_runtime();
- def_rt_bandwidth.rt_period =
- ns_to_ktime(global_rt_period());
- }
+ if (ret)
+ goto undo;
+
+ ret = sched_dl_global_constraints();
+ if (ret)
+ goto undo;
+
+ sched_rt_do_global();
+ sched_dl_do_global();
+ }
+ if (0) {
+undo:
+ sysctl_sched_rt_period = old_period;
+ sysctl_sched_rt_runtime = old_runtime;
}
mutex_unlock(&mutex);
return ret;
}
+int sched_rr_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+ static DEFINE_MUTEX(mutex);
+
+ mutex_lock(&mutex);
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ /* make sure that internally we keep jiffies */
+ /* also, writing zero resets timeslice to default */
+ if (!ret && write) {
+ sched_rr_timeslice = sched_rr_timeslice <= 0 ?
+ RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+ }
+ mutex_unlock(&mutex);
+ return ret;
+}
+
#ifdef CONFIG_CGROUP_SCHED
static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
new file mode 100644
index 0000000..045fc74
--- /dev/null
+++ b/kernel/sched/cpudeadline.c
@@ -0,0 +1,216 @@
+/*
+ * kernel/sched/cpudeadline.c
+ *
+ * Global CPU deadline management
+ *
+ * Author: Juri Lelli <j.lelli@sssup.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include "cpudeadline.h"
+
+static inline int parent(int i)
+{
+ return (i - 1) >> 1;
+}
+
+static inline int left_child(int i)
+{
+ return (i << 1) + 1;
+}
+
+static inline int right_child(int i)
+{
+ return (i << 1) + 2;
+}
+
+static inline int dl_time_before(u64 a, u64 b)
+{
+ return (s64)(a - b) < 0;
+}
+
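
dl_time_before() compares deadlines through a signed difference, so the ordering stays correct even if the u64 clock values wrap around; a tiny standalone check of that property (names local to this sketch):

#include <assert.h>
#include <stdint.h>

static inline int dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	/* plain case: 10 is before 20 */
	assert(dl_time_before(10, 20));

	/* wrapped case: b overflowed past zero but is still "after" a */
	uint64_t a = UINT64_MAX - 5;
	uint64_t b = 10;

	assert(dl_time_before(a, b));
	assert(!dl_time_before(b, a));
	return 0;
}
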
+static void cpudl_exchange(struct cpudl *cp, int a, int b)
+{
+ int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu;
+
+ swap(cp->elements[a], cp->elements[b]);
+ swap(cp->cpu_to_idx[cpu_a], cp->cpu_to_idx[cpu_b]);
+}
+
+static void cpudl_heapify(struct cpudl *cp, int idx)
+{
+ int l, r, largest;
+
+ /* adapted from lib/prio_heap.c */
+ while (1) {
+ l = left_child(idx);
+ r = right_child(idx);
+ largest = idx;
+
+ if ((l < cp->size) && dl_time_before(cp->elements[idx].dl,
+ cp->elements[l].dl))
+ largest = l;
+ if ((r < cp->size) && dl_time_before(cp->elements[largest].dl,
+ cp->elements[r].dl))
+ largest = r;
+ if (largest == idx)
+ break;
+
+ /* Push idx down the heap one level and bump one up */
+ cpudl_exchange(cp, largest, idx);
+ idx = largest;
+ }
+}
+
+static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
+{
+ WARN_ON(idx > num_present_cpus() || idx == IDX_INVALID);
+
+ if (dl_time_before(new_dl, cp->elements[idx].dl)) {
+ cp->elements[idx].dl = new_dl;
+ cpudl_heapify(cp, idx);
+ } else {
+ cp->elements[idx].dl = new_dl;
+ while (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl,
+ cp->elements[idx].dl)) {
+ cpudl_exchange(cp, idx, parent(idx));
+ idx = parent(idx);
+ }
+ }
+}
+
+static inline int cpudl_maximum(struct cpudl *cp)
+{
+ return cp->elements[0].cpu;
+}
+
+/*
+ * cpudl_find - find the best (later-dl) CPU in the system
+ * @cp: the cpudl max-heap context
+ * @p: the task
+ * @later_mask: a mask to fill in with the selected CPUs (or NULL)
+ *
+ * Returns: int - best CPU (heap maximum if suitable)
+ */
+int cpudl_find(struct cpudl *cp, struct task_struct *p,
+ struct cpumask *later_mask)
+{
+ int best_cpu = -1;
+ const struct sched_dl_entity *dl_se = &p->dl;
+
+ if (later_mask && cpumask_and(later_mask, cp->free_cpus,
+ &p->cpus_allowed) && cpumask_and(later_mask,
+ later_mask, cpu_active_mask)) {
+ best_cpu = cpumask_any(later_mask);
+ goto out;
+ } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
+ dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
+ best_cpu = cpudl_maximum(cp);
+ if (later_mask)
+ cpumask_set_cpu(best_cpu, later_mask);
+ }
+
+out:
+ WARN_ON(best_cpu > num_present_cpus() && best_cpu != -1);
+
+ return best_cpu;
+}
+
+/*
+ * cpudl_set - update the cpudl max-heap
+ * @cp: the cpudl max-heap context
+ * @cpu: the target cpu
+ * @dl: the new earliest deadline for this cpu
+ *
+ * Notes: assumes cpu_rq(cpu)->lock is locked
+ *
+ * Returns: (void)
+ */
+void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
+{
+ int old_idx, new_cpu;
+ unsigned long flags;
+
+ WARN_ON(cpu > num_present_cpus());
+
+ raw_spin_lock_irqsave(&cp->lock, flags);
+ old_idx = cp->cpu_to_idx[cpu];
+ if (!is_valid) {
+ /* remove item */
+ if (old_idx == IDX_INVALID) {
+ /*
+ * Nothing to remove if old_idx was invalid.
+ * This could happen if rq_offline_dl is
+ * called for a CPU without -dl tasks running.
+ */
+ goto out;
+ }
+ new_cpu = cp->elements[cp->size - 1].cpu;
+ cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
+ cp->elements[old_idx].cpu = new_cpu;
+ cp->size--;
+ cp->cpu_to_idx[new_cpu] = old_idx;
+ cp->cpu_to_idx[cpu] = IDX_INVALID;
+ while (old_idx > 0 && dl_time_before(
+ cp->elements[parent(old_idx)].dl,
+ cp->elements[old_idx].dl)) {
+ cpudl_exchange(cp, old_idx, parent(old_idx));
+ old_idx = parent(old_idx);
+ }
+ cpumask_set_cpu(cpu, cp->free_cpus);
+ cpudl_heapify(cp, old_idx);
+
+ goto out;
+ }
+
+ if (old_idx == IDX_INVALID) {
+ cp->size++;
+ cp->elements[cp->size - 1].dl = 0;
+ cp->elements[cp->size - 1].cpu = cpu;
+ cp->cpu_to_idx[cpu] = cp->size - 1;
+ cpudl_change_key(cp, cp->size - 1, dl);
+ cpumask_clear_cpu(cpu, cp->free_cpus);
+ } else {
+ cpudl_change_key(cp, old_idx, dl);
+ }
+
+out:
+ raw_spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+/*
+ * cpudl_init - initialize the cpudl structure
+ * @cp: the cpudl max-heap context
+ */
+int cpudl_init(struct cpudl *cp)
+{
+ int i;
+
+ memset(cp, 0, sizeof(*cp));
+ raw_spin_lock_init(&cp->lock);
+ cp->size = 0;
+ for (i = 0; i < NR_CPUS; i++)
+ cp->cpu_to_idx[i] = IDX_INVALID;
+ if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_setall(cp->free_cpus);
+
+ return 0;
+}
+
+/*
+ * cpudl_cleanup - clean up the cpudl structure
+ * @cp: the cpudl max-heap context
+ */
+void cpudl_cleanup(struct cpudl *cp)
+{
+ /*
+ * nothing to do for the moment
+ */
+}
diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h
new file mode 100644
index 0000000..a202789
--- /dev/null
+++ b/kernel/sched/cpudeadline.h
@@ -0,0 +1,33 @@
+#ifndef _LINUX_CPUDL_H
+#define _LINUX_CPUDL_H
+
+#include <linux/sched.h>
+
+#define IDX_INVALID -1
+
+struct array_item {
+ u64 dl;
+ int cpu;
+};
+
+struct cpudl {
+ raw_spinlock_t lock;
+ int size;
+ int cpu_to_idx[NR_CPUS];
+ struct array_item elements[NR_CPUS];
+ cpumask_var_t free_cpus;
+};
+
+
+#ifdef CONFIG_SMP
+int cpudl_find(struct cpudl *cp, struct task_struct *p,
+ struct cpumask *later_mask);
+void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid);
+int cpudl_init(struct cpudl *cp);
+void cpudl_cleanup(struct cpudl *cp);
+#else
+#define cpudl_set(cp, cpu, dl, is_valid) do { } while (0)
+#define cpudl_init(cp) do { } while (0)
+#endif /* CONFIG_SMP */
+
+#endif /* _LINUX_CPUDL_H */
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
new file mode 100644
index 0000000..0de2482
--- /dev/null
+++ b/kernel/sched/deadline.c
@@ -0,0 +1,1640 @@
+/*
+ * Deadline Scheduling Class (SCHED_DEADLINE)
+ *
+ * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
+ *
+ * Tasks that periodically execute their instances for less than their
+ * runtime won't miss any of their deadlines.
+ * Tasks that are not periodic or sporadic or that try to execute more
+ * than their reserved bandwidth will be slowed down (and may potentially
+ * miss some of their deadlines), and won't affect any other task.
+ *
+ * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
+ * Juri Lelli <juri.lelli@gmail.com>,
+ * Michael Trimarchi <michael@amarulasolutions.com>,
+ * Fabio Checconi <fchecconi@gmail.com>
+ */
+#include "sched.h"
+
+#include <linux/slab.h>
+
+struct dl_bandwidth def_dl_bandwidth;
+
+static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
+{
+ return container_of(dl_se, struct task_struct, dl);
+}
+
+static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
+{
+ return container_of(dl_rq, struct rq, dl);
+}
+
+static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
+{
+ struct task_struct *p = dl_task_of(dl_se);
+ struct rq *rq = task_rq(p);
+
+ return &rq->dl;
+}
+
+static inline int on_dl_rq(struct sched_dl_entity *dl_se)
+{
+ return !RB_EMPTY_NODE(&dl_se->rb_node);
+}
+
+static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
+{
+ struct sched_dl_entity *dl_se = &p->dl;
+
+ return dl_rq->rb_leftmost == &dl_se->rb_node;
+}
+
+void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
+{
+ raw_spin_lock_init(&dl_b->dl_runtime_lock);
+ dl_b->dl_period = period;
+ dl_b->dl_runtime = runtime;
+}
+
+extern unsigned long to_ratio(u64 period, u64 runtime);
+
+void init_dl_bw(struct dl_bw *dl_b)
+{
+ raw_spin_lock_init(&dl_b->lock);
+ raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
+ if (global_rt_runtime() == RUNTIME_INF)
+ dl_b->bw = -1;
+ else
+ dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
+ raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
+ dl_b->total_bw = 0;
+}
+
+void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
+{
+ dl_rq->rb_root = RB_ROOT;
+
+#ifdef CONFIG_SMP
+ /* zero means no -deadline tasks */
+ dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
+
+ dl_rq->dl_nr_migratory = 0;
+ dl_rq->overloaded = 0;
+ dl_rq->pushable_dl_tasks_root = RB_ROOT;
+#else
+ init_dl_bw(&dl_rq->dl_bw);
+#endif
+}
+
+#ifdef CONFIG_SMP
+
+static inline int dl_overloaded(struct rq *rq)
+{
+ return atomic_read(&rq->rd->dlo_count);
+}
+
+static inline void dl_set_overload(struct rq *rq)
+{
+ if (!rq->online)
+ return;
+
+ cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
+ /*
+ * Must be visible before the overload count is
+ * set (as in sched_rt.c).
+ *
+ * Matched by the barrier in pull_dl_task().
+ */
+ smp_wmb();
+ atomic_inc(&rq->rd->dlo_count);
+}
+
+static inline void dl_clear_overload(struct rq *rq)
+{
+ if (!rq->online)
+ return;
+
+ atomic_dec(&rq->rd->dlo_count);
+ cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
+}
+
+static void update_dl_migration(struct dl_rq *dl_rq)
+{
+ if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) {
+ if (!dl_rq->overloaded) {
+ dl_set_overload(rq_of_dl_rq(dl_rq));
+ dl_rq->overloaded = 1;
+ }
+ } else if (dl_rq->overloaded) {
+ dl_clear_overload(rq_of_dl_rq(dl_rq));
+ dl_rq->overloaded = 0;
+ }
+}
+
+static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+ struct task_struct *p = dl_task_of(dl_se);
+ dl_rq = &rq_of_dl_rq(dl_rq)->dl;
+
+ dl_rq->dl_nr_total++;
+ if (p->nr_cpus_allowed > 1)
+ dl_rq->dl_nr_migratory++;
+
+ update_dl_migration(dl_rq);
+}
+
+static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+ struct task_struct *p = dl_task_of(dl_se);
+ dl_rq = &rq_of_dl_rq(dl_rq)->dl;
+
+ dl_rq->dl_nr_total--;
+ if (p->nr_cpus_allowed > 1)
+ dl_rq->dl_nr_migratory--;
+
+ update_dl_migration(dl_rq);
+}
+
+/*
+ * The list of pushable -deadline tasks is not a plist, like in
+ * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
+ */
+static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
+{
+ struct dl_rq *dl_rq = &rq->dl;
+ struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
+ struct rb_node *parent = NULL;
+ struct task_struct *entry;
+ int leftmost = 1;
+
+ BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
+
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct task_struct,
+ pushable_dl_tasks);
+ if (dl_entity_preempt(&p->dl, &entry->dl))
+ link = &parent->rb_left;
+ else {
+ link = &parent->rb_right;
+ leftmost = 0;
+ }
+ }
+
+ if (leftmost)
+ dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
+
+ rb_link_node(&p->pushable_dl_tasks, parent, link);
+ rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
+}
+
+static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
+{
+ struct dl_rq *dl_rq = &rq->dl;
+
+ if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
+ return;
+
+ if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
+ struct rb_node *next_node;
+
+ next_node = rb_next(&p->pushable_dl_tasks);
+ dl_rq->pushable_dl_tasks_leftmost = next_node;
+ }
+
+ rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
+ RB_CLEAR_NODE(&p->pushable_dl_tasks);
+}
+
+static inline int has_pushable_dl_tasks(struct rq *rq)
+{
+ return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
+}
+
+static int push_dl_task(struct rq *rq);
+
+#else
+
+static inline
+void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline
+void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline
+void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+}
+
+static inline
+void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+}
+
+#endif /* CONFIG_SMP */
+
+static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
+static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
+static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
+ int flags);
+
+/*
+ * We are being explicitly informed that a new instance is starting,
+ * and this means that:
+ * - the absolute deadline of the entity has to be placed at
+ * current time + relative deadline;
+ * - the runtime of the entity has to be set to the maximum value.
+ *
+ * The ability to signal such an event is useful whenever a -deadline
+ * entity wants to (try to!) synchronize its behaviour with the
+ * scheduler's, and to (try to!) reconcile itself with its own
+ * scheduling parameters.
+ */
+static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
+ struct sched_dl_entity *pi_se)
+{
+ struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+ struct rq *rq = rq_of_dl_rq(dl_rq);
+
+ WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
+
+ /*
+ * We use the regular wall clock time to set deadlines in the
+ * future; in fact, we must consider execution overheads (time
+ * spent on hardirq context, etc.).
+ */
+ dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
+ dl_se->runtime = pi_se->dl_runtime;
+ dl_se->dl_new = 0;
+}
+
+/*
+ * Pure Earliest Deadline First (EDF) scheduling does not deal with the
+ * possibility of an entity lasting more than what it declared, and thus
+ * exhausting its runtime.
+ *
+ * Here we are interested in making runtime overrun possible, but we do
+ * not want an entity which is misbehaving to affect the scheduling of all
+ * other entities.
+ * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
+ * is used, in order to confine each entity within its own bandwidth.
+ *
+ * This function deals exactly with that, and ensures that when the runtime
+ * of an entity is replenished, its deadline is also postponed. That ensures
+ * the overrunning entity can't interfere with other entities in the system
+ * and can't make them miss their deadlines. Reasons why this kind of overrun
+ * could happen are, typically, an entity voluntarily trying to exceed its
+ * runtime, or it just underestimated it during sched_setattr().
+ */
+static void replenish_dl_entity(struct sched_dl_entity *dl_se,
+ struct sched_dl_entity *pi_se)
+{
+ struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+ struct rq *rq = rq_of_dl_rq(dl_rq);
+
+ BUG_ON(pi_se->dl_runtime <= 0);
+
+ /*
+ * This could be the case for a !-dl task that is boosted.
+ * Just go with full inherited parameters.
+ */
+ if (dl_se->dl_deadline == 0) {
+ dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
+ dl_se->runtime = pi_se->dl_runtime;
+ }
+
+ /*
+ * We keep moving the deadline away until we get some
+ * available runtime for the entity. This ensures correct
+ * handling of situations where the runtime overrun is
+ * arbitrarily large.
+ */
+ while (dl_se->runtime <= 0) {
+ dl_se->deadline += pi_se->dl_period;
+ dl_se->runtime += pi_se->dl_runtime;
+ }
+
+ /*
+ * At this point, the deadline really should be "in
+ * the future" with respect to rq->clock. If it's
+ * not, we are, for some reason, lagging too much!
+ * Anyway, after having warned userspace about that,
+ * we still try to keep things running by
+ * resetting the deadline and the budget of the
+ * entity.
+ */
+ if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
+ static bool lag_once = false;
+
+ if (!lag_once) {
+ lag_once = true;
+ printk_sched("sched: DL replenish lagged too much\n");
+ }
+ dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
+ dl_se->runtime = pi_se->dl_runtime;
+ }
+}
+
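
A worked instance of the replenishment loop above, as a standalone sketch with illustrative numbers: an entity with a 10 ms runtime per 100 ms period that went 3 ms over budget leaves the loop with 7 ms of runtime and its deadline pushed one period into the future.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative -deadline parameters, all in nanoseconds */
	int64_t  dl_runtime =  10 * 1000 * 1000LL;	/*  10 ms */
	uint64_t dl_period  = 100 * 1000 * 1000LL;	/* 100 ms */

	int64_t  runtime  =  -3 * 1000 * 1000LL;	/* 3 ms over budget */
	uint64_t deadline = 500 * 1000 * 1000LL;	/* some absolute time */

	/* the core of replenish_dl_entity() */
	while (runtime <= 0) {
		deadline += dl_period;
		runtime  += dl_runtime;
	}

	/* prints: runtime 7000000 ns, deadline 600000000 ns */
	printf("runtime %lld ns, deadline %llu ns\n",
	       (long long)runtime, (unsigned long long)deadline);
	return 0;
}
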
+/*
+ * Here we check if --at time t-- an entity (which is probably being
+ * [re]activated or, in general, enqueued) can use its remaining runtime
+ * and its current deadline _without_ exceeding the bandwidth it is
+ * assigned (function returns true if it can't). We are in fact applying
+ * one of the CBS rules: when a task wakes up, if the residual runtime
+ * over residual deadline fits within the allocated bandwidth, then we
+ * can keep the current (absolute) deadline and residual budget without
+ * disrupting the schedulability of the system. Otherwise, we should
+ * refill the runtime and set the deadline a period in the future,
+ * because keeping the current (absolute) deadline of the task would
+ * result in breaking guarantees promised to other tasks.
+ *
+ * This function returns true if:
+ *
+ * runtime / (deadline - t) > dl_runtime / dl_period ,
+ *
+ * IOW we can't recycle current parameters.
+ *
+ * Notice that the bandwidth check is done against the period. For
+ * tasks with deadline equal to period this is the same as using
+ * dl_deadline instead of dl_period in the equation above.
+ */
+static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
+ struct sched_dl_entity *pi_se, u64 t)
+{
+ u64 left, right;
+
+ /*
+ * left and right are the two sides of the equation above,
+ * after a bit of shuffling to use multiplications instead
+ * of divisions.
+ *
+ * Note that none of the time values involved in the two
+ * multiplications are absolute: dl_deadline and dl_runtime
+ * are the relative deadline and the maximum runtime of each
+ * instance, runtime is the runtime left for the last instance
+ * and (deadline - t), since t is rq->clock, is the time left
+ * to the (absolute) deadline. Even if overflowing the u64 type
+ * is very unlikely to occur in both cases, here we scale down
+ * as we want to avoid that risk entirely. Scaling down by 10
+ * bits means that we reduce the granularity to about 1us. We are
+ * fine with it, since this is only a true/false check and, anyway,
+ * thinking of anything below microsecond resolution is actually fiction
+ * (but still we want to give the user that illusion >;).
+ */
+ left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
+ right = ((dl_se->deadline - t) >> DL_SCALE) *
+ (pi_se->dl_runtime >> DL_SCALE);
+
+ return dl_time_before(right, left);
+}
+
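
A worked instance of the check above (standalone sketch, illustrative numbers): with 4 ms of runtime left and 20 ms to the old deadline, the residual bandwidth 4/20 = 0.2 exceeds the reserved 10/100 = 0.1, so the old deadline cannot be recycled and the caller (update_dl_entity(), below) hands out a fresh deadline and budget instead.

#include <stdint.h>
#include <stdio.h>

#define DL_SCALE	10	/* same granularity reduction as above */

static int dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	/* reserved bandwidth: 10 ms every 100 ms */
	uint64_t dl_runtime = 10 * 1000 * 1000ULL;
	uint64_t dl_period  = 100 * 1000 * 1000ULL;

	/* state at wakeup: 4 ms of runtime left, old deadline 20 ms away */
	uint64_t runtime   =  4 * 1000 * 1000ULL;
	uint64_t time_left = 20 * 1000 * 1000ULL;	/* deadline - t */

	uint64_t left  = (dl_period >> DL_SCALE) * (runtime >> DL_SCALE);
	uint64_t right = (time_left >> DL_SCALE) * (dl_runtime >> DL_SCALE);

	/* right < left, i.e. 0.2 > 0.1: the entity would overflow its bandwidth */
	printf("overflow = %d\n", dl_time_before(right, left));
	return 0;
}
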
+/*
+ * When a -deadline entity is queued back on the runqueue, its runtime and
+ * deadline might need updating.
+ *
+ * The policy here is that we update the deadline of the entity only if:
+ * - the current deadline is in the past,
+ * - using the remaining runtime with the current deadline would make
+ * the entity exceed its bandwidth.
+ */
+static void update_dl_entity(struct sched_dl_entity *dl_se,
+ struct sched_dl_entity *pi_se)
+{
+ struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+ struct rq *rq = rq_of_dl_rq(dl_rq);
+
+ /*
+ * The arrival of a new instance needs special treatment, i.e.,
+ * the actual scheduling parameters have to be "renewed".
+ */
+ if (dl_se->dl_new) {
+ setup_new_dl_entity(dl_se, pi_se);
+ return;
+ }
+
+ if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
+ dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
+ dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
+ dl_se->runtime = pi_se->dl_runtime;
+ }
+}
+
+/*
+ * If the entity depleted all its runtime, and if we want it to sleep
+ * while waiting for some new execution time to become available, we
+ * set the bandwidth enforcement timer to the replenishment instant
+ * and try to activate it.
+ *
+ * Notice that it is important for the caller to know if the timer
+ * actually started or not (i.e., the replenishment instant is in
+ * the future or in the past).
+ */
+static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
+{
+ struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+ struct rq *rq = rq_of_dl_rq(dl_rq);
+ ktime_t now, act;
+ ktime_t soft, hard;
+ unsigned long range;
+ s64 delta;
+
+ if (boosted)
+ return 0;
+ /*
+ * We want the timer to fire at the deadline, but we must take
+ * into account that the deadline comes from rq->clock and not
+ * from the hrtimer's time base reading.
+ */
+ act = ns_to_ktime(dl_se->deadline);
+ now = hrtimer_cb_get_time(&dl_se->dl_timer);
+ delta = ktime_to_ns(now) - rq_clock(rq);
+ act = ktime_add_ns(act, delta);
+
+ /*
+ * If the expiry time already passed, e.g., because the value
+ * chosen as the deadline is too small, don't even try to
+ * start the timer in the past!
+ */
+ if (ktime_us_delta(act, now) < 0)
+ return 0;
+
+ hrtimer_set_expires(&dl_se->dl_timer, act);
+
+ soft = hrtimer_get_softexpires(&dl_se->dl_timer);
+ hard = hrtimer_get_expires(&dl_se->dl_timer);
+ range = ktime_to_ns(ktime_sub(hard, soft));
+ __hrtimer_start_range_ns(&dl_se->dl_timer, soft,
+ range, HRTIMER_MODE_ABS, 0);
+
+ return hrtimer_active(&dl_se->dl_timer);
+}
+
+/*
+ * This is the bandwidth enforcement timer callback. If here, we know
+ * a task is not on its dl_rq, since the fact that the timer was running
+ * means the task is throttled and needs a runtime replenishment.
+ *
+ * However, what we actually do depends on whether the task is still
+ * active (it is on its rq) or has been removed from there by a call to
+ * dequeue_task_dl(). In the former case we must issue the runtime
+ * replenishment and add the task back to the dl_rq; in the latter, we just
+ * do nothing but clearing dl_throttled, so that runtime and deadline
+ * updating (and the queueing back to dl_rq) will be done by the
+ * next call to enqueue_task_dl().
+ */
+static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
+{
+ struct sched_dl_entity *dl_se = container_of(timer,
+ struct sched_dl_entity,
+ dl_timer);
+ struct task_struct *p = dl_task_of(dl_se);
+ struct rq *rq = task_rq(p);
+ raw_spin_lock(&rq->lock);
+
+ /*
+ * We need to take care of possible races here. In fact, the
+ * task might have changed its scheduling policy to something
+ * different from SCHED_DEADLINE or changed its reservation
+ * parameters (through sched_setscheduler()).
+ */
+ if (!dl_task(p) || dl_se->dl_new)
+ goto unlock;
+
+ sched_clock_tick();
+ update_rq_clock(rq);
+ dl_se->dl_throttled = 0;
+ if (p->on_rq) {
+ enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
+ if (task_has_dl_policy(rq->curr))
+ check_preempt_curr_dl(rq, p, 0);
+ else
+ resched_task(rq->curr);
+#ifdef CONFIG_SMP
+ /*
+ * Queueing this task back might have overloaded rq,
+ * check if we need to kick someone away.
+ */
+ if (has_pushable_dl_tasks(rq))
+ push_dl_task(rq);
+#endif
+ }
+unlock:
+ raw_spin_unlock(&rq->lock);
+
+ return HRTIMER_NORESTART;
+}
+
+void init_dl_task_timer(struct sched_dl_entity *dl_se)
+{
+ struct hrtimer *timer = &dl_se->dl_timer;
+
+ if (hrtimer_active(timer)) {
+ hrtimer_try_to_cancel(timer);
+ return;
+ }
+
+ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ timer->function = dl_task_timer;
+}
+
+static
+int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
+{
+ int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
+ int rorun = dl_se->runtime <= 0;
+
+ if (!rorun && !dmiss)
+ return 0;
+
+ /*
+ * If we are beyond our current deadline and we are still
+ * executing, then we have already used some of the runtime of
+ * the next instance. Thus, if we do not account that, we are
+ * stealing bandwidth from the system at each deadline miss!
+ */
+ if (dmiss) {
+ dl_se->runtime = rorun ? dl_se->runtime : 0;
+ dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
+ }
+
+ return 1;
+}
+
+/*
+ * Update the current task's runtime statistics (provided it is still
+ * a -deadline task and has not been removed from the dl_rq).
+ */
+static void update_curr_dl(struct rq *rq)
+{
+ struct task_struct *curr = rq->curr;
+ struct sched_dl_entity *dl_se = &curr->dl;
+ u64 delta_exec;
+
+ if (!dl_task(curr) || !on_dl_rq(dl_se))
+ return;
+
+ /*
+ * Consumed budget is computed considering the time as
+ * observed by schedulable tasks (excluding time spent
+ * in hardirq context, etc.). Deadlines are instead
+ * computed using hard walltime. This seems to be the more
+ * natural solution, but the full ramifications of this
+ * approach need further study.
+ */
+ delta_exec = rq_clock_task(rq) - curr->se.exec_start;
+ if (unlikely((s64)delta_exec < 0))
+ delta_exec = 0;
+
+ schedstat_set(curr->se.statistics.exec_max,
+ max(curr->se.statistics.exec_max, delta_exec));
+
+ curr->se.sum_exec_runtime += delta_exec;
+ account_group_exec_runtime(curr, delta_exec);
+
+ curr->se.exec_start = rq_clock_task(rq);
+ cpuacct_charge(curr, delta_exec);
+
+ sched_rt_avg_update(rq, delta_exec);
+
+ dl_se->runtime -= delta_exec;
+ if (dl_runtime_exceeded(rq, dl_se)) {
+ __dequeue_task_dl(rq, curr, 0);
+ if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
+ dl_se->dl_throttled = 1;
+ else
+ enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
+
+ if (!is_leftmost(curr, &rq->dl))
+ resched_task(curr);
+ }
+
+ /*
+ * Because -- for now -- we share the rt bandwidth, we need to
+ * account our runtime there too, otherwise actual rt tasks
+ * would be able to exceed the shared quota.
+ *
+ * Account to the root rt group for now.
+ *
+ * The solution we're working towards is having the RT groups scheduled
+ * using deadline servers -- however there's a few nasties to figure
+ * out before that can happen.
+ */
+ if (rt_bandwidth_enabled()) {
+ struct rt_rq *rt_rq = &rq->rt;
+
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
+ rt_rq->rt_time += delta_exec;
+ /*
+ * We'll let actual RT tasks worry about the overflow here, we
+ * have our own CBS to keep us in line -- see above.
+ */
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
+ }
+}
+
+#ifdef CONFIG_SMP
+
+static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
+
+static inline u64 next_deadline(struct rq *rq)
+{
+ struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
+
+ if (next && dl_prio(next->prio))
+ return next->dl.deadline;
+ else
+ return 0;
+}
+
+static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
+{
+ struct rq *rq = rq_of_dl_rq(dl_rq);
+
+ if (dl_rq->earliest_dl.curr == 0 ||
+ dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
+ /*
+ * If the dl_rq had no -deadline tasks, or if the new task
+ * has shorter deadline than the current one on dl_rq, we
+ * know that the previous earliest becomes our next earliest,
+ * as the new task becomes the earliest itself.
+ */
+ dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
+ dl_rq->earliest_dl.curr = deadline;
+ cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
+ } else if (dl_rq->earliest_dl.next == 0 ||
+ dl_time_before(deadline, dl_rq->earliest_dl.next)) {
+ /*
+ * On the other hand, if the new -deadline task has a
+ * a later deadline than the earliest one on dl_rq, but
+ * it is earlier than the next (if any), we must
+ * recompute the next-earliest.
+ */
+ dl_rq->earliest_dl.next = next_deadline(rq);
+ }
+}
+
+static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
+{
+ struct rq *rq = rq_of_dl_rq(dl_rq);
+
+ /*
+ * Since we may have removed our earliest (and/or next earliest)
+ * task we must recompute them.
+ */
+ if (!dl_rq->dl_nr_running) {
+ dl_rq->earliest_dl.curr = 0;
+ dl_rq->earliest_dl.next = 0;
+ cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+ } else {
+ struct rb_node *leftmost = dl_rq->rb_leftmost;
+ struct sched_dl_entity *entry;
+
+ entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
+ dl_rq->earliest_dl.curr = entry->deadline;
+ dl_rq->earliest_dl.next = next_deadline(rq);
+ cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
+ }
+}
+
+#else
+
+static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
+static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
+
+#endif /* CONFIG_SMP */
+
+static inline
+void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+ int prio = dl_task_of(dl_se)->prio;
+ u64 deadline = dl_se->deadline;
+
+ WARN_ON(!dl_prio(prio));
+ dl_rq->dl_nr_running++;
+
+ inc_dl_deadline(dl_rq, deadline);
+ inc_dl_migration(dl_se, dl_rq);
+}
+
+static inline
+void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+ int prio = dl_task_of(dl_se)->prio;
+
+ WARN_ON(!dl_prio(prio));
+ WARN_ON(!dl_rq->dl_nr_running);
+ dl_rq->dl_nr_running--;
+
+ dec_dl_deadline(dl_rq, dl_se->deadline);
+ dec_dl_migration(dl_se, dl_rq);
+}
+
+static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
+{
+ struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+ struct rb_node **link = &dl_rq->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ struct sched_dl_entity *entry;
+ int leftmost = 1;
+
+ BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
+
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct sched_dl_entity, rb_node);
+ if (dl_time_before(dl_se->deadline, entry->deadline))
+ link = &parent->rb_left;
+ else {
+ link = &parent->rb_right;
+ leftmost = 0;
+ }
+ }
+
+ if (leftmost)
+ dl_rq->rb_leftmost = &dl_se->rb_node;
+
+ rb_link_node(&dl_se->rb_node, parent, link);
+ rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
+
+ inc_dl_tasks(dl_se, dl_rq);
+}
+
+static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
+{
+ struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+
+ if (RB_EMPTY_NODE(&dl_se->rb_node))
+ return;
+
+ if (dl_rq->rb_leftmost == &dl_se->rb_node) {
+ struct rb_node *next_node;
+
+ next_node = rb_next(&dl_se->rb_node);
+ dl_rq->rb_leftmost = next_node;
+ }
+
+ rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
+ RB_CLEAR_NODE(&dl_se->rb_node);
+
+ dec_dl_tasks(dl_se, dl_rq);
+}
+
+static void
+enqueue_dl_entity(struct sched_dl_entity *dl_se,
+ struct sched_dl_entity *pi_se, int flags)
+{
+ BUG_ON(on_dl_rq(dl_se));
+
+ /*
+ * If this is a wakeup or a new instance, the scheduling
+ * parameters of the task might need updating. Otherwise,
+ * we want a replenishment of its runtime.
+ */
+ if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
+ replenish_dl_entity(dl_se, pi_se);
+ else
+ update_dl_entity(dl_se, pi_se);
+
+ __enqueue_dl_entity(dl_se);
+}
+
+static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
+{
+ __dequeue_dl_entity(dl_se);
+}
+
+static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
+{
+ struct task_struct *pi_task = rt_mutex_get_top_task(p);
+ struct sched_dl_entity *pi_se = &p->dl;
+
+ /*
+ * Use the scheduling parameters of the top pi-waiter
+ * task if we have one and its (relative) deadline is
+ * smaller than ours... otherwise we keep our runtime and
+ * deadline.
+ */
+ if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio))
+ pi_se = &pi_task->dl;
+
+ /*
+ * If p is throttled, we do nothing. In fact, if it exhausted
+ * its budget it needs a replenishment and, since it now is on
+ * its rq, the bandwidth timer callback (which clearly has not
+ * run yet) will take care of this.
+ */
+ if (p->dl.dl_throttled)
+ return;
+
+ enqueue_dl_entity(&p->dl, pi_se, flags);
+
+ if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+ enqueue_pushable_dl_task(rq, p);
+
+ inc_nr_running(rq);
+}
+
+static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
+{
+ dequeue_dl_entity(&p->dl);
+ dequeue_pushable_dl_task(rq, p);
+}
+
+static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
+{
+ update_curr_dl(rq);
+ __dequeue_task_dl(rq, p, flags);
+
+ dec_nr_running(rq);
+}
+
+/*
+ * Yield task semantic for -deadline tasks is:
+ *
+ * get off from the CPU until our next instance, with
+ * a new runtime. This is of little use now, since we
+ * don't have a bandwidth reclaiming mechanism. Anyway,
+ * bandwidth reclaiming is planned for the future, and
+ * yield_task_dl will indicate that some spare budget
+ * is available for other task instances to use.
+ */
+static void yield_task_dl(struct rq *rq)
+{
+ struct task_struct *p = rq->curr;
+
+ /*
+ * We make the task go to sleep until its current deadline by
+ * forcing its runtime to zero. This way, update_curr_dl() stops
+ * it and the bandwidth timer will wake it up and will give it
+ * new scheduling parameters (thanks to dl_new=1).
+ */
+ if (p->dl.runtime > 0) {
+ rq->curr->dl.dl_new = 1;
+ p->dl.runtime = 0;
+ }
+ update_curr_dl(rq);
+}
+
+#ifdef CONFIG_SMP
+
+static int find_later_rq(struct task_struct *task);
+
+static int
+select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
+{
+ struct task_struct *curr;
+ struct rq *rq;
+
+ if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
+ goto out;
+
+ rq = cpu_rq(cpu);
+
+ rcu_read_lock();
+ curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+
+ /*
+ * If we are dealing with a -deadline task, we must
+ * decide where to wake it up.
+ * If it has a later deadline and the current task
+ * on this rq can't move (provided the waking task
+ * can!) we prefer to send it somewhere else. On the
+ * other hand, if it has a shorter deadline, we
+ * try to make it stay here, it might be important.
+ */
+ if (unlikely(dl_task(curr)) &&
+ (curr->nr_cpus_allowed < 2 ||
+ !dl_entity_preempt(&p->dl, &curr->dl)) &&
+ (p->nr_cpus_allowed > 1)) {
+ int target = find_later_rq(p);
+
+ if (target != -1)
+ cpu = target;
+ }
+ rcu_read_unlock();
+
+out:
+ return cpu;
+}
+
+static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
+{
+ /*
+ * Current can't be migrated, useless to reschedule,
+ * let's hope p can move out.
+ */
+ if (rq->curr->nr_cpus_allowed == 1 ||
+ cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
+ return;
+
+ /*
+ * p is migratable, so let's not schedule it and
+ * see if it is pushed or pulled somewhere else.
+ */
+ if (p->nr_cpus_allowed != 1 &&
+ cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
+ return;
+
+ resched_task(rq->curr);
+}
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Only called when both the current and waking task are -deadline
+ * tasks.
+ */
+static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
+ int flags)
+{
+ if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
+ resched_task(rq->curr);
+ return;
+ }
+
+#ifdef CONFIG_SMP
+ /*
+ * In the unlikely case current and p have the same deadline
+ * let us try to decide what's the best thing to do...
+ */
+ if ((p->dl.deadline == rq->curr->dl.deadline) &&
+ !test_tsk_need_resched(rq->curr))
+ check_preempt_equal_dl(rq, p);
+#endif /* CONFIG_SMP */
+}
+
+#ifdef CONFIG_SCHED_HRTICK
+static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
+{
+ s64 delta = p->dl.dl_runtime - p->dl.runtime;
+
+ if (delta > 10000)
+ hrtick_start(rq, p->dl.runtime);
+}
+#endif
+
+static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
+ struct dl_rq *dl_rq)
+{
+ struct rb_node *left = dl_rq->rb_leftmost;
+
+ if (!left)
+ return NULL;
+
+ return rb_entry(left, struct sched_dl_entity, rb_node);
+}
+
+struct task_struct *pick_next_task_dl(struct rq *rq)
+{
+ struct sched_dl_entity *dl_se;
+ struct task_struct *p;
+ struct dl_rq *dl_rq;
+
+ dl_rq = &rq->dl;
+
+ if (unlikely(!dl_rq->dl_nr_running))
+ return NULL;
+
+ dl_se = pick_next_dl_entity(rq, dl_rq);
+ BUG_ON(!dl_se);
+
+ p = dl_task_of(dl_se);
+ p->se.exec_start = rq_clock_task(rq);
+
+ /* Running task will never be pushed. */
+ dequeue_pushable_dl_task(rq, p);
+
+#ifdef CONFIG_SCHED_HRTICK
+ if (hrtick_enabled(rq))
+ start_hrtick_dl(rq, p);
+#endif
+
+#ifdef CONFIG_SMP
+ rq->post_schedule = has_pushable_dl_tasks(rq);
+#endif /* CONFIG_SMP */
+
+ return p;
+}
+
+static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
+{
+ update_curr_dl(rq);
+
+ if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
+ enqueue_pushable_dl_task(rq, p);
+}
+
+static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
+{
+ update_curr_dl(rq);
+
+#ifdef CONFIG_SCHED_HRTICK
+ if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
+ start_hrtick_dl(rq, p);
+#endif
+}
+
+static void task_fork_dl(struct task_struct *p)
+{
+ /*
+ * SCHED_DEADLINE tasks cannot fork and this is achieved through
+ * sched_fork()
+ */
+}
+
+static void task_dead_dl(struct task_struct *p)
+{
+ struct hrtimer *timer = &p->dl.dl_timer;
+ struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+
+ /*
+ * Since we are TASK_DEAD we won't slip out of the domain!
+ */
+ raw_spin_lock_irq(&dl_b->lock);
+ dl_b->total_bw -= p->dl.dl_bw;
+ raw_spin_unlock_irq(&dl_b->lock);
+
+ hrtimer_cancel(timer);
+}
+
+static void set_curr_task_dl(struct rq *rq)
+{
+ struct task_struct *p = rq->curr;
+
+ p->se.exec_start = rq_clock_task(rq);
+
+ /* You can't push away the running task */
+ dequeue_pushable_dl_task(rq, p);
+}
+
+#ifdef CONFIG_SMP
+
+/* Only try algorithms three times */
+#define DL_MAX_TRIES 3
+
+static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
+{
+ if (!task_running(rq, p) &&
+ (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
+ (p->nr_cpus_allowed > 1))
+ return 1;
+
+ return 0;
+}
+
+/* Returns the second earliest -deadline task, NULL otherwise */
+static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
+{
+ struct rb_node *next_node = rq->dl.rb_leftmost;
+ struct sched_dl_entity *dl_se;
+ struct task_struct *p = NULL;
+
+next_node:
+ next_node = rb_next(next_node);
+ if (next_node) {
+ dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
+ p = dl_task_of(dl_se);
+
+ if (pick_dl_task(rq, p, cpu))
+ return p;
+
+ goto next_node;
+ }
+
+ return NULL;
+}
+
+static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
+
+static int find_later_rq(struct task_struct *task)
+{
+ struct sched_domain *sd;
+ struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl);
+ int this_cpu = smp_processor_id();
+ int best_cpu, cpu = task_cpu(task);
+
+ /* Make sure the mask is initialized first */
+ if (unlikely(!later_mask))
+ return -1;
+
+ if (task->nr_cpus_allowed == 1)
+ return -1;
+
+ best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
+ task, later_mask);
+ if (best_cpu == -1)
+ return -1;
+
+ /*
+ * If we are here, some target has been found,
+ * the most suitable of which is cached in best_cpu.
+ * This is, among the runqueues where the current tasks
+ * have later deadlines than the task's one, the rq
+ * with the latest possible one.
+ *
+ * Now we check how well this matches with task's
+ * affinity and system topology.
+ *
+ * The last CPU where the task ran is our first
+ * guess, since it is most likely cache-hot there.
+ */
+ if (cpumask_test_cpu(cpu, later_mask))
+ return cpu;
+ /*
+ * Check if this_cpu is to be skipped (i.e., it is
+ * not in the mask) or not.
+ */
+ if (!cpumask_test_cpu(this_cpu, later_mask))
+ this_cpu = -1;
+
+ rcu_read_lock();
+ for_each_domain(cpu, sd) {
+ if (sd->flags & SD_WAKE_AFFINE) {
+
+ /*
+ * If possible, preempting this_cpu is
+ * cheaper than migrating.
+ */
+ if (this_cpu != -1 &&
+ cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
+ rcu_read_unlock();
+ return this_cpu;
+ }
+
+ /*
+ * Last chance: if best_cpu is valid and is
+ * in the mask, that becomes our choice.
+ */
+ if (best_cpu < nr_cpu_ids &&
+ cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
+ rcu_read_unlock();
+ return best_cpu;
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ /*
+ * At this point, all our guesses failed, so we just return
+ * 'something' and let the caller sort things out.
+ */
+ if (this_cpu != -1)
+ return this_cpu;
+
+ cpu = cpumask_any(later_mask);
+ if (cpu < nr_cpu_ids)
+ return cpu;
+
+ return -1;
+}
+
+/* Locks the rq it finds */
+static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
+{
+ struct rq *later_rq = NULL;
+ int tries;
+ int cpu;
+
+ for (tries = 0; tries < DL_MAX_TRIES; tries++) {
+ cpu = find_later_rq(task);
+
+ if ((cpu == -1) || (cpu == rq->cpu))
+ break;
+
+ later_rq = cpu_rq(cpu);
+
+ /* Retry if something changed. */
+ if (double_lock_balance(rq, later_rq)) {
+ if (unlikely(task_rq(task) != rq ||
+ !cpumask_test_cpu(later_rq->cpu,
+ &task->cpus_allowed) ||
+ task_running(rq, task) || !task->on_rq)) {
+ double_unlock_balance(rq, later_rq);
+ later_rq = NULL;
+ break;
+ }
+ }
+
+ /*
+ * If the rq we found has no -deadline task, or
+ * its earliest one has a later deadline than our
+ * task, the rq is a good one.
+ */
+ if (!later_rq->dl.dl_nr_running ||
+ dl_time_before(task->dl.deadline,
+ later_rq->dl.earliest_dl.curr))
+ break;
+
+ /* Otherwise we try again. */
+ double_unlock_balance(rq, later_rq);
+ later_rq = NULL;
+ }
+
+ return later_rq;
+}
+
+static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
+{
+ struct task_struct *p;
+
+ if (!has_pushable_dl_tasks(rq))
+ return NULL;
+
+ p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
+ struct task_struct, pushable_dl_tasks);
+
+ BUG_ON(rq->cpu != task_cpu(p));
+ BUG_ON(task_current(rq, p));
+ BUG_ON(p->nr_cpus_allowed <= 1);
+
+ BUG_ON(!p->on_rq);
+ BUG_ON(!dl_task(p));
+
+ return p;
+}
+
+/*
+ * See if the non-running -deadline tasks on this rq
+ * can be sent to some other CPU where they can preempt
+ * and start executing.
+ */
+static int push_dl_task(struct rq *rq)
+{
+ struct task_struct *next_task;
+ struct rq *later_rq;
+
+ if (!rq->dl.overloaded)
+ return 0;
+
+ next_task = pick_next_pushable_dl_task(rq);
+ if (!next_task)
+ return 0;
+
+retry:
+ if (unlikely(next_task == rq->curr)) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ /*
+ * If next_task preempts rq->curr, and rq->curr
+ * can move away, it makes sense to just reschedule
+ * without going further in pushing next_task.
+ */
+ if (dl_task(rq->curr) &&
+ dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
+ rq->curr->nr_cpus_allowed > 1) {
+ resched_task(rq->curr);
+ return 0;
+ }
+
+ /* We might release rq lock */
+ get_task_struct(next_task);
+
+ /* Will lock the rq it'll find */
+ later_rq = find_lock_later_rq(next_task, rq);
+ if (!later_rq) {
+ struct task_struct *task;
+
+ /*
+ * We must check all this again, since
+ * find_lock_later_rq releases rq->lock and it is
+ * then possible that next_task has migrated.
+ */
+ task = pick_next_pushable_dl_task(rq);
+ if (task_cpu(next_task) == rq->cpu && task == next_task) {
+ /*
+ * The task is still there. We don't try
+ * again, some other cpu will pull it when ready.
+ */
+ dequeue_pushable_dl_task(rq, next_task);
+ goto out;
+ }
+
+ if (!task)
+ /* No more tasks */
+ goto out;
+
+ put_task_struct(next_task);
+ next_task = task;
+ goto retry;
+ }
+
+ deactivate_task(rq, next_task, 0);
+ set_task_cpu(next_task, later_rq->cpu);
+ activate_task(later_rq, next_task, 0);
+
+ resched_task(later_rq->curr);
+
+ double_unlock_balance(rq, later_rq);
+
+out:
+ put_task_struct(next_task);
+
+ return 1;
+}
+
+static void push_dl_tasks(struct rq *rq)
+{
+ /* Terminates as it moves a -deadline task */
+ while (push_dl_task(rq))
+ ;
+}
+
+static int pull_dl_task(struct rq *this_rq)
+{
+ int this_cpu = this_rq->cpu, ret = 0, cpu;
+ struct task_struct *p;
+ struct rq *src_rq;
+ u64 dmin = LONG_MAX;
+
+ if (likely(!dl_overloaded(this_rq)))
+ return 0;
+
+ /*
+ * Match the barrier from dl_set_overloaded; this guarantees that if we
+ * see overloaded we must also see the dlo_mask bit.
+ */
+ smp_rmb();
+
+ for_each_cpu(cpu, this_rq->rd->dlo_mask) {
+ if (this_cpu == cpu)
+ continue;
+
+ src_rq = cpu_rq(cpu);
+
+ /*
+ * It looks racy, and it is! However, as in sched_rt.c,
+ * we are fine with this.
+ */
+ if (this_rq->dl.dl_nr_running &&
+ dl_time_before(this_rq->dl.earliest_dl.curr,
+ src_rq->dl.earliest_dl.next))
+ continue;
+
+ /* Might drop this_rq->lock */
+ double_lock_balance(this_rq, src_rq);
+
+ /*
+ * If there are no more pullable tasks on the
+ * rq, we're done with it.
+ */
+ if (src_rq->dl.dl_nr_running <= 1)
+ goto skip;
+
+ p = pick_next_earliest_dl_task(src_rq, this_cpu);
+
+ /*
+ * We found a task to be pulled if:
+ * - it preempts our current (if there's one),
+ * - it will preempt the last one we pulled (if any).
+ */
+ if (p && dl_time_before(p->dl.deadline, dmin) &&
+ (!this_rq->dl.dl_nr_running ||
+ dl_time_before(p->dl.deadline,
+ this_rq->dl.earliest_dl.curr))) {
+ WARN_ON(p == src_rq->curr);
+ WARN_ON(!p->on_rq);
+
+ /*
+ * Then we pull iff p actually has an earlier
+ * deadline than the current task of its runqueue.
+ */
+ if (dl_time_before(p->dl.deadline,
+ src_rq->curr->dl.deadline))
+ goto skip;
+
+ ret = 1;
+
+ deactivate_task(src_rq, p, 0);
+ set_task_cpu(p, this_cpu);
+ activate_task(this_rq, p, 0);
+ dmin = p->dl.deadline;
+
+ /* Is there any other task even earlier? */
+ }
+skip:
+ double_unlock_balance(this_rq, src_rq);
+ }
+
+ return ret;
+}
+
+static void pre_schedule_dl(struct rq *rq, struct task_struct *prev)
+{
+ /* Try to pull other tasks here */
+ if (dl_task(prev))
+ pull_dl_task(rq);
+}
+
+static void post_schedule_dl(struct rq *rq)
+{
+ push_dl_tasks(rq);
+}
+
+/*
+ * Since the task is not running and a reschedule is not going to happen
+ * anytime soon on its runqueue, we try pushing it away now.
+ */
+static void task_woken_dl(struct rq *rq, struct task_struct *p)
+{
+ if (!task_running(rq, p) &&
+ !test_tsk_need_resched(rq->curr) &&
+ has_pushable_dl_tasks(rq) &&
+ p->nr_cpus_allowed > 1 &&
+ dl_task(rq->curr) &&
+ (rq->curr->nr_cpus_allowed < 2 ||
+ dl_entity_preempt(&rq->curr->dl, &p->dl))) {
+ push_dl_tasks(rq);
+ }
+}
+
+static void set_cpus_allowed_dl(struct task_struct *p,
+ const struct cpumask *new_mask)
+{
+ struct rq *rq;
+ int weight;
+
+ BUG_ON(!dl_task(p));
+
+ /*
+ * Update only if the task is actually running (i.e.,
+ * it is on the rq AND it is not throttled).
+ */
+ if (!on_dl_rq(&p->dl))
+ return;
+
+ weight = cpumask_weight(new_mask);
+
+ /*
+ * Only update if the process changes whether it
+ * can migrate or not.
+ */
+ if ((p->nr_cpus_allowed > 1) == (weight > 1))
+ return;
+
+ rq = task_rq(p);
+
+ /*
+ * The process used to be able to migrate OR it can now migrate
+ */
+ if (weight <= 1) {
+ if (!task_current(rq, p))
+ dequeue_pushable_dl_task(rq, p);
+ BUG_ON(!rq->dl.dl_nr_migratory);
+ rq->dl.dl_nr_migratory--;
+ } else {
+ if (!task_current(rq, p))
+ enqueue_pushable_dl_task(rq, p);
+ rq->dl.dl_nr_migratory++;
+ }
+
+ update_dl_migration(&rq->dl);
+}
+
+/* Assumes rq->lock is held */
+static void rq_online_dl(struct rq *rq)
+{
+ if (rq->dl.overloaded)
+ dl_set_overload(rq);
+
+ if (rq->dl.dl_nr_running > 0)
+ cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
+}
+
+/* Assumes rq->lock is held */
+static void rq_offline_dl(struct rq *rq)
+{
+ if (rq->dl.overloaded)
+ dl_clear_overload(rq);
+
+ cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+}
+
+void init_sched_dl_class(void)
+{
+ unsigned int i;
+
+ for_each_possible_cpu(i)
+ zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
+ GFP_KERNEL, cpu_to_node(i));
+}
+
+#endif /* CONFIG_SMP */
+
+static void switched_from_dl(struct rq *rq, struct task_struct *p)
+{
+ if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
+ hrtimer_try_to_cancel(&p->dl.dl_timer);
+
+#ifdef CONFIG_SMP
+ /*
+ * Since this might be the only -deadline task on the rq,
+ * this is the right place to try to pull some other one
+ * from an overloaded cpu, if any.
+ */
+ if (!rq->dl.dl_nr_running)
+ pull_dl_task(rq);
+#endif
+}
+
+/*
+ * When switching to -deadline, we may overload the rq, then
+ * we try to push someone off, if possible.
+ */
+static void switched_to_dl(struct rq *rq, struct task_struct *p)
+{
+ int check_resched = 1;
+
+ /*
+ * If p is throttled, don't consider the possibility
+ * of preempting rq->curr, the check will be done right
+ * after its runtime will get replenished.
+ */
+ if (unlikely(p->dl.dl_throttled))
+ return;
+
+ if (p->on_rq || rq->curr != p) {
+#ifdef CONFIG_SMP
+ if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
+ /* Only reschedule if pushing failed */
+ check_resched = 0;
+#endif /* CONFIG_SMP */
+ if (check_resched && task_has_dl_policy(rq->curr))
+ check_preempt_curr_dl(rq, p, 0);
+ }
+}
+
+/*
+ * If the scheduling parameters of a -deadline task changed,
+ * a push or pull operation might be needed.
+ */
+static void prio_changed_dl(struct rq *rq, struct task_struct *p,
+ int oldprio)
+{
+ if (p->on_rq || rq->curr == p) {
+#ifdef CONFIG_SMP
+ /*
+ * This might be too much, but unfortunately
+ * we don't have the old deadline value, and
+ * we can't tell whether the task is increasing
+ * or lowering its prio, so...
+ */
+ if (!rq->dl.overloaded)
+ pull_dl_task(rq);
+
+ /*
+ * If we now have an earlier deadline task than p,
+ * then reschedule, provided p is still on this
+ * runqueue.
+ */
+ if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
+ rq->curr == p)
+ resched_task(p);
+#else
+ /*
+ * Again, we don't know if p has an earlier
+ * or later deadline, so let's blindly set a
+ * (maybe not needed) rescheduling point.
+ */
+ resched_task(p);
+#endif /* CONFIG_SMP */
+ } else
+ switched_to_dl(rq, p);
+}
+
+const struct sched_class dl_sched_class = {
+ .next = &rt_sched_class,
+ .enqueue_task = enqueue_task_dl,
+ .dequeue_task = dequeue_task_dl,
+ .yield_task = yield_task_dl,
+
+ .check_preempt_curr = check_preempt_curr_dl,
+
+ .pick_next_task = pick_next_task_dl,
+ .put_prev_task = put_prev_task_dl,
+
+#ifdef CONFIG_SMP
+ .select_task_rq = select_task_rq_dl,
+ .set_cpus_allowed = set_cpus_allowed_dl,
+ .rq_online = rq_online_dl,
+ .rq_offline = rq_offline_dl,
+ .pre_schedule = pre_schedule_dl,
+ .post_schedule = post_schedule_dl,
+ .task_woken = task_woken_dl,
+#endif
+
+ .set_curr_task = set_curr_task_dl,
+ .task_tick = task_tick_dl,
+ .task_fork = task_fork_dl,
+ .task_dead = task_dead_dl,
+
+ .prio_changed = prio_changed_dl,
+ .switched_from = switched_from_dl,
+ .switched_to = switched_to_dl,
+};
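With the class registered above, a task enters SCHED_DEADLINE through the sched_setattr() syscall introduced elsewhere in this series. The following is a minimal user-space sketch, illustrative only: it assumes the series' uapi definitions (SCHED_DEADLINE == 6, __NR_sched_setattr, and the struct sched_attr layout), and the 2.5 ms / 10 ms parameters are made up for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6		/* policy value added by this series */
#endif

/* Layout mirrors the sched_attr introduced by this series (no libc wrapper yet). */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* runtime, deadline, period are in ns */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy = SCHED_DEADLINE;
	attr.sched_runtime  =  2500000;	/* 2.5 ms of budget... */
	attr.sched_deadline = 10000000;	/* ...every 10 ms */
	attr.sched_period   = 10000000;

	/* pid 0 targets the calling thread; the last argument is flags */
	if (syscall(__NR_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}
	return 0;
}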
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 5c34d18..dd52e7f 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -139,7 +139,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_NUMA_BALANCING
- SEQ_printf(m, " %d", cpu_to_node(task_cpu(p)));
+ SEQ_printf(m, " %d", task_node(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
SEQ_printf(m, " %s", task_group_path(task_group(p)));
@@ -371,7 +371,7 @@ static void sched_debug_header(struct seq_file *m)
PN(cpu_clk);
P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
- P(sched_clock_stable);
+ P(sched_clock_stable());
#endif
#undef PN
#undef P
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c7395d9..b24b6cf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -872,15 +872,6 @@ static unsigned int task_scan_max(struct task_struct *p)
return max(smin, smax);
}
-/*
- * Once a preferred node is selected the scheduler balancer will prefer moving
- * a task to that node for sysctl_numa_balancing_settle_count number of PTE
- * scans. This will give the process the chance to accumulate more faults on
- * the preferred node but still allow the scheduler to move the task again if
- * the nodes CPUs are overloaded.
- */
-unsigned int sysctl_numa_balancing_settle_count __read_mostly = 4;
-
static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
rq->nr_numa_running += (p->numa_preferred_nid != -1);
@@ -930,7 +921,8 @@ static inline unsigned long group_faults(struct task_struct *p, int nid)
if (!p->numa_group)
return 0;
- return p->numa_group->faults[2*nid] + p->numa_group->faults[2*nid+1];
+ return p->numa_group->faults[task_faults_idx(nid, 0)] +
+ p->numa_group->faults[task_faults_idx(nid, 1)];
}
/*
@@ -1023,7 +1015,7 @@ struct task_numa_env {
struct numa_stats src_stats, dst_stats;
- int imbalance_pct, idx;
+ int imbalance_pct;
struct task_struct *best_task;
long best_imp;
@@ -1211,7 +1203,7 @@ static int task_numa_migrate(struct task_struct *p)
* elsewhere, so there is no point in (re)trying.
*/
if (unlikely(!sd)) {
- p->numa_preferred_nid = cpu_to_node(task_cpu(p));
+ p->numa_preferred_nid = task_node(p);
return -EINVAL;
}
@@ -1278,7 +1270,7 @@ static void numa_migrate_preferred(struct task_struct *p)
p->numa_migrate_retry = jiffies + HZ;
/* Success if task is already running on preferred CPU */
- if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid)
+ if (task_node(p) == p->numa_preferred_nid)
return;
/* Otherwise, try migrate to a CPU on the preferred node */
@@ -1350,7 +1342,6 @@ static void update_task_scan_period(struct task_struct *p,
* scanning faster if shared accesses dominate as it may
* simply bounce migrations uselessly
*/
- period_slot = DIV_ROUND_UP(diff, NUMA_PERIOD_SLOTS);
ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
}
@@ -3923,7 +3914,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
struct sched_entity *se = tg->se[cpu];
- if (!tg->parent || !wl) /* the trivial, non-cgroup case */
+ if (!tg->parent) /* the trivial, non-cgroup case */
return wl;
for_each_sched_entity(se) {
@@ -4101,12 +4092,16 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
*/
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
- int this_cpu, int load_idx)
+ int this_cpu, int sd_flag)
{
struct sched_group *idlest = NULL, *group = sd->groups;
unsigned long min_load = ULONG_MAX, this_load = 0;
+ int load_idx = sd->forkexec_idx;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
+ if (sd_flag & SD_BALANCE_WAKE)
+ load_idx = sd->wake_idx;
+
do {
unsigned long load, avg_load;
int local_group;
@@ -4274,7 +4269,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
}
while (sd) {
- int load_idx = sd->forkexec_idx;
struct sched_group *group;
int weight;
@@ -4283,10 +4277,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
continue;
}
- if (sd_flag & SD_BALANCE_WAKE)
- load_idx = sd->wake_idx;
-
- group = find_idlest_group(sd, p, cpu, load_idx);
+ group = find_idlest_group(sd, p, cpu, sd_flag);
if (!group) {
sd = sd->child;
continue;
@@ -5512,7 +5503,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
struct sched_group *group, int load_idx,
int local_group, struct sg_lb_stats *sgs)
{
- unsigned long nr_running;
unsigned long load;
int i;
@@ -5521,8 +5511,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
struct rq *rq = cpu_rq(i);
- nr_running = rq->nr_running;
-
/* Bias balancing toward cpus of our domain */
if (local_group)
load = target_load(i, load_idx);
@@ -5530,7 +5518,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
load = source_load(i, load_idx);
sgs->group_load += load;
- sgs->sum_nr_running += nr_running;
+ sgs->sum_nr_running += rq->nr_running;
#ifdef CONFIG_NUMA_BALANCING
sgs->nr_numa_running += rq->nr_numa_running;
sgs->nr_preferred_running += rq->nr_preferred_running;
@@ -6521,7 +6509,7 @@ static struct {
unsigned long next_balance; /* in jiffy units */
} nohz ____cacheline_aligned;
-static inline int find_new_ilb(int call_cpu)
+static inline int find_new_ilb(void)
{
int ilb = cpumask_first(nohz.idle_cpus_mask);
@@ -6536,13 +6524,13 @@ static inline int find_new_ilb(int call_cpu)
* nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
* CPU (if there is one).
*/
-static void nohz_balancer_kick(int cpu)
+static void nohz_balancer_kick(void)
{
int ilb_cpu;
nohz.next_balance++;
- ilb_cpu = find_new_ilb(cpu);
+ ilb_cpu = find_new_ilb();
if (ilb_cpu >= nr_cpu_ids)
return;
@@ -6652,10 +6640,10 @@ void update_max_interval(void)
*
* Balancing parameters are set up in init_sched_domains.
*/
-static void rebalance_domains(int cpu, enum cpu_idle_type idle)
+static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
{
int continue_balancing = 1;
- struct rq *rq = cpu_rq(cpu);
+ int cpu = rq->cpu;
unsigned long interval;
struct sched_domain *sd;
/* Earliest time when we have to do rebalance again */
@@ -6752,9 +6740,9 @@ out:
* In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
* rebalancing for all the cpus for whom scheduler ticks are stopped.
*/
-static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
+static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
{
- struct rq *this_rq = cpu_rq(this_cpu);
+ int this_cpu = this_rq->cpu;
struct rq *rq;
int balance_cpu;
@@ -6781,7 +6769,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
update_idle_cpu_load(rq);
raw_spin_unlock_irq(&rq->lock);
- rebalance_domains(balance_cpu, CPU_IDLE);
+ rebalance_domains(rq, CPU_IDLE);
if (time_after(this_rq->next_balance, rq->next_balance))
this_rq->next_balance = rq->next_balance;
@@ -6800,14 +6788,14 @@ end:
* - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
* domain span are idle.
*/
-static inline int nohz_kick_needed(struct rq *rq, int cpu)
+static inline int nohz_kick_needed(struct rq *rq)
{
unsigned long now = jiffies;
struct sched_domain *sd;
struct sched_group_power *sgp;
- int nr_busy;
+ int nr_busy, cpu = rq->cpu;
- if (unlikely(idle_cpu(cpu)))
+ if (unlikely(rq->idle_balance))
return 0;
/*
@@ -6856,7 +6844,7 @@ need_kick:
return 1;
}
#else
-static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
+static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
#endif
/*
@@ -6865,38 +6853,39 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
*/
static void run_rebalance_domains(struct softirq_action *h)
{
- int this_cpu = smp_processor_id();
- struct rq *this_rq = cpu_rq(this_cpu);
+ struct rq *this_rq = this_rq();
enum cpu_idle_type idle = this_rq->idle_balance ?
CPU_IDLE : CPU_NOT_IDLE;
- rebalance_domains(this_cpu, idle);
+ rebalance_domains(this_rq, idle);
/*
* If this cpu has a pending nohz_balance_kick, then do the
* balancing on behalf of the other idle cpus whose ticks are
* stopped.
*/
- nohz_idle_balance(this_cpu, idle);
+ nohz_idle_balance(this_rq, idle);
}
-static inline int on_null_domain(int cpu)
+static inline int on_null_domain(struct rq *rq)
{
- return !rcu_dereference_sched(cpu_rq(cpu)->sd);
+ return !rcu_dereference_sched(rq->sd);
}
/*
* Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
*/
-void trigger_load_balance(struct rq *rq, int cpu)
+void trigger_load_balance(struct rq *rq)
{
/* Don't need to rebalance while attached to NULL domain */
- if (time_after_eq(jiffies, rq->next_balance) &&
- likely(!on_null_domain(cpu)))
+ if (unlikely(on_null_domain(rq)))
+ return;
+
+ if (time_after_eq(jiffies, rq->next_balance))
raise_softirq(SCHED_SOFTIRQ);
#ifdef CONFIG_NO_HZ_COMMON
- if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
- nohz_balancer_kick(cpu);
+ if (nohz_kick_needed(rq))
+ nohz_balancer_kick();
#endif
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 1c40655..a2740b7 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1738,7 +1738,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
!test_tsk_need_resched(rq->curr) &&
has_pushable_tasks(rq) &&
p->nr_cpus_allowed > 1 &&
- rt_task(rq->curr) &&
+ (dl_task(rq->curr) || rt_task(rq->curr)) &&
(rq->curr->nr_cpus_allowed < 2 ||
rq->curr->prio <= p->prio))
push_rt_tasks(rq);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 88c85b2..c2119fd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,7 @@
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
+#include <linux/sched/deadline.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
@@ -9,6 +10,7 @@
#include <linux/slab.h>
#include "cpupri.h"
+#include "cpudeadline.h"
#include "cpuacct.h"
struct rq;
@@ -73,6 +75,13 @@ extern void update_cpu_load_active(struct rq *this_rq);
#define NICE_0_SHIFT SCHED_LOAD_SHIFT
/*
+ * Single value that decides SCHED_DEADLINE internal math precision.
+ * 10 -> just above 1us
+ * 9 -> just above 0.5us
+ */
+#define DL_SCALE (10)
+
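For reference, the quoted precisions work out as 1 << 10 = 1024 ns (just above 1 us) and 1 << 9 = 512 ns (just above 0.5 us).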
+/*
* These are the 'tuning knobs' of the scheduler:
*/
@@ -81,11 +90,19 @@ extern void update_cpu_load_active(struct rq *this_rq);
*/
#define RUNTIME_INF ((u64)~0ULL)
+static inline int fair_policy(int policy)
+{
+ return policy == SCHED_NORMAL || policy == SCHED_BATCH;
+}
+
static inline int rt_policy(int policy)
{
- if (policy == SCHED_FIFO || policy == SCHED_RR)
- return 1;
- return 0;
+ return policy == SCHED_FIFO || policy == SCHED_RR;
+}
+
+static inline int dl_policy(int policy)
+{
+ return policy == SCHED_DEADLINE;
}
static inline int task_has_rt_policy(struct task_struct *p)
@@ -93,6 +110,25 @@ static inline int task_has_rt_policy(struct task_struct *p)
return rt_policy(p->policy);
}
+static inline int task_has_dl_policy(struct task_struct *p)
+{
+ return dl_policy(p->policy);
+}
+
+static inline bool dl_time_before(u64 a, u64 b)
+{
+ return (s64)(a - b) < 0;
+}
+
+/*
+ * Tells if entity @a should preempt entity @b.
+ */
+static inline bool
+dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
+{
+ return dl_time_before(a->deadline, b->deadline);
+}
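dl_time_before() uses the classic wraparound-safe comparison: the u64 difference is reinterpreted as signed, so the ordering survives even if one deadline has wrapped past 2^64. A small standalone illustration of the same idiom, with hypothetical values and in plain C outside the kernel:

#include <stdint.h>
#include <stdio.h>

/*
 * Same wraparound-safe idiom as dl_time_before(): "a" is before "b"
 * when the unsigned difference, reinterpreted as signed, is negative.
 */
static int before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t near_wrap = UINT64_MAX - 5;	/* deadline just before wraparound */
	uint64_t wrapped = 10;			/* deadline just after wraparound */

	printf("%d\n", before(near_wrap, wrapped));	/* prints 1: still earlier */
	printf("%d\n", before(wrapped, near_wrap));	/* prints 0 */
	return 0;
}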
+
/*
* This is the priority-queue data structure of the RT scheduling class:
*/
@@ -108,6 +144,47 @@ struct rt_bandwidth {
u64 rt_runtime;
struct hrtimer rt_period_timer;
};
+/*
+ * To keep the bandwidth of -deadline tasks and groups under control
+ * we need some place where we can:
+ * - store the maximum -deadline bandwidth of the system (the group);
+ * - cache the fraction of that bandwidth that is currently allocated.
+ *
+ * This is all done in the data structure below. It is similar to the
+ * one used for RT-throttling (rt_bandwidth), with the main difference
+ * that, since here we are only interested in admission control, we
+ * do not decrease any runtime while the group "executes", nor do we
+ * need a timer to replenish it.
+ *
+ * With respect to SMP, the bandwidth is given on a per-CPU basis,
+ * meaning that:
+ * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
+ * - dl_total_bw array contains, in the i-th element, the currently
+ * allocated bandwidth on the i-th CPU.
+ * Moreover, groups consume bandwidth on each CPU, while tasks only
+ * consume bandwidth on the CPU they're running on.
+ * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
+ * that will be shown the next time the proc or cgroup controls are
+ * read. It can, in turn, be changed by writing to its own
+ * control.
+ */
+struct dl_bandwidth {
+ raw_spinlock_t dl_runtime_lock;
+ u64 dl_runtime;
+ u64 dl_period;
+};
+
+static inline int dl_bandwidth_enabled(void)
+{
+ return sysctl_sched_rt_runtime >= 0;
+}
+
+extern struct dl_bw *dl_bw_of(int i);
+
+struct dl_bw {
+ raw_spinlock_t lock;
+ u64 bw, total_bw;
+};
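total_bw accumulates the admitted tasks' runtime/period fractions while bw caps what each CPU may hand out, so admission control reduces to a comparison. The following standalone sketch shows that arithmetic; it is hedged: the fixed-point scale, the helper names (ratio, admit) and the sample numbers are illustrative, and this is not the kernel's actual admission-control code.

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 10	/* illustrative fixed-point scale, not the kernel's constant */

/* runtime/period as a fixed-point fraction, in the spirit of to_ratio() */
static uint64_t ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT) / period;
}

/* would-be admission test against a dl_bw-style {bw, total_bw} pair */
static int admit(uint64_t bw, uint64_t *total_bw, int cpus,
		 uint64_t period, uint64_t runtime)
{
	uint64_t new_bw = ratio(period, runtime);

	if (*total_bw + new_bw > (uint64_t)cpus * bw)
		return 0;		/* would exceed the cap: reject */
	*total_bw += new_bw;		/* book the bandwidth */
	return 1;
}

int main(void)
{
	uint64_t cap = ratio(1000000, 950000);	/* 95% per CPU, like the RT default */
	uint64_t total = 0;

	/* 30 ms runtime every 100 ms: 30% utilization, admitted */
	printf("%d\n", admit(cap, &total, 1, 100000000, 30000000));
	/* another 70%: 30% + 70% exceeds 95%, rejected */
	printf("%d\n", admit(cap, &total, 1, 100000000, 70000000));
	return 0;
}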
extern struct mutex sched_domains_mutex;
@@ -364,6 +441,42 @@ struct rt_rq {
#endif
};
+/* Deadline class' related fields in a runqueue */
+struct dl_rq {
+ /* runqueue is an rbtree, ordered by deadline */
+ struct rb_root rb_root;
+ struct rb_node *rb_leftmost;
+
+ unsigned long dl_nr_running;
+
+#ifdef CONFIG_SMP
+ /*
+ * Deadline values of the currently executing and the
+ * earliest ready task on this rq. Caching these facilitates
+ * the decision wether or not a ready but not running task
+ * should migrate somewhere else.
+ */
+ struct {
+ u64 curr;
+ u64 next;
+ } earliest_dl;
+
+ unsigned long dl_nr_migratory;
+ unsigned long dl_nr_total;
+ int overloaded;
+
+ /*
+ * Tasks on this rq that can be pushed away. They are kept in
+ * an rb-tree, ordered by tasks' deadlines, with caching
+ * of the leftmost (earliest deadline) element.
+ */
+ struct rb_root pushable_dl_tasks_root;
+ struct rb_node *pushable_dl_tasks_leftmost;
+#else
+ struct dl_bw dl_bw;
+#endif
+};
+
#ifdef CONFIG_SMP
/*
@@ -382,6 +495,15 @@ struct root_domain {
cpumask_var_t online;
/*
+ * The bit corresponding to a CPU gets set here if such CPU has more
+ * than one runnable -deadline task (as it is below for RT tasks).
+ */
+ cpumask_var_t dlo_mask;
+ atomic_t dlo_count;
+ struct dl_bw dl_bw;
+ struct cpudl cpudl;
+
+ /*
* The "RT overload" flag: it gets set if a CPU has more than
* one runnable RT task.
*/
@@ -432,6 +554,7 @@ struct rq {
struct cfs_rq cfs;
struct rt_rq rt;
+ struct dl_rq dl;
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
@@ -827,8 +950,6 @@ static inline u64 global_rt_runtime(void)
return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
-
-
static inline int task_current(struct rq *rq, struct task_struct *p)
{
return rq->curr == p;
@@ -988,6 +1109,7 @@ static const u32 prio_to_wmult[40] = {
#else
#define ENQUEUE_WAKING 0
#endif
+#define ENQUEUE_REPLENISH 8
#define DEQUEUE_SLEEP 1
@@ -1023,6 +1145,7 @@ struct sched_class {
void (*set_curr_task) (struct rq *rq);
void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
void (*task_fork) (struct task_struct *p);
+ void (*task_dead) (struct task_struct *p);
void (*switched_from) (struct rq *this_rq, struct task_struct *task);
void (*switched_to) (struct rq *this_rq, struct task_struct *task);
@@ -1042,6 +1165,7 @@ struct sched_class {
for (class = sched_class_highest; class; class = class->next)
extern const struct sched_class stop_sched_class;
+extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;
@@ -1051,7 +1175,7 @@ extern const struct sched_class idle_sched_class;
extern void update_group_power(struct sched_domain *sd, int cpu);
-extern void trigger_load_balance(struct rq *rq, int cpu);
+extern void trigger_load_balance(struct rq *rq);
extern void idle_balance(int this_cpu, struct rq *this_rq);
extern void idle_enter_fair(struct rq *this_rq);
@@ -1068,8 +1192,11 @@ static inline void idle_balance(int cpu, struct rq *rq)
extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);
+
+extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);
extern void resched_task(struct task_struct *p);
extern void resched_cpu(int cpu);
@@ -1077,6 +1204,12 @@ extern void resched_cpu(int cpu);
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
+extern struct dl_bandwidth def_dl_bandwidth;
+extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
+extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
+
+unsigned long to_ratio(u64 period, u64 runtime);
+
extern void update_idle_cpu_load(struct rq *this_rq);
extern void init_task_runnable_average(struct task_struct *p);
@@ -1353,6 +1486,7 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
+extern void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq);
extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 47197de..fdb6bb0 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -103,7 +103,7 @@ get_rr_interval_stop(struct rq *rq, struct task_struct *task)
* Simple, special scheduling class for the per-CPU stop tasks:
*/
const struct sched_class stop_sched_class = {
- .next = &rt_sched_class,
+ .next = &dl_sched_class,
.enqueue_task = enqueue_task_stop,
.dequeue_task = dequeue_task_stop,
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 11025cc..8a1e6e1 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -89,7 +89,7 @@ static void wakeup_softirqd(void)
* where hardirqs are disabled legitimately:
*/
#ifdef CONFIG_TRACE_IRQFLAGS
-static void __local_bh_disable(unsigned long ip, unsigned int cnt)
+void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
unsigned long flags;
@@ -107,33 +107,21 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
/*
* Were softirqs turned off above:
*/
- if (softirq_count() == cnt)
+ if (softirq_count() == (cnt & SOFTIRQ_MASK))
trace_softirqs_off(ip);
raw_local_irq_restore(flags);
if (preempt_count() == cnt)
trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
-#else /* !CONFIG_TRACE_IRQFLAGS */
-static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
-{
- preempt_count_add(cnt);
- barrier();
-}
+EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */
-void local_bh_disable(void)
-{
- __local_bh_disable(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
-}
-
-EXPORT_SYMBOL(local_bh_disable);
-
static void __local_bh_enable(unsigned int cnt)
{
WARN_ON_ONCE(!irqs_disabled());
- if (softirq_count() == cnt)
+ if (softirq_count() == (cnt & SOFTIRQ_MASK))
trace_softirqs_on(_RET_IP_);
preempt_count_sub(cnt);
}
@@ -151,7 +139,7 @@ void _local_bh_enable(void)
EXPORT_SYMBOL(_local_bh_enable);
-static inline void _local_bh_enable_ip(unsigned long ip)
+void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -166,7 +154,7 @@ static inline void _local_bh_enable_ip(unsigned long ip)
* Keep preemption disabled until we are done with
* softirq processing:
*/
- preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);
+ preempt_count_sub(cnt - 1);
if (unlikely(!in_interrupt() && local_softirq_pending())) {
/*
@@ -182,18 +170,7 @@ static inline void _local_bh_enable_ip(unsigned long ip)
#endif
preempt_check_resched();
}
-
-void local_bh_enable(void)
-{
- _local_bh_enable_ip(_RET_IP_);
-}
-EXPORT_SYMBOL(local_bh_enable);
-
-void local_bh_enable_ip(unsigned long ip)
-{
- _local_bh_enable_ip(ip);
-}
-EXPORT_SYMBOL(local_bh_enable_ip);
+EXPORT_SYMBOL(__local_bh_enable_ip);
/*
* We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
@@ -211,14 +188,48 @@ EXPORT_SYMBOL(local_bh_enable_ip);
#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
+#ifdef CONFIG_TRACE_IRQFLAGS
+/*
+ * When we run softirqs from irq_exit() and thus on the hardirq stack we need
+ * to keep the lockdep irq context tracking as tight as possible in order to
+ * not mis-qualify lock contexts and miss possible deadlocks.
+ */
+
+static inline bool lockdep_softirq_start(void)
+{
+ bool in_hardirq = false;
+
+ if (trace_hardirq_context(current)) {
+ in_hardirq = true;
+ trace_hardirq_exit();
+ }
+
+ lockdep_softirq_enter();
+
+ return in_hardirq;
+}
+
+static inline void lockdep_softirq_end(bool in_hardirq)
+{
+ lockdep_softirq_exit();
+
+ if (in_hardirq)
+ trace_hardirq_enter();
+}
+#else
+static inline bool lockdep_softirq_start(void) { return false; }
+static inline void lockdep_softirq_end(bool in_hardirq) { }
+#endif
+
asmlinkage void __do_softirq(void)
{
- struct softirq_action *h;
- __u32 pending;
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
- int cpu;
unsigned long old_flags = current->flags;
int max_restart = MAX_SOFTIRQ_RESTART;
+ struct softirq_action *h;
+ bool in_hardirq;
+ __u32 pending;
+ int cpu;
/*
 * Mask out PF_MEMALLOC as current task context is borrowed for the
@@ -230,8 +241,8 @@ asmlinkage void __do_softirq(void)
pending = local_softirq_pending();
account_irq_enter_time(current);
- __local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
- lockdep_softirq_enter();
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+ in_hardirq = lockdep_softirq_start();
cpu = smp_processor_id();
restart:
@@ -278,16 +289,13 @@ restart:
wakeup_softirqd();
}
- lockdep_softirq_exit();
-
+ lockdep_softirq_end(in_hardirq);
account_irq_exit_time(current);
__local_bh_enable(SOFTIRQ_OFFSET);
WARN_ON_ONCE(in_interrupt());
tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
-
-
asmlinkage void do_softirq(void)
{
__u32 pending;
@@ -311,8 +319,6 @@ asmlinkage void do_softirq(void)
*/
void irq_enter(void)
{
- int cpu = smp_processor_id();
-
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -320,7 +326,7 @@ void irq_enter(void)
* here, as softirq will be serviced on return from interrupt.
*/
local_bh_disable();
- tick_check_idle(cpu);
+ tick_check_idle();
_local_bh_enable();
}
@@ -375,13 +381,13 @@ void irq_exit(void)
#endif
account_irq_exit_time(current);
- trace_hardirq_exit();
preempt_count_sub(HARDIRQ_OFFSET);
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
tick_irq_exit();
rcu_irq_exit();
+ trace_hardirq_exit(); /* must be last! */
}
/*
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 34a6047..c8da99f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -385,13 +385,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
{
- .procname = "numa_balancing_settle_count",
- .data = &sysctl_numa_balancing_settle_count,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
.procname = "numa_balancing_migrate_deferred",
.data = &sysctl_numa_balancing_migrate_deferred,
.maxlen = sizeof(unsigned int),
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 68b7993..0abb364 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -74,7 +74,7 @@ unsigned long long notrace sched_clock(void)
return cd.epoch_ns;
do {
- seq = read_seqcount_begin(&cd.seq);
+ seq = raw_read_seqcount_begin(&cd.seq);
epoch_cyc = cd.epoch_cyc;
epoch_ns = cd.epoch_ns;
} while (read_seqcount_retry(&cd.seq, seq));
@@ -99,10 +99,10 @@ static void notrace update_sched_clock(void)
cd.mult, cd.shift);
raw_local_irq_save(flags);
- write_seqcount_begin(&cd.seq);
+ raw_write_seqcount_begin(&cd.seq);
cd.epoch_ns = ns;
cd.epoch_cyc = cyc;
- write_seqcount_end(&cd.seq);
+ raw_write_seqcount_end(&cd.seq);
raw_local_irq_restore(flags);
}
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 9532690..43780ab 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -538,10 +538,10 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
* Called from irq_enter() when idle was interrupted to reenable the
* per cpu device.
*/
-void tick_check_oneshot_broadcast(int cpu)
+void tick_check_oneshot_broadcast_this_cpu(void)
{
- if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
- struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+ if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
+ struct tick_device *td = &__get_cpu_var(tick_cpu_device);
/*
* We might be in the middle of switching over from
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 162b03a..20b2fe3 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -85,6 +85,7 @@ static void tick_periodic(int cpu)
do_timer(1);
write_sequnlock(&jiffies_lock);
+ update_wall_time();
}
update_process_times(user_mode(get_irq_regs()));
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 18e71f7..8329669 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -51,7 +51,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
extern int tick_broadcast_oneshot_active(void);
-extern void tick_check_oneshot_broadcast(int cpu);
+extern void tick_check_oneshot_broadcast_this_cpu(void);
bool tick_broadcast_oneshot_available(void);
# else /* BROADCAST */
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
@@ -62,7 +62,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
static inline void tick_broadcast_switch_to_oneshot(void) { }
static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
static inline int tick_broadcast_oneshot_active(void) { return 0; }
-static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
static inline bool tick_broadcast_oneshot_available(void) { return true; }
# endif /* !BROADCAST */
@@ -155,3 +155,4 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
#endif
extern void do_timer(unsigned long ticks);
+extern void update_wall_time(void);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index ea20f7d..08cb0c3 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -86,6 +86,7 @@ static void tick_do_update_jiffies64(ktime_t now)
tick_next_period = ktime_add(last_jiffies_update, tick_period);
}
write_sequnlock(&jiffies_lock);
+ update_wall_time();
}
/*
@@ -177,7 +178,7 @@ static bool can_stop_full_tick(void)
* TODO: kick full dynticks CPUs when
* sched_clock_stable is set.
*/
- if (!sched_clock_stable) {
+ if (!sched_clock_stable()) {
trace_tick_stop(0, "unstable sched clock\n");
/*
* Don't allow the user to think they can get
@@ -391,11 +392,9 @@ __setup("nohz=", setup_tick_nohz);
*/
static void tick_nohz_update_jiffies(ktime_t now)
{
- int cpu = smp_processor_id();
- struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
unsigned long flags;
- ts->idle_waketime = now;
+ __this_cpu_write(tick_cpu_sched.idle_waketime, now);
local_irq_save(flags);
tick_do_update_jiffies64(now);
@@ -426,17 +425,15 @@ update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_upda
}
-static void tick_nohz_stop_idle(int cpu, ktime_t now)
+static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
- struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-
- update_ts_time_stats(cpu, ts, now, NULL);
+ update_ts_time_stats(smp_processor_id(), ts, now, NULL);
ts->idle_active = 0;
sched_clock_idle_wakeup_event(0);
}
-static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
ktime_t now = ktime_get();
@@ -754,7 +751,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
ktime_t now, expires;
int cpu = smp_processor_id();
- now = tick_nohz_start_idle(cpu, ts);
+ now = tick_nohz_start_idle(ts);
if (can_stop_idle_tick(cpu, ts)) {
int was_stopped = ts->tick_stopped;
@@ -911,8 +908,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
*/
void tick_nohz_idle_exit(void)
{
- int cpu = smp_processor_id();
- struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
ktime_t now;
local_irq_disable();
@@ -925,7 +921,7 @@ void tick_nohz_idle_exit(void)
now = ktime_get();
if (ts->idle_active)
- tick_nohz_stop_idle(cpu, now);
+ tick_nohz_stop_idle(ts, now);
if (ts->tick_stopped) {
tick_nohz_restart_sched_tick(ts, now);
@@ -1009,12 +1005,10 @@ static void tick_nohz_switch_to_nohz(void)
* timer and do not touch the other magic bits which need to be done
* when idle is left.
*/
-static void tick_nohz_kick_tick(int cpu, ktime_t now)
+static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
{
#if 0
/* Switch back to 2.6.27 behaviour */
-
- struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
ktime_t delta;
/*
@@ -1029,36 +1023,36 @@ static void tick_nohz_kick_tick(int cpu, ktime_t now)
#endif
}
-static inline void tick_check_nohz(int cpu)
+static inline void tick_check_nohz_this_cpu(void)
{
- struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
ktime_t now;
if (!ts->idle_active && !ts->tick_stopped)
return;
now = ktime_get();
if (ts->idle_active)
- tick_nohz_stop_idle(cpu, now);
+ tick_nohz_stop_idle(ts, now);
if (ts->tick_stopped) {
tick_nohz_update_jiffies(now);
- tick_nohz_kick_tick(cpu, now);
+ tick_nohz_kick_tick(ts, now);
}
}
#else
static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz(int cpu) { }
+static inline void tick_check_nohz_this_cpu(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
/*
* Called from irq_enter to notify about the possible interruption of idle()
*/
-void tick_check_idle(int cpu)
+void tick_check_idle(void)
{
- tick_check_oneshot_broadcast(cpu);
- tick_check_nohz(cpu);
+ tick_check_oneshot_broadcast_this_cpu();
+ tick_check_nohz_this_cpu();
}
/*
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 87b4f00..0aa4ce81 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -77,7 +77,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
tk->wall_to_monotonic = wtm;
set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
tk->offs_real = timespec_to_ktime(tmp);
- tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tk->tai_offset, 0));
+ tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}
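The switch from ktime_sub() to ktime_add() follows from the offset definitions: offs_real = CLOCK_REALTIME - CLOCK_MONOTONIC and CLOCK_TAI = CLOCK_REALTIME + tai_offset, so the monotonic-to-TAI offset must be offs_tai = offs_real + tai_offset; the old code applied tai_offset with the wrong sign.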
static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
@@ -90,8 +90,9 @@ static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
}
/**
- * timekeeper_setup_internals - Set up internals to use clocksource clock.
+ * tk_setup_internals - Set up internals to use clocksource clock.
*
+ * @tk: The target timekeeper to setup.
* @clock: Pointer to clocksource.
*
* Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
@@ -595,7 +596,7 @@ s32 timekeeping_get_tai_offset(void)
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
tk->tai_offset = tai_offset;
- tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tai_offset, 0));
+ tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}
/**
@@ -610,6 +611,7 @@ void timekeeping_set_tai_offset(s32 tai_offset)
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&timekeeper_seq);
__timekeeping_set_tai_offset(tk, tai_offset);
+ timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
write_seqcount_end(&timekeeper_seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
clock_was_set();
@@ -1023,6 +1025,8 @@ static int timekeeping_suspend(void)
timekeeping_suspend_time =
timespec_add(timekeeping_suspend_time, delta_delta);
}
+
+ timekeeping_update(tk, TK_MIRROR);
write_seqcount_end(&timekeeper_seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
@@ -1130,16 +1134,6 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
* we can adjust by 1.
*/
error >>= 2;
- /*
- * XXX - In update_wall_time, we round up to the next
- * nanosecond, and store the amount rounded up into
- * the error. This causes the likely below to be unlikely.
- *
- * The proper fix is to avoid rounding up by using
- * the high precision tk->xtime_nsec instead of
- * xtime.tv_nsec everywhere. Fixing this will take some
- * time.
- */
if (likely(error <= interval))
adj = 1;
else
@@ -1255,7 +1249,7 @@ out_adjust:
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
- unsigned int action = 0;
+ unsigned int clock_set = 0;
while (tk->xtime_nsec >= nsecps) {
int leap;
@@ -1277,11 +1271,10 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
- clock_was_set_delayed();
- action = TK_CLOCK_WAS_SET;
+ clock_set = TK_CLOCK_WAS_SET;
}
}
- return action;
+ return clock_set;
}
/**
@@ -1294,7 +1287,8 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
* Returns the unconsumed cycles.
*/
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
- u32 shift)
+ u32 shift,
+ unsigned int *clock_set)
{
cycle_t interval = tk->cycle_interval << shift;
u64 raw_nsecs;
@@ -1308,7 +1302,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
tk->cycle_last += interval;
tk->xtime_nsec += tk->xtime_interval << shift;
- accumulate_nsecs_to_secs(tk);
+ *clock_set |= accumulate_nsecs_to_secs(tk);
/* Accumulate raw time */
raw_nsecs = (u64)tk->raw_interval << shift;
@@ -1359,14 +1353,14 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
* update_wall_time - Uses the current clocksource to increment the wall time
*
*/
-static void update_wall_time(void)
+void update_wall_time(void)
{
struct clocksource *clock;
struct timekeeper *real_tk = &timekeeper;
struct timekeeper *tk = &shadow_timekeeper;
cycle_t offset;
int shift = 0, maxshift;
- unsigned int action;
+ unsigned int clock_set = 0;
unsigned long flags;
raw_spin_lock_irqsave(&timekeeper_lock, flags);
@@ -1401,7 +1395,8 @@ static void update_wall_time(void)
maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
shift = min(shift, maxshift);
while (offset >= tk->cycle_interval) {
- offset = logarithmic_accumulation(tk, offset, shift);
+ offset = logarithmic_accumulation(tk, offset, shift,
+ &clock_set);
if (offset < tk->cycle_interval<<shift)
shift--;
}
@@ -1419,7 +1414,7 @@ static void update_wall_time(void)
* Finally, make sure that after the rounding
* xtime_nsec isn't larger than NSEC_PER_SEC
*/
- action = accumulate_nsecs_to_secs(tk);
+ clock_set |= accumulate_nsecs_to_secs(tk);
write_seqcount_begin(&timekeeper_seq);
/* Update clock->cycle_last with the new value */
@@ -1435,10 +1430,12 @@ static void update_wall_time(void)
* updating.
*/
memcpy(real_tk, tk, sizeof(*tk));
- timekeeping_update(real_tk, action);
+ timekeeping_update(real_tk, clock_set);
write_seqcount_end(&timekeeper_seq);
out:
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+ if (clock_set)
+ clock_was_set();
}
/**
@@ -1583,7 +1580,6 @@ struct timespec get_monotonic_coarse(void)
void do_timer(unsigned long ticks)
{
jiffies_64 += ticks;
- update_wall_time();
calc_global_load(ticks);
}
@@ -1698,12 +1694,14 @@ int do_adjtimex(struct timex *txc)
if (tai != orig_tai) {
__timekeeping_set_tai_offset(tk, tai);
- update_pvclock_gtod(tk, true);
- clock_was_set_delayed();
+ timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
}
write_seqcount_end(&timekeeper_seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+ if (tai != orig_tai)
+ clock_was_set();
+
ntp_notify_cmos_timer();
return ret;
@@ -1739,4 +1737,5 @@ void xtime_update(unsigned long ticks)
write_seqlock(&jiffies_lock);
do_timer(ticks);
write_sequnlock(&jiffies_lock);
+ update_wall_time();
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index cc2f66f..294b8a2 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2558,7 +2558,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
if (unlikely(test_time_stamp(delta))) {
int local_clock_stable = 1;
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
- local_clock_stable = sched_clock_stable;
+ local_clock_stable = sched_clock_stable();
#endif
WARN_ONCE(delta > (1ULL << 59),
KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index fee77e1..6e32635 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -16,6 +16,7 @@
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
+#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"
@@ -27,6 +28,8 @@ static int wakeup_cpu;
static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
static int wakeup_rt;
+static int wakeup_dl;
+static int tracing_dl = 0;
static arch_spinlock_t wakeup_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -437,6 +440,7 @@ static void __wakeup_reset(struct trace_array *tr)
{
wakeup_cpu = -1;
wakeup_prio = -1;
+ tracing_dl = 0;
if (wakeup_task)
put_task_struct(wakeup_task);
@@ -472,9 +476,17 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
tracing_record_cmdline(p);
tracing_record_cmdline(current);
- if ((wakeup_rt && !rt_task(p)) ||
- p->prio >= wakeup_prio ||
- p->prio >= current->prio)
+ /*
+ * Semantics are as follows:
+ * - wakeup tracer handles all tasks in the system, independently
+ * from their scheduling class;
+ * - wakeup_rt tracer handles tasks belonging to sched_dl and
+ * sched_rt class;
+ * - wakeup_dl handles tasks belonging to sched_dl class only.
+ */
+ if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
+ (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
+ (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
return;
pc = preempt_count();
@@ -486,7 +498,8 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
arch_spin_lock(&wakeup_lock);
/* check for races. */
- if (!tracer_enabled || p->prio >= wakeup_prio)
+ if (!tracer_enabled || tracing_dl ||
+ (!dl_task(p) && p->prio >= wakeup_prio))
goto out_locked;
/* reset the trace */
@@ -496,6 +509,15 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
wakeup_current_cpu = wakeup_cpu;
wakeup_prio = p->prio;
+ /*
+ * Once you start tracing a -deadline task, don't bother tracing
+ * another task until the first one wakes up.
+ */
+ if (dl_task(p))
+ tracing_dl = 1;
+ else
+ tracing_dl = 0;
+
wakeup_task = p;
get_task_struct(wakeup_task);
@@ -597,16 +619,25 @@ static int __wakeup_tracer_init(struct trace_array *tr)
static int wakeup_tracer_init(struct trace_array *tr)
{
+ wakeup_dl = 0;
wakeup_rt = 0;
return __wakeup_tracer_init(tr);
}
static int wakeup_rt_tracer_init(struct trace_array *tr)
{
+ wakeup_dl = 0;
wakeup_rt = 1;
return __wakeup_tracer_init(tr);
}
+static int wakeup_dl_tracer_init(struct trace_array *tr)
+{
+ wakeup_dl = 1;
+ wakeup_rt = 0;
+ return __wakeup_tracer_init(tr);
+}
+
static void wakeup_tracer_reset(struct trace_array *tr)
{
int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
@@ -674,6 +705,28 @@ static struct tracer wakeup_rt_tracer __read_mostly =
.use_max_tr = true,
};
+static struct tracer wakeup_dl_tracer __read_mostly =
+{
+ .name = "wakeup_dl",
+ .init = wakeup_dl_tracer_init,
+ .reset = wakeup_tracer_reset,
+ .start = wakeup_tracer_start,
+ .stop = wakeup_tracer_stop,
+ .wait_pipe = poll_wait_pipe,
+ .print_max = true,
+ .print_header = wakeup_print_header,
+ .print_line = wakeup_print_line,
+ .flags = &tracer_flags,
+ .set_flag = wakeup_set_flag,
+ .flag_changed = wakeup_flag_changed,
+#ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_wakeup,
+#endif
+ .open = wakeup_trace_open,
+ .close = wakeup_trace_close,
+ .use_max_tr = true,
+};
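Once registered, the new tracer is selected the same way as the existing wakeup tracers, by writing its name to current_tracer. A small user-space sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug path (adjust the path if tracing lives elsewhere):

#include <stdio.h>

/* Select the new wakeup_dl tracer via the tracing control file. */
int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/current_tracer", "w");

	if (!f) {
		perror("current_tracer");
		return 1;
	}
	fputs("wakeup_dl\n", f);
	fclose(f);
	return 0;
}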
+
__init static int init_wakeup_tracer(void)
{
int ret;
@@ -686,6 +739,10 @@ __init static int init_wakeup_tracer(void)
if (ret)
return ret;
+ ret = register_tracer(&wakeup_dl_tracer);
+ if (ret)
+ return ret;
+
return 0;
}
core_initcall(init_wakeup_tracer);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index a7329b7..e98fca6 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1022,11 +1022,16 @@ trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
- /* Make this a RT thread, doesn't need to be too high */
- static const struct sched_param param = { .sched_priority = 5 };
+ /* Make this a -deadline thread */
+ static const struct sched_attr attr = {
+ .sched_policy = SCHED_DEADLINE,
+ .sched_runtime = 100000ULL,
+ .sched_deadline = 10000000ULL,
+ .sched_period = 10000000ULL
+ };
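For reference, these parameters request 100000 ns (100 us) of runtime every 10000000 ns (10 ms) period, with the relative deadline equal to the period: a utilization of 100000 / 10000000 = 1%, small enough to pass admission control while still running ahead of any non-deadline task.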
struct completion *x = data;
- sched_setscheduler(current, SCHED_FIFO, &param);
+ sched_setattr(current, &attr);
/* Make it know we have a new prio */
complete(x);
@@ -1040,8 +1045,8 @@ static int trace_wakeup_test_thread(void *data)
/* we are awake, now wait to disappear */
while (!kthread_should_stop()) {
/*
- * This is an RT task, do short sleeps to let
- * others run.
+ * This will likely be the system's top-priority
+ * task, so do short sleeps to let others run.
*/
msleep(100);
}
@@ -1054,21 +1059,21 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
unsigned long save_max = tracing_max_latency;
struct task_struct *p;
- struct completion isrt;
+ struct completion is_ready;
unsigned long count;
int ret;
- init_completion(&isrt);
+ init_completion(&is_ready);
- /* create a high prio thread */
- p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
+ /* create a -deadline thread */
+ p = kthread_run(trace_wakeup_test_thread, &is_ready, "ftrace-test");
if (IS_ERR(p)) {
printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
return -1;
}
- /* make sure the thread is running at an RT prio */
- wait_for_completion(&isrt);
+ /* make sure the thread is running at -deadline policy */
+ wait_for_completion(&is_ready);
/* start the tracing */
ret = tracer_init(trace, tr);
@@ -1082,19 +1087,19 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
while (p->on_rq) {
/*
- * Sleep to make sure the RT thread is asleep too.
+ * Sleep to make sure the -deadline thread is asleep too.
* On virtual machines we can't rely on timings,
* but we want to make sure this test still works.
*/
msleep(100);
}
- init_completion(&isrt);
+ init_completion(&is_ready);
wake_up_process(p);
/* Wait for the task to wake up */
- wait_for_completion(&isrt);
+ wait_for_completion(&is_ready);
/* stop the tracing. */
tracing_stop();
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index db25707..6982094 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -761,6 +761,15 @@ config PANIC_ON_OOPS_VALUE
default 0 if !PANIC_ON_OOPS
default 1 if PANIC_ON_OOPS
+config PANIC_TIMEOUT
+ int "panic timeout"
+ default 0
+ help
+ Set the timeout value (in seconds) until a reboot occurs when
+ the kernel panics. If n = 0, then we wait forever. A timeout
+ value n > 0 will wait n seconds before rebooting, while a timeout
+ value n < 0 will reboot immediately.
+
config SCHED_DEBUG
bool "Collect scheduler debugging info"
depends on DEBUG_KERNEL && PROC_FS
diff --git a/lib/kobject.c b/lib/kobject.c
index 5b4b888..b0b2666 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -13,11 +13,11 @@
*/
#include <linux/kobject.h>
-#include <linux/kobj_completion.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/stat.h>
#include <linux/slab.h>
+#include <linux/random.h>
/**
* kobject_namespace - return @kobj's namespace tag
@@ -65,13 +65,17 @@ static int populate_dir(struct kobject *kobj)
static int create_dir(struct kobject *kobj)
{
+ const struct kobj_ns_type_operations *ops;
int error;
error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj));
- if (!error) {
- error = populate_dir(kobj);
- if (error)
- sysfs_remove_dir(kobj);
+ if (error)
+ return error;
+
+ error = populate_dir(kobj);
+ if (error) {
+ sysfs_remove_dir(kobj);
+ return error;
}
/*
@@ -80,7 +84,20 @@ static int create_dir(struct kobject *kobj)
*/
sysfs_get(kobj->sd);
- return error;
+ /*
+ * If @kobj has ns_ops, its children need to be filtered based on
+ * their namespace tags. Enable namespace support on @kobj->sd.
+ */
+ ops = kobj_child_ns_ops(kobj);
+ if (ops) {
+ BUG_ON(ops->type <= KOBJ_NS_TYPE_NONE);
+ BUG_ON(ops->type >= KOBJ_NS_TYPES);
+ BUG_ON(!kobj_ns_type_registered(ops->type));
+
+ kernfs_enable_ns(kobj->sd);
+ }
+
+ return 0;
}
static int get_kobj_path_length(struct kobject *kobj)
@@ -247,8 +264,10 @@ int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
return 0;
kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
- if (!kobj->name)
+ if (!kobj->name) {
+ kobj->name = old_name;
return -ENOMEM;
+ }
/* ewww... some of these buggers have '/' in the name ... */
while ((s = strchr(kobj->name, '/')))
@@ -346,7 +365,7 @@ static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
*
* If @parent is set, then the parent of the @kobj will be set to it.
* If @parent is NULL, then the parent of the @kobj will be set to the
- * kobject associted with the kset assigned to this kobject. If no kset
+ * kobject associated with the kset assigned to this kobject. If no kset
* is assigned to the kobject, then the kobject will be located in the
* root of the sysfs tree.
*
@@ -536,7 +555,7 @@ out:
*/
void kobject_del(struct kobject *kobj)
{
- struct sysfs_dirent *sd;
+ struct kernfs_node *sd;
if (!kobj)
return;
@@ -625,10 +644,12 @@ static void kobject_release(struct kref *kref)
{
struct kobject *kobj = container_of(kref, struct kobject, kref);
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
- pr_info("kobject: '%s' (%p): %s, parent %p (delayed)\n",
- kobject_name(kobj), kobj, __func__, kobj->parent);
+ unsigned long delay = HZ + HZ * (get_random_int() & 0x3);
+ pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n",
+ kobject_name(kobj), kobj, __func__, kobj->parent, delay);
INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
- schedule_delayed_work(&kobj->release, HZ);
+
+ schedule_delayed_work(&kobj->release, delay);
#else
kobject_cleanup(kobj);
#endif
@@ -760,55 +781,6 @@ const struct sysfs_ops kobj_sysfs_ops = {
};
/**
- * kobj_completion_init - initialize a kobj_completion object.
- * @kc: kobj_completion
- * @ktype: type of kobject to initialize
- *
- * kobj_completion structures can be embedded within structures with different
- * lifetime rules. During the release of the enclosing object, we can
- * wait on the release of the kobject so that we don't free it while it's
- * still busy.
- */
-void kobj_completion_init(struct kobj_completion *kc, struct kobj_type *ktype)
-{
- init_completion(&kc->kc_unregister);
- kobject_init(&kc->kc_kobj, ktype);
-}
-EXPORT_SYMBOL_GPL(kobj_completion_init);
-
-/**
- * kobj_completion_release - release a kobj_completion object
- * @kobj: kobject embedded in kobj_completion
- *
- * Used with kobject_release to notify waiters that the kobject has been
- * released.
- */
-void kobj_completion_release(struct kobject *kobj)
-{
- struct kobj_completion *kc = kobj_to_kobj_completion(kobj);
- complete(&kc->kc_unregister);
-}
-EXPORT_SYMBOL_GPL(kobj_completion_release);
-
-/**
- * kobj_completion_del_and_wait - release the kobject and wait for it
- * @kc: kobj_completion object to release
- *
- * Delete the kobject from sysfs and drop the reference count. Then wait
- * until any other outstanding references are also dropped. This routine
- * is only necessary once other references may have been taken on the
- * kobject. Typically this happens when the kobject has been published
- * to sysfs via kobject_add.
- */
-void kobj_completion_del_and_wait(struct kobj_completion *kc)
-{
- kobject_del(&kc->kc_kobj);
- kobject_put(&kc->kc_kobj);
- wait_for_completion(&kc->kc_unregister);
-}
-EXPORT_SYMBOL_GPL(kobj_completion_del_and_wait);
-
-/**
* kset_register - initialize and add a kset.
* @k: kset.
*/
@@ -835,6 +807,7 @@ void kset_unregister(struct kset *k)
{
if (!k)
return;
+ kobject_del(&k->kobj);
kobject_put(&k->kobj);
}
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 7473ee3..8280a5d 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -82,10 +82,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
fbc->count += count;
+ __this_cpu_sub(*fbc->counters, count - amount);
raw_spin_unlock_irqrestore(&fbc->lock, flags);
- __this_cpu_write(*fbc->counters, 0);
} else {
- __this_cpu_write(*fbc->counters, count);
+ this_cpu_add(*fbc->counters, amount);
}
preempt_enable();
}
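
The __percpu_counter_add() hunk replaces the read-then-__this_cpu_write() sequence with this_cpu_add()/__this_cpu_sub(), so a delta added by a competing context between the read and the write-back is not overwritten. A self-contained userspace analogue of that batching flow (pthreads, GCC __thread, invented names; the concurrency protection itself is only sketched):

#include <pthread.h>
#include <stdio.h>

#define BATCH 32

static long long global_count;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static __thread long long local_delta;

static void counter_add(long long amount)
{
    long long seen = local_delta + amount;

    if (seen >= BATCH || seen <= -BATCH) {
        pthread_mutex_lock(&lock);
        global_count += seen;
        pthread_mutex_unlock(&lock);
        /* subtract only what we sampled; a concurrent add survives */
        local_delta -= seen - amount;
    } else {
        local_delta += amount;
    }
}

int main(void)
{
    for (int i = 0; i < 100; i++)
        counter_add(1);
    printf("global=%lld local=%lld\n", global_count, local_delta);
    return 0;
}
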
diff --git a/mm/fremap.c b/mm/fremap.c
index 5bff081..bbc4d66 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -208,9 +208,10 @@ get_write_lock:
if (mapping_cap_account_dirty(mapping)) {
unsigned long addr;
struct file *file = get_file(vma->vm_file);
+ /* mmap_region may free vma; grab the info now */
+ vm_flags = vma->vm_flags;
- addr = mmap_region(file, start, size,
- vma->vm_flags, pgoff);
+ addr = mmap_region(file, start, size, vm_flags, pgoff);
fput(file);
if (IS_ERR_VALUE(addr)) {
err = addr;
@@ -218,7 +219,7 @@ get_write_lock:
BUG_ON(addr != start);
err = 0;
}
- goto out;
+ goto out_freed;
}
mutex_lock(&mapping->i_mmap_mutex);
flush_dcache_mmap_lock(mapping);
@@ -253,6 +254,7 @@ get_write_lock:
out:
if (vma)
vm_flags = vma->vm_flags;
+out_freed:
if (likely(!has_write_lock))
up_read(&mm->mmap_sem);
else
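
The fremap hunk snapshots vma->vm_flags before calling mmap_region(), which may free the vma. The same rule in isolation (plain C, invented struct mapping/remap(), not kernel code): copy whatever you still need out of an object before handing it to a routine that may free it, and never touch the pointer afterwards.

#include <stdio.h>
#include <stdlib.h>

struct mapping {
    unsigned long flags;
    unsigned long start;
};

/* May free @m, just like mmap_region() may free the vma it was given. */
static unsigned long remap(struct mapping *m)
{
    unsigned long addr = m->start;

    free(m);                /* m must not be touched after this call */
    return addr;
}

int main(void)
{
    struct mapping *m = malloc(sizeof(*m));
    unsigned long flags, addr;

    if (!m)
        return 1;
    m->flags = 0x8f;
    m->start = 0x40000000UL;

    flags = m->flags;       /* snapshot before remap() can free m */
    addr = remap(m);

    printf("addr=%#lx flags=%#lx\n", addr, flags);
    return 0;
}
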
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7de1bf8..95d1acb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -883,9 +883,6 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
goto out_unlock;
}
- /* mmap_sem prevents this happening but warn if that changes */
- WARN_ON(pmd_trans_migrating(pmd));
-
if (unlikely(pmd_trans_splitting(pmd))) {
/* split huge page running from under us */
spin_unlock(src_ptl);
@@ -1157,7 +1154,7 @@ alloc:
new_page = NULL;
if (unlikely(!new_page)) {
- if (is_huge_zero_pmd(orig_pmd)) {
+ if (!page) {
ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
address, pmd, orig_pmd, haddr);
} else {
@@ -1184,7 +1181,7 @@ alloc:
count_vm_event(THP_FAULT_ALLOC);
- if (is_huge_zero_pmd(orig_pmd))
+ if (!page)
clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
else
copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
@@ -1210,7 +1207,7 @@ alloc:
page_add_new_anon_rmap(new_page, vma, haddr);
set_pmd_at(mm, haddr, pmd, entry);
update_mmu_cache_pmd(vma, address, pmd);
- if (is_huge_zero_pmd(orig_pmd)) {
+ if (!page) {
add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
put_huge_zero_page();
} else {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bf5e894..7f1a356 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -338,7 +338,7 @@ struct mem_cgroup {
static size_t memcg_size(void)
{
return sizeof(struct mem_cgroup) +
- nr_node_ids * sizeof(struct mem_cgroup_per_node);
+ nr_node_ids * sizeof(struct mem_cgroup_per_node *);
}
/* internal only representation about the status of kmem accounting. */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index db08af9..fabe550 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -938,6 +938,16 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
BUG_ON(!PageHWPoison(p));
return SWAP_FAIL;
}
+ /*
+ * We pinned the head page for hwpoison handling,
+ * now we split the thp and we are interested in
+ * the hwpoisoned raw page, so move the refcount
+ * to it.
+ */
+ if (hpage != p) {
+ put_page(hpage);
+ get_page(p);
+ }
/* THP is split, so ppage should be the real poisoned page. */
ppage = p;
}
diff --git a/mm/mlock.c b/mm/mlock.c
index d480cd6..192e6ee 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -133,7 +133,10 @@ static void __munlock_isolation_failed(struct page *page)
/**
* munlock_vma_page - munlock a vma page
- * @page - page to be unlocked
+ * @page - page to be unlocked, either a normal page or THP page head
+ *
+ * returns the size of the page as a page mask (0 for normal page,
+ * HPAGE_PMD_NR - 1 for THP head page)
*
* called from munlock()/munmap() path with page supposedly on the LRU.
* When we munlock a page, because the vma where we found the page is being
@@ -148,21 +151,30 @@ static void __munlock_isolation_failed(struct page *page)
*/
unsigned int munlock_vma_page(struct page *page)
{
- unsigned int page_mask = 0;
+ unsigned int nr_pages;
BUG_ON(!PageLocked(page));
if (TestClearPageMlocked(page)) {
- unsigned int nr_pages = hpage_nr_pages(page);
+ nr_pages = hpage_nr_pages(page);
mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
- page_mask = nr_pages - 1;
if (!isolate_lru_page(page))
__munlock_isolated_page(page);
else
__munlock_isolation_failed(page);
+ } else {
+ nr_pages = hpage_nr_pages(page);
}
- return page_mask;
+ /*
+ * Regardless of the original PageMlocked flag, we determine nr_pages
+ * after touching the flag. This leaves a possible race with a THP page
+ * split, such that a whole THP page was munlocked, but nr_pages == 1.
+ * Returning a smaller mask due to that is OK, the worst that can
+ * happen is subsequent useless scanning of the former tail pages.
+ * The NR_MLOCK accounting can however become broken.
+ */
+ return nr_pages - 1;
}
/**
@@ -286,10 +298,12 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
int i;
int nr = pagevec_count(pvec);
- int delta_munlocked = -nr;
+ int delta_munlocked;
struct pagevec pvec_putback;
int pgrescued = 0;
+ pagevec_init(&pvec_putback, 0);
+
/* Phase 1: page isolation */
spin_lock_irq(&zone->lru_lock);
for (i = 0; i < nr; i++) {
@@ -318,18 +332,21 @@ skip_munlock:
/*
* We won't be munlocking this page in the next phase
* but we still need to release the follow_page_mask()
- * pin.
+ * pin. We cannot do it under lru_lock however. If it's
+ * the last pin, __page_cache_release would deadlock.
*/
+ pagevec_add(&pvec_putback, pvec->pages[i]);
pvec->pages[i] = NULL;
- put_page(page);
- delta_munlocked++;
}
}
+ delta_munlocked = -nr + pagevec_count(&pvec_putback);
__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
spin_unlock_irq(&zone->lru_lock);
+ /* Now we can release pins of pages that we are not munlocking */
+ pagevec_release(&pvec_putback);
+
/* Phase 2: page munlock */
- pagevec_init(&pvec_putback, 0);
for (i = 0; i < nr; i++) {
struct page *page = pvec->pages[i];
@@ -440,7 +457,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
while (start < end) {
struct page *page = NULL;
- unsigned int page_mask, page_increm;
+ unsigned int page_mask;
+ unsigned long page_increm;
struct pagevec pvec;
struct zone *zone;
int zoneid;
@@ -490,7 +508,9 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
goto next;
}
}
- page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+ /* It's a bug to munlock in the middle of a THP page */
+ VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
+ page_increm = 1 + page_mask;
start += page_increm * PAGE_SIZE;
next:
cond_resched();
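
munlock_vma_page() now always reports hpage_nr_pages(page) - 1, and the caller advances by 1 + page_mask pages rather than masking against the start address. The standalone sketch below (made-up constants standing in for PAGE_SHIFT/HPAGE_PMD_NR) only illustrates that address arithmetic: a THP head skips the whole huge page, a normal page advances by one.

#include <stdio.h>

#define PAGE_SHIFT   12
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define HPAGE_PMD_NR 512    /* 2 MB huge page on x86-64 */

/* 0 for a normal page, HPAGE_PMD_NR - 1 for a THP head page */
static unsigned int page_mask_for(int is_thp_head)
{
    return is_thp_head ? HPAGE_PMD_NR - 1 : 0;
}

int main(void)
{
    unsigned long start = 0x200000;     /* huge-page aligned */
    unsigned int page_mask = page_mask_for(1);
    unsigned long page_increm = 1 + page_mask;

    /* munlocking in the middle of a THP would be a bug */
    if ((start >> PAGE_SHIFT) & page_mask)
        fprintf(stderr, "start not THP aligned\n");

    start += page_increm * PAGE_SIZE;
    printf("next start: %#lx (%lu pages skipped)\n", start, page_increm);
    return 0;
}
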
diff --git a/mm/util.c b/mm/util.c
index f7bc209..808f375 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -390,7 +390,10 @@ struct address_space *page_mapping(struct page *page)
{
struct address_space *mapping = page->mapping;
- VM_BUG_ON(PageSlab(page));
+ /* This happens if someone calls flush_dcache_page on slab page */
+ if (unlikely(PageSlab(page)))
+ return NULL;
+
if (unlikely(PageSwapCache(page))) {
swp_entry_t entry;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 762896e..47c908f 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -530,6 +530,23 @@ static const struct header_ops vlan_header_ops = {
.parse = eth_header_parse,
};
+static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr,
+ unsigned int len)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+
+ return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+}
+
+static const struct header_ops vlan_passthru_header_ops = {
+ .create = vlan_passthru_hard_header,
+ .rebuild = dev_rebuild_header,
+ .parse = eth_header_parse,
+};
+
static struct device_type vlan_type = {
.name = "vlan",
};
@@ -573,7 +590,7 @@ static int vlan_dev_init(struct net_device *dev)
dev->needed_headroom = real_dev->needed_headroom;
if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
- dev->header_ops = real_dev->header_ops;
+ dev->header_ops = &vlan_passthru_header_ops;
dev->hard_header_len = real_dev->hard_header_len;
} else {
dev->header_ops = &vlan_header_ops;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index a2b480a..b9c8a6e 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -307,9 +307,9 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
hard_iface->bat_iv.ogm_buff = ogm_buff;
batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
- batadv_ogm_packet->header.packet_type = BATADV_IV_OGM;
- batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
- batadv_ogm_packet->header.ttl = 2;
+ batadv_ogm_packet->packet_type = BATADV_IV_OGM;
+ batadv_ogm_packet->version = BATADV_COMPAT_VERSION;
+ batadv_ogm_packet->ttl = 2;
batadv_ogm_packet->flags = BATADV_NO_FLAGS;
batadv_ogm_packet->reserved = 0;
batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
@@ -346,7 +346,7 @@ batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
- batadv_ogm_packet->header.ttl = BATADV_TTL;
+ batadv_ogm_packet->ttl = BATADV_TTL;
}
/* when do we schedule our own ogm to be sent */
@@ -435,7 +435,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
fwd_str, (packet_num > 0 ? "aggregated " : ""),
batadv_ogm_packet->orig,
ntohl(batadv_ogm_packet->seqno),
- batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl,
+ batadv_ogm_packet->tq, batadv_ogm_packet->ttl,
(batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
"on" : "off"),
hard_iface->net_dev->name,
@@ -491,7 +491,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
/* multihomed peer assumed
* non-primary OGMs are only broadcasted on their interface
*/
- if ((directlink && (batadv_ogm_packet->header.ttl == 1)) ||
+ if ((directlink && (batadv_ogm_packet->ttl == 1)) ||
(forw_packet->own && (forw_packet->if_incoming != primary_if))) {
/* FIXME: what about aggregated packets ? */
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -499,7 +499,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
(forw_packet->own ? "Sending own" : "Forwarding"),
batadv_ogm_packet->orig,
ntohl(batadv_ogm_packet->seqno),
- batadv_ogm_packet->header.ttl,
+ batadv_ogm_packet->ttl,
forw_packet->if_incoming->net_dev->name,
forw_packet->if_incoming->net_dev->dev_addr);
@@ -572,7 +572,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
*/
if ((!directlink) &&
(!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
- (batadv_ogm_packet->header.ttl != 1) &&
+ (batadv_ogm_packet->ttl != 1) &&
/* own packets originating non-primary
* interfaces leave only that interface
@@ -587,7 +587,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
* interface only - we still can aggregate
*/
if ((directlink) &&
- (new_bat_ogm_packet->header.ttl == 1) &&
+ (new_bat_ogm_packet->ttl == 1) &&
(forw_packet->if_incoming == if_incoming) &&
/* packets from direct neighbors or
@@ -778,7 +778,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
uint16_t tvlv_len;
- if (batadv_ogm_packet->header.ttl <= 1) {
+ if (batadv_ogm_packet->ttl <= 1) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
return;
}
@@ -798,7 +798,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
- batadv_ogm_packet->header.ttl--;
+ batadv_ogm_packet->ttl--;
memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
/* apply hop penalty */
@@ -807,7 +807,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Forwarding packet: tq: %i, ttl: %i\n",
- batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl);
+ batadv_ogm_packet->tq, batadv_ogm_packet->ttl);
/* switch of primaries first hop flag when forwarding */
batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP;
@@ -972,8 +972,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock);
if (dup_status == BATADV_NO_DUP) {
- orig_node->last_ttl = batadv_ogm_packet->header.ttl;
- neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
+ orig_node->last_ttl = batadv_ogm_packet->ttl;
+ neigh_node->last_ttl = batadv_ogm_packet->ttl;
}
batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node);
@@ -1247,7 +1247,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
* packet in an aggregation. Here we expect that the padding
* is always zero (or not 0x01)
*/
- if (batadv_ogm_packet->header.packet_type != BATADV_IV_OGM)
+ if (batadv_ogm_packet->packet_type != BATADV_IV_OGM)
return;
/* could be changed by schedule_own_packet() */
@@ -1267,8 +1267,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
batadv_ogm_packet->prev_sender,
ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq,
- batadv_ogm_packet->header.ttl,
- batadv_ogm_packet->header.version, has_directlink_flag);
+ batadv_ogm_packet->ttl,
+ batadv_ogm_packet->version, has_directlink_flag);
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -1433,7 +1433,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
* seqno and similar ttl as the non-duplicate
*/
sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
- similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
+ similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->ttl;
if (is_bidirect && ((dup_status == BATADV_NO_DUP) ||
(sameseq && similar_ttl)))
batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 6c8c393..b316a4c 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -349,7 +349,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
- switch (unicast_4addr_packet->u.header.packet_type) {
+ switch (unicast_4addr_packet->u.packet_type) {
case BATADV_UNICAST:
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"* encapsulated within a UNICAST packet\n");
@@ -374,7 +374,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
break;
default:
batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n",
- unicast_4addr_packet->u.header.packet_type);
+ unicast_4addr_packet->u.packet_type);
}
break;
case BATADV_BCAST:
@@ -387,7 +387,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
default:
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"* encapsulated within an unknown packet type (0x%x)\n",
- unicast_4addr_packet->u.header.packet_type);
+ unicast_4addr_packet->u.packet_type);
}
}
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 271d321..6ddb614 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -355,7 +355,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
skb->len + ETH_HLEN);
- packet->header.ttl--;
+ packet->ttl--;
batadv_send_skb_packet(skb, neigh_node->if_incoming,
neigh_node->addr);
ret = true;
@@ -444,9 +444,9 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
goto out_err;
/* Create one header to be copied to all fragments */
- frag_header.header.packet_type = BATADV_UNICAST_FRAG;
- frag_header.header.version = BATADV_COMPAT_VERSION;
- frag_header.header.ttl = BATADV_TTL;
+ frag_header.packet_type = BATADV_UNICAST_FRAG;
+ frag_header.version = BATADV_COMPAT_VERSION;
+ frag_header.ttl = BATADV_TTL;
frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
frag_header.reserved = 0;
frag_header.no = 0;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 29ae4ef..130cc32 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -194,7 +194,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
goto free_skb;
}
- if (icmp_header->header.packet_type != BATADV_ICMP) {
+ if (icmp_header->packet_type != BATADV_ICMP) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
len = -EINVAL;
@@ -243,9 +243,9 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
icmp_header->uid = socket_client->index;
- if (icmp_header->header.version != BATADV_COMPAT_VERSION) {
+ if (icmp_header->version != BATADV_COMPAT_VERSION) {
icmp_header->msg_type = BATADV_PARAMETER_PROBLEM;
- icmp_header->header.version = BATADV_COMPAT_VERSION;
+ icmp_header->version = BATADV_COMPAT_VERSION;
batadv_socket_add_packet(socket_client, icmp_header,
packet_len);
goto free_skb;
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index c51a5e5..faba0f6 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -277,7 +277,7 @@ int batadv_max_header_len(void)
sizeof(struct batadv_coded_packet));
#endif
- return header_len;
+ return header_len + ETH_HLEN;
}
/**
@@ -383,17 +383,17 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
- if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
+ if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: incompatible batman version (%i)\n",
- batadv_ogm_packet->header.version);
+ batadv_ogm_packet->version);
goto err_free;
}
/* all receive handlers return whether they received or reused
* the supplied skb. if not, we have to free the skb.
*/
- idx = batadv_ogm_packet->header.packet_type;
+ idx = batadv_ogm_packet->packet_type;
ret = (*batadv_rx_handler[idx])(skb, hard_iface);
if (ret == NET_RX_DROP)
@@ -426,8 +426,8 @@ static void batadv_recv_handler_init(void)
BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4);
BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4);
BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4);
- BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4);
- BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4);
+ BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, dst) != 4);
+ BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, dst) != 4);
/* broadcast packet */
batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
@@ -1119,9 +1119,9 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
skb_reserve(skb, ETH_HLEN);
tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
- unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV;
- unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION;
- unicast_tvlv_packet->header.ttl = BATADV_TTL;
+ unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
+ unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
+ unicast_tvlv_packet->ttl = BATADV_TTL;
unicast_tvlv_packet->reserved = 0;
unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
unicast_tvlv_packet->align = 0;
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 351e199..511d7e1 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -722,7 +722,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
{
if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno))
return false;
- if (orig_node->last_ttl != ogm_packet->header.ttl + 1)
+ if (orig_node->last_ttl != ogm_packet->ttl + 1)
return false;
if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender))
return false;
@@ -1082,9 +1082,9 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
coded_packet = (struct batadv_coded_packet *)skb_dest->data;
skb_reset_mac_header(skb_dest);
- coded_packet->header.packet_type = BATADV_CODED;
- coded_packet->header.version = BATADV_COMPAT_VERSION;
- coded_packet->header.ttl = packet1->header.ttl;
+ coded_packet->packet_type = BATADV_CODED;
+ coded_packet->version = BATADV_COMPAT_VERSION;
+ coded_packet->ttl = packet1->ttl;
/* Info about first unicast packet */
memcpy(coded_packet->first_source, first_source, ETH_ALEN);
@@ -1097,7 +1097,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
memcpy(coded_packet->second_source, second_source, ETH_ALEN);
memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN);
coded_packet->second_crc = packet_id2;
- coded_packet->second_ttl = packet2->header.ttl;
+ coded_packet->second_ttl = packet2->ttl;
coded_packet->second_ttvn = packet2->ttvn;
coded_packet->coded_len = htons(coding_len);
@@ -1452,7 +1452,7 @@ bool batadv_nc_skb_forward(struct sk_buff *skb,
/* We only handle unicast packets */
payload = skb_network_header(skb);
packet = (struct batadv_unicast_packet *)payload;
- if (packet->header.packet_type != BATADV_UNICAST)
+ if (packet->packet_type != BATADV_UNICAST)
goto out;
/* Try to find a coding opportunity and send the skb if one is found */
@@ -1505,7 +1505,7 @@ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
/* Check for supported packet type */
payload = skb_network_header(skb);
packet = (struct batadv_unicast_packet *)payload;
- if (packet->header.packet_type != BATADV_UNICAST)
+ if (packet->packet_type != BATADV_UNICAST)
goto out;
/* Find existing nc_path or create a new */
@@ -1623,7 +1623,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
ttvn = coded_packet_tmp.second_ttvn;
} else {
orig_dest = coded_packet_tmp.first_orig_dest;
- ttl = coded_packet_tmp.header.ttl;
+ ttl = coded_packet_tmp.ttl;
ttvn = coded_packet_tmp.first_ttvn;
}
@@ -1648,9 +1648,9 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
/* Create decoded unicast packet */
unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_packet->header.packet_type = BATADV_UNICAST;
- unicast_packet->header.version = BATADV_COMPAT_VERSION;
- unicast_packet->header.ttl = ttl;
+ unicast_packet->packet_type = BATADV_UNICAST;
+ unicast_packet->version = BATADV_COMPAT_VERSION;
+ unicast_packet->ttl = ttl;
memcpy(unicast_packet->dest, orig_dest, ETH_ALEN);
unicast_packet->ttvn = ttvn;
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 207459b..2dd8f24 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -155,6 +155,7 @@ enum batadv_tvlv_type {
BATADV_TVLV_ROAM = 0x05,
};
+#pragma pack(2)
/* the destination hardware field in the ARP frame is used to
* transport the claim type and the group id
*/
@@ -163,24 +164,20 @@ struct batadv_bla_claim_dst {
uint8_t type; /* bla_claimframe */
__be16 group; /* group id */
};
-
-struct batadv_header {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
- /* the parent struct has to add a byte after the header to make
- * everything 4 bytes aligned again
- */
-};
+#pragma pack()
/**
* struct batadv_ogm_packet - ogm (routing protocol) packet
- * @header: common batman packet header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @flags: contains routing relevant flags - see enum batadv_iv_flags
* @tvlv_len: length of tvlv data following the ogm header
*/
struct batadv_ogm_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
uint8_t flags;
__be32 seqno;
uint8_t orig[ETH_ALEN];
@@ -196,29 +193,51 @@ struct batadv_ogm_packet {
#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
/**
- * batadv_icmp_header - common ICMP header
- * @header: common batman header
+ * batadv_icmp_header - common members among all the ICMP packets
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @msg_type: ICMP packet type
* @dst: address of the destination node
* @orig: address of the source node
* @uid: local ICMP socket identifier
+ * @align: not used - useful for alignment purposes only
+ *
+ * This structure is used for ICMP packet parsing only and it is never sent
+ * over the wire. The alignment field at the end is there to ensure that
+ * members are padded the same way as they are in real packets.
*/
struct batadv_icmp_header {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[ETH_ALEN];
uint8_t orig[ETH_ALEN];
uint8_t uid;
+ uint8_t align[3];
};
/**
* batadv_icmp_packet - ICMP packet
- * @icmph: common ICMP header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
* @reserved: not used - useful for alignment
* @seqno: ICMP sequence number
*/
struct batadv_icmp_packet {
- struct batadv_icmp_header icmph;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
+ uint8_t msg_type; /* see ICMP message types above */
+ uint8_t dst[ETH_ALEN];
+ uint8_t orig[ETH_ALEN];
+ uint8_t uid;
uint8_t reserved;
__be16 seqno;
};
@@ -227,13 +246,25 @@ struct batadv_icmp_packet {
/**
* batadv_icmp_packet_rr - ICMP RouteRecord packet
- * @icmph: common ICMP header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
* @rr_cur: number of entries the rr array
* @seqno: ICMP sequence number
* @rr: route record array
*/
struct batadv_icmp_packet_rr {
- struct batadv_icmp_header icmph;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
+ uint8_t msg_type; /* see ICMP message types above */
+ uint8_t dst[ETH_ALEN];
+ uint8_t orig[ETH_ALEN];
+ uint8_t uid;
uint8_t rr_cur;
__be16 seqno;
uint8_t rr[BATADV_RR_LEN][ETH_ALEN];
@@ -253,8 +284,18 @@ struct batadv_icmp_packet_rr {
*/
#pragma pack(2)
+/**
+ * struct batadv_unicast_packet - unicast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @ttvn: translation table version number
+ * @dest: originator destination of the unicast packet
+ */
struct batadv_unicast_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
uint8_t ttvn; /* destination translation table version number */
uint8_t dest[ETH_ALEN];
/* "4 bytes boundary + 2 bytes" long to make the payload after the
@@ -280,7 +321,9 @@ struct batadv_unicast_4addr_packet {
/**
* struct batadv_frag_packet - fragmented packet
- * @header: common batman packet header with type, compatversion, and ttl
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @dest: final destination used when routing fragments
* @orig: originator of the fragment used when merging the packet
* @no: fragment number within this sequence
@@ -289,7 +332,9 @@ struct batadv_unicast_4addr_packet {
* @total_size: size of the merged packet
*/
struct batadv_frag_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version; /* batman version field */
+ uint8_t ttl;
#if defined(__BIG_ENDIAN_BITFIELD)
uint8_t no:4;
uint8_t reserved:4;
@@ -305,8 +350,19 @@ struct batadv_frag_packet {
__be16 total_size;
};
+/**
+ * struct batadv_bcast_packet - broadcast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @reserved: reserved byte for alignment
+ * @seqno: sequence identification
+ * @orig: originator of the broadcast packet
+ */
struct batadv_bcast_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version; /* batman version field */
+ uint8_t ttl;
uint8_t reserved;
__be32 seqno;
uint8_t orig[ETH_ALEN];
@@ -315,11 +371,11 @@ struct batadv_bcast_packet {
*/
};
-#pragma pack()
-
/**
* struct batadv_coded_packet - network coded packet
- * @header: common batman packet header and ttl of first included packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @reserved: Align following fields to 2-byte boundaries
* @first_source: original source of first included packet
* @first_orig_dest: original destinal of first included packet
@@ -334,7 +390,9 @@ struct batadv_bcast_packet {
* @coded_len: length of network coded part of the payload
*/
struct batadv_coded_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version; /* batman version field */
+ uint8_t ttl;
uint8_t first_ttvn;
/* uint8_t first_dest[ETH_ALEN]; - saved in mac header destination */
uint8_t first_source[ETH_ALEN];
@@ -349,9 +407,13 @@ struct batadv_coded_packet {
__be16 coded_len;
};
+#pragma pack()
+
/**
* struct batadv_unicast_tvlv - generic unicast packet with tvlv payload
- * @header: common batman packet header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @reserved: reserved field (for packet alignment)
* @src: address of the source
* @dst: address of the destination
@@ -359,7 +421,9 @@ struct batadv_coded_packet {
* @align: 2 bytes to align the header to a 4 byte boundry
*/
struct batadv_unicast_tvlv_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version; /* batman version field */
+ uint8_t ttl;
uint8_t reserved;
uint8_t dst[ETH_ALEN];
uint8_t src[ETH_ALEN];
@@ -420,13 +484,13 @@ struct batadv_tvlv_tt_vlan_data {
* struct batadv_tvlv_tt_change - translation table diff data
* @flags: status indicators concerning the non-mesh client (see
* batadv_tt_client_flags)
- * @reserved: reserved field
+ * @reserved: reserved field - useful for alignment purposes only
* @addr: mac address of non-mesh client that triggered this tt change
* @vid: VLAN identifier
*/
struct batadv_tvlv_tt_change {
uint8_t flags;
- uint8_t reserved;
+ uint8_t reserved[3];
uint8_t addr[ETH_ALEN];
__be16 vid;
};
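
Every batman-adv packet struct above now spells out packet_type/version/ttl instead of embedding struct batadv_header, and the #pragma pack(2) region keeps the on-wire layout unchanged (the BUILD_BUG_ON() lines in main.c check the dest/dst offsets for the same reason). The standalone check below (uint8_t stand-ins, no kernel headers) shows how such a flattening can be verified with offsetof() and static asserts.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

#pragma pack(2)
struct unicast_packet {
    uint8_t packet_type;
    uint8_t version;
    uint8_t ttl;
    uint8_t ttvn;
    uint8_t dest[ETH_ALEN];
};
#pragma pack()

/* The flattened header must keep the old wire offsets. */
_Static_assert(offsetof(struct unicast_packet, packet_type) == 0, "type");
_Static_assert(offsetof(struct unicast_packet, version) == 1, "version");
_Static_assert(offsetof(struct unicast_packet, ttl) == 2, "ttl");
_Static_assert(offsetof(struct unicast_packet, dest) == 4, "dest");
_Static_assert(sizeof(struct unicast_packet) == 10, "size");

int main(void)
{
    printf("unicast packet is %zu bytes on the wire\n",
           sizeof(struct unicast_packet));
    return 0;
}
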
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index d4114d7..46278bf 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -308,7 +308,7 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
memcpy(icmph->dst, icmph->orig, ETH_ALEN);
memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
icmph->msg_type = BATADV_ECHO_REPLY;
- icmph->header.ttl = BATADV_TTL;
+ icmph->ttl = BATADV_TTL;
res = batadv_send_skb_to_orig(skb, orig_node, NULL);
if (res != NET_XMIT_DROP)
@@ -338,9 +338,9 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
icmp_packet = (struct batadv_icmp_packet *)skb->data;
/* send TTL exceeded if packet is an echo request (traceroute) */
- if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) {
+ if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
- icmp_packet->icmph.orig, icmp_packet->icmph.dst);
+ icmp_packet->orig, icmp_packet->dst);
goto out;
}
@@ -349,7 +349,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
goto out;
/* get routing information */
- orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig);
+ orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
if (!orig_node)
goto out;
@@ -359,11 +359,11 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
icmp_packet = (struct batadv_icmp_packet *)skb->data;
- memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN);
- memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr,
+ memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
+ memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr,
ETH_ALEN);
- icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED;
- icmp_packet->icmph.header.ttl = BATADV_TTL;
+ icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
+ icmp_packet->ttl = BATADV_TTL;
if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
ret = NET_RX_SUCCESS;
@@ -434,7 +434,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
return batadv_recv_my_icmp_packet(bat_priv, skb);
/* TTL exceeded */
- if (icmph->header.ttl < 2)
+ if (icmph->ttl < 2)
return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
/* get routing information */
@@ -449,7 +449,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
icmph = (struct batadv_icmp_header *)skb->data;
/* decrement ttl */
- icmph->header.ttl--;
+ icmph->ttl--;
/* route it */
if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
@@ -709,7 +709,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* TTL exceeded */
- if (unicast_packet->header.ttl < 2) {
+ if (unicast_packet->ttl < 2) {
pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
ethhdr->h_source, unicast_packet->dest);
goto out;
@@ -727,9 +727,9 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
/* decrement ttl */
unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_packet->header.ttl--;
+ unicast_packet->ttl--;
- switch (unicast_packet->header.packet_type) {
+ switch (unicast_packet->packet_type) {
case BATADV_UNICAST_4ADDR:
hdr_len = sizeof(struct batadv_unicast_4addr_packet);
break;
@@ -970,7 +970,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
unicast_packet = (struct batadv_unicast_packet *)skb->data;
unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
- is4addr = unicast_packet->header.packet_type == BATADV_UNICAST_4ADDR;
+ is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
/* the caller function should have already pulled 2 bytes */
if (is4addr)
hdr_size = sizeof(*unicast_4addr_packet);
@@ -1160,7 +1160,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
goto out;
- if (bcast_packet->header.ttl < 2)
+ if (bcast_packet->ttl < 2)
goto out;
orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index c83be5e..fba4dcf 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -161,11 +161,11 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
return false;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_packet->header.version = BATADV_COMPAT_VERSION;
+ unicast_packet->version = BATADV_COMPAT_VERSION;
/* batman packet type: unicast */
- unicast_packet->header.packet_type = BATADV_UNICAST;
+ unicast_packet->packet_type = BATADV_UNICAST;
/* set unicast ttl */
- unicast_packet->header.ttl = BATADV_TTL;
+ unicast_packet->ttl = BATADV_TTL;
/* copy the destination for faster routing */
memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
/* set the destination tt version number */
@@ -221,7 +221,7 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
goto out;
uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
- uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
+ uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
uc_4addr_packet->subtype = packet_subtype;
uc_4addr_packet->reserved = 0;
@@ -436,7 +436,7 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
/* as we have a copy now, it is safe to decrease the TTL */
bcast_packet = (struct batadv_bcast_packet *)newskb->data;
- bcast_packet->header.ttl--;
+ bcast_packet->ttl--;
skb_reset_mac_header(newskb);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 36f0508..a8f99d1 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -264,11 +264,11 @@ static int batadv_interface_tx(struct sk_buff *skb,
goto dropped;
bcast_packet = (struct batadv_bcast_packet *)skb->data;
- bcast_packet->header.version = BATADV_COMPAT_VERSION;
- bcast_packet->header.ttl = BATADV_TTL;
+ bcast_packet->version = BATADV_COMPAT_VERSION;
+ bcast_packet->ttl = BATADV_TTL;
/* batman packet type: broadcast */
- bcast_packet->header.packet_type = BATADV_BCAST;
+ bcast_packet->packet_type = BATADV_BCAST;
bcast_packet->reserved = 0;
/* hw address of first interface is the orig mac because only
@@ -328,7 +328,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
struct sk_buff *skb, struct batadv_hard_iface *recv_if,
int hdr_size, struct batadv_orig_node *orig_node)
{
- struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
+ struct batadv_bcast_packet *batadv_bcast_packet;
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
__be16 ethertype = htons(ETH_P_BATMAN);
struct vlan_ethhdr *vhdr;
@@ -336,7 +336,8 @@ void batadv_interface_rx(struct net_device *soft_iface,
unsigned short vid;
bool is_bcast;
- is_bcast = (batadv_header->packet_type == BATADV_BCAST);
+ batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
+ is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
/* check if enough space is available for pulling, and pull */
if (!pskb_may_pull(skb, hdr_size))
@@ -345,7 +346,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
skb_pull_rcsum(skb, hdr_size);
skb_reset_mac_header(skb);
- vid = batadv_get_vid(skb, hdr_size);
+ /* clean the netfilter state now that the batman-adv header has been
+ * removed
+ */
+ nf_reset(skb);
+
+ vid = batadv_get_vid(skb, 0);
ethhdr = eth_hdr(skb);
switch (ntohs(ethhdr->h_proto)) {
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 4add57d..ff625fe 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -333,7 +333,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
return;
tt_change_node->change.flags = flags;
- tt_change_node->change.reserved = 0;
+ memset(tt_change_node->change.reserved, 0,
+ sizeof(tt_change_node->change.reserved));
memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
tt_change_node->change.vid = htons(common->vid);
@@ -2221,7 +2222,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
ETH_ALEN);
tt_change->flags = tt_common_entry->flags;
tt_change->vid = htons(tt_common_entry->vid);
- tt_change->reserved = 0;
+ memset(tt_change->reserved, 0,
+ sizeof(tt_change->reserved));
tt_num_entries++;
tt_change++;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 6a6c8bb..7552f9e 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -940,8 +940,22 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
skb_pull(skb, 1);
- if (hci_pi(sk)->channel == HCI_CHANNEL_RAW &&
- bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
+ if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
+ /* No permission check is needed for user channel
+ * since that gets enforced when binding the socket.
+ *
+ * However check that the packet type is valid.
+ */
+ if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
+ bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+ bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+ err = -EINVAL;
+ goto drop;
+ }
+
+ skb_queue_tail(&hdev->raw_q, skb);
+ queue_work(hdev->workqueue, &hdev->tx_work);
+ } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
u16 opcode = get_unaligned_le16(skb->data);
u16 ogf = hci_opcode_ogf(opcode);
u16 ocf = hci_opcode_ocf(opcode);
@@ -972,14 +986,6 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
goto drop;
}
- if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
- bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
- bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
- bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
- err = -EINVAL;
- goto drop;
- }
-
skb_queue_tail(&hdev->raw_q, skb);
queue_work(hdev->workqueue, &hdev->tx_work);
}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 4c214b2..ef66365 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1998,7 +1998,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
u32 old;
struct net_bridge_mdb_htable *mdb;
- spin_lock(&br->multicast_lock);
+ spin_lock_bh(&br->multicast_lock);
if (!netif_running(br->dev))
goto unlock;
@@ -2030,7 +2030,7 @@ rollback:
}
unlock:
- spin_unlock(&br->multicast_lock);
+ spin_unlock_bh(&br->multicast_lock);
return err;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index ba3b7ea..0ce469e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2539,7 +2539,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
- struct netdev_queue *txq, void *accel_priv)
+ struct netdev_queue *txq)
{
const struct net_device_ops *ops = dev->netdev_ops;
int rc = NETDEV_TX_OK;
@@ -2605,13 +2605,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
dev_queue_xmit_nit(skb, dev);
skb_len = skb->len;
- if (accel_priv)
- rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
- else
rc = ops->ndo_start_xmit(skb, dev);
trace_net_dev_xmit(skb, rc, dev, skb_len);
- if (rc == NETDEV_TX_OK && txq)
+ if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
return rc;
}
@@ -2627,10 +2624,7 @@ gso:
dev_queue_xmit_nit(nskb, dev);
skb_len = nskb->len;
- if (accel_priv)
- rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
- else
- rc = ops->ndo_start_xmit(nskb, dev);
+ rc = ops->ndo_start_xmit(nskb, dev);
trace_net_dev_xmit(nskb, rc, dev, skb_len);
if (unlikely(rc != NETDEV_TX_OK)) {
if (rc & ~NETDEV_TX_MASK)
@@ -2811,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
* the BH enable code must have IRQs enabled so that it will not deadlock.
* --BLG
*/
-int dev_queue_xmit(struct sk_buff *skb)
+int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
@@ -2827,7 +2821,7 @@ int dev_queue_xmit(struct sk_buff *skb)
skb_update_prio(skb);
- txq = netdev_pick_tx(dev, skb);
+ txq = netdev_pick_tx(dev, skb, accel_priv);
q = rcu_dereference_bh(txq->qdisc);
#ifdef CONFIG_NET_CLS_ACT
@@ -2863,7 +2857,7 @@ int dev_queue_xmit(struct sk_buff *skb)
if (!netif_xmit_stopped(txq)) {
__this_cpu_inc(xmit_recursion);
- rc = dev_hard_start_xmit(skb, dev, txq, NULL);
+ rc = dev_hard_start_xmit(skb, dev, txq);
__this_cpu_dec(xmit_recursion);
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
@@ -2892,8 +2886,19 @@ out:
rcu_read_unlock_bh();
return rc;
}
+
+int dev_queue_xmit(struct sk_buff *skb)
+{
+ return __dev_queue_xmit(skb, NULL);
+}
EXPORT_SYMBOL(dev_queue_xmit);
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+{
+ return __dev_queue_xmit(skb, accel_priv);
+}
+EXPORT_SYMBOL(dev_queue_xmit_accel);
+
/*=======================================================================
Receiver routines
@@ -4500,7 +4505,7 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
{
struct netdev_adjacent *upper;
- WARN_ON_ONCE(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
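
In the net/core/dev.c hunks above, dev_hard_start_xmit() loses its accel_priv argument and dev_queue_xmit() becomes a thin wrapper around __dev_queue_xmit(skb, NULL), with dev_queue_xmit_accel() exported for the offload path. A minimal standalone sketch of that wrapper pattern (invented names, not the kernel API): one internal implementation takes the extra context argument, and the public entry points stay source-compatible.

#include <stdio.h>

/* single internal implementation with the extra context argument */
static int __queue_xmit(const char *pkt, void *accel_priv)
{
    printf("xmit %s (accel=%p)\n", pkt, accel_priv);
    return 0;
}

int queue_xmit(const char *pkt)
{
    return __queue_xmit(pkt, NULL);
}

int queue_xmit_accel(const char *pkt, void *accel_priv)
{
    return __queue_xmit(pkt, accel_priv);
}

int main(void)
{
    int dummy;

    queue_xmit("plain skb");
    queue_xmit_accel("offloaded skb", &dummy);
    return 0;
}
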
diff --git a/net/core/filter.c b/net/core/filter.c
index 01b7808..ad30d62 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -36,7 +36,6 @@
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
-#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
@@ -166,7 +165,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
A /= X;
continue;
case BPF_S_ALU_DIV_K:
- A = reciprocal_divide(A, K);
+ A /= K;
continue;
case BPF_S_ALU_MOD_X:
if (X == 0)
@@ -553,11 +552,6 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
/* Some instructions need special checks */
switch (code) {
case BPF_S_ALU_DIV_K:
- /* check for division by zero */
- if (ftest->k == 0)
- return -EINVAL;
- ftest->k = reciprocal_value(ftest->k);
- break;
case BPF_S_ALU_MOD_K:
/* check for division by zero */
if (ftest->k == 0)
@@ -853,27 +847,7 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
to->code = decodes[code];
to->jt = filt->jt;
to->jf = filt->jf;
-
- if (code == BPF_S_ALU_DIV_K) {
- /*
- * When loaded this rule user gave us X, which was
- * translated into R = r(X). Now we calculate the
- * RR = r(R) and report it back. If next time this
- * value is loaded and RRR = r(RR) is calculated
- * then the R == RRR will be true.
- *
- * One exception. X == 1 translates into R == 0 and
- * we can't calculate RR out of it with r().
- */
-
- if (filt->k == 0)
- to->k = 1;
- else
- to->k = reciprocal_value(filt->k);
-
- BUG_ON(reciprocal_value(to->k) != filt->k);
- } else
- to->k = filt->k;
+ to->k = filt->k;
}
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
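
The filter hunks drop reciprocal_divide() for BPF_S_ALU_DIV_K and divide by K directly; the multiply-and-shift approximation does not always match true integer division. The userspace check below re-implements the old helpers (formulas as in lib/reciprocal_div at the time of this series, re-typed here, not kernel code) and prints one input where the two results differ by one.

#include <stdint.h>
#include <stdio.h>

/* old-style reciprocal approximation */
static uint32_t reciprocal_value(uint32_t k)
{
    return (uint32_t)(((1ULL << 32) + k - 1) / k);
}

static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
    return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
    uint32_t k = 3;
    uint32_t a = 0x80000000u;   /* 2147483648 */
    uint32_t approx = reciprocal_divide(a, reciprocal_value(k));
    uint32_t exact = a / k;

    printf("A/K exact=%u approx=%u%s\n", exact, approx,
           exact == approx ? "" : "  <-- off by one");
    return 0;
}
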
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d6ef173..2fc5bea 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -395,17 +395,21 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
EXPORT_SYMBOL(__netdev_pick_tx);
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ void *accel_priv)
{
int queue_index = 0;
if (dev->real_num_tx_queues != 1) {
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_select_queue)
- queue_index = ops->ndo_select_queue(dev, skb);
+ queue_index = ops->ndo_select_queue(dev, skb,
+ accel_priv);
else
queue_index = __netdev_pick_tx(dev, skb);
- queue_index = dev_cap_txqueue(dev, queue_index);
+
+ if (!accel_priv)
+ queue_index = dev_cap_txqueue(dev, queue_index);
}
skb_set_queue_mapping(skb, queue_index);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 36b1443..932c6d7 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1275,7 +1275,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
skb->len) < 0 &&
- dev->header_ops->rebuild(skb))
+ dev_rebuild_header(skb))
return 0;
return dev_queue_xmit(skb);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 8f97199..19fe9c7 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -375,7 +375,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
struct netdev_queue *txq;
- txq = netdev_pick_tx(dev, skb);
+ txq = netdev_pick_tx(dev, skb, NULL);
/* try until next clock tick */
for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
@@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
!vlan_hw_offload_capable(netif_skb_features(skb),
skb->vlan_proto)) {
skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
- if (unlikely(!skb))
- break;
+ if (unlikely(!skb)) {
+ /* This is actually a packet drop, but we
+ * don't want the code at the end of this
+ * function to try and re-queue a NULL skb.
+ */
+ status = NETDEV_TX_OK;
+ goto unlock_txq;
+ }
skb->vlan_tci = 0;
}
@@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
if (status == NETDEV_TX_OK)
txq_trans_update(txq);
}
+ unlock_txq:
__netif_tx_unlock(txq);
if (status == NETDEV_TX_OK)
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 4c6bdf9..595ddf0 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -152,17 +152,6 @@ static const struct file_operations dccpprobe_fops = {
.llseek = noop_llseek,
};
-static __init int setup_jprobe(void)
-{
- int ret = register_jprobe(&dccp_send_probe);
-
- if (ret) {
- request_module("dccp");
- ret = register_jprobe(&dccp_send_probe);
- }
- return ret;
-}
-
static __init int dccpprobe_init(void)
{
int ret = -ENOMEM;
@@ -174,7 +163,13 @@ static __init int dccpprobe_init(void)
if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops))
goto err0;
- ret = setup_jprobe();
+ ret = register_jprobe(&dccp_send_probe);
+ if (ret) {
+ ret = request_module("dccp");
+ if (!ret)
+ ret = register_jprobe(&dccp_send_probe);
+ }
+
if (ret)
goto err1;
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 459e200..a2d2456 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -547,7 +547,7 @@ static int lowpan_header_create(struct sk_buff *skb,
hc06_ptr += 3;
} else {
/* compress nothing */
- memcpy(hc06_ptr, &hdr, 4);
+ memcpy(hc06_ptr, hdr, 4);
/* replace the top byte with new ECN | DSCP format */
*hc06_ptr = tmp;
hc06_ptr += 4;
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index d08c7a4..89b265a 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -221,8 +221,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
- if (type >= __IEEE802154_DEV_MAX)
- return -EINVAL;
+ if (type >= __IEEE802154_DEV_MAX) {
+ rc = -EINVAL;
+ goto nla_put_failure;
+ }
}
dev = phy->add_iface(phy, devname, type);
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index e5d4361..2cd02f3 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -28,6 +28,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
netdev_features_t enc_features;
int ghl = GRE_HEADER_SECTION;
struct gre_base_hdr *greh;
+ u16 mac_offset = skb->mac_header;
int mac_len = skb->mac_len;
__be16 protocol = skb->protocol;
int tnl_hlen;
@@ -58,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
} else
csum = false;
+ if (unlikely(!pskb_may_pull(skb, ghl)))
+ goto out;
+
/* setup inner skb. */
skb->protocol = greh->protocol;
skb->encapsulation = 0;
- if (unlikely(!pskb_may_pull(skb, ghl)))
- goto out;
-
__skb_pull(skb, ghl);
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb_inner_network_offset(skb));
@@ -73,8 +74,10 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
/* segment inner packet. */
enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
segs = skb_mac_gso_segment(skb, enc_features);
- if (!segs || IS_ERR(segs))
+ if (!segs || IS_ERR(segs)) {
+ skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
goto out;
+ }
skb = segs;
tnl_hlen = skb_tnl_header_len(skb);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 56a964a..e34dccb 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->id.idiag_sport = inet->inet_sport;
r->id.idiag_dport = inet->inet_dport;
+
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
r->id.idiag_src[0] = inet->inet_rcv_saddr;
r->id.idiag_dst[0] = inet->inet_daddr;
@@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
r->idiag_family = tw->tw_family;
r->idiag_retrans = 0;
+
r->id.idiag_if = tw->tw_bound_dev_if;
sock_diag_save_cookie(tw, r->id.idiag_cookie);
+
r->id.idiag_sport = tw->tw_sport;
r->id.idiag_dport = tw->tw_dport;
+
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
r->id.idiag_src[0] = tw->tw_rcv_saddr;
r->id.idiag_dst[0] = tw->tw_daddr;
+
r->idiag_state = tw->tw_substate;
r->idiag_timer = 3;
r->idiag_expires = jiffies_to_msecs(tmo);
@@ -726,8 +737,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
r->id.idiag_sport = inet->inet_sport;
r->id.idiag_dport = ireq->ir_rmt_port;
+
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
r->id.idiag_src[0] = ireq->ir_loc_addr;
r->id.idiag_dst[0] = ireq->ir_rmt_addr;
+
r->idiag_expires = jiffies_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
@@ -914,12 +930,15 @@ skip_listen_ht:
spin_lock_bh(lock);
sk_nulls_for_each(sk, node, &head->chain) {
int res;
+ int state;
if (!net_eq(sock_net(sk), net))
continue;
if (num < s_num)
goto next_normal;
- if (!(r->idiag_states & (1 << sk->sk_state)))
+ state = (sk->sk_state == TCP_TIME_WAIT) ?
+ inet_twsk(sk)->tw_substate : sk->sk_state;
+ if (!(r->idiag_states & (1 << state)))
goto next_normal;
if (r->sdiag_family != AF_UNSPEC &&
sk->sk_family != r->sdiag_family)
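
The inet_diag hunks zero idiag_src/idiag_dst before filling in only the IPv4 word, so the unused bytes of the netlink reply never carry stale kernel memory. The pattern in isolation (plain C, invented struct, not the uapi layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct diag_id {
    uint32_t src[4];    /* sized for IPv6; IPv4 uses src[0] only */
    uint32_t dst[4];
};

static void fill_ipv4(struct diag_id *id, uint32_t saddr, uint32_t daddr)
{
    /* clear everything first so the IPv6 tail cannot leak old data */
    memset(id->src, 0, sizeof(id->src));
    memset(id->dst, 0, sizeof(id->dst));
    id->src[0] = saddr;
    id->dst[0] = daddr;
}

int main(void)
{
    struct diag_id id;

    fill_ipv4(&id, 0x0100007f, 0x0101a8c0);  /* 127.0.0.1 / 192.168.1.1 (net order, LE host) */
    printf("src[1..3] = %u %u %u (must be zero)\n",
           id.src[1], id.src[2], id.src[3]);
    return 0;
}
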
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d7aea4c..e560ef3 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -217,6 +217,7 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
iph->saddr, iph->daddr, tpi->key);
if (tunnel) {
+ skb_pop_mac_header(skb);
ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
return PACKET_RCVD;
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9124027..df18461 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -828,7 +828,7 @@ static int __ip_append_data(struct sock *sk,
if (cork->length + length > maxnonfragsize - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
- mtu-exthdrlen);
+ mtu - (opt ? opt->optlen : 0));
return -EMSGSIZE;
}
@@ -1151,7 +1151,8 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
mtu : 0xFFFF;
if (cork->length + size > maxnonfragsize - fragheaderlen) {
- ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
+ ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
+ mtu - (opt ? opt->optlen : 0));
return -EMSGSIZE;
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 62212c7..1672409 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -157,9 +157,12 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
struct mr_table **mrt)
{
- struct ipmr_result res;
- struct fib_lookup_arg arg = { .result = &res, };
int err;
+ struct ipmr_result res;
+ struct fib_lookup_arg arg = {
+ .result = &res,
+ .flags = FIB_LOOKUP_NOREF,
+ };
err = fib_rules_lookup(net->ipv4.mr_rules_ops,
flowi4_to_flowi(flp4), 0, &arg);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c4638e6..82de786 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1623,11 +1623,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
(len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
!sysctl_tcp_low_latency &&
net_dma_find_channel()) {
- preempt_enable_no_resched();
+ preempt_enable();
tp->ucopy.pinned_list =
dma_pin_iovec_pages(msg->msg_iov, len);
} else {
- preempt_enable_no_resched();
+ preempt_enable();
}
}
#endif
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 0649373..098b3a2 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -22,6 +22,9 @@
int sysctl_tcp_nometrics_save __read_mostly;
+static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
+ struct net *net, unsigned int hash);
+
struct tcp_fastopen_metrics {
u16 mss;
u16 syn_loss:10; /* Recurring Fast Open SYN losses */
@@ -130,16 +133,41 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
}
}
+#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
+
+static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
+{
+ if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
+ tcpm_suck_dst(tm, dst, false);
+}
+
+#define TCP_METRICS_RECLAIM_DEPTH 5
+#define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
+
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
struct inetpeer_addr *addr,
- unsigned int hash,
- bool reclaim)
+ unsigned int hash)
{
struct tcp_metrics_block *tm;
struct net *net;
+ bool reclaim = false;
spin_lock_bh(&tcp_metrics_lock);
net = dev_net(dst->dev);
+
+ /* While waiting for the spin-lock the cache might have been populated
+ * with this entry and so we have to check again.
+ */
+ tm = __tcp_get_metrics(addr, net, hash);
+ if (tm == TCP_METRICS_RECLAIM_PTR) {
+ reclaim = true;
+ tm = NULL;
+ }
+ if (tm) {
+ tcpm_check_stamp(tm, dst);
+ goto out_unlock;
+ }
+
if (unlikely(reclaim)) {
struct tcp_metrics_block *oldest;
@@ -169,17 +197,6 @@ out_unlock:
return tm;
}
-#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
-
-static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
-{
- if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
- tcpm_suck_dst(tm, dst, false);
-}
-
-#define TCP_METRICS_RECLAIM_DEPTH 5
-#define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
-
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
if (tm)
@@ -282,7 +299,6 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
struct inetpeer_addr addr;
unsigned int hash;
struct net *net;
- bool reclaim;
addr.family = sk->sk_family;
switch (addr.family) {
@@ -304,13 +320,10 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
tm = __tcp_get_metrics(&addr, net, hash);
- reclaim = false;
- if (tm == TCP_METRICS_RECLAIM_PTR) {
- reclaim = true;
+ if (tm == TCP_METRICS_RECLAIM_PTR)
tm = NULL;
- }
if (!tm && create)
- tm = tcpm_new(dst, &addr, hash, reclaim);
+ tm = tcpm_new(dst, &addr, hash);
else
tcpm_check_stamp(tm, dst);
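
tcpm_new() now decides about reclaim itself and, once tcp_metrics_lock is held, looks the entry up a second time, since another CPU may have created it while this one waited for the lock. A hedged pthread sketch of that look-up, lock, re-check shape (hypothetical table; the kernel's first pass is RCU-protected rather than plain):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { char key[32]; struct entry *next; };

static struct entry *head;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *lookup(const char *key)
{
    struct entry *e;

    for (e = head; e; e = e->next)
        if (strcmp(e->key, key) == 0)
            return e;
    return NULL;
}

/* Return the entry for @key, creating it if needed, without ever inserting
 * a duplicate: the lookup is repeated under the lock because another thread
 * may have inserted the entry while we were waiting. */
static struct entry *get_or_create(const char *key)
{
    struct entry *e = lookup(key);  /* optimistic first pass */

    if (e)
        return e;

    pthread_mutex_lock(&table_lock);
    e = lookup(key);                /* re-check under the lock */
    if (!e) {
        e = calloc(1, sizeof(*e));
        if (e) {
            strncpy(e->key, key, sizeof(e->key) - 1);
            e->next = head;
            head = e;
        }
    }
    pthread_mutex_unlock(&table_lock);
    return e;
}

int main(void)
{
    printf("first:  %p\n", (void *)get_or_create("192.0.2.1"));
    printf("second: %p\n", (void *)get_or_create("192.0.2.1"));  /* same entry */
    return 0;
}
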
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f140048..a7e4729 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2478,6 +2478,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
+ u16 mac_offset = skb->mac_header;
int mac_len = skb->mac_len;
int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
__be16 protocol = skb->protocol;
@@ -2497,8 +2498,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
/* segment inner packet. */
enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
segs = skb_mac_gso_segment(skb, enc_features);
- if (!segs || IS_ERR(segs))
+ if (!segs || IS_ERR(segs)) {
+ skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
+ mac_len);
goto out;
+ }
outer_hlen = skb_tnl_header_len(skb);
skb = segs;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 83206de..79c62bd 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -41,6 +41,14 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
+ int offset;
+ __wsum csum;
+
+ if (skb->encapsulation &&
+ skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
+ segs = skb_udp_tunnel_segment(skb, features);
+ goto out;
+ }
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
@@ -63,27 +71,20 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
goto out;
}
+ /* Do software UFO. Complete and fill in the UDP checksum as
+ * HW cannot do checksum of UDP packets sent as multiple
+ * IP fragments.
+ */
+ offset = skb_checksum_start_offset(skb);
+ csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ offset += skb->csum_offset;
+ *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+ skb->ip_summed = CHECKSUM_NONE;
+
/* Fragment the skb. IP headers of the fragments are updated in
* inet_gso_segment()
*/
- if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
- segs = skb_udp_tunnel_segment(skb, features);
- else {
- int offset;
- __wsum csum;
-
- /* Do software UFO. Complete and fill in the UDP checksum as
- * HW cannot do checksum of UDP packets sent as multiple
- * IP fragments.
- */
- offset = skb_checksum_start_offset(skb);
- csum = skb_checksum(skb, offset, skb->len - offset, 0);
- offset += skb->csum_offset;
- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
- skb->ip_summed = CHECKSUM_NONE;
-
- segs = skb_segment(skb, features);
- }
+ segs = skb_segment(skb, features);
out:
return segs;
}
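
With the tunnel check hoisted to the top, udp4_ufo_fragment() now always completes the UDP checksum in software before fragmenting, since hardware cannot checksum a datagram that leaves as multiple IP fragments. The kernel uses skb_checksum() and csum_fold() for this; a hedged userspace sketch of the same fold-and-complement arithmetic (standard Internet checksum, not the kernel helpers):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Sum 16-bit words, fold the carries back into the low 16 bits, then
 * complement -- the same arithmetic csum_fold() applies to the running sum. */
static uint16_t inet_checksum(const uint8_t *data, size_t len)
{
    uint32_t sum = 0;

    while (len > 1) {
        sum += (uint32_t)data[0] << 8 | data[1];
        data += 2;
        len -= 2;
    }
    if (len)                        /* odd trailing byte */
        sum += (uint32_t)data[0] << 8;

    while (sum >> 16)               /* fold carries */
        sum = (sum & 0xffff) + (sum >> 16);

    return (uint16_t)~sum;
}

int main(void)
{
    const uint8_t payload[] = { 0x45, 0x00, 0x00, 0x1c, 0x12, 0x34 };

    printf("checksum: 0x%04x\n", inet_checksum(payload, sizeof(payload)));
    return 0;
}
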
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d5fa5b8..4b6b720 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1671,7 +1671,7 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
- if (ifp->prefix_len == 127) /* RFC 6164 */
+ if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
if (ipv6_addr_any(&addr))
@@ -1682,7 +1682,7 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
- if (ifp->prefix_len == 127) /* RFC 6164 */
+ if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
if (ipv6_addr_any(&addr))
@@ -2509,7 +2509,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
struct inet6_ifaddr *ifp;
ifp = ipv6_add_addr(idev, addr, NULL, plen,
- scope, IFA_F_PERMANENT, 0, 0);
+ scope, IFA_F_PERMANENT,
+ INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
if (!IS_ERR(ifp)) {
spin_lock_bh(&ifp->lock);
ifp->flags &= ~IFA_F_TENTATIVE;
@@ -2637,7 +2638,8 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
#endif
- ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0);
+ ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
+ INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
if (!IS_ERR(ifp)) {
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
addrconf_dad_start(ifp);
@@ -3187,6 +3189,22 @@ out:
in6_ifa_put(ifp);
}
+/* ifp->idev must be at least read locked */
+static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
+{
+ struct inet6_ifaddr *ifpiter;
+ struct inet6_dev *idev = ifp->idev;
+
+ list_for_each_entry(ifpiter, &idev->addr_list, if_list) {
+ if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
+ (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
+ IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
+ IFA_F_PERMANENT)
+ return false;
+ }
+ return true;
+}
+
static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
{
struct net_device *dev = ifp->idev->dev;
@@ -3206,14 +3224,11 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
*/
read_lock_bh(&ifp->idev->lock);
- spin_lock(&ifp->lock);
- send_mld = ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL &&
- ifp->idev->valid_ll_addr_cnt == 1;
+ send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
send_rs = send_mld &&
ipv6_accept_ra(ifp->idev) &&
ifp->idev->cnf.rtr_solicits > 0 &&
(dev->flags&IFF_LOOPBACK) == 0;
- spin_unlock(&ifp->lock);
read_unlock_bh(&ifp->idev->lock);
/* While dad is in progress mld report's source address is in6_addrany.
@@ -3456,7 +3471,12 @@ restart:
&inet6_addr_lst[i], addr_lst) {
unsigned long age;
- if (ifp->flags & IFA_F_PERMANENT)
+ /* When preferred_lft is set to a value other than zero or
+ * infinity while valid_lft is infinity, an IFA_F_PERMANENT
+ * address still has a finite preferred lifetime and must be aged.

+ */
+ if ((ifp->flags & IFA_F_PERMANENT) &&
+ (ifp->prefered_lft == INFINITY_LIFE_TIME))
continue;
spin_lock(&ifp->lock);
@@ -3481,7 +3501,8 @@ restart:
ifp->flags |= IFA_F_DEPRECATED;
}
- if (time_before(ifp->tstamp + ifp->valid_lft * HZ, next))
+ if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
+ (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
next = ifp->tstamp + ifp->valid_lft * HZ;
spin_unlock(&ifp->lock);
@@ -3761,7 +3782,8 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
ifa->idev->dev->ifindex);
- if (!(ifa->flags&IFA_F_PERMANENT)) {
+ if (!((ifa->flags&IFA_F_PERMANENT) &&
+ (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
preferred = ifa->prefered_lft;
valid = ifa->valid_lft;
if (preferred != INFINITY_LIFE_TIME) {
@@ -4503,19 +4525,6 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
}
-static void update_valid_ll_addr_cnt(struct inet6_ifaddr *ifp, int count)
-{
- write_lock_bh(&ifp->idev->lock);
- spin_lock(&ifp->lock);
- if (((ifp->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|
- IFA_F_DADFAILED)) == IFA_F_PERMANENT) &&
- (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL))
- ifp->idev->valid_ll_addr_cnt += count;
- WARN_ON(ifp->idev->valid_ll_addr_cnt < 0);
- spin_unlock(&ifp->lock);
- write_unlock_bh(&ifp->idev->lock);
-}
-
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
{
struct net *net = dev_net(ifp->idev->dev);
@@ -4524,8 +4533,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
switch (event) {
case RTM_NEWADDR:
- update_valid_ll_addr_cnt(ifp, 1);
-
/*
* If the address was optimistic
* we inserted the route at the start of
@@ -4541,8 +4548,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
ifp->idev->dev, 0, 0);
break;
case RTM_DELADDR:
- update_valid_ll_addr_cnt(ifp, -1);
-
if (ifp->idev->cnf.forwarding)
addrconf_leave_anycast(ifp);
addrconf_leave_solict(ifp->idev, &ifp->addr);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 4acdb63..e6f9319 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1193,11 +1193,35 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
(opt ? opt->opt_nflen : 0);
- maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
+ maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
+ sizeof(struct frag_hdr);
if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
- if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
- ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
+ unsigned int maxnonfragsize, headersize;
+
+ headersize = sizeof(struct ipv6hdr) +
+ (opt ? opt->tot_len : 0) +
+ (dst_allfrag(&rt->dst) ?
+ sizeof(struct frag_hdr) : 0) +
+ rt->rt6i_nfheader_len;
+
+ maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ?
+ mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
+
+ /* dontfrag active */
+ if ((cork->length + length > mtu - headersize) && dontfrag &&
+ (sk->sk_protocol == IPPROTO_UDP ||
+ sk->sk_protocol == IPPROTO_RAW)) {
+ ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
+ sizeof(struct ipv6hdr));
+ goto emsgsize;
+ }
+
+ if (cork->length + length > maxnonfragsize - headersize) {
+emsgsize:
+ ipv6_local_error(sk, EMSGSIZE, fl6,
+ mtu - headersize +
+ sizeof(struct ipv6hdr));
return -EMSGSIZE;
}
}
@@ -1222,12 +1246,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
* --yoshfuji
*/
- if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
- sk->sk_protocol == IPPROTO_RAW)) {
- ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
- return -EMSGSIZE;
- }
-
skb = skb_peek_tail(&sk->sk_write_queue);
cork->length += length;
if (((length > mtu) ||
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index d606232..7881965 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -103,16 +103,25 @@ struct ip6_tnl_net {
static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
- struct pcpu_tstats sum = { 0 };
+ struct pcpu_tstats tmp, sum = { 0 };
int i;
for_each_possible_cpu(i) {
+ unsigned int start;
const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
- sum.rx_packets += tstats->rx_packets;
- sum.rx_bytes += tstats->rx_bytes;
- sum.tx_packets += tstats->tx_packets;
- sum.tx_bytes += tstats->tx_bytes;
+ do {
+ start = u64_stats_fetch_begin_bh(&tstats->syncp);
+ tmp.rx_packets = tstats->rx_packets;
+ tmp.rx_bytes = tstats->rx_bytes;
+ tmp.tx_packets = tstats->tx_packets;
+ tmp.tx_bytes = tstats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+ sum.rx_packets += tmp.rx_packets;
+ sum.rx_bytes += tmp.rx_bytes;
+ sum.tx_packets += tmp.tx_packets;
+ sum.tx_bytes += tmp.tx_bytes;
}
dev->stats.rx_packets = sum.rx_packets;
dev->stats.rx_bytes = sum.rx_bytes;
@@ -824,8 +833,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
}
tstats = this_cpu_ptr(t->dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
netif_rx(skb);
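
The tunnel drivers above now bracket their 64-bit per-cpu counter updates with u64_stats_update_begin()/_end() and make ip6_get_stats() retry with u64_stats_fetch_begin_bh()/_retry_bh(), so 32-bit readers never observe a half-updated value. A hedged, single-threaded sketch of the underlying sequence-counter protocol (the real helpers add the memory barriers and per-cpu placement this demo omits):

#include <stdint.h>
#include <stdio.h>

struct stats {
    unsigned int seq;       /* even = stable, odd = writer in progress */
    uint64_t rx_packets;
    uint64_t rx_bytes;
};

/* Writer side: bracket the 64-bit updates the way
 * u64_stats_update_begin()/_end() do in the hunks above. */
static void stats_add(struct stats *s, uint64_t bytes)
{
    s->seq++;               /* odd: readers will retry */
    s->rx_packets++;
    s->rx_bytes += bytes;
    s->seq++;               /* even again: snapshot is stable */
}

/* Reader side: retry until a consistent even sequence is seen,
 * mirroring u64_stats_fetch_begin_bh()/_retry_bh(). */
static void stats_read(const struct stats *s, uint64_t *pk, uint64_t *by)
{
    unsigned int start;

    do {
        start = s->seq;
        *pk = s->rx_packets;
        *by = s->rx_bytes;
    } while ((start & 1) || start != s->seq);
}

int main(void)
{
    struct stats s = { 0 };
    uint64_t pk, by;

    stats_add(&s, 1500);
    stats_add(&s, 40);
    stats_read(&s, &pk, &by);
    printf("%llu packets, %llu bytes\n",
           (unsigned long long)pk, (unsigned long long)by);
    return 0;
}
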
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index ed94ba6..7b42d5e 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -75,26 +75,6 @@ struct vti6_net {
struct ip6_tnl __rcu **tnls[2];
};
-static struct net_device_stats *vti6_get_stats(struct net_device *dev)
-{
- struct pcpu_tstats sum = { 0 };
- int i;
-
- for_each_possible_cpu(i) {
- const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
-
- sum.rx_packets += tstats->rx_packets;
- sum.rx_bytes += tstats->rx_bytes;
- sum.tx_packets += tstats->tx_packets;
- sum.tx_bytes += tstats->tx_bytes;
- }
- dev->stats.rx_packets = sum.rx_packets;
- dev->stats.rx_bytes = sum.rx_bytes;
- dev->stats.tx_packets = sum.tx_packets;
- dev->stats.tx_bytes = sum.tx_bytes;
- return &dev->stats;
-}
-
#define for_each_vti6_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
@@ -331,8 +311,10 @@ static int vti6_rcv(struct sk_buff *skb)
}
tstats = this_cpu_ptr(t->dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
skb->mark = 0;
secpath_reset(skb);
@@ -716,7 +698,7 @@ static const struct net_device_ops vti6_netdev_ops = {
.ndo_start_xmit = vti6_tnl_xmit,
.ndo_do_ioctl = vti6_ioctl,
.ndo_change_mtu = vti6_change_mtu,
- .ndo_get_stats = vti6_get_stats,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
};
/**
@@ -750,12 +732,18 @@ static void vti6_dev_setup(struct net_device *dev)
static inline int vti6_dev_init_gen(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
+ int i;
t->dev = dev;
t->net = dev_net(dev);
dev->tstats = alloc_percpu(struct pcpu_tstats);
if (!dev->tstats)
return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct pcpu_tstats *stats;
+ stats = per_cpu_ptr(dev->tstats, i);
+ u64_stats_init(&stats->syncp);
+ }
return 0;
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index f365310..0eb4038 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -141,9 +141,12 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
struct mr6_table **mrt)
{
- struct ip6mr_result res;
- struct fib_lookup_arg arg = { .result = &res, };
int err;
+ struct ip6mr_result res;
+ struct fib_lookup_arg arg = {
+ .result = &res,
+ .flags = FIB_LOOKUP_NOREF,
+ };
err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
flowi6_to_flowi(flp6), 0, &arg);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a0a48ac..4b4944c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1905,9 +1905,7 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
else
rt->rt6i_gateway = *dest;
rt->rt6i_flags = ort->rt6i_flags;
- if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
- (RTF_DEFAULT | RTF_ADDRCONF))
- rt6_set_from(rt, ort);
+ rt6_set_from(rt, ort);
rt->rt6i_metric = 0;
#ifdef CONFIG_IPV6_SUBTREES
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 366fbba..d3005b3 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -702,8 +702,10 @@ static int ipip6_rcv(struct sk_buff *skb)
}
tstats = this_cpu_ptr(tunnel->dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
netif_rx(skb);
@@ -924,7 +926,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
if (tunnel->parms.iph.daddr && skb_dst(skb))
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
- if (skb->len > mtu) {
+ if (skb->len > mtu && !skb_is_gso(skb)) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ip_rt_put(rt);
goto tx_error;
@@ -966,8 +968,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
- if (IS_ERR(skb))
+ if (IS_ERR(skb)) {
+ ip_rt_put(rt);
goto out;
+ }
err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
ttl, df, !net_eq(tunnel->net, dev_net(dev)));
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 7b01b9f..c71b699 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
unsigned long cpu_flags;
size_t copied = 0;
u32 peek_seq = 0;
- u32 *seq;
+ u32 *seq, skb_len;
unsigned long used;
int target; /* Read at least this many bytes */
long timeo;
@@ -812,6 +812,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
}
continue;
found_ok_skb:
+ skb_len = skb->len;
/* Ok so how much can we use? */
used = skb->len - offset;
if (len < used)
@@ -844,7 +845,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
}
/* Partial read */
- if (used + offset < skb->len)
+ if (used + offset < skb_len)
continue;
} while (len > 0);
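
The af_llc fix snapshots skb->len into skb_len at found_ok_skb, because the skb can already have been consumed and freed by the time the partial-read test runs; the saved value is then used instead of skb->len. A hedged sketch of snapshotting a field before an operation that may free its owner (hypothetical names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct packet {
    size_t len;
    char   data[64];
};

/* Copies out up to @want bytes and frees the packet once fully consumed. */
static size_t consume(struct packet *pkt, char *out, size_t want)
{
    size_t n = want < pkt->len ? want : pkt->len;

    memcpy(out, pkt->data, n);
    if (n == pkt->len)
        free(pkt);              /* the packet is gone after this */
    return n;
}

int main(void)
{
    struct packet *pkt = malloc(sizeof(*pkt));
    char out[64];
    size_t pkt_len, used;

    pkt->len = 5;
    memcpy(pkt->data, "hello", 5);

    /* Snapshot the length first -- testing "used < pkt->len" after
     * consume() would be a use-after-free, just like the old
     * "used + offset < skb->len" check in llc_ui_recvmsg(). */
    pkt_len = pkt->len;
    used = consume(pkt, out, sizeof(out));

    if (used < pkt_len)
        printf("partial read\n");
    else
        printf("complete read (%zu bytes)\n", used);
    return 0;
}
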
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 36c3a4c..a075791 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1061,7 +1061,8 @@ static void ieee80211_uninit(struct net_device *dev)
}
static u16 ieee80211_netdev_select_queue(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ void *accel_priv)
{
return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
}
@@ -1078,7 +1079,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
};
static u16 ieee80211_monitor_select_queue(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ void *accel_priv)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c558b24..ca7fa7f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -463,7 +463,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
{
struct sta_info *sta = tx->sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
struct ieee80211_local *local = tx->local;
if (unlikely(!sta))
@@ -474,15 +473,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
!(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
int ac = skb_get_queue_mapping(tx->skb);
- /* only deauth, disassoc and action are bufferable MMPDUs */
- if (ieee80211_is_mgmt(hdr->frame_control) &&
- !ieee80211_is_deauth(hdr->frame_control) &&
- !ieee80211_is_disassoc(hdr->frame_control) &&
- !ieee80211_is_action(hdr->frame_control)) {
- info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
- return TX_CONTINUE;
- }
-
ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
sta->sta.addr, sta->sta.aid, ac);
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -525,9 +515,22 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
+
if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
return TX_CONTINUE;
+ /* only deauth, disassoc and action are bufferable MMPDUs */
+ if (ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_deauth(hdr->frame_control) &&
+ !ieee80211_is_disassoc(hdr->frame_control) &&
+ !ieee80211_is_action(hdr->frame_control)) {
+ if (tx->flags & IEEE80211_TX_UNICAST)
+ info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+ return TX_CONTINUE;
+ }
+
if (tx->flags & IEEE80211_TX_UNICAST)
return ieee80211_tx_h_unicast_ps_buf(tx);
else
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index c8beafd..5a355a4 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -63,6 +63,7 @@
#include <net/ip_vs.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
@@ -97,6 +98,11 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
return;
+ /* Applications may adjust TCP seqs */
+ if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP &&
+ !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct))
+ return;
+
/*
* The connection is not yet in the hashtable, so we update it.
* CIP->VIP will remain the same, so leave the tuple in
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index 17c1bcb..f6e2ae9 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -36,6 +36,11 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
if (off == 0)
return 0;
+ if (unlikely(!seqadj)) {
+ WARN_ONCE(1, "Missing nfct_seqadj_ext_add() setup call\n");
+ return 0;
+ }
+
set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
spin_lock_bh(&ct->lock);
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index 902fb0a..7a394df 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -97,7 +97,6 @@ int nf_conntrack_tstamp_pernet_init(struct net *net)
void nf_conntrack_tstamp_pernet_fini(struct net *net)
{
nf_conntrack_tstamp_fini_sysctl(net);
- nf_ct_extend_unregister(&tstamp_extend);
}
int nf_conntrack_tstamp_init(void)
diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c
index f02b360..1fb2258 100644
--- a/net/netfilter/nf_nat_irc.c
+++ b/net/netfilter/nf_nat_irc.c
@@ -34,10 +34,14 @@ static unsigned int help(struct sk_buff *skb,
struct nf_conntrack_expect *exp)
{
char buffer[sizeof("4294967296 65635")];
+ struct nf_conn *ct = exp->master;
+ union nf_inet_addr newaddr;
u_int16_t port;
unsigned int ret;
/* Reply comes from server. */
+ newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
+
exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
exp->dir = IP_CT_DIR_REPLY;
exp->expectfn = nf_nat_follow_master;
@@ -57,17 +61,35 @@ static unsigned int help(struct sk_buff *skb,
}
if (port == 0) {
- nf_ct_helper_log(skb, exp->master, "all ports in use");
+ nf_ct_helper_log(skb, ct, "all ports in use");
return NF_DROP;
}
- ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
- protoff, matchoff, matchlen, buffer,
- strlen(buffer));
+ /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27
+ * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28
+ * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26
+ * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26
+ * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27
+ *
+ * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits,
+ * 255.255.255.255==4294967295, 10 digits)
+ * P: bound port (min 1 digit, max 5 digits (65535))
+ * F: filename (min 1 d )
+ * S: size (min 1 d )
+ * 0x01, \n: terminators
+ */
+ /* AAA = "us", ie. where server normally talks to. */
+ snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port);
+ pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
+ buffer, &newaddr.ip, port);
+
+ ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
+ matchlen, buffer, strlen(buffer));
if (ret != NF_ACCEPT) {
- nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
+ nf_ct_helper_log(skb, ct, "cannot mangle packet");
nf_ct_unexpect_related(exp);
}
+
return ret;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index f93b7d0..71a9f49 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -312,6 +312,9 @@ static int nf_tables_table_enable(struct nft_table *table)
int err, i = 0;
list_for_each_entry(chain, &table->chains, list) {
+ if (!(chain->flags & NFT_BASE_CHAIN))
+ continue;
+
err = nf_register_hook(&nft_base_chain(chain)->ops);
if (err < 0)
goto err;
@@ -321,6 +324,9 @@ static int nf_tables_table_enable(struct nft_table *table)
return 0;
err:
list_for_each_entry(chain, &table->chains, list) {
+ if (!(chain->flags & NFT_BASE_CHAIN))
+ continue;
+
if (i-- <= 0)
break;
@@ -333,8 +339,10 @@ static int nf_tables_table_disable(struct nft_table *table)
{
struct nft_chain *chain;
- list_for_each_entry(chain, &table->chains, list)
- nf_unregister_hook(&nft_base_chain(chain)->ops);
+ list_for_each_entry(chain, &table->chains, list) {
+ if (chain->flags & NFT_BASE_CHAIN)
+ nf_unregister_hook(&nft_base_chain(chain)->ops);
+ }
return 0;
}
@@ -2098,17 +2106,21 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nft_set *set;
- unsigned int idx = 0, s_idx = cb->args[0];
+ unsigned int idx, s_idx = cb->args[0];
struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
if (cb->args[1])
return skb->len;
list_for_each_entry(table, &ctx->afi->tables, list) {
- if (cur_table && cur_table != table)
- continue;
+ if (cur_table) {
+ if (cur_table != table)
+ continue;
+ cur_table = NULL;
+ }
ctx->table = table;
+ idx = 0;
list_for_each_entry(set, &ctx->table->sets, list) {
if (idx < s_idx)
goto cont;
@@ -2370,7 +2382,9 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
enum nft_registers dreg;
dreg = nft_type_to_reg(set->dtype);
- return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype);
+ return nft_validate_data_load(ctx, dreg, &elem->data,
+ set->dtype == NFT_DATA_VERDICT ?
+ NFT_DATA_VERDICT : NFT_DATA_VALUE);
}
int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 3c4b69e..a155d19 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -1053,6 +1053,7 @@ static void __net_exit nfnl_log_net_exit(struct net *net)
#ifdef CONFIG_PROC_FS
remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
#endif
+ nf_log_unset(net, &nfulnl_logger);
}
static struct pernet_operations nfnl_log_net_ops = {
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 8e0bb75..55c939f 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -31,7 +31,7 @@ static void nft_exthdr_eval(const struct nft_expr *expr,
{
struct nft_exthdr *priv = nft_expr_priv(expr);
struct nft_data *dest = &data[priv->dreg];
- unsigned int offset;
+ unsigned int offset = 0;
int err;
err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 8725291..83b9927 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -384,7 +384,7 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
{
dev->dep_link_up = true;
- if (!dev->active_target) {
+ if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) {
struct nfc_target *target;
target = nfc_find_target(dev, target_idx);
diff --git a/net/rds/ib.c b/net/rds/ib.c
index b4c8b00..ba2dffe 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr)
ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
/* due to this, we will claim to support iWARP devices unless we
check node_type. */
- if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
+ if (ret || !cm_id->device ||
+ cm_id->device->node_type != RDMA_NODE_IB_CA)
ret = -EADDRNOTAVAIL;
rdsdebug("addr %pI4 ret %d node type %d\n",
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 8eb9501..b7ebe23 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -421,8 +421,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
struct rds_ib_refill_cache *cache)
{
unsigned long flags;
- struct list_head *old;
- struct list_head __percpu *chpfirst;
+ struct list_head *old, *chpfirst;
local_irq_save(flags);
@@ -432,7 +431,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
else /* put on front */
list_add_tail(new_item, chpfirst);
- __this_cpu_write(chpfirst, new_item);
+ __this_cpu_write(cache->percpu->first, new_item);
__this_cpu_inc(cache->percpu->count);
if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
@@ -452,7 +451,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
} while (old);
- __this_cpu_write(chpfirst, NULL);
+ __this_cpu_write(cache->percpu->first, NULL);
__this_cpu_write(cache->percpu->count, 0);
end:
local_irq_restore(flags);
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 33af772..62ced65 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
if (msg->msg_name) {
struct sockaddr_rose *srose;
+ struct full_sockaddr_rose *full_srose = msg->msg_name;
memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
srose = msg->msg_name;
@@ -1260,18 +1261,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
- if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
- struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
- for (n = 0 ; n < rose->dest_ndigis ; n++)
- full_srose->srose_digis[n] = rose->dest_digis[n];
- msg->msg_namelen = sizeof(struct full_sockaddr_rose);
- } else {
- if (rose->dest_ndigis >= 1) {
- srose->srose_ndigis = 1;
- srose->srose_digi = rose->dest_digis[0];
- }
- msg->msg_namelen = sizeof(struct sockaddr_rose);
- }
+ for (n = 0 ; n < rose->dest_ndigis ; n++)
+ full_srose->srose_digis[n] = rose->dest_digis[n];
+ msg->msg_namelen = sizeof(struct full_sockaddr_rose);
}
skb_free_datagram(sk, skb);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 5c5edf5..11fe1a4 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -77,16 +77,16 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
&csum_idx_gen, &csum_hash_info);
if (IS_ERR(pc))
return PTR_ERR(pc);
- p = to_tcf_csum(pc);
ret = ACT_P_CREATED;
} else {
- p = to_tcf_csum(pc);
- if (!ovr) {
- tcf_hash_release(pc, bind, &csum_hash_info);
+ if (bind) /* don't override defaults */
+ return 0;
+ tcf_hash_release(pc, bind, &csum_hash_info);
+ if (!ovr)
return -EEXIST;
- }
}
+ p = to_tcf_csum(pc);
spin_lock_bh(&p->tcf_lock);
p->tcf_action = parm->action;
p->update_flags = parm->update_flags;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 5645a4d..eb9ba60 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -102,10 +102,11 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
return PTR_ERR(pc);
ret = ACT_P_CREATED;
} else {
- if (!ovr) {
- tcf_hash_release(pc, bind, &gact_hash_info);
+ if (bind) /* don't override defaults */
+ return 0;
+ tcf_hash_release(pc, bind, &gact_hash_info);
+ if (!ovr)
return -EEXIST;
- }
}
gact = to_gact(pc);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 882a897..dcbfe8c 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -141,10 +141,12 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
return PTR_ERR(pc);
ret = ACT_P_CREATED;
} else {
- if (!ovr) {
- tcf_ipt_release(to_ipt(pc), bind);
+ if (bind) /* don't override defaults */
+ return 0;
+ tcf_ipt_release(to_ipt(pc), bind);
+
+ if (!ovr)
return -EEXIST;
- }
}
ipt = to_ipt(pc);
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 6a15ace..7686953 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -70,15 +70,15 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
&nat_idx_gen, &nat_hash_info);
if (IS_ERR(pc))
return PTR_ERR(pc);
- p = to_tcf_nat(pc);
ret = ACT_P_CREATED;
} else {
- p = to_tcf_nat(pc);
- if (!ovr) {
- tcf_hash_release(pc, bind, &nat_hash_info);
+ if (bind)
+ return 0;
+ tcf_hash_release(pc, bind, &nat_hash_info);
+ if (!ovr)
return -EEXIST;
- }
}
+ p = to_tcf_nat(pc);
spin_lock_bh(&p->tcf_lock);
p->old_addr = parm->old_addr;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 03b6767..7aa2dcd 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -84,10 +84,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
ret = ACT_P_CREATED;
} else {
p = to_pedit(pc);
- if (!ovr) {
- tcf_hash_release(pc, bind, &pedit_hash_info);
+ tcf_hash_release(pc, bind, &pedit_hash_info);
+ if (bind)
+ return 0;
+ if (!ovr)
return -EEXIST;
- }
+
if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL)
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 16a62c3..ef246d8 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -177,10 +177,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
if (bind) {
police->tcf_bindcnt += 1;
police->tcf_refcnt += 1;
+ return 0;
}
if (ovr)
goto override;
- return ret;
+ /* not replacing */
+ return -EEXIST;
}
}
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 31157d3..f7b45ab 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -142,10 +142,13 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
ret = ACT_P_CREATED;
} else {
d = to_defact(pc);
- if (!ovr) {
- tcf_simp_release(d, bind);
+
+ if (bind)
+ return 0;
+ tcf_simp_release(d, bind);
+ if (!ovr)
return -EEXIST;
- }
+
reset_policy(d, defdata, parm);
}
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 35ea643..8fe9d25 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -120,10 +120,11 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
ret = ACT_P_CREATED;
} else {
d = to_skbedit(pc);
- if (!ovr) {
- tcf_hash_release(pc, bind, &skbedit_hash_info);
+ if (bind)
+ return 0;
+ tcf_hash_release(pc, bind, &skbedit_hash_info);
+ if (!ovr)
return -EEXIST;
- }
}
spin_lock_bh(&d->tcf_lock);
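
Most of the act_* hunks above settle on the same shape when the action index already exists: a bind request returns 0 without modifying the action, the lookup reference is dropped, and only an explicit override (ovr) may proceed; anything else is -EEXIST. A hedged sketch of that decision ladder with hypothetical types (not the tc internals):

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct action { int refcnt; int params; };

static void action_release(struct action *a) { a->refcnt--; }

/* What to do with an action that already exists in the hash: mirrors the
 * "if (bind) return 0; release; if (!ovr) return -EEXIST" shape above. */
static int existing_action(struct action *a, bool bind, bool ovr, int new_params)
{
    if (bind)                   /* binding a filter: keep defaults as-is */
        return 0;

    action_release(a);          /* drop the lookup reference */

    if (!ovr)                   /* no replace requested */
        return -EEXIST;

    a->params = new_params;     /* override path: update in place */
    return 0;
}

int main(void)
{
    struct action a = { .refcnt = 1, .params = 1 };

    printf("bind:     %d\n", existing_action(&a, true,  false, 2));
    printf("no ovr:   %d\n", existing_action(&a, false, false, 2));
    printf("override: %d\n", existing_action(&a, false, true,  2));
    return 0;
}
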
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 922a094..7fc899a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -126,7 +126,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_stopped(txq))
- ret = dev_hard_start_xmit(skb, dev, txq, NULL);
+ ret = dev_hard_start_xmit(skb, dev, txq);
HARD_TX_UNLOCK(dev, txq);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index f51ba98..59268f6 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -208,8 +208,6 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
INIT_LIST_HEAD(&q->retransmit);
INIT_LIST_HEAD(&q->sacked);
INIT_LIST_HEAD(&q->abandoned);
-
- q->empty = 1;
}
/* Free the outqueue structure and any related pending chunks.
@@ -332,7 +330,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
else
SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
- q->empty = 0;
break;
}
} else {
@@ -654,7 +651,6 @@ redo:
if (chunk->fast_retransmit == SCTP_NEED_FRTX)
chunk->fast_retransmit = SCTP_DONT_FRTX;
- q->empty = 0;
q->asoc->stats.rtxchunks++;
break;
}
@@ -1065,8 +1061,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
sctp_transport_reset_timers(transport);
- q->empty = 0;
-
/* Only let one DATA chunk get bundled with a
* COOKIE-ECHO chunk.
*/
@@ -1275,29 +1269,17 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
"advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
asoc->adv_peer_ack_point);
- /* See if all chunks are acked.
- * Make sure the empty queue handler will get run later.
- */
- q->empty = (list_empty(&q->out_chunk_list) &&
- list_empty(&q->retransmit));
- if (!q->empty)
- goto finish;
-
- list_for_each_entry(transport, transport_list, transports) {
- q->empty = q->empty && list_empty(&transport->transmitted);
- if (!q->empty)
- goto finish;
- }
-
- pr_debug("%s: sack queue is empty\n", __func__);
-finish:
- return q->empty;
+ return sctp_outq_is_empty(q);
}
-/* Is the outqueue empty? */
+/* Is the outqueue empty?
+ * The queue is empty when there is no pending data, no in-flight data
+ * and nothing pending retransmission.
+ */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
- return q->empty;
+ return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
+ list_empty(&q->retransmit);
}
/********************************************************************
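
Instead of keeping a q->empty flag in sync at every enqueue, transmit and SACK site, sctp_outq_is_empty() now derives the answer from the queue length, the outstanding byte count and the retransmit list, so the flag can never go stale. A hedged sketch of the derived-predicate idea with a hypothetical queue (not the SCTP structures):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct outq {
    size_t out_qlen;            /* queued, not yet transmitted */
    size_t outstanding_bytes;   /* transmitted, not yet acked */
    size_t retransmit_cnt;      /* waiting for retransmission */
};

/* Emptiness is computed from state already maintained for other reasons --
 * no separate "empty" flag that every path must remember to update. */
static bool outq_is_empty(const struct outq *q)
{
    return q->out_qlen == 0 &&
           q->outstanding_bytes == 0 &&
           q->retransmit_cnt == 0;
}

int main(void)
{
    struct outq q = { .out_qlen = 0, .outstanding_bytes = 1452, .retransmit_cnt = 0 };

    printf("empty: %s\n", outq_is_empty(&q) ? "yes" : "no");
    q.outstanding_bytes = 0;
    printf("empty: %s\n", outq_is_empty(&q) ? "yes" : "no");
    return 0;
}
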
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 69cd9bf..13b9877 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1498,6 +1498,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
int type;
head = head->next;
+ buf->next = NULL;
/* Ensure bearer is still enabled */
if (unlikely(!b_ptr->active))
diff --git a/net/tipc/port.c b/net/tipc/port.c
index c081a76..d43f318 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -251,18 +251,15 @@ struct tipc_port *tipc_createport(struct sock *sk,
return p_ptr;
}
-int tipc_deleteport(u32 ref)
+int tipc_deleteport(struct tipc_port *p_ptr)
{
- struct tipc_port *p_ptr;
struct sk_buff *buf = NULL;
- tipc_withdraw(ref, 0, NULL);
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
+ tipc_withdraw(p_ptr, 0, NULL);
- tipc_ref_discard(ref);
- tipc_port_unlock(p_ptr);
+ spin_lock_bh(p_ptr->lock);
+ tipc_ref_discard(p_ptr->ref);
+ spin_unlock_bh(p_ptr->lock);
k_cancel_timer(&p_ptr->timer);
if (p_ptr->connected) {
@@ -704,47 +701,36 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
}
-int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
+ struct tipc_name_seq const *seq)
{
- struct tipc_port *p_ptr;
struct publication *publ;
u32 key;
- int res = -EINVAL;
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
+ if (p_ptr->connected)
return -EINVAL;
+ key = p_ptr->ref + p_ptr->pub_count + 1;
+ if (key == p_ptr->ref)
+ return -EADDRINUSE;
- if (p_ptr->connected)
- goto exit;
- key = ref + p_ptr->pub_count + 1;
- if (key == ref) {
- res = -EADDRINUSE;
- goto exit;
- }
publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
scope, p_ptr->ref, key);
if (publ) {
list_add(&publ->pport_list, &p_ptr->publications);
p_ptr->pub_count++;
p_ptr->published = 1;
- res = 0;
+ return 0;
}
-exit:
- tipc_port_unlock(p_ptr);
- return res;
+ return -EINVAL;
}
-int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
+ struct tipc_name_seq const *seq)
{
- struct tipc_port *p_ptr;
struct publication *publ;
struct publication *tpubl;
int res = -EINVAL;
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
if (!seq) {
list_for_each_entry_safe(publ, tpubl,
&p_ptr->publications, pport_list) {
@@ -771,7 +757,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
}
if (list_empty(&p_ptr->publications))
p_ptr->published = 0;
- tipc_port_unlock(p_ptr);
return res;
}
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 9122535..34f12bd 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -116,7 +116,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err);
void tipc_acknowledge(u32 port_ref, u32 ack);
-int tipc_deleteport(u32 portref);
+int tipc_deleteport(struct tipc_port *p_ptr);
int tipc_portimportance(u32 portref, unsigned int *importance);
int tipc_set_portimportance(u32 portref, unsigned int importance);
@@ -127,9 +127,9 @@ int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
-int tipc_publish(u32 portref, unsigned int scope,
+int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
struct tipc_name_seq const *name_seq);
-int tipc_withdraw(u32 portref, unsigned int scope,
+int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
struct tipc_name_seq const *name_seq);
int tipc_connect(u32 portref, struct tipc_portid const *port);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3b61851..e741416 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -354,7 +354,7 @@ static int release(struct socket *sock)
* Delete TIPC port; this ensures no more messages are queued
* (also disconnects an active connection & sends a 'FIN-' to peer)
*/
- res = tipc_deleteport(tport->ref);
+ res = tipc_deleteport(tport);
/* Discard any remaining (connection-based) messages in receive queue */
__skb_queue_purge(&sk->sk_receive_queue);
@@ -386,30 +386,46 @@ static int release(struct socket *sock)
*/
static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
{
+ struct sock *sk = sock->sk;
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
- u32 portref = tipc_sk_port(sock->sk)->ref;
+ struct tipc_port *tport = tipc_sk_port(sock->sk);
+ int res = -EINVAL;
- if (unlikely(!uaddr_len))
- return tipc_withdraw(portref, 0, NULL);
+ lock_sock(sk);
+ if (unlikely(!uaddr_len)) {
+ res = tipc_withdraw(tport, 0, NULL);
+ goto exit;
+ }
- if (uaddr_len < sizeof(struct sockaddr_tipc))
- return -EINVAL;
- if (addr->family != AF_TIPC)
- return -EAFNOSUPPORT;
+ if (uaddr_len < sizeof(struct sockaddr_tipc)) {
+ res = -EINVAL;
+ goto exit;
+ }
+ if (addr->family != AF_TIPC) {
+ res = -EAFNOSUPPORT;
+ goto exit;
+ }
if (addr->addrtype == TIPC_ADDR_NAME)
addr->addr.nameseq.upper = addr->addr.nameseq.lower;
- else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
- return -EAFNOSUPPORT;
+ else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
+ res = -EAFNOSUPPORT;
+ goto exit;
+ }
if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
(addr->addr.nameseq.type != TIPC_TOP_SRV) &&
- (addr->addr.nameseq.type != TIPC_CFG_SRV))
- return -EACCES;
+ (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
+ res = -EACCES;
+ goto exit;
+ }
- return (addr->scope > 0) ?
- tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
- tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
+ res = (addr->scope > 0) ?
+ tipc_publish(tport, addr->scope, &addr->addr.nameseq) :
+ tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq);
+exit:
+ release_sock(sk);
+ return res;
}
/**
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index a271c27..722da61 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -124,6 +124,10 @@ int ieee80211_radiotap_iterator_init(
/* find payload start allowing for extended bitmap(s) */
if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
+ if ((unsigned long)iterator->_arg -
+ (unsigned long)iterator->_rtheader + sizeof(uint32_t) >
+ (unsigned long)iterator->_max_length)
+ return -EINVAL;
while (get_unaligned_le32(iterator->_arg) &
(1 << IEEE80211_RADIOTAP_EXT)) {
iterator->_arg += sizeof(uint32_t);
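
The added check in ieee80211_radiotap_iterator_init() makes sure there is room for one more 32-bit bitmap word before the extended-bitmap loop dereferences it. A hedged sketch of the same validate-before-read discipline over a plain byte buffer (hypothetical parser, not the radiotap iterator):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint32_t get_le32(const uint8_t *p)
{
    return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Walk little-endian u32 words for as long as bit 31 ("another word
 * follows") is set, but validate the remaining length before every read. */
static int walk_words(const uint8_t *buf, size_t len)
{
    size_t off = 0;
    uint32_t word;

    do {
        if (len - off < 4)      /* the next read would run past the buffer */
            return -1;
        word = get_le32(buf + off);
        off += 4;
    } while (word & 0x80000000u);

    return (int)off;
}

int main(void)
{
    const uint8_t ok[]        = { 0x01, 0x00, 0x00, 0x80, 0x02, 0x00, 0x00, 0x00 };
    const uint8_t truncated[] = { 0x01, 0x00, 0x00, 0x80 };  /* claims more, has none */

    printf("ok:        %d\n", walk_words(ok, sizeof(ok)));
    printf("truncated: %d\n", walk_words(truncated, sizeof(truncated)));
    return 0;
}
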
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 65f8008..d3c5bd7 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -632,6 +632,16 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
}
#endif
+ if (!bss && (status == WLAN_STATUS_SUCCESS)) {
+ WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
+ bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
+ wdev->ssid, wdev->ssid_len,
+ WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
+ if (bss)
+ cfg80211_hold_bss(bss_from_pub(bss));
+ }
+
if (wdev->current_bss) {
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
@@ -649,16 +659,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
return;
}
- if (!bss) {
- WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
- bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
- wdev->ssid, wdev->ssid_len,
- WLAN_CAPABILITY_ESS,
- WLAN_CAPABILITY_ESS);
- if (WARN_ON(!bss))
- return;
- cfg80211_hold_bss(bss_from_pub(bss));
- }
+ if (WARN_ON(!bss))
+ return;
wdev->current_bss = bss_from_pub(bss);
diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c
index d0c687f..5dce351 100644
--- a/samples/kobject/kset-example.c
+++ b/samples/kobject/kset-example.c
@@ -262,6 +262,7 @@ baz_error:
bar_error:
destroy_foo_obj(foo_obj);
foo_error:
+ kset_unregister(example_kset);
return -EINVAL;
}
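
The one-line fix above completes the usual error-unwind ladder: each failure jumps to a label that releases everything set up before it, and the final foo_error label was missing the kset_unregister() that balances the kset_create_and_add() at the top. A hedged userspace sketch of the same ladder with hypothetical resources:

#include <stdio.h>
#include <stdlib.h>

static int do_setup(void)
{
    char *a = NULL, *b = NULL;
    FILE *f = NULL;

    a = malloc(32);
    if (!a)
        goto err_a;
    b = malloc(32);
    if (!b)
        goto err_b;
    f = fopen("/nonexistent/path", "r");    /* deliberately fails */
    if (!f)
        goto err_f;

    fclose(f);
    free(b);
    free(a);
    return 0;

err_f:              /* undo everything acquired before the failure ... */
    free(b);
err_b:              /* ... in reverse order, one label per step */
    free(a);
err_a:
    return -1;
}

int main(void)
{
    printf("setup: %d\n", do_setup());
    return 0;
}
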
diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh
index a2af2e8..c9469d3 100644
--- a/scripts/gcc-goto.sh
+++ b/scripts/gcc-goto.sh
@@ -5,7 +5,7 @@
cat << "END" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"
int main(void)
{
-#ifdef __arm__
+#if defined(__arm__) || defined(__aarch64__)
/*
* Not related to asm goto, but used by jump label
* and broken on some ARM GCC versions (see GCC Bug 48637).
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 4606cdf..3133172 100644
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -219,6 +219,13 @@ sub read_kconfig {
$depends{$config} = $1;
} elsif ($state eq "DEP" && /^\s*depends\s+on\s+(.*)$/) {
$depends{$config} .= " " . $1;
+ } elsif ($state eq "DEP" && /^\s*def(_(bool|tristate)|ault)\s+(\S.*)$/) {
+ my $dep = $3;
+ if ($dep !~ /^\s*(y|m|n)\s*$/) {
+ $dep =~ s/.*\sif\s+//;
+ $depends{$config} .= " " . $dep;
+ dprint "Added default depends $dep to $config\n";
+ }
# Get the configs that select this config
} elsif ($state ne "NONE" && /^\s*select\s+(\S+)/) {
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 32b10f5..2dcb377 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -82,7 +82,9 @@ kallsyms()
kallsymopt="${kallsymopt} --all-symbols"
fi
- kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+ if [ -n "${CONFIG_ARM}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then
+ kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+ fi
local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 419491d..57b0b49 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -234,6 +234,14 @@ static int inode_alloc_security(struct inode *inode)
return 0;
}
+static void inode_free_rcu(struct rcu_head *head)
+{
+ struct inode_security_struct *isec;
+
+ isec = container_of(head, struct inode_security_struct, rcu);
+ kmem_cache_free(sel_inode_cache, isec);
+}
+
static void inode_free_security(struct inode *inode)
{
struct inode_security_struct *isec = inode->i_security;
@@ -244,8 +252,16 @@ static void inode_free_security(struct inode *inode)
list_del_init(&isec->list);
spin_unlock(&sbsec->isec_lock);
- inode->i_security = NULL;
- kmem_cache_free(sel_inode_cache, isec);
+ /*
+ * The inode may still be referenced in a path walk and
+ * a call to selinux_inode_permission() can be made
+ * after inode_free_security() is called. Ideally, the VFS
+ * wouldn't do this, but fixing that is a much harder
+ * job. For now, simply free the i_security via RCU, and
+ * leave the current inode->i_security pointer intact.
+ * The inode will be freed after the RCU grace period too.
+ */
+ call_rcu(&isec->rcu, inode_free_rcu);
}
static int file_alloc_security(struct file *file)
@@ -4334,8 +4350,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
PEER__RECV, &ad);
- if (err)
+ if (err) {
selinux_netlbl_err(skb, err, 0);
+ return err;
+ }
}
if (secmark_active) {
@@ -5586,11 +5604,11 @@ static int selinux_setprocattr(struct task_struct *p,
/* Check for ptracing, and update the task SID if ok.
Otherwise, leave SID unchanged and fail. */
ptsid = 0;
- task_lock(p);
+ rcu_read_lock();
tracer = ptrace_parent(p);
if (tracer)
ptsid = task_sid(tracer);
- task_unlock(p);
+ rcu_read_unlock();
if (tracer) {
error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index b1dfe10..078e553 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -38,7 +38,10 @@ struct task_security_struct {
struct inode_security_struct {
struct inode *inode; /* back pointer to inode object */
- struct list_head list; /* list of inode_security_struct */
+ union {
+ struct list_head list; /* list of inode_security_struct */
+ struct rcu_head rcu; /* for freeing the inode_security_struct */
+ };
u32 task_sid; /* SID of creating task */
u32 sid; /* SID of this object */
u16 sclass; /* security class of this object */
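
Together, the hooks.c and objsec.h hunks stop freeing i_security synchronously: a lockless path walk may still call selinux_inode_permission() after inode_free_security(), so the blob is handed to call_rcu() and released from a callback that recovers the enclosing struct with container_of(); the rcu_head shares storage with the list_head because the two are never live at the same time. A hedged userspace analogue of the callback-plus-container_of shape (no real RCU grace period, just the bookkeeping):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct cb_head {
    struct cb_head *next;
    void (*func)(struct cb_head *head);
};

struct inode_sec {
    int sid;
    struct cb_head rcu;     /* embedded head, like rcu_head in the patch */
};

static struct cb_head *pending;

static void defer_free(struct cb_head *head, void (*func)(struct cb_head *))
{
    head->func = func;
    head->next = pending;
    pending = head;
}

/* Stand-in for the end of a grace period: run every queued callback. */
static void run_deferred(void)
{
    while (pending) {
        struct cb_head *head = pending;

        pending = head->next;
        head->func(head);
    }
}

static void inode_sec_free_cb(struct cb_head *head)
{
    struct inode_sec *isec = container_of(head, struct inode_sec, rcu);

    printf("freeing isec with sid %d\n", isec->sid);
    free(isec);
}

int main(void)
{
    struct inode_sec *isec = malloc(sizeof(*isec));

    isec->sid = 42;
    defer_free(&isec->rcu, inode_sec_free_cb);  /* like call_rcu(&isec->rcu, ...) */
    /* ... late readers could still dereference isec here ... */
    run_deferred();                             /* "grace period" over */
    return 0;
}
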
diff --git a/tools/Makefile b/tools/Makefile
index a9b0200..927cd46 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -39,10 +39,10 @@ cpupower: FORCE
cgroup firewire guest usb virtio vm net: FORCE
$(call descend,$@)
-liblk: FORCE
- $(call descend,lib/lk)
+libapikfs: FORCE
+ $(call descend,lib/api)
-perf: liblk FORCE
+perf: libapikfs FORCE
$(call descend,$@)
selftests: FORCE
@@ -80,10 +80,10 @@ cpupower_clean:
cgroup_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean net_clean:
$(call descend,$(@:_clean=),clean)
-liblk_clean:
- $(call descend,lib/lk,clean)
+libapikfs_clean:
+ $(call descend,lib/api,clean)
-perf_clean: liblk_clean
+perf_clean: libapikfs_clean
$(call descend,$(@:_clean=),clean)
selftests_clean:
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index b8d6d54..4088b81 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -26,7 +26,6 @@
#include <sys/socket.h>
#include <sys/poll.h>
#include <sys/utsname.h>
-#include <linux/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index 8bcb040..520de3304 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -22,7 +22,6 @@
#include <sys/socket.h>
#include <sys/poll.h>
#include <sys/ioctl.h>
-#include <linux/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <mntent.h>
diff --git a/tools/perf/util/include/asm/bug.h b/tools/include/asm/bug.h
index 7fcc681..9e5f484 100644
--- a/tools/perf/util/include/asm/bug.h
+++ b/tools/include/asm/bug.h
@@ -1,5 +1,7 @@
-#ifndef _PERF_ASM_GENERIC_BUG_H
-#define _PERF_ASM_GENERIC_BUG_H
+#ifndef _TOOLS_ASM_BUG_H
+#define _TOOLS_ASM_BUG_H
+
+#include <linux/compiler.h>
#define __WARN_printf(arg...) do { fprintf(stderr, arg); } while (0)
@@ -19,4 +21,5 @@
__warned = 1; \
unlikely(__ret_warn_once); \
})
-#endif
+
+#endif /* _TOOLS_ASM_BUG_H */
diff --git a/tools/perf/util/include/linux/compiler.h b/tools/include/linux/compiler.h
index b003ad7..fbc6665 100644
--- a/tools/perf/util/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -1,5 +1,5 @@
-#ifndef _PERF_LINUX_COMPILER_H_
-#define _PERF_LINUX_COMPILER_H_
+#ifndef _TOOLS_LINUX_COMPILER_H_
+#define _TOOLS_LINUX_COMPILER_H_
#ifndef __always_inline
# define __always_inline inline __attribute__((always_inline))
@@ -27,4 +27,12 @@
# define __weak __attribute__((weak))
#endif
+#ifndef likely
+# define likely(x) __builtin_expect(!!(x), 1)
#endif
+
+#ifndef unlikely
+# define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+
+#endif /* _TOOLS_LINUX_COMPILER_H */
diff --git a/tools/lib/lk/Makefile b/tools/lib/api/Makefile
index 3dba0a4..ed2f51e 100644
--- a/tools/lib/lk/Makefile
+++ b/tools/lib/api/Makefile
@@ -1,4 +1,5 @@
include ../../scripts/Makefile.include
+include ../../perf/config/utilities.mak # QUIET_CLEAN
CC = $(CROSS_COMPILE)gcc
AR = $(CROSS_COMPILE)ar
@@ -7,11 +8,11 @@ AR = $(CROSS_COMPILE)ar
LIB_H=
LIB_OBJS=
-LIB_H += debugfs.h
+LIB_H += fs/debugfs.h
-LIB_OBJS += $(OUTPUT)debugfs.o
+LIB_OBJS += $(OUTPUT)fs/debugfs.o
-LIBFILE = liblk.a
+LIBFILE = libapikfs.a
CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
EXTLIBS = -lelf -lpthread -lrt -lm
@@ -25,14 +26,17 @@ $(LIBFILE): $(LIB_OBJS)
$(LIB_OBJS): $(LIB_H)
-$(OUTPUT)%.o: %.c
+libapi_dirs:
+ $(QUIET_MKDIR)mkdir -p $(OUTPUT)fs/
+
+$(OUTPUT)%.o: %.c libapi_dirs
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
-$(OUTPUT)%.s: %.c
+$(OUTPUT)%.s: %.c libapi_dirs
$(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $<
-$(OUTPUT)%.o: %.S
+$(OUTPUT)%.o: %.S libapi_dirs
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
clean:
- $(RM) $(LIB_OBJS) $(LIBFILE)
+ $(call QUIET_CLEAN, libapi) $(RM) $(LIB_OBJS) $(LIBFILE)
.PHONY: clean
diff --git a/tools/lib/lk/debugfs.c b/tools/lib/api/fs/debugfs.c
index 7c43479..7c43479 100644
--- a/tools/lib/lk/debugfs.c
+++ b/tools/lib/api/fs/debugfs.c
diff --git a/tools/lib/lk/debugfs.h b/tools/lib/api/fs/debugfs.h
index 935c59b..f19d3df 100644
--- a/tools/lib/lk/debugfs.h
+++ b/tools/lib/api/fs/debugfs.h
@@ -1,5 +1,5 @@
-#ifndef __LK_DEBUGFS_H__
-#define __LK_DEBUGFS_H__
+#ifndef __API_DEBUGFS_H__
+#define __API_DEBUGFS_H__
#define _STR(x) #x
#define STR(x) _STR(x)
@@ -26,4 +26,4 @@ char *debugfs_mount(const char *mountpoint);
extern char debugfs_mountpoint[];
-#endif /* __LK_DEBUGFS_H__ */
+#endif /* __API_DEBUGFS_H__ */
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
new file mode 100644
index 0000000..da8b7aa
--- /dev/null
+++ b/tools/lib/lockdep/Makefile
@@ -0,0 +1,251 @@
+# liblockdep version
+LL_VERSION = 0
+LL_PATCHLEVEL = 0
+LL_EXTRAVERSION = 1
+
+# file format version
+FILE_VERSION = 1
+
+MAKEFLAGS += --no-print-directory
+
+
+# Makefiles suck: This macro sets a default value of $(2) for the
+# variable named by $(1), unless the variable has been set by
+# environment or command line. This is necessary for CC and AR
+# because make sets default values, so the simpler ?= approach
+# won't work as expected.
+define allow-override
+ $(if $(or $(findstring environment,$(origin $(1))),\
+ $(findstring command line,$(origin $(1)))),,\
+ $(eval $(1) = $(2)))
+endef
+
+# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
+$(call allow-override,CC,$(CROSS_COMPILE)gcc)
+$(call allow-override,AR,$(CROSS_COMPILE)ar)
+
+INSTALL = install
+
+# Use DESTDIR for installing into a different root directory.
+# This is useful for building a package. The program will be
+# installed in this directory as if it was the root directory.
+# Then the build tool can move it later.
+DESTDIR ?=
+DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
+
+prefix ?= /usr/local
+libdir_relative = lib
+libdir = $(prefix)/$(libdir_relative)
+bindir_relative = bin
+bindir = $(prefix)/$(bindir_relative)
+
+export DESTDIR DESTDIR_SQ INSTALL
+
+# copy a bit from Linux kbuild
+
+ifeq ("$(origin V)", "command line")
+ VERBOSE = $(V)
+endif
+ifndef VERBOSE
+ VERBOSE = 0
+endif
+
+ifeq ("$(origin O)", "command line")
+ BUILD_OUTPUT := $(O)
+endif
+
+ifeq ($(BUILD_SRC),)
+ifneq ($(BUILD_OUTPUT),)
+
+define build_output
+ $(if $(VERBOSE:1=),@)$(MAKE) -C $(BUILD_OUTPUT) \
+ BUILD_SRC=$(CURDIR) -f $(CURDIR)/Makefile $1
+endef
+
+saved-output := $(BUILD_OUTPUT)
+BUILD_OUTPUT := $(shell cd $(BUILD_OUTPUT) && /bin/pwd)
+$(if $(BUILD_OUTPUT),, \
+ $(error output directory "$(saved-output)" does not exist))
+
+all: sub-make
+
+gui: force
+ $(call build_output, all_cmd)
+
+$(filter-out gui,$(MAKECMDGOALS)): sub-make
+
+sub-make: force
+ $(call build_output, $(MAKECMDGOALS))
+
+
+# Leave processing to above invocation of make
+skip-makefile := 1
+
+endif # BUILD_OUTPUT
+endif # BUILD_SRC
+
+# We process the rest of the Makefile if this is the final invocation of make
+ifeq ($(skip-makefile),)
+
+srctree := $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))
+objtree := $(CURDIR)
+src := $(srctree)
+obj := $(objtree)
+
+export prefix libdir bindir src obj
+
+# Shell quotes
+libdir_SQ = $(subst ','\'',$(libdir))
+bindir_SQ = $(subst ','\'',$(bindir))
+
+LIB_FILE = liblockdep.a liblockdep.so
+BIN_FILE = lockdep
+
+CONFIG_INCLUDES =
+CONFIG_LIBS =
+CONFIG_FLAGS =
+
+OBJ = $@
+N =
+
+export Q VERBOSE
+
+LIBLOCKDEP_VERSION = $(LL_VERSION).$(LL_PATCHLEVEL).$(LL_EXTRAVERSION)
+
+INCLUDES = -I. -I/usr/local/include -I./uinclude $(CONFIG_INCLUDES)
+
+# Set compile option CFLAGS if not set elsewhere
+CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
+
+override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
+
+ifeq ($(VERBOSE),1)
+ Q =
+ print_compile =
+ print_app_build =
+ print_fpic_compile =
+ print_shared_lib_compile =
+ print_install =
+else
+ Q = @
+ print_compile = echo ' CC '$(OBJ);
+ print_app_build = echo ' BUILD '$(OBJ);
+ print_fpic_compile = echo ' CC FPIC '$(OBJ);
+ print_shared_lib_compile = echo ' BUILD SHARED LIB '$(OBJ);
+ print_static_lib_build = echo ' BUILD STATIC LIB '$(OBJ);
+ print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2';
+endif
+
+do_fpic_compile = \
+ ($(print_fpic_compile) \
+ $(CC) -c $(CFLAGS) $(EXT) -fPIC $< -o $@)
+
+do_app_build = \
+ ($(print_app_build) \
+ $(CC) $^ -rdynamic -o $@ $(CONFIG_LIBS) $(LIBS))
+
+do_compile_shared_library = \
+ ($(print_shared_lib_compile) \
+ $(CC) --shared $^ -o $@ -lpthread -ldl)
+
+do_build_static_lib = \
+ ($(print_static_lib_build) \
+ $(RM) $@; $(AR) rcs $@ $^)
+
+
+define do_compile
+ $(print_compile) \
+ $(CC) -c $(CFLAGS) $(EXT) $< -o $(obj)/$@;
+endef
+
+$(obj)/%.o: $(src)/%.c
+ $(Q)$(call do_compile)
+
+%.o: $(src)/%.c
+ $(Q)$(call do_compile)
+
+PEVENT_LIB_OBJS = common.o lockdep.o preload.o rbtree.o
+
+ALL_OBJS = $(PEVENT_LIB_OBJS)
+
+CMD_TARGETS = $(LIB_FILE)
+
+TARGETS = $(CMD_TARGETS)
+
+
+all: all_cmd
+
+all_cmd: $(CMD_TARGETS)
+
+liblockdep.so: $(PEVENT_LIB_OBJS)
+ $(Q)$(do_compile_shared_library)
+
+liblockdep.a: $(PEVENT_LIB_OBJS)
+ $(Q)$(do_build_static_lib)
+
+$(PEVENT_LIB_OBJS): %.o: $(src)/%.c
+ $(Q)$(do_fpic_compile)
+
+## make deps
+
+all_objs := $(sort $(ALL_OBJS))
+all_deps := $(all_objs:%.o=.%.d)
+
+# let .d file also depends on the source and header files
+define check_deps
+ @set -e; $(RM) $@; \
+ $(CC) -MM $(CFLAGS) $< > $@.$$$$; \
+ sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \
+ $(RM) $@.$$$$
+endef
+
+$(all_deps): .%.d: $(src)/%.c
+ $(Q)$(call check_deps)
+
+$(all_objs) : %.o : .%.d
+
+dep_includes := $(wildcard $(all_deps))
+
+ifneq ($(dep_includes),)
+ include $(dep_includes)
+endif
+
+### Detect environment changes
+TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):$(ARCH):$(CROSS_COMPILE)
+
+tags: force
+ $(RM) tags
+ find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
+ --regex-c++='/_PE\(([^,)]*).*/PEVENT_ERRNO__\1/'
+
+TAGS: force
+ $(RM) TAGS
+ find . -name '*.[ch]' | xargs etags \
+ --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/'
+
+define do_install
+ $(print_install) \
+ if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
+ fi; \
+ $(INSTALL) $1 '$(DESTDIR_SQ)$2'
+endef
+
+install_lib: all_cmd
+ $(Q)$(call do_install,$(LIB_FILE),$(libdir_SQ))
+ $(Q)$(call do_install,$(BIN_FILE),$(bindir_SQ))
+
+install: install_lib
+
+clean:
+ $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
+ $(RM) tags TAGS
+
+endif # skip-makefile
+
+PHONY += force
+force:
+
+# Declare the contents of the .PHONY variable as phony. We keep that
+# information in a variable so we can use it in if_changed and friends.
+.PHONY: $(PHONY)
diff --git a/tools/lib/lockdep/common.c b/tools/lib/lockdep/common.c
new file mode 100644
index 0000000..8ef602f
--- /dev/null
+++ b/tools/lib/lockdep/common.c
@@ -0,0 +1,33 @@
+#include <stddef.h>
+#include <stdbool.h>
+#include <linux/compiler.h>
+#include <linux/lockdep.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+
+static __thread struct task_struct current_obj;
+
+/* lockdep wants these */
+bool debug_locks = true;
+bool debug_locks_silent;
+
+__attribute__((constructor)) static void liblockdep_init(void)
+{
+ lockdep_init();
+}
+
+__attribute__((destructor)) static void liblockdep_exit(void)
+{
+ debug_check_no_locks_held(&current_obj);
+}
+
+struct task_struct *__curr(void)
+{
+ if (current_obj.pid == 0) {
+ /* Makes lockdep output pretty */
+ prctl(PR_GET_NAME, current_obj.comm);
+ current_obj.pid = syscall(__NR_gettid);
+ }
+
+ return &current_obj;
+}
diff --git a/tools/lib/lockdep/include/liblockdep/common.h b/tools/lib/lockdep/include/liblockdep/common.h
new file mode 100644
index 0000000..0bda630
--- /dev/null
+++ b/tools/lib/lockdep/include/liblockdep/common.h
@@ -0,0 +1,50 @@
+#ifndef _LIBLOCKDEP_COMMON_H
+#define _LIBLOCKDEP_COMMON_H
+
+#include <pthread.h>
+
+#define NR_LOCKDEP_CACHING_CLASSES 2
+#define MAX_LOCKDEP_SUBCLASSES 8UL
+
+#ifndef CALLER_ADDR0
+#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#endif
+
+#ifndef _RET_IP_
+#define _RET_IP_ CALLER_ADDR0
+#endif
+
+#ifndef _THIS_IP_
+#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+#endif
+
+struct lockdep_subclass_key {
+ char __one_byte;
+};
+
+struct lock_class_key {
+ struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
+};
+
+struct lockdep_map {
+ struct lock_class_key *key;
+ struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
+ const char *name;
+#ifdef CONFIG_LOCK_STAT
+ int cpu;
+ unsigned long ip;
+#endif
+};
+
+void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass);
+void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+ int trylock, int read, int check,
+ struct lockdep_map *nest_lock, unsigned long ip);
+void lock_release(struct lockdep_map *lock, int nested,
+ unsigned long ip);
+
+#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
+ { .name = (_name), .key = (void *)(_key), }
+
+#endif
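    The header above only declares the low-level annotation hooks. As a rough
    illustration (not part of the patch itself), a caller embeds a struct
    lockdep_map in its own lock type, registers it once with lockdep_init_map(),
    and brackets the real locking calls with lock_acquire()/lock_release().
    struct my_lock, my_lock_key and the pthread spinlock are hypothetical
    example names; the sketch assumes the program links against liblockdep.a
    and is built with -Iinclude:

    /*
     * Sketch only: annotating a custom lock with the API declared above.
     * Assumed build line: gcc -pthread -Iinclude mylock.c liblockdep.a
     */
    #include <pthread.h>
    #include <liblockdep/common.h>

    struct my_lock {
        pthread_spinlock_t slock;
        struct lockdep_map dep_map;
    };

    static struct lock_class_key my_lock_key;

    static void my_lock_init(struct my_lock *l)
    {
        pthread_spin_init(&l->slock, 0);
        lockdep_init_map(&l->dep_map, "my_lock", &my_lock_key, 0);
    }

    static void my_lock_lock(struct my_lock *l)
    {
        /* tell lockdep about the acquisition, then take the real lock */
        lock_acquire(&l->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
        pthread_spin_lock(&l->slock);
    }

    static void my_lock_unlock(struct my_lock *l)
    {
        lock_release(&l->dep_map, 0, (unsigned long)_RET_IP_);
        pthread_spin_unlock(&l->slock);
    }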
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
new file mode 100644
index 0000000..c342f70
--- /dev/null
+++ b/tools/lib/lockdep/include/liblockdep/mutex.h
@@ -0,0 +1,70 @@
+#ifndef _LIBLOCKDEP_MUTEX_H
+#define _LIBLOCKDEP_MUTEX_H
+
+#include <pthread.h>
+#include "common.h"
+
+struct liblockdep_pthread_mutex {
+ pthread_mutex_t mutex;
+ struct lockdep_map dep_map;
+};
+
+typedef struct liblockdep_pthread_mutex liblockdep_pthread_mutex_t;
+
+#define LIBLOCKDEP_PTHREAD_MUTEX_INITIALIZER(mtx) \
+ (const struct liblockdep_pthread_mutex) { \
+ .mutex = PTHREAD_MUTEX_INITIALIZER, \
+ .dep_map = STATIC_LOCKDEP_MAP_INIT(#mtx, &((&(mtx))->dep_map)), \
+}
+
+static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
+ const char *name,
+ struct lock_class_key *key,
+ const pthread_mutexattr_t *__mutexattr)
+{
+ lockdep_init_map(&lock->dep_map, name, key, 0);
+ return pthread_mutex_init(&lock->mutex, __mutexattr);
+}
+
+#define liblockdep_pthread_mutex_init(mutex, mutexattr) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key, (mutexattr)); \
+})
+
+static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
+{
+ lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+ return pthread_mutex_lock(&lock->mutex);
+}
+
+static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lock)
+{
+ lock_release(&lock->dep_map, 0, (unsigned long)_RET_IP_);
+ return pthread_mutex_unlock(&lock->mutex);
+}
+
+static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
+{
+ lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+ return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
+}
+
+static inline int liblockdep_pthread_mutex_destroy(liblockdep_pthread_mutex_t *lock)
+{
+ return pthread_mutex_destroy(&lock->mutex);
+}
+
+#ifdef __USE_LIBLOCKDEP
+
+#define pthread_mutex_t liblockdep_pthread_mutex_t
+#define pthread_mutex_init liblockdep_pthread_mutex_init
+#define pthread_mutex_lock liblockdep_pthread_mutex_lock
+#define pthread_mutex_unlock liblockdep_pthread_mutex_unlock
+#define pthread_mutex_trylock liblockdep_pthread_mutex_trylock
+#define pthread_mutex_destroy liblockdep_pthread_mutex_destroy
+
+#endif
+
+#endif
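    For context (again, not code from the patch): besides the explicit
    liblockdep_pthread_mutex_*() API, the __USE_LIBLOCKDEP block above remaps
    the standard pthread mutex names, so an unmodified program can be checked
    simply by recompiling it against this header. A minimal sketch, built
    roughly the way run_tests.sh later in this patch builds the self-tests:

    /*
     * Sketch: with -D__USE_LIBLOCKDEP the macros above redirect the plain
     * pthread calls to the checked wrappers.  Approximate build line:
     *   gcc -pthread -D__USE_LIBLOCKDEP -Iinclude prog.c liblockdep.a
     */
    #include <liblockdep/mutex.h>

    int main(void)
    {
        pthread_mutex_t m;              /* really liblockdep_pthread_mutex_t */

        pthread_mutex_init(&m, NULL);   /* -> liblockdep_pthread_mutex_init() */
        pthread_mutex_lock(&m);
        pthread_mutex_lock(&m);         /* lockdep reports the recursive lock */
        return 0;
    }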
diff --git a/tools/lib/lockdep/include/liblockdep/rwlock.h b/tools/lib/lockdep/include/liblockdep/rwlock.h
new file mode 100644
index 0000000..a680ab8
--- /dev/null
+++ b/tools/lib/lockdep/include/liblockdep/rwlock.h
@@ -0,0 +1,86 @@
+#ifndef _LIBLOCKDEP_RWLOCK_H
+#define _LIBLOCKDEP_RWLOCK_H
+
+#include <pthread.h>
+#include "common.h"
+
+struct liblockdep_pthread_rwlock {
+ pthread_rwlock_t rwlock;
+ struct lockdep_map dep_map;
+};
+
+typedef struct liblockdep_pthread_rwlock liblockdep_pthread_rwlock_t;
+
+#define LIBLOCKDEP_PTHREAD_RWLOCK_INITIALIZER(rwl) \
+ (struct liblockdep_pthread_rwlock) { \
+ .rwlock = PTHREAD_RWLOCK_INITIALIZER, \
+ .dep_map = STATIC_LOCKDEP_MAP_INIT(#rwl, &((&(rwl))->dep_map)), \
+}
+
+static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock,
+ const char *name,
+ struct lock_class_key *key,
+ const pthread_rwlockattr_t *attr)
+{
+ lockdep_init_map(&lock->dep_map, name, key, 0);
+
+ return pthread_rwlock_init(&lock->rwlock, attr);
+}
+
+#define liblockdep_pthread_rwlock_init(lock, attr) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ __rwlock_init((lock), #lock, &__key, (attr)); \
+})
+
+static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
+{
+ lock_acquire(&lock->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+ return pthread_rwlock_rdlock(&lock->rwlock);
+
+}
+
+static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *lock)
+{
+ lock_release(&lock->dep_map, 0, (unsigned long)_RET_IP_);
+ return pthread_rwlock_unlock(&lock->rwlock);
+}
+
+static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock)
+{
+ lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+ return pthread_rwlock_wrlock(&lock->rwlock);
+}
+
+static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock)
+{
+ lock_acquire(&lock->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+ return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
+}
+
+static inline int liblockdep_pthread_rwlock_trywrlock(liblockdep_pthread_rwlock_t *lock)
+{
+ lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+ return pthread_rwlock_trywrlock(&lock->rwlock) == 0 ? 1 : 0;
+}
+
+static inline int liblockdep_rwlock_destroy(liblockdep_pthread_rwlock_t *lock)
+{
+ return pthread_rwlock_destroy(&lock->rwlock);
+}
+
+#ifdef __USE_LIBLOCKDEP
+
+#define pthread_rwlock_t liblockdep_pthread_rwlock_t
+#define pthread_rwlock_init liblockdep_pthread_rwlock_init
+#define pthread_rwlock_rdlock liblockdep_pthread_rwlock_rdlock
+#define pthread_rwlock_unlock liblockdep_pthread_rwlock_unlock
+#define pthread_rwlock_wrlock liblockdep_pthread_rwlock_wrlock
+#define pthread_rwlock_tryrdlock liblockdep_pthread_rwlock_tryrdlock
+#define pthread_rwlock_trywrlock liblockdep_pthread_rwlock_trywrlock
+#define pthread_rwlock_destroy liblockdep_rwlock_destroy
+
+#endif
+
+#endif
diff --git a/tools/lib/lockdep/lockdep b/tools/lib/lockdep/lockdep
new file mode 100755
index 0000000..49af9fe
--- /dev/null
+++ b/tools/lib/lockdep/lockdep
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+LD_PRELOAD="./liblockdep.so $LD_PRELOAD" "$@"
diff --git a/tools/lib/lockdep/lockdep.c b/tools/lib/lockdep/lockdep.c
new file mode 100644
index 0000000..f42b7e9
--- /dev/null
+++ b/tools/lib/lockdep/lockdep.c
@@ -0,0 +1,2 @@
+#include <linux/lockdep.h>
+#include "../../../kernel/locking/lockdep.c"
diff --git a/tools/lib/lockdep/lockdep_internals.h b/tools/lib/lockdep/lockdep_internals.h
new file mode 100644
index 0000000..29d0c95
--- /dev/null
+++ b/tools/lib/lockdep/lockdep_internals.h
@@ -0,0 +1 @@
+#include "../../../kernel/locking/lockdep_internals.h"
diff --git a/tools/lib/lockdep/lockdep_states.h b/tools/lib/lockdep/lockdep_states.h
new file mode 100644
index 0000000..248d235
--- /dev/null
+++ b/tools/lib/lockdep/lockdep_states.h
@@ -0,0 +1 @@
+#include "../../../kernel/locking/lockdep_states.h"
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
new file mode 100644
index 0000000..f8465a8
--- /dev/null
+++ b/tools/lib/lockdep/preload.c
@@ -0,0 +1,446 @@
+#define _GNU_SOURCE
+#include <pthread.h>
+#include <stdio.h>
+#include <dlfcn.h>
+#include <stdlib.h>
+#include <sysexits.h>
+#include "include/liblockdep/mutex.h"
+#include "../../../include/linux/rbtree.h"
+
+/**
+ * struct lock_lookup - liblockdep's view of a single unique lock
+ * @orig: pointer to the original pthread lock, used for lookups
+ * @dep_map: lockdep's dep_map structure
+ * @key: lockdep's key structure
+ * @node: rb-tree node used to store the lock in a global tree
+ * @name: a unique name for the lock
+ */
+struct lock_lookup {
+ void *orig; /* Original pthread lock, used for lookups */
+ struct lockdep_map dep_map; /* Since all locks are dynamic, we need
+ * a dep_map and a key for each lock */
+ /*
+ * Wait, there's no support for key classes? Yup :(
+ * Most big projects wrap the pthread API with their own calls to
+ * be compatible with different locking methods. This means that
+ * "classes" will be broken, since the function that creates all
+ * locks will point to a generic locking function instead of the
+ * actual code that wants to do the locking.
+ */
+ struct lock_class_key key;
+ struct rb_node node;
+#define LIBLOCKDEP_MAX_LOCK_NAME 22
+ char name[LIBLOCKDEP_MAX_LOCK_NAME];
+};
+
+/* This is where we store our locks */
+static struct rb_root locks = RB_ROOT;
+static pthread_rwlock_t locks_rwlock = PTHREAD_RWLOCK_INITIALIZER;
+
+/* pthread mutex API */
+
+#ifdef __GLIBC__
+extern int __pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
+extern int __pthread_mutex_lock(pthread_mutex_t *mutex);
+extern int __pthread_mutex_trylock(pthread_mutex_t *mutex);
+extern int __pthread_mutex_unlock(pthread_mutex_t *mutex);
+extern int __pthread_mutex_destroy(pthread_mutex_t *mutex);
+#else
+#define __pthread_mutex_init NULL
+#define __pthread_mutex_lock NULL
+#define __pthread_mutex_trylock NULL
+#define __pthread_mutex_unlock NULL
+#define __pthread_mutex_destroy NULL
+#endif
+static int (*ll_pthread_mutex_init)(pthread_mutex_t *mutex,
+ const pthread_mutexattr_t *attr) = __pthread_mutex_init;
+static int (*ll_pthread_mutex_lock)(pthread_mutex_t *mutex) = __pthread_mutex_lock;
+static int (*ll_pthread_mutex_trylock)(pthread_mutex_t *mutex) = __pthread_mutex_trylock;
+static int (*ll_pthread_mutex_unlock)(pthread_mutex_t *mutex) = __pthread_mutex_unlock;
+static int (*ll_pthread_mutex_destroy)(pthread_mutex_t *mutex) = __pthread_mutex_destroy;
+
+/* pthread rwlock API */
+
+#ifdef __GLIBC__
+extern int __pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
+extern int __pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
+extern int __pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
+extern int __pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
+extern int __pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
+extern int __pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
+extern int __pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
+#else
+#define __pthread_rwlock_init NULL
+#define __pthread_rwlock_destroy NULL
+#define __pthread_rwlock_wrlock NULL
+#define __pthread_rwlock_trywrlock NULL
+#define __pthread_rwlock_rdlock NULL
+#define __pthread_rwlock_tryrdlock NULL
+#define __pthread_rwlock_unlock NULL
+#endif
+
+static int (*ll_pthread_rwlock_init)(pthread_rwlock_t *rwlock,
+ const pthread_rwlockattr_t *attr) = __pthread_rwlock_init;
+static int (*ll_pthread_rwlock_destroy)(pthread_rwlock_t *rwlock) = __pthread_rwlock_destroy;
+static int (*ll_pthread_rwlock_rdlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_rdlock;
+static int (*ll_pthread_rwlock_tryrdlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_tryrdlock;
+static int (*ll_pthread_rwlock_trywrlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_trywrlock;
+static int (*ll_pthread_rwlock_wrlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_wrlock;
+static int (*ll_pthread_rwlock_unlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_unlock;
+
+enum { none, prepare, done, } __init_state;
+static void init_preload(void);
+static void try_init_preload(void)
+{
+ if (__init_state != done)
+ init_preload();
+}
+
+static struct rb_node **__get_lock_node(void *lock, struct rb_node **parent)
+{
+ struct rb_node **node = &locks.rb_node;
+ struct lock_lookup *l;
+
+ *parent = NULL;
+
+ while (*node) {
+ l = rb_entry(*node, struct lock_lookup, node);
+
+ *parent = *node;
+ if (lock < l->orig)
+ node = &l->node.rb_left;
+ else if (lock > l->orig)
+ node = &l->node.rb_right;
+ else
+ return node;
+ }
+
+ return node;
+}
+
+#ifndef LIBLOCKDEP_STATIC_ENTRIES
+#define LIBLOCKDEP_STATIC_ENTRIES 1024
+#endif
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+static struct lock_lookup __locks[LIBLOCKDEP_STATIC_ENTRIES];
+static int __locks_nr;
+
+static inline bool is_static_lock(struct lock_lookup *lock)
+{
+ return lock >= __locks && lock < __locks + ARRAY_SIZE(__locks);
+}
+
+static struct lock_lookup *alloc_lock(void)
+{
+ if (__init_state != done) {
+ /*
+ * Some programs attempt to initialize and use locks in their
+ * allocation path. This means that a call to malloc() would
+ * result in locks being initialized and locked.
+ *
+ * Why is this an issue for us? dlsym() below will try allocating
+ * memory to give us the original function. Since this allocation
+ * would result in a locking operation, we have to let pthread deal
+ * with it, but we can't! We don't have a pointer to the original
+ * API yet, since we're inside dlsym() trying to get it.
+ */
+
+ int idx = __locks_nr++;
+ if (idx >= ARRAY_SIZE(__locks)) {
+ fprintf(stderr,
+ "LOCKDEP error: insufficient LIBLOCKDEP_STATIC_ENTRIES\n");
+ exit(EX_UNAVAILABLE);
+ }
+ return __locks + idx;
+ }
+
+ return malloc(sizeof(struct lock_lookup));
+}
+
+static inline void free_lock(struct lock_lookup *lock)
+{
+ if (likely(!is_static_lock(lock)))
+ free(lock);
+}
+
+/**
+ * __get_lock - find or create a lock instance
+ * @lock: pointer to a pthread lock function
+ *
+ * Try to find an existing lock in the rbtree using the provided pointer. If
+ * one isn't found, create it.
+ */
+static struct lock_lookup *__get_lock(void *lock)
+{
+ struct rb_node **node, *parent;
+ struct lock_lookup *l;
+
+ ll_pthread_rwlock_rdlock(&locks_rwlock);
+ node = __get_lock_node(lock, &parent);
+ ll_pthread_rwlock_unlock(&locks_rwlock);
+ if (*node) {
+ return rb_entry(*node, struct lock_lookup, node);
+ }
+
+ /* We didn't find the lock, let's create it */
+ l = alloc_lock();
+ if (l == NULL)
+ return NULL;
+
+ l->orig = lock;
+ /*
+ * Currently the name of the lock is the pointer value of the pthread
+ * lock; while not optimal, this makes debugging a bit easier.
+ *
+ * TODO: Get the real name of the lock using libdwarf
+ */
+ sprintf(l->name, "%p", lock);
+ lockdep_init_map(&l->dep_map, l->name, &l->key, 0);
+
+ ll_pthread_rwlock_wrlock(&locks_rwlock);
+ /* This might have changed since the last time we fetched it */
+ node = __get_lock_node(lock, &parent);
+ rb_link_node(&l->node, parent, node);
+ rb_insert_color(&l->node, &locks);
+ ll_pthread_rwlock_unlock(&locks_rwlock);
+
+ return l;
+}
+
+static void __del_lock(struct lock_lookup *lock)
+{
+ ll_pthread_rwlock_wrlock(&locks_rwlock);
+ rb_erase(&lock->node, &locks);
+ ll_pthread_rwlock_unlock(&locks_rwlock);
+ free_lock(lock);
+}
+
+int pthread_mutex_init(pthread_mutex_t *mutex,
+ const pthread_mutexattr_t *attr)
+{
+ int r;
+
+ /*
+ * We keep trying to init our preload module because there might be
+ * code in init sections that tries to touch locks before we are
+ * initialized; in that case we'll need to manually call preload
+ * to get us going.
+ *
+ * Funnily enough, the kernel's lockdep had the same issue, and used
+ * (almost) the same solution. See look_up_lock_class() in
+ * kernel/locking/lockdep.c for details.
+ */
+ try_init_preload();
+
+ r = ll_pthread_mutex_init(mutex, attr);
+ if (r == 0)
+ /*
+ * We do a dummy initialization here so that lockdep could
+ * warn us if something fishy is going on - such as
+ * initializing a held lock.
+ */
+ __get_lock(mutex);
+
+ return r;
+}
+
+int pthread_mutex_lock(pthread_mutex_t *mutex)
+{
+ int r;
+
+ try_init_preload();
+
+ lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL,
+ (unsigned long)_RET_IP_);
+ /*
+ * Here's the thing with pthread mutexes: unlike the kernel variant,
+ * they can fail.
+ *
+ * This means that the behaviour here is a bit different from what's
+ * going on in the kernel: there we just tell lockdep that we took the
+ * lock before actually taking it, but here we must deal with the case
+ * that locking failed.
+ *
+ * To do that we'll "release" the lock if locking failed - this way
+ * we'll get lockdep doing the correct checks when we try to take
+ * the lock, and if that fails - we'll be back to the correct
+ * state by releasing it.
+ */
+ r = ll_pthread_mutex_lock(mutex);
+ if (r)
+ lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
+
+ return r;
+}
+
+int pthread_mutex_trylock(pthread_mutex_t *mutex)
+{
+ int r;
+
+ try_init_preload();
+
+ lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+ r = ll_pthread_mutex_trylock(mutex);
+ if (r)
+ lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
+
+ return r;
+}
+
+int pthread_mutex_unlock(pthread_mutex_t *mutex)
+{
+ int r;
+
+ try_init_preload();
+
+ lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
+ /*
+ * Just like taking a lock, only in reverse!
+ *
+ * If we fail releasing the lock, tell lockdep we're holding it again.
+ */
+ r = ll_pthread_mutex_unlock(mutex);
+ if (r)
+ lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+
+ return r;
+}
+
+int pthread_mutex_destroy(pthread_mutex_t *mutex)
+{
+ try_init_preload();
+
+ /*
+ * Let's see if we're releasing a lock that's held.
+ *
+ * TODO: Hook into free() and add that check there as well.
+ */
+ debug_check_no_locks_freed(mutex, sizeof(*mutex));
+ __del_lock(__get_lock(mutex));
+ return ll_pthread_mutex_destroy(mutex);
+}
+
+/* This is the rwlock part, very similar to what happened with mutex above */
+int pthread_rwlock_init(pthread_rwlock_t *rwlock,
+ const pthread_rwlockattr_t *attr)
+{
+ int r;
+
+ try_init_preload();
+
+ r = ll_pthread_rwlock_init(rwlock, attr);
+ if (r == 0)
+ __get_lock(rwlock);
+
+ return r;
+}
+
+int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
+{
+ try_init_preload();
+
+ debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
+ __del_lock(__get_lock(rwlock));
+ return ll_pthread_rwlock_destroy(rwlock);
+}
+
+int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
+{
+ int r;
+
+ init_preload();
+
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+ r = ll_pthread_rwlock_rdlock(rwlock);
+ if (r)
+ lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
+
+ return r;
+}
+
+int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
+{
+ int r;
+
+ init_preload();
+
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+ r = ll_pthread_rwlock_tryrdlock(rwlock);
+ if (r)
+ lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
+
+ return r;
+}
+
+int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
+{
+ int r;
+
+ init_preload();
+
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+ r = ll_pthread_rwlock_trywrlock(rwlock);
+ if (r)
+ lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
+
+ return r;
+}
+
+int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
+{
+ int r;
+
+ init_preload();
+
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+ r = ll_pthread_rwlock_wrlock(rwlock);
+ if (r)
+ lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
+
+ return r;
+}
+
+int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
+{
+ int r;
+
+ init_preload();
+
+ lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
+ r = ll_pthread_rwlock_unlock(rwlock);
+ if (r)
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+
+ return r;
+}
+
+__attribute__((constructor)) static void init_preload(void)
+{
+ if (__init_state == done)
+ return;
+
+#ifndef __GLIBC__
+ __init_state = prepare;
+
+ ll_pthread_mutex_init = dlsym(RTLD_NEXT, "pthread_mutex_init");
+ ll_pthread_mutex_lock = dlsym(RTLD_NEXT, "pthread_mutex_lock");
+ ll_pthread_mutex_trylock = dlsym(RTLD_NEXT, "pthread_mutex_trylock");
+ ll_pthread_mutex_unlock = dlsym(RTLD_NEXT, "pthread_mutex_unlock");
+ ll_pthread_mutex_destroy = dlsym(RTLD_NEXT, "pthread_mutex_destroy");
+
+ ll_pthread_rwlock_init = dlsym(RTLD_NEXT, "pthread_rwlock_init");
+ ll_pthread_rwlock_destroy = dlsym(RTLD_NEXT, "pthread_rwlock_destroy");
+ ll_pthread_rwlock_rdlock = dlsym(RTLD_NEXT, "pthread_rwlock_rdlock");
+ ll_pthread_rwlock_tryrdlock = dlsym(RTLD_NEXT, "pthread_rwlock_tryrdlock");
+ ll_pthread_rwlock_wrlock = dlsym(RTLD_NEXT, "pthread_rwlock_wrlock");
+ ll_pthread_rwlock_trywrlock = dlsym(RTLD_NEXT, "pthread_rwlock_trywrlock");
+ ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
+#endif
+
+
+ lockdep_init();
+
+ __init_state = done;
+}
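    preload.c relies on two techniques worth spelling out: the pthread entry
    points defined here shadow libc's when liblockdep.so is loaded via
    LD_PRELOAD (or linked in), and the original implementations are recovered
    with dlsym(RTLD_NEXT, ...); the static __locks pool only exists to survive
    the window in which dlsym() itself allocates memory and takes locks. A
    stripped-down sketch of that interposition pattern (illustrative only, not
    code from this patch; trace.so and real_lock are made-up names):

    /*
     * Sketch of the LD_PRELOAD interposition pattern used above.
     * Build:  gcc -shared -fPIC -o trace.so trace.c -ldl
     * Run:    LD_PRELOAD=./trace.so ./some_program
     */
    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <pthread.h>
    #include <stdio.h>

    static int (*real_lock)(pthread_mutex_t *);

    int pthread_mutex_lock(pthread_mutex_t *mutex)
    {
        if (!real_lock)
            real_lock = dlsym(RTLD_NEXT, "pthread_mutex_lock");

        fprintf(stderr, "lock %p\n", (void *)mutex);  /* instrumentation hook */
        return real_lock(mutex);
    }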
diff --git a/tools/lib/lockdep/rbtree.c b/tools/lib/lockdep/rbtree.c
new file mode 100644
index 0000000..f7f4303
--- /dev/null
+++ b/tools/lib/lockdep/rbtree.c
@@ -0,0 +1 @@
+#include "../../../lib/rbtree.c"
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
new file mode 100644
index 0000000..5334ad9
--- /dev/null
+++ b/tools/lib/lockdep/run_tests.sh
@@ -0,0 +1,27 @@
+#! /bin/bash
+
+make &> /dev/null
+
+for i in `ls tests/*.c`; do
+ testname=$(basename -s .c "$i")
+ gcc -o tests/$testname -pthread -lpthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null
+ echo -ne "$testname... "
+ if [ $(timeout 1 ./tests/$testname | wc -l) -gt 0 ]; then
+ echo "PASSED!"
+ else
+ echo "FAILED!"
+ fi
+ rm tests/$testname
+done
+
+for i in `ls tests/*.c`; do
+ testname=$(basename -s .c "$i")
+ gcc -o tests/$testname -pthread -lpthread -Iinclude $i &> /dev/null
+ echo -ne "(PRELOAD) $testname... "
+ if [ $(timeout 1 ./lockdep ./tests/$testname | wc -l) -gt 0 ]; then
+ echo "PASSED!"
+ else
+ echo "FAILED!"
+ fi
+ rm tests/$testname
+done
diff --git a/tools/lib/lockdep/tests/AA.c b/tools/lib/lockdep/tests/AA.c
new file mode 100644
index 0000000..0f782ff
--- /dev/null
+++ b/tools/lib/lockdep/tests/AA.c
@@ -0,0 +1,13 @@
+#include <liblockdep/mutex.h>
+
+void main(void)
+{
+ pthread_mutex_t a, b;
+
+ pthread_mutex_init(&a, NULL);
+ pthread_mutex_init(&b, NULL);
+
+ pthread_mutex_lock(&a);
+ pthread_mutex_lock(&b);
+ pthread_mutex_lock(&a);
+}
diff --git a/tools/lib/lockdep/tests/ABBA.c b/tools/lib/lockdep/tests/ABBA.c
new file mode 100644
index 0000000..07f0e29d
--- /dev/null
+++ b/tools/lib/lockdep/tests/ABBA.c
@@ -0,0 +1,13 @@
+#include <liblockdep/mutex.h>
+#include "common.h"
+
+void main(void)
+{
+ pthread_mutex_t a, b;
+
+ pthread_mutex_init(&a, NULL);
+ pthread_mutex_init(&b, NULL);
+
+ LOCK_UNLOCK_2(a, b);
+ LOCK_UNLOCK_2(b, a);
+}
diff --git a/tools/lib/lockdep/tests/ABBCCA.c b/tools/lib/lockdep/tests/ABBCCA.c
new file mode 100644
index 0000000..843db09
--- /dev/null
+++ b/tools/lib/lockdep/tests/ABBCCA.c
@@ -0,0 +1,15 @@
+#include <liblockdep/mutex.h>
+#include "common.h"
+
+void main(void)
+{
+ pthread_mutex_t a, b, c;
+
+ pthread_mutex_init(&a, NULL);
+ pthread_mutex_init(&b, NULL);
+ pthread_mutex_init(&c, NULL);
+
+ LOCK_UNLOCK_2(a, b);
+ LOCK_UNLOCK_2(b, c);
+ LOCK_UNLOCK_2(c, a);
+}
diff --git a/tools/lib/lockdep/tests/ABBCCDDA.c b/tools/lib/lockdep/tests/ABBCCDDA.c
new file mode 100644
index 0000000..33620e2
--- /dev/null
+++ b/tools/lib/lockdep/tests/ABBCCDDA.c
@@ -0,0 +1,17 @@
+#include <liblockdep/mutex.h>
+#include "common.h"
+
+void main(void)
+{
+ pthread_mutex_t a, b, c, d;
+
+ pthread_mutex_init(&a, NULL);
+ pthread_mutex_init(&b, NULL);
+ pthread_mutex_init(&c, NULL);
+ pthread_mutex_init(&d, NULL);
+
+ LOCK_UNLOCK_2(a, b);
+ LOCK_UNLOCK_2(b, c);
+ LOCK_UNLOCK_2(c, d);
+ LOCK_UNLOCK_2(d, a);
+}
diff --git a/tools/lib/lockdep/tests/ABCABC.c b/tools/lib/lockdep/tests/ABCABC.c
new file mode 100644
index 0000000..3fee51e
--- /dev/null
+++ b/tools/lib/lockdep/tests/ABCABC.c
@@ -0,0 +1,15 @@
+#include <liblockdep/mutex.h>
+#include "common.h"
+
+void main(void)
+{
+ pthread_mutex_t a, b, c;
+
+ pthread_mutex_init(&a, NULL);
+ pthread_mutex_init(&b, NULL);
+ pthread_mutex_init(&c, NULL);
+
+ LOCK_UNLOCK_2(a, b);
+ LOCK_UNLOCK_2(c, a);
+ LOCK_UNLOCK_2(b, c);
+}
diff --git a/tools/lib/lockdep/tests/ABCDBCDA.c b/tools/lib/lockdep/tests/ABCDBCDA.c
new file mode 100644
index 0000000..427ba56
--- /dev/null
+++ b/tools/lib/lockdep/tests/ABCDBCDA.c
@@ -0,0 +1,17 @@
+#include <liblockdep/mutex.h>
+#include "common.h"
+
+void main(void)
+{
+ pthread_mutex_t a, b, c, d;
+
+ pthread_mutex_init(&a, NULL);
+ pthread_mutex_init(&b, NULL);
+ pthread_mutex_init(&c, NULL);
+ pthread_mutex_init(&d, NULL);
+
+ LOCK_UNLOCK_2(a, b);
+ LOCK_UNLOCK_2(c, d);
+ LOCK_UNLOCK_2(b, c);
+ LOCK_UNLOCK_2(d, a);
+}
diff --git a/tools/lib/lockdep/tests/ABCDBDDA.c b/tools/lib/lockdep/tests/ABCDBDDA.c
new file mode 100644
index 0000000..680c6cf
--- /dev/null
+++ b/tools/lib/lockdep/tests/ABCDBDDA.c
@@ -0,0 +1,17 @@
+#include <liblockdep/mutex.h>
+#include "common.h"
+
+void main(void)
+{
+ pthread_mutex_t a, b, c, d;
+
+ pthread_mutex_init(&a, NULL);
+ pthread_mutex_init(&b, NULL);
+ pthread_mutex_init(&c, NULL);
+ pthread_mutex_init(&d, NULL);
+
+ LOCK_UNLOCK_2(a, b);
+ LOCK_UNLOCK_2(c, d);
+ LOCK_UNLOCK_2(b, d);
+ LOCK_UNLOCK_2(d, a);
+}
diff --git a/tools/lib/lockdep/tests/WW.c b/tools/lib/lockdep/tests/WW.c
new file mode 100644
index 0000000..d44f77d
--- /dev/null
+++ b/tools/lib/lockdep/tests/WW.c
@@ -0,0 +1,13 @@
+#include <liblockdep/rwlock.h>
+
+void main(void)
+{
+ pthread_rwlock_t a, b;
+
+ pthread_rwlock_init(&a, NULL);
+ pthread_rwlock_init(&b, NULL);
+
+ pthread_rwlock_wrlock(&a);
+ pthread_rwlock_rdlock(&b);
+ pthread_rwlock_wrlock(&a);
+}
diff --git a/tools/lib/lockdep/tests/common.h b/tools/lib/lockdep/tests/common.h
new file mode 100644
index 0000000..d89e94d
--- /dev/null
+++ b/tools/lib/lockdep/tests/common.h
@@ -0,0 +1,12 @@
+#ifndef _LIBLOCKDEP_TEST_COMMON_H
+#define _LIBLOCKDEP_TEST_COMMON_H
+
+#define LOCK_UNLOCK_2(a, b) \
+ do { \
+ pthread_mutex_lock(&(a)); \
+ pthread_mutex_lock(&(b)); \
+ pthread_mutex_unlock(&(b)); \
+ pthread_mutex_unlock(&(a)); \
+ } while(0)
+
+#endif
diff --git a/tools/lib/lockdep/tests/unlock_balance.c b/tools/lib/lockdep/tests/unlock_balance.c
new file mode 100644
index 0000000..0bc62de
--- /dev/null
+++ b/tools/lib/lockdep/tests/unlock_balance.c
@@ -0,0 +1,12 @@
+#include <liblockdep/mutex.h>
+
+void main(void)
+{
+ pthread_mutex_t a;
+
+ pthread_mutex_init(&a, NULL);
+
+ pthread_mutex_lock(&a);
+ pthread_mutex_unlock(&a);
+ pthread_mutex_unlock(&a);
+}
diff --git a/tools/lib/lockdep/uinclude/asm/hweight.h b/tools/lib/lockdep/uinclude/asm/hweight.h
new file mode 100644
index 0000000..fab00ff
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/asm/hweight.h
@@ -0,0 +1,3 @@
+
+/* empty file */
+
diff --git a/tools/lib/lockdep/uinclude/asm/sections.h b/tools/lib/lockdep/uinclude/asm/sections.h
new file mode 100644
index 0000000..fab00ff
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/asm/sections.h
@@ -0,0 +1,3 @@
+
+/* empty file */
+
diff --git a/tools/lib/lockdep/uinclude/linux/bitops.h b/tools/lib/lockdep/uinclude/linux/bitops.h
new file mode 100644
index 0000000..fab00ff
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/bitops.h
@@ -0,0 +1,3 @@
+
+/* empty file */
+
diff --git a/tools/lib/lockdep/uinclude/linux/compiler.h b/tools/lib/lockdep/uinclude/linux/compiler.h
new file mode 100644
index 0000000..7ac838a
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/compiler.h
@@ -0,0 +1,7 @@
+#ifndef _LIBLOCKDEP_LINUX_COMPILER_H_
+#define _LIBLOCKDEP_LINUX_COMPILER_H_
+
+#define __used __attribute__((__unused__))
+#define unlikely
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/debug_locks.h b/tools/lib/lockdep/uinclude/linux/debug_locks.h
new file mode 100644
index 0000000..f38eb64
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/debug_locks.h
@@ -0,0 +1,12 @@
+#ifndef _LIBLOCKDEP_DEBUG_LOCKS_H_
+#define _LIBLOCKDEP_DEBUG_LOCKS_H_
+
+#include <stddef.h>
+#include <linux/compiler.h>
+
+#define DEBUG_LOCKS_WARN_ON(x) (x)
+
+extern bool debug_locks;
+extern bool debug_locks_silent;
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/delay.h b/tools/lib/lockdep/uinclude/linux/delay.h
new file mode 100644
index 0000000..fab00ff
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/delay.h
@@ -0,0 +1,3 @@
+
+/* empty file */
+
diff --git a/tools/lib/lockdep/uinclude/linux/export.h b/tools/lib/lockdep/uinclude/linux/export.h
new file mode 100644
index 0000000..6bdf349
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/export.h
@@ -0,0 +1,7 @@
+#ifndef _LIBLOCKDEP_LINUX_EXPORT_H_
+#define _LIBLOCKDEP_LINUX_EXPORT_H_
+
+#define EXPORT_SYMBOL(sym)
+#define EXPORT_SYMBOL_GPL(sym)
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/ftrace.h b/tools/lib/lockdep/uinclude/linux/ftrace.h
new file mode 100644
index 0000000..fab00ff
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/ftrace.h
@@ -0,0 +1,3 @@
+
+/* empty file */
+
diff --git a/tools/lib/lockdep/uinclude/linux/gfp.h b/tools/lib/lockdep/uinclude/linux/gfp.h
new file mode 100644
index 0000000..fab00ff
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/gfp.h
@@ -0,0 +1,3 @@
+
+/* empty file */
+
diff --git a/tools/lib/lockdep/uinclude/linux/hardirq.h b/tools/lib/lockdep/uinclude/linux/hardirq.h
new file mode 100644
index 0000000..c8f3f8f
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/hardirq.h
@@ -0,0 +1,11 @@
+#ifndef _LIBLOCKDEP_LINUX_HARDIRQ_H_
+#define _LIBLOCKDEP_LINUX_HARDIRQ_H_
+
+#define SOFTIRQ_BITS 0UL
+#define HARDIRQ_BITS 0UL
+#define SOFTIRQ_SHIFT 0UL
+#define HARDIRQ_SHIFT 0UL
+#define hardirq_count() 0UL
+#define softirq_count() 0UL
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/hash.h b/tools/lib/lockdep/uinclude/linux/hash.h
new file mode 100644
index 0000000..0f84798
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/hash.h
@@ -0,0 +1 @@
+#include "../../../include/linux/hash.h"
diff --git a/tools/lib/lockdep/uinclude/linux/interrupt.h b/tools/lib/lockdep/uinclude/linux/interrupt.h
new file mode 100644
index 0000000..fab00ff
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/interrupt.h
@@ -0,0 +1,3 @@
+
+/* empty file */
+
diff --git a/tools/lib/lockdep/uinclude/linux/irqflags.h b/tools/lib/lockdep/uinclude/linux/irqflags.h
new file mode 100644
index 0000000..6cc296f
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/irqflags.h
@@ -0,0 +1,38 @@
+#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
+#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
+
+# define trace_hardirq_context(p) 0
+# define trace_softirq_context(p) 0
+# define trace_hardirqs_enabled(p) 0
+# define trace_softirqs_enabled(p) 0
+# define trace_hardirq_enter() do { } while (0)
+# define trace_hardirq_exit() do { } while (0)
+# define lockdep_softirq_enter() do { } while (0)
+# define lockdep_softirq_exit() do { } while (0)
+# define INIT_TRACE_IRQFLAGS
+
+# define stop_critical_timings() do { } while (0)
+# define start_critical_timings() do { } while (0)
+
+#define raw_local_irq_disable() do { } while (0)
+#define raw_local_irq_enable() do { } while (0)
+#define raw_local_irq_save(flags) ((flags) = 0)
+#define raw_local_irq_restore(flags) do { } while (0)
+#define raw_local_save_flags(flags) ((flags) = 0)
+#define raw_irqs_disabled_flags(flags) do { } while (0)
+#define raw_irqs_disabled() 0
+#define raw_safe_halt()
+
+#define local_irq_enable() do { } while (0)
+#define local_irq_disable() do { } while (0)
+#define local_irq_save(flags) ((flags) = 0)
+#define local_irq_restore(flags) do { } while (0)
+#define local_save_flags(flags) ((flags) = 0)
+#define irqs_disabled() (1)
+#define irqs_disabled_flags(flags) (0)
+#define safe_halt() do { } while (0)
+
+#define trace_lock_release(x, y)
+#define trace_lock_acquire(a, b, c, d, e, f, g)
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/kallsyms.h b/tools/lib/lockdep/uinclude/linux/kallsyms.h
new file mode 100644
index 0000000..b0f2dbd
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/kallsyms.h
@@ -0,0 +1,32 @@
+#ifndef _LIBLOCKDEP_LINUX_KALLSYMS_H_
+#define _LIBLOCKDEP_LINUX_KALLSYMS_H_
+
+#include <linux/kernel.h>
+#include <stdio.h>
+
+#define KSYM_NAME_LEN 128
+
+struct module;
+
+static inline const char *kallsyms_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname, char *namebuf)
+{
+ return NULL;
+}
+
+#include <execinfo.h>
+#include <stdlib.h>
+static inline void print_ip_sym(unsigned long ip)
+{
+ char **name;
+
+ name = backtrace_symbols((void **)&ip, 1);
+
+ printf("%s\n", *name);
+
+ free(name);
+}
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/kern_levels.h b/tools/lib/lockdep/uinclude/linux/kern_levels.h
new file mode 100644
index 0000000..3b9bade
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/kern_levels.h
@@ -0,0 +1,25 @@
+#ifndef __KERN_LEVELS_H__
+#define __KERN_LEVELS_H__
+
+#define KERN_SOH "" /* ASCII Start Of Header */
+#define KERN_SOH_ASCII ''
+
+#define KERN_EMERG KERN_SOH "" /* system is unusable */
+#define KERN_ALERT KERN_SOH "" /* action must be taken immediately */
+#define KERN_CRIT KERN_SOH "" /* critical conditions */
+#define KERN_ERR KERN_SOH "" /* error conditions */
+#define KERN_WARNING KERN_SOH "" /* warning conditions */
+#define KERN_NOTICE KERN_SOH "" /* normal but significant condition */
+#define KERN_INFO KERN_SOH "" /* informational */
+#define KERN_DEBUG KERN_SOH "" /* debug-level messages */
+
+#define KERN_DEFAULT KERN_SOH "" /* the default kernel loglevel */
+
+/*
+ * Annotation for a "continued" line of log printout (only done after a
+ * line that had no enclosing \n). Only to be used by core/arch code
+ * during early bootup (a continued line is not SMP-safe otherwise).
+ */
+#define KERN_CONT ""
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/kernel.h b/tools/lib/lockdep/uinclude/linux/kernel.h
new file mode 100644
index 0000000..a11e3c3
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/kernel.h
@@ -0,0 +1,44 @@
+#ifndef _LIBLOCKDEP_LINUX_KERNEL_H_
+#define _LIBLOCKDEP_LINUX_KERNEL_H_
+
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/rcu.h>
+#include <linux/hardirq.h>
+#include <linux/kern_levels.h>
+
+#ifndef container_of
+#define container_of(ptr, type, member) ({ \
+ const typeof(((type *)0)->member) * __mptr = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+#endif
+
+#define max(x, y) ({ \
+ typeof(x) _max1 = (x); \
+ typeof(y) _max2 = (y); \
+ (void) (&_max1 == &_max2); \
+ _max1 > _max2 ? _max1 : _max2; })
+
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#define WARN_ON(x) (x)
+#define WARN_ON_ONCE(x) (x)
+#define likely(x) (x)
+#define WARN(x, y, z) (x)
+#define uninitialized_var(x) x
+#define __init
+#define noinline
+#define list_add_tail_rcu list_add_tail
+
+#ifndef CALLER_ADDR0
+#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#endif
+
+#ifndef _RET_IP_
+#define _RET_IP_ CALLER_ADDR0
+#endif
+
+#ifndef _THIS_IP_
+#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+#endif
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/kmemcheck.h b/tools/lib/lockdep/uinclude/linux/kmemcheck.h
new file mode 100644
index 0000000..94d598b
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/kmemcheck.h
@@ -0,0 +1,8 @@
+#ifndef _LIBLOCKDEP_LINUX_KMEMCHECK_H_
+#define _LIBLOCKDEP_LINUX_KMEMCHECK_H_
+
+static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
+{
+}
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/linkage.h b/tools/lib/lockdep/uinclude/linux/linkage.h
new file mode 100644
index 0000000..fab00ff
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/linkage.h
@@ -0,0 +1,3 @@
+
+/* empty file */
+
diff --git a/tools/lib/lockdep/uinclude/linux/list.h b/tools/lib/lockdep/uinclude/linux/list.h
new file mode 100644
index 0000000..6e9ef31
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/list.h
@@ -0,0 +1 @@
+#include "../../../include/linux/list.h"
diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h
new file mode 100644
index 0000000..d0f5d6e
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/lockdep.h
@@ -0,0 +1,55 @@
+#ifndef _LIBLOCKDEP_LOCKDEP_H_
+#define _LIBLOCKDEP_LOCKDEP_H_
+
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#include <string.h>
+#include <limits.h>
+#include <linux/utsname.h>
+
+
+#define MAX_LOCK_DEPTH 2000UL
+
+#include "../../../include/linux/lockdep.h"
+
+struct task_struct {
+ u64 curr_chain_key;
+ int lockdep_depth;
+ unsigned int lockdep_recursion;
+ struct held_lock held_locks[MAX_LOCK_DEPTH];
+ gfp_t lockdep_reclaim_gfp;
+ int pid;
+ char comm[17];
+};
+
+extern struct task_struct *__curr(void);
+
+#define current (__curr())
+
+#define debug_locks_off() 1
+#define task_pid_nr(tsk) ((tsk)->pid)
+
+#define KSYM_NAME_LEN 128
+#define printk printf
+
+#define list_del_rcu list_del
+
+#define atomic_t unsigned long
+#define atomic_inc(x) ((*(x))++)
+
+static struct new_utsname *init_utsname(void)
+{
+ static struct new_utsname n = (struct new_utsname) {
+ .release = "liblockdep",
+ .version = LIBLOCKDEP_VERSION,
+ };
+
+ return &n;
+}
+
+#define print_tainted() ""
+#define static_obj(x) 1
+
+#define debug_show_all_locks()
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/module.h b/tools/lib/lockdep/uinclude/linux/module.h
new file mode 100644
index 0000000..09c7a7b
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/module.h
@@ -0,0 +1,6 @@
+#ifndef _LIBLOCKDEP_LINUX_MODULE_H_
+#define _LIBLOCKDEP_LINUX_MODULE_H_
+
+#define module_param(name, type, perm)
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/mutex.h b/tools/lib/lockdep/uinclude/linux/mutex.h
new file mode 100644
index 0000000..fab00ff
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/mutex.h
@@ -0,0 +1,3 @@
+
+/* empty file */
+
diff --git a/tools/lib/lockdep/uinclude/linux/poison.h b/tools/lib/lockdep/uinclude/linux/poison.h
new file mode 100644
index 0000000..0c27bdf
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/poison.h
@@ -0,0 +1 @@
+#include "../../../include/linux/poison.h"
diff --git a/tools/lib/lockdep/uinclude/linux/prefetch.h b/tools/lib/lockdep/uinclude/linux/prefetch.h
new file mode 100644
index 0000000..d73fe6f
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/prefetch.h
@@ -0,0 +1,6 @@
+#ifndef _LIBLOCKDEP_LINUX_PREFETCH_H_
+#define _LIBLOCKDEP_LINUX_PREFETCH_H_
+
+static inline void prefetch(void *a __attribute__((unused))) { }
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/proc_fs.h b/tools/lib/lockdep/uinclude/linux/proc_fs.h
new file mode 100644
index 0000000..fab00ff
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/proc_fs.h
@@ -0,0 +1,3 @@
+
+/* empty file */
+
diff --git a/tools/lib/lockdep/uinclude/linux/rbtree.h b/tools/lib/lockdep/uinclude/linux/rbtree.h
new file mode 100644
index 0000000..965901d
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/rbtree.h
@@ -0,0 +1 @@
+#include "../../../include/linux/rbtree.h"
diff --git a/tools/lib/lockdep/uinclude/linux/rbtree_augmented.h b/tools/lib/lockdep/uinclude/linux/rbtree_augmented.h
new file mode 100644
index 0000000..c375947
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/rbtree_augmented.h
@@ -0,0 +1,2 @@
+#define __always_inline
+#include "../../../include/linux/rbtree_augmented.h"
diff --git a/tools/lib/lockdep/uinclude/linux/rcu.h b/tools/lib/lockdep/uinclude/linux/rcu.h
new file mode 100644
index 0000000..4c99fcb
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/rcu.h
@@ -0,0 +1,16 @@
+#ifndef _LIBLOCKDEP_RCU_H_
+#define _LIBLOCKDEP_RCU_H_
+
+int rcu_scheduler_active;
+
+static inline int rcu_lockdep_current_cpu_online(void)
+{
+ return 1;
+}
+
+static inline int rcu_is_cpu_idle(void)
+{
+ return 1;
+}
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/seq_file.h b/tools/lib/lockdep/uinclude/linux/seq_file.h
new file mode 100644
index 0000000..fab00ff
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/seq_file.h
@@ -0,0 +1,3 @@
+
+/* empty file */
+
diff --git a/tools/lib/lockdep/uinclude/linux/spinlock.h b/tools/lib/lockdep/uinclude/linux/spinlock.h
new file mode 100644
index 0000000..68c1aa2
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/spinlock.h
@@ -0,0 +1,25 @@
+#ifndef _LIBLOCKDEP_SPINLOCK_H_
+#define _LIBLOCKDEP_SPINLOCK_H_
+
+#include <pthread.h>
+#include <stdbool.h>
+
+#define arch_spinlock_t pthread_mutex_t
+#define __ARCH_SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
+
+static inline void arch_spin_lock(arch_spinlock_t *mutex)
+{
+ pthread_mutex_lock(mutex);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *mutex)
+{
+ pthread_mutex_unlock(mutex);
+}
+
+static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
+{
+ return true;
+}
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/stacktrace.h b/tools/lib/lockdep/uinclude/linux/stacktrace.h
new file mode 100644
index 0000000..39aecc6
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/stacktrace.h
@@ -0,0 +1,32 @@
+#ifndef _LIBLOCKDEP_LINUX_STACKTRACE_H_
+#define _LIBLOCKDEP_LINUX_STACKTRACE_H_
+
+#include <execinfo.h>
+
+struct stack_trace {
+ unsigned int nr_entries, max_entries;
+ unsigned long *entries;
+ int skip;
+};
+
+static inline void print_stack_trace(struct stack_trace *trace, int spaces)
+{
+ backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1);
+}
+
+#define save_stack_trace(trace) \
+ ((trace)->nr_entries = \
+ backtrace((void **)(trace)->entries, (trace)->max_entries))
+
+static inline int dump_stack(void)
+{
+ void *array[64];
+ size_t size;
+
+ size = backtrace(array, 64);
+ backtrace_symbols_fd(array, size, 1);
+
+ return 0;
+}
+
+#endif
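    This shim maps the kernel's stack_trace interface onto glibc's
    <execinfo.h>. As a quick standalone illustration of that underlying API
    (not part of the patch), capturing and printing the current call chain
    looks roughly like this:

    /* Sketch: the execinfo calls the shim above is built on. */
    #include <execinfo.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void show_backtrace(void)
    {
        void *addrs[64];
        char **syms;
        int i, n;

        n = backtrace(addrs, 64);           /* collect return addresses */
        syms = backtrace_symbols(addrs, n); /* resolve them to strings  */
        for (i = 0; i < n; i++)
            printf("  %s\n", syms[i]);
        free(syms);
    }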
diff --git a/tools/lib/lockdep/uinclude/linux/stringify.h b/tools/lib/lockdep/uinclude/linux/stringify.h
new file mode 100644
index 0000000..05dfcd1
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/stringify.h
@@ -0,0 +1,7 @@
+#ifndef _LIBLOCKDEP_LINUX_STRINGIFY_H_
+#define _LIBLOCKDEP_LINUX_STRINGIFY_H_
+
+#define __stringify_1(x...) #x
+#define __stringify(x...) __stringify_1(x)
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/linux/types.h b/tools/lib/lockdep/uinclude/linux/types.h
new file mode 100644
index 0000000..929938f
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/linux/types.h
@@ -0,0 +1,58 @@
+#ifndef _LIBLOCKDEP_LINUX_TYPES_H_
+#define _LIBLOCKDEP_LINUX_TYPES_H_
+
+#include <stdbool.h>
+#include <stddef.h>
+
+#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */
+#include <asm/types.h>
+
+struct page;
+struct kmem_cache;
+
+typedef unsigned gfp_t;
+
+typedef __u64 u64;
+typedef __s64 s64;
+
+typedef __u32 u32;
+typedef __s32 s32;
+
+typedef __u16 u16;
+typedef __s16 s16;
+
+typedef __u8 u8;
+typedef __s8 s8;
+
+#ifdef __CHECKER__
+#define __bitwise__ __attribute__((bitwise))
+#else
+#define __bitwise__
+#endif
+#ifdef __CHECK_ENDIAN__
+#define __bitwise __bitwise__
+#else
+#define __bitwise
+#endif
+
+
+typedef __u16 __bitwise __le16;
+typedef __u16 __bitwise __be16;
+typedef __u32 __bitwise __le32;
+typedef __u32 __bitwise __be32;
+typedef __u64 __bitwise __le64;
+typedef __u64 __bitwise __be64;
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+#endif
diff --git a/tools/lib/lockdep/uinclude/trace/events/lock.h b/tools/lib/lockdep/uinclude/trace/events/lock.h
new file mode 100644
index 0000000..fab00ff
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/trace/events/lock.h
@@ -0,0 +1,3 @@
+
+/* empty file */
+
diff --git a/tools/lib/symbol/kallsyms.c b/tools/lib/symbol/kallsyms.c
new file mode 100644
index 0000000..18bc271
--- /dev/null
+++ b/tools/lib/symbol/kallsyms.c
@@ -0,0 +1,58 @@
+#include "symbol/kallsyms.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+int kallsyms__parse(const char *filename, void *arg,
+ int (*process_symbol)(void *arg, const char *name,
+ char type, u64 start))
+{
+ char *line = NULL;
+ size_t n;
+ int err = -1;
+ FILE *file = fopen(filename, "r");
+
+ if (file == NULL)
+ goto out_failure;
+
+ err = 0;
+
+ while (!feof(file)) {
+ u64 start;
+ int line_len, len;
+ char symbol_type;
+ char *symbol_name;
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0 || !line)
+ break;
+
+ line[--line_len] = '\0'; /* \n */
+
+ len = hex2u64(line, &start);
+
+ len++;
+ if (len + 2 >= line_len)
+ continue;
+
+ symbol_type = line[len];
+ len += 2;
+ symbol_name = line + len;
+ len = line_len - len;
+
+ if (len >= KSYM_NAME_LEN) {
+ err = -1;
+ break;
+ }
+
+ err = process_symbol(arg, symbol_name, symbol_type, start);
+ if (err)
+ break;
+ }
+
+ free(line);
+ fclose(file);
+ return err;
+
+out_failure:
+ return -1;
+}
diff --git a/tools/lib/symbol/kallsyms.h b/tools/lib/symbol/kallsyms.h
new file mode 100644
index 0000000..6084f5e
--- /dev/null
+++ b/tools/lib/symbol/kallsyms.h
@@ -0,0 +1,24 @@
+#ifndef __TOOLS_KALLSYMS_H_
+#define __TOOLS_KALLSYMS_H_ 1
+
+#include <elf.h>
+#include <linux/ctype.h>
+#include <linux/types.h>
+
+#ifndef KSYM_NAME_LEN
+#define KSYM_NAME_LEN 256
+#endif
+
+static inline u8 kallsyms2elf_type(char type)
+{
+ if (type == 'W')
+ return STB_WEAK;
+
+ return isupper(type) ? STB_GLOBAL : STB_LOCAL;
+}
+
+int kallsyms__parse(const char *filename, void *arg,
+ int (*process_symbol)(void *arg, const char *name,
+ char type, u64 start));
+
+#endif /* __TOOLS_KALLSYMS_H_ */
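    A hedged usage sketch for the new helper (not part of the patch):
    kallsyms__parse() walks a kallsyms-format file and hands each symbol to the
    callback, so dumping /proc/kallsyms reduces to a few lines. print_sym is an
    invented example name, and the includes assume the tools/lib and
    tools/include directories are on the compiler's search path:

    #include <stdio.h>
    #include <symbol/kallsyms.h>

    static int print_sym(void *arg, const char *name, char type, u64 start)
    {
        (void)arg;  /* unused cookie */
        printf("%016llx %c %s\n", (unsigned long long)start, type, name);
        return 0;   /* a non-zero return would stop the walk */
    }

    int main(void)
    {
        return kallsyms__parse("/proc/kallsyms", NULL, print_sym);
    }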
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index fc15020..56d52a3 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -43,6 +43,32 @@ man_dir_SQ = '$(subst ','\'',$(man_dir))'
export man_dir man_dir_SQ INSTALL
export DESTDIR DESTDIR_SQ
+set_plugin_dir := 1
+
+# Set plugin_dir to the preferred global plugin location.
+# If we install under the $HOME directory we go under
+# $(HOME)/.traceevent/plugins
+#
+# We don't set PLUGIN_DIR when installing under the $HOME
+# directory, because the code already looks under
+# $(HOME)/.traceevent/plugins by default.
+#
+ifeq ($(plugin_dir),)
+ifeq ($(prefix),$(HOME))
+override plugin_dir = $(HOME)/.traceevent/plugins
+set_plugin_dir := 0
+else
+override plugin_dir = $(prefix)/lib/traceevent/plugins
+endif
+endif
+
+ifeq ($(set_plugin_dir),1)
+PLUGIN_DIR = -DPLUGIN_DIR="$(DESTDIR)/$(plugin_dir)"
+PLUGIN_DIR_SQ = '$(subst ','\'',$(PLUGIN_DIR))'
+endif
+
+include $(if $(BUILD_SRC),$(BUILD_SRC)/)../../scripts/Makefile.include
+
# copy a bit from Linux kbuild
ifeq ("$(origin V)", "command line")
@@ -57,18 +83,13 @@ ifeq ("$(origin O)", "command line")
endif
ifeq ($(BUILD_SRC),)
-ifneq ($(BUILD_OUTPUT),)
+ifneq ($(OUTPUT),)
define build_output
- $(if $(VERBOSE:1=),@)+$(MAKE) -C $(BUILD_OUTPUT) \
- BUILD_SRC=$(CURDIR) -f $(CURDIR)/Makefile $1
+ $(if $(VERBOSE:1=),@)+$(MAKE) -C $(OUTPUT) \
+ BUILD_SRC=$(CURDIR)/ -f $(CURDIR)/Makefile $1
endef
-saved-output := $(BUILD_OUTPUT)
-BUILD_OUTPUT := $(shell cd $(BUILD_OUTPUT) && /bin/pwd)
-$(if $(BUILD_OUTPUT),, \
- $(error output directory "$(saved-output)" does not exist))
-
all: sub-make
$(MAKECMDGOALS): sub-make
@@ -80,7 +101,7 @@ sub-make: force
# Leave processing to above invocation of make
skip-makefile := 1
-endif # BUILD_OUTPUT
+endif # OUTPUT
endif # BUILD_SRC
# We process the rest of the Makefile if this is the final invocation of make
@@ -96,6 +117,7 @@ export prefix bindir src obj
# Shell quotes
bindir_SQ = $(subst ','\'',$(bindir))
bindir_relative_SQ = $(subst ','\'',$(bindir_relative))
+plugin_dir_SQ = $(subst ','\'',$(plugin_dir))
LIB_FILE = libtraceevent.a libtraceevent.so
@@ -114,7 +136,7 @@ export Q VERBOSE
EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)
-INCLUDES = -I. $(CONFIG_INCLUDES)
+INCLUDES = -I. -I $(srctree)/../../include $(CONFIG_INCLUDES)
# Set compile option CFLAGS if not set elsewhere
CFLAGS ?= -g -Wall
@@ -125,41 +147,14 @@ override CFLAGS += $(udis86-flags) -D_GNU_SOURCE
ifeq ($(VERBOSE),1)
Q =
- print_compile =
- print_app_build =
- print_fpic_compile =
- print_shared_lib_compile =
- print_plugin_obj_compile =
- print_plugin_build =
- print_install =
else
Q = @
- print_compile = echo ' CC '$(OBJ);
- print_app_build = echo ' BUILD '$(OBJ);
- print_fpic_compile = echo ' CC FPIC '$(OBJ);
- print_shared_lib_compile = echo ' BUILD SHARED LIB '$(OBJ);
- print_plugin_obj_compile = echo ' BUILD PLUGIN OBJ '$(OBJ);
- print_plugin_build = echo ' BUILD PLUGIN '$(OBJ);
- print_static_lib_build = echo ' BUILD STATIC LIB '$(OBJ);
- print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2';
endif
-do_fpic_compile = \
- ($(print_fpic_compile) \
- $(CC) -c $(CFLAGS) $(EXT) -fPIC $< -o $@)
-
-do_app_build = \
- ($(print_app_build) \
- $(CC) $^ -rdynamic -o $@ $(CONFIG_LIBS) $(LIBS))
-
do_compile_shared_library = \
($(print_shared_lib_compile) \
$(CC) --shared $^ -o $@)
-do_compile_plugin_obj = \
- ($(print_plugin_obj_compile) \
- $(CC) -c $(CFLAGS) -fPIC -o $@ $<)
-
do_plugin_build = \
($(print_plugin_build) \
$(CC) $(CFLAGS) -shared -nostartfiles -o $@ $<)
@@ -169,23 +164,37 @@ do_build_static_lib = \
$(RM) $@; $(AR) rcs $@ $^)
-define do_compile
- $(print_compile) \
- $(CC) -c $(CFLAGS) $(EXT) $< -o $(obj)/$@;
-endef
+do_compile = $(QUIET_CC)$(CC) -c $(CFLAGS) $(EXT) $< -o $(obj)/$@;
$(obj)/%.o: $(src)/%.c
- $(Q)$(call do_compile)
+ $(call do_compile)
%.o: $(src)/%.c
- $(Q)$(call do_compile)
+ $(call do_compile)
-PEVENT_LIB_OBJS = event-parse.o trace-seq.o parse-filter.o parse-utils.o
+PEVENT_LIB_OBJS = event-parse.o
+PEVENT_LIB_OBJS += event-plugin.o
+PEVENT_LIB_OBJS += trace-seq.o
+PEVENT_LIB_OBJS += parse-filter.o
+PEVENT_LIB_OBJS += parse-utils.o
PEVENT_LIB_OBJS += kbuffer-parse.o
-ALL_OBJS = $(PEVENT_LIB_OBJS)
+PLUGIN_OBJS = plugin_jbd2.o
+PLUGIN_OBJS += plugin_hrtimer.o
+PLUGIN_OBJS += plugin_kmem.o
+PLUGIN_OBJS += plugin_kvm.o
+PLUGIN_OBJS += plugin_mac80211.o
+PLUGIN_OBJS += plugin_sched_switch.o
+PLUGIN_OBJS += plugin_function.o
+PLUGIN_OBJS += plugin_xen.o
+PLUGIN_OBJS += plugin_scsi.o
+PLUGIN_OBJS += plugin_cfg80211.o
+
+PLUGINS := $(PLUGIN_OBJS:.o=.so)
+
+ALL_OBJS = $(PEVENT_LIB_OBJS) $(PLUGIN_OBJS)
-CMD_TARGETS = $(LIB_FILE)
+CMD_TARGETS = $(LIB_FILE) $(PLUGINS)
TARGETS = $(CMD_TARGETS)
@@ -195,32 +204,40 @@ all: all_cmd
all_cmd: $(CMD_TARGETS)
libtraceevent.so: $(PEVENT_LIB_OBJS)
- $(Q)$(do_compile_shared_library)
+ $(QUIET_LINK)$(CC) --shared $^ -o $@
libtraceevent.a: $(PEVENT_LIB_OBJS)
- $(Q)$(do_build_static_lib)
+ $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
+
+plugins: $(PLUGINS)
$(PEVENT_LIB_OBJS): %.o: $(src)/%.c TRACEEVENT-CFLAGS
- $(Q)$(do_fpic_compile)
+ $(QUIET_CC_FPIC)$(CC) -c $(CFLAGS) $(EXT) -fPIC $< -o $@
+
+$(PLUGIN_OBJS): %.o : $(src)/%.c
+ $(QUIET_CC_FPIC)$(CC) -c $(CFLAGS) -fPIC -o $@ $<
+
+$(PLUGINS): %.so: %.o
+ $(QUIET_LINK)$(CC) $(CFLAGS) -shared -nostartfiles -o $@ $<
define make_version.h
- (echo '/* This file is automatically generated. Do not modify. */'; \
- echo \#define VERSION_CODE $(shell \
- expr $(VERSION) \* 256 + $(PATCHLEVEL)); \
- echo '#define EXTRAVERSION ' $(EXTRAVERSION); \
- echo '#define VERSION_STRING "'$(VERSION).$(PATCHLEVEL).$(EXTRAVERSION)'"'; \
- echo '#define FILE_VERSION '$(FILE_VERSION); \
- ) > $1
+ (echo '/* This file is automatically generated. Do not modify. */'; \
+ echo \#define VERSION_CODE $(shell \
+ expr $(VERSION) \* 256 + $(PATCHLEVEL)); \
+ echo '#define EXTRAVERSION ' $(EXTRAVERSION); \
+ echo '#define VERSION_STRING "'$(VERSION).$(PATCHLEVEL).$(EXTRAVERSION)'"'; \
+ echo '#define FILE_VERSION '$(FILE_VERSION); \
+ ) > $1
endef
define update_version.h
- ($(call make_version.h, $@.tmp); \
- if [ -r $@ ] && cmp -s $@ $@.tmp; then \
- rm -f $@.tmp; \
- else \
- echo ' UPDATE $@'; \
- mv -f $@.tmp $@; \
- fi);
+ ($(call make_version.h, $@.tmp); \
+ if [ -r $@ ] && cmp -s $@ $@.tmp; then \
+ rm -f $@.tmp; \
+ else \
+ echo ' UPDATE $@'; \
+ mv -f $@.tmp $@; \
+ fi);
endef
ep_version.h: force
@@ -229,13 +246,13 @@ ep_version.h: force
VERSION_FILES = ep_version.h
define update_dir
- (echo $1 > $@.tmp; \
- if [ -r $@ ] && cmp -s $@ $@.tmp; then \
- rm -f $@.tmp; \
- else \
- echo ' UPDATE $@'; \
- mv -f $@.tmp $@; \
- fi);
+ (echo $1 > $@.tmp; \
+ if [ -r $@ ] && cmp -s $@ $@.tmp; then \
+ rm -f $@.tmp; \
+ else \
+ echo ' UPDATE $@'; \
+ mv -f $@.tmp $@; \
+ fi);
endef
## make deps
@@ -245,10 +262,10 @@ all_deps := $(all_objs:%.o=.%.d)
# let .d file also depend on the source and header files
define check_deps
- @set -e; $(RM) $@; \
- $(CC) -MM $(CFLAGS) $< > $@.$$$$; \
- sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \
- $(RM) $@.$$$$
+ @set -e; $(RM) $@; \
+ $(CC) -MM $(CFLAGS) $< > $@.$$$$; \
+ sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \
+ $(RM) $@.$$$$
endef
$(all_deps): .%.d: $(src)/%.c
@@ -283,27 +300,41 @@ TAGS: force
--regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/'
define do_install
- $(print_install) \
if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
fi; \
$(INSTALL) $1 '$(DESTDIR_SQ)$2'
endef
-install_lib: all_cmd
- $(Q)$(call do_install,$(LIB_FILE),$(bindir_SQ))
+define do_install_plugins
+ for plugin in $1; do \
+ $(call do_install,$$plugin,$(plugin_dir_SQ)); \
+ done
+endef
+
+install_lib: all_cmd install_plugins
+ $(call QUIET_INSTALL, $(LIB_FILE)) \
+ $(call do_install,$(LIB_FILE),$(bindir_SQ))
+
+install_plugins: $(PLUGINS)
+ $(call QUIET_INSTALL, trace_plugins) \
+ $(call do_install_plugins, $(PLUGINS))
install: install_lib
clean:
- $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
- $(RM) TRACEEVENT-CFLAGS tags TAGS
+ $(call QUIET_CLEAN, libtraceevent) \
+ $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d \
+ $(RM) TRACEEVENT-CFLAGS tags TAGS
endif # skip-makefile
-PHONY += force
+PHONY += force plugins
force:
+plugins:
+ @echo > /dev/null
+
# Declare the contents of the .PHONY variable as phony. We keep that
# information in a variable so we can use it in if_changed and friends.
.PHONY: $(PHONY)
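The PLUGIN_OBJS/PLUGINS rules above compile each plugin_*.c with -fPIC and link it into its own .so (-shared -nostartfiles). A minimal sketch of the kind of plugin source those rules are meant to build, assuming the PEVENT_PLUGIN_LOADER/PEVENT_PLUGIN_UNLOADER hooks from event-parse.h; the "timer"/"hrtimer_start" names are illustrative only:

    #include "event-parse.h"

    static int timer_handler(struct trace_seq *s, struct pevent_record *record,
                             struct event_format *event, void *context)
    {
            /* render the event however this plugin prefers */
            trace_seq_printf(s, "custom hrtimer output");
            return 0;
    }

    int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
    {
            pevent_register_event_handler(pevent, -1, "timer", "hrtimer_start",
                                          timer_handler, NULL);
            return 0;
    }

    /* note the new unloader signature: it now receives the pevent handle */
    int PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
    {
            pevent_unregister_event_handler(pevent, -1, "timer", "hrtimer_start",
                                            timer_handler, NULL);
            return 0;
    }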
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 217c82ee..1587ea39 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -2710,7 +2710,6 @@ process_func_handler(struct event_format *event, struct pevent_function_handler
struct print_arg *farg;
enum event_type type;
char *token;
- const char *test;
int i;
arg->type = PRINT_FUNC;
@@ -2727,15 +2726,19 @@ process_func_handler(struct event_format *event, struct pevent_function_handler
}
type = process_arg(event, farg, &token);
- if (i < (func->nr_args - 1))
- test = ",";
- else
- test = ")";
-
- if (test_type_token(type, token, EVENT_DELIM, test)) {
- free_arg(farg);
- free_token(token);
- return EVENT_ERROR;
+ if (i < (func->nr_args - 1)) {
+ if (type != EVENT_DELIM || strcmp(token, ",") != 0) {
+ warning("Error: function '%s()' expects %d arguments but event %s only uses %d",
+ func->name, func->nr_args,
+ event->name, i + 1);
+ goto err;
+ }
+ } else {
+ if (type != EVENT_DELIM || strcmp(token, ")") != 0) {
+ warning("Error: function '%s()' only expects %d arguments but event %s has more",
+ func->name, func->nr_args, event->name);
+ goto err;
+ }
}
*next_arg = farg;
@@ -2747,6 +2750,11 @@ process_func_handler(struct event_format *event, struct pevent_function_handler
*tok = token;
return type;
+
+err:
+ free_arg(farg);
+ free_token(token);
+ return EVENT_ERROR;
}
static enum event_type
@@ -4099,6 +4107,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
unsigned long long val;
struct func_map *func;
const char *saveptr;
+ struct trace_seq p;
char *bprint_fmt = NULL;
char format[32];
int show_func;
@@ -4306,8 +4315,12 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
format[len] = 0;
if (!len_as_arg)
len_arg = -1;
- print_str_arg(s, data, size, event,
+ /* Use helper trace_seq */
+ trace_seq_init(&p);
+ print_str_arg(&p, data, size, event,
format, len_arg, arg);
+ trace_seq_terminate(&p);
+ trace_seq_puts(s, p.buffer);
arg = arg->next;
break;
default:
@@ -5116,8 +5129,38 @@ enum pevent_errno __pevent_parse_format(struct event_format **eventp,
return ret;
}
+static enum pevent_errno
+__pevent_parse_event(struct pevent *pevent,
+ struct event_format **eventp,
+ const char *buf, unsigned long size,
+ const char *sys)
+{
+ int ret = __pevent_parse_format(eventp, pevent, buf, size, sys);
+ struct event_format *event = *eventp;
+
+ if (event == NULL)
+ return ret;
+
+ if (pevent && add_event(pevent, event)) {
+ ret = PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ goto event_add_failed;
+ }
+
+#define PRINT_ARGS 0
+ if (PRINT_ARGS && event->print_fmt.args)
+ print_args(event->print_fmt.args);
+
+ return 0;
+
+event_add_failed:
+ pevent_free_format(event);
+ return ret;
+}
+
/**
* pevent_parse_format - parse the event format
+ * @pevent: the handle to the pevent
+ * @eventp: returned format
* @buf: the buffer storing the event format string
* @size: the size of @buf
* @sys: the system the event belongs to
@@ -5129,10 +5172,12 @@ enum pevent_errno __pevent_parse_format(struct event_format **eventp,
*
* /sys/kernel/debug/tracing/events/.../.../format
*/
-enum pevent_errno pevent_parse_format(struct event_format **eventp, const char *buf,
+enum pevent_errno pevent_parse_format(struct pevent *pevent,
+ struct event_format **eventp,
+ const char *buf,
unsigned long size, const char *sys)
{
- return __pevent_parse_format(eventp, NULL, buf, size, sys);
+ return __pevent_parse_event(pevent, eventp, buf, size, sys);
}
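A short sketch of the reworked entry point: the handle now comes first and the parsed event is added to it. The "sched" system name and the buf/size parameters (the contents of an events/.../format file) are assumptions for illustration:

    #include <stdio.h>
    #include "event-parse.h"

    static void parse_one_format(struct pevent *pevent, const char *buf,
                                 unsigned long size)
    {
            struct event_format *event = NULL;
            enum pevent_errno err;
            char msg[200];

            err = pevent_parse_format(pevent, &event, buf, size, "sched");
            if (err) {
                    pevent_strerror(pevent, err, msg, sizeof(msg));
                    fprintf(stderr, "parse failed: %s\n", msg);
            }
    }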
/**
@@ -5153,25 +5198,7 @@ enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf,
unsigned long size, const char *sys)
{
struct event_format *event = NULL;
- int ret = __pevent_parse_format(&event, pevent, buf, size, sys);
-
- if (event == NULL)
- return ret;
-
- if (add_event(pevent, event)) {
- ret = PEVENT_ERRNO__MEM_ALLOC_FAILED;
- goto event_add_failed;
- }
-
-#define PRINT_ARGS 0
- if (PRINT_ARGS && event->print_fmt.args)
- print_args(event->print_fmt.args);
-
- return 0;
-
-event_add_failed:
- pevent_free_format(event);
- return ret;
+ return __pevent_parse_event(pevent, &event, buf, size, sys);
}
#undef _PE
@@ -5203,22 +5230,7 @@ int pevent_strerror(struct pevent *pevent __maybe_unused,
idx = errnum - __PEVENT_ERRNO__START - 1;
msg = pevent_error_str[idx];
-
- switch (errnum) {
- case PEVENT_ERRNO__MEM_ALLOC_FAILED:
- case PEVENT_ERRNO__PARSE_EVENT_FAILED:
- case PEVENT_ERRNO__READ_ID_FAILED:
- case PEVENT_ERRNO__READ_FORMAT_FAILED:
- case PEVENT_ERRNO__READ_PRINT_FAILED:
- case PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED:
- case PEVENT_ERRNO__INVALID_ARG_TYPE:
- snprintf(buf, buflen, "%s", msg);
- break;
-
- default:
- /* cannot reach here */
- break;
- }
+ snprintf(buf, buflen, "%s", msg);
return 0;
}
@@ -5549,6 +5561,52 @@ int pevent_register_print_function(struct pevent *pevent,
}
/**
+ * pevent_unregister_print_function - unregister a helper function
+ * @pevent: the handle to the pevent
+ * @func: the function to process the helper function
+ * @name: the name of the helper function
+ *
+ * This function removes the existing print handler for function @name.
+ *
+ * Returns 0 if the handler was removed successfully, -1 otherwise.
+ */
+int pevent_unregister_print_function(struct pevent *pevent,
+ pevent_func_handler func, char *name)
+{
+ struct pevent_function_handler *func_handle;
+
+ func_handle = find_func_handler(pevent, name);
+ if (func_handle && func_handle->func == func) {
+ remove_func_handler(pevent, name);
+ return 0;
+ }
+ return -1;
+}
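A sketch of pairing the existing registration call with the new unregister helper, assuming event-parse.h is included; the "process_symbols" helper and its argument list are made up for illustration:

    static unsigned long long process_symbols(struct trace_seq *s,
                                              unsigned long long *args)
    {
            return args[0];         /* resolve args[0] however the tool needs */
    }

    static void override_symbols(struct pevent *pevent)
    {
            pevent_register_print_function(pevent, process_symbols,
                                           PEVENT_FUNC_ARG_LONG,
                                           "process_symbols",
                                           PEVENT_FUNC_ARG_PTR,
                                           PEVENT_FUNC_ARG_VOID);
    }

    static void restore_symbols(struct pevent *pevent)
    {
            pevent_unregister_print_function(pevent, process_symbols,
                                             "process_symbols");
    }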
+
+static struct event_format *pevent_search_event(struct pevent *pevent, int id,
+ const char *sys_name,
+ const char *event_name)
+{
+ struct event_format *event;
+
+ if (id >= 0) {
+ /* search by id */
+ event = pevent_find_event(pevent, id);
+ if (!event)
+ return NULL;
+ if (event_name && (strcmp(event_name, event->name) != 0))
+ return NULL;
+ if (sys_name && (strcmp(sys_name, event->system) != 0))
+ return NULL;
+ } else {
+ event = pevent_find_event_by_name(pevent, sys_name, event_name);
+ if (!event)
+ return NULL;
+ }
+ return event;
+}
+
+/**
* pevent_register_event_handler - register a way to parse an event
* @pevent: the handle to the pevent
* @id: the id of the event to register
@@ -5572,20 +5630,9 @@ int pevent_register_event_handler(struct pevent *pevent, int id,
struct event_format *event;
struct event_handler *handle;
- if (id >= 0) {
- /* search by id */
- event = pevent_find_event(pevent, id);
- if (!event)
- goto not_found;
- if (event_name && (strcmp(event_name, event->name) != 0))
- goto not_found;
- if (sys_name && (strcmp(sys_name, event->system) != 0))
- goto not_found;
- } else {
- event = pevent_find_event_by_name(pevent, sys_name, event_name);
- if (!event)
- goto not_found;
- }
+ event = pevent_search_event(pevent, id, sys_name, event_name);
+ if (event == NULL)
+ goto not_found;
pr_stat("overriding event (%d) %s:%s with new print handler",
event->id, event->system, event->name);
@@ -5625,6 +5672,79 @@ int pevent_register_event_handler(struct pevent *pevent, int id,
return -1;
}
+static int handle_matches(struct event_handler *handler, int id,
+ const char *sys_name, const char *event_name,
+ pevent_event_handler_func func, void *context)
+{
+ if (id >= 0 && id != handler->id)
+ return 0;
+
+ if (event_name && (strcmp(event_name, handler->event_name) != 0))
+ return 0;
+
+ if (sys_name && (strcmp(sys_name, handler->sys_name) != 0))
+ return 0;
+
+ if (func != handler->func || context != handler->context)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * pevent_unregister_event_handler - unregister an existing event handler
+ * @pevent: the handle to the pevent
+ * @id: the id of the event to unregister
+ * @sys_name: the system name the handler belongs to
+ * @event_name: the name of the event handler
+ * @func: the function to call to parse the event information
+ * @context: the data to be passed to @func
+ *
+ * This function removes an existing event handler (parser).
+ *
+ * If @id is >= 0, then it is used to find the event,
+ * else @sys_name and @event_name are used.
+ *
+ * Returns 0 if the handler was removed successfully, -1 if the event was not found.
+ */
+int pevent_unregister_event_handler(struct pevent *pevent, int id,
+ const char *sys_name, const char *event_name,
+ pevent_event_handler_func func, void *context)
+{
+ struct event_format *event;
+ struct event_handler *handle;
+ struct event_handler **next;
+
+ event = pevent_search_event(pevent, id, sys_name, event_name);
+ if (event == NULL)
+ goto not_found;
+
+ if (event->handler == func && event->context == context) {
+ pr_stat("removing override handler for event (%d) %s:%s. Going back to default handler.",
+ event->id, event->system, event->name);
+
+ event->handler = NULL;
+ event->context = NULL;
+ return 0;
+ }
+
+not_found:
+ for (next = &pevent->handlers; *next; next = &(*next)->next) {
+ handle = *next;
+ if (handle_matches(handle, id, sys_name, event_name,
+ func, context))
+ break;
+ }
+
+ if (!(*next))
+ return -1;
+
+ *next = handle->next;
+ free_handler(handle);
+
+ return 0;
+}
+
/**
* pevent_alloc - create a pevent handle
*/
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 8d73d25..791c539 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -23,6 +23,7 @@
#include <stdbool.h>
#include <stdarg.h>
#include <regex.h>
+#include <string.h>
#ifndef __maybe_unused
#define __maybe_unused __attribute__((unused))
@@ -57,6 +58,12 @@ struct pevent_record {
#endif
};
+enum trace_seq_fail {
+ TRACE_SEQ__GOOD,
+ TRACE_SEQ__BUFFER_POISONED,
+ TRACE_SEQ__MEM_ALLOC_FAILED,
+};
+
/*
* Trace sequences are used to allow a function to call several other functions
* to create a string of data to use (up to a max of PAGE_SIZE).
@@ -67,6 +74,7 @@ struct trace_seq {
unsigned int buffer_size;
unsigned int len;
unsigned int readpos;
+ enum trace_seq_fail state;
};
void trace_seq_init(struct trace_seq *s);
@@ -97,7 +105,7 @@ typedef int (*pevent_event_handler_func)(struct trace_seq *s,
void *context);
typedef int (*pevent_plugin_load_func)(struct pevent *pevent);
-typedef int (*pevent_plugin_unload_func)(void);
+typedef int (*pevent_plugin_unload_func)(struct pevent *pevent);
struct plugin_option {
struct plugin_option *next;
@@ -122,7 +130,7 @@ struct plugin_option {
* PEVENT_PLUGIN_UNLOADER: (optional)
* The function called just before unloading
*
- * int PEVENT_PLUGIN_UNLOADER(void)
+ * int PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
*
* PEVENT_PLUGIN_OPTIONS: (optional)
* Plugin options that can be set before loading
@@ -355,12 +363,35 @@ enum pevent_flag {
_PE(READ_FORMAT_FAILED, "failed to read event format"), \
_PE(READ_PRINT_FAILED, "failed to read event print fmt"), \
_PE(OLD_FTRACE_ARG_FAILED,"failed to allocate field name for ftrace"),\
- _PE(INVALID_ARG_TYPE, "invalid argument type")
+ _PE(INVALID_ARG_TYPE, "invalid argument type"), \
+ _PE(INVALID_EXP_TYPE, "invalid expression type"), \
+ _PE(INVALID_OP_TYPE, "invalid operator type"), \
+ _PE(INVALID_EVENT_NAME, "invalid event name"), \
+ _PE(EVENT_NOT_FOUND, "no event found"), \
+ _PE(SYNTAX_ERROR, "syntax error"), \
+ _PE(ILLEGAL_RVALUE, "illegal rvalue"), \
+ _PE(ILLEGAL_LVALUE, "illegal lvalue for string comparison"), \
+ _PE(INVALID_REGEX, "regex did not compute"), \
+ _PE(ILLEGAL_STRING_CMP, "illegal comparison for string"), \
+ _PE(ILLEGAL_INTEGER_CMP,"illegal comparison for integer"), \
+ _PE(REPARENT_NOT_OP, "cannot reparent other than OP"), \
+ _PE(REPARENT_FAILED, "failed to reparent filter OP"), \
+ _PE(BAD_FILTER_ARG, "bad arg in filter tree"), \
+ _PE(UNEXPECTED_TYPE, "unexpected type (not a value)"), \
+ _PE(ILLEGAL_TOKEN, "illegal token"), \
+ _PE(INVALID_PAREN, "open parenthesis cannot come here"), \
+ _PE(UNBALANCED_PAREN, "unbalanced number of parenthesis"), \
+ _PE(UNKNOWN_TOKEN, "unknown token"), \
+ _PE(FILTER_NOT_FOUND, "no filter found"), \
+ _PE(NOT_A_NUMBER, "must have number field"), \
+ _PE(NO_FILTER, "no filters exists"), \
+ _PE(FILTER_MISS, "record does not match to filter")
#undef _PE
#define _PE(__code, __str) PEVENT_ERRNO__ ## __code
enum pevent_errno {
PEVENT_ERRNO__SUCCESS = 0,
+ PEVENT_ERRNO__FILTER_MATCH = PEVENT_ERRNO__SUCCESS,
/*
* Choose an arbitrary negative big number not to clash with standard
@@ -377,6 +408,12 @@ enum pevent_errno {
};
#undef _PE
+struct plugin_list;
+
+struct plugin_list *traceevent_load_plugins(struct pevent *pevent);
+void traceevent_unload_plugins(struct plugin_list *plugin_list,
+ struct pevent *pevent);
+
struct cmdline;
struct cmdline_list;
struct func_map;
@@ -522,6 +559,15 @@ __data2host8(struct pevent *pevent, unsigned long long data)
__data2host8(pevent, __val); \
})
+static inline int traceevent_host_bigendian(void)
+{
+ unsigned char str[] = { 0x1, 0x2, 0x3, 0x4 };
+ unsigned int val;
+
+ memcpy(&val, str, 4);
+ return val == 0x01020304;
+}
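The helper only reports the host byte order at run time. A likely use, assuming the pre-existing pevent_set_host_bigendian() setter is still how a handle's endianness is seeded:

    static void init_host_endianness(struct pevent *pevent)
    {
            pevent_set_host_bigendian(pevent, traceevent_host_bigendian());
    }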
+
/* taken from kernel/trace/trace.h */
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
@@ -547,7 +593,9 @@ int pevent_parse_header_page(struct pevent *pevent, char *buf, unsigned long siz
enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf,
unsigned long size, const char *sys);
-enum pevent_errno pevent_parse_format(struct event_format **eventp, const char *buf,
+enum pevent_errno pevent_parse_format(struct pevent *pevent,
+ struct event_format **eventp,
+ const char *buf,
unsigned long size, const char *sys);
void pevent_free_format(struct event_format *event);
@@ -576,10 +624,15 @@ int pevent_print_func_field(struct trace_seq *s, const char *fmt,
int pevent_register_event_handler(struct pevent *pevent, int id,
const char *sys_name, const char *event_name,
pevent_event_handler_func func, void *context);
+int pevent_unregister_event_handler(struct pevent *pevent, int id,
+ const char *sys_name, const char *event_name,
+ pevent_event_handler_func func, void *context);
int pevent_register_print_function(struct pevent *pevent,
pevent_func_handler func,
enum pevent_func_arg_type ret_type,
char *name, ...);
+int pevent_unregister_print_function(struct pevent *pevent,
+ pevent_func_handler func, char *name);
struct format_field *pevent_find_common_field(struct event_format *event, const char *name);
struct format_field *pevent_find_field(struct event_format *event, const char *name);
@@ -811,18 +864,22 @@ struct filter_type {
struct filter_arg *filter;
};
+#define PEVENT_FILTER_ERROR_BUFSZ 1024
+
struct event_filter {
struct pevent *pevent;
int filters;
struct filter_type *event_filters;
+ char error_buffer[PEVENT_FILTER_ERROR_BUFSZ];
};
struct event_filter *pevent_filter_alloc(struct pevent *pevent);
-#define FILTER_NONE -2
-#define FILTER_NOEXIST -1
-#define FILTER_MISS 0
-#define FILTER_MATCH 1
+/* for backward compatibility */
+#define FILTER_NONE PEVENT_ERRNO__FILTER_NOT_FOUND
+#define FILTER_NOEXIST PEVENT_ERRNO__NO_FILTER
+#define FILTER_MISS PEVENT_ERRNO__FILTER_MISS
+#define FILTER_MATCH PEVENT_ERRNO__FILTER_MATCH
enum filter_trivial_type {
FILTER_TRIVIAL_FALSE,
@@ -830,20 +887,21 @@ enum filter_trivial_type {
FILTER_TRIVIAL_BOTH,
};
-int pevent_filter_add_filter_str(struct event_filter *filter,
- const char *filter_str,
- char **error_str);
+enum pevent_errno pevent_filter_add_filter_str(struct event_filter *filter,
+ const char *filter_str);
+enum pevent_errno pevent_filter_match(struct event_filter *filter,
+ struct pevent_record *record);
-int pevent_filter_match(struct event_filter *filter,
- struct pevent_record *record);
+int pevent_filter_strerror(struct event_filter *filter, enum pevent_errno err,
+ char *buf, size_t buflen);
int pevent_event_filtered(struct event_filter *filter,
int event_id);
void pevent_filter_reset(struct event_filter *filter);
-void pevent_filter_clear_trivial(struct event_filter *filter,
+int pevent_filter_clear_trivial(struct event_filter *filter,
enum filter_trivial_type type);
void pevent_filter_free(struct event_filter *filter);
diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
new file mode 100644
index 0000000..0c8bf67
--- /dev/null
+++ b/tools/lib/traceevent/event-plugin.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <string.h>
+#include <dlfcn.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <dirent.h>
+#include "event-parse.h"
+#include "event-utils.h"
+
+#define LOCAL_PLUGIN_DIR ".traceevent/plugins"
+
+struct plugin_list {
+ struct plugin_list *next;
+ char *name;
+ void *handle;
+};
+
+static void
+load_plugin(struct pevent *pevent, const char *path,
+ const char *file, void *data)
+{
+ struct plugin_list **plugin_list = data;
+ pevent_plugin_load_func func;
+ struct plugin_list *list;
+ const char *alias;
+ char *plugin;
+ void *handle;
+
+ plugin = malloc(strlen(path) + strlen(file) + 2);
+ if (!plugin) {
+ warning("could not allocate plugin memory\n");
+ return;
+ }
+
+ strcpy(plugin, path);
+ strcat(plugin, "/");
+ strcat(plugin, file);
+
+ handle = dlopen(plugin, RTLD_NOW | RTLD_GLOBAL);
+ if (!handle) {
+ warning("could not load plugin '%s'\n%s\n",
+ plugin, dlerror());
+ goto out_free;
+ }
+
+ alias = dlsym(handle, PEVENT_PLUGIN_ALIAS_NAME);
+ if (!alias)
+ alias = file;
+
+ func = dlsym(handle, PEVENT_PLUGIN_LOADER_NAME);
+ if (!func) {
+ warning("could not find func '%s' in plugin '%s'\n%s\n",
+ PEVENT_PLUGIN_LOADER_NAME, plugin, dlerror());
+ goto out_free;
+ }
+
+ list = malloc(sizeof(*list));
+ if (!list) {
+ warning("could not allocate plugin memory\n");
+ goto out_free;
+ }
+
+ list->next = *plugin_list;
+ list->handle = handle;
+ list->name = plugin;
+ *plugin_list = list;
+
+ pr_stat("registering plugin: %s", plugin);
+ func(pevent);
+ return;
+
+ out_free:
+ free(plugin);
+}
+
+static void
+load_plugins_dir(struct pevent *pevent, const char *suffix,
+ const char *path,
+ void (*load_plugin)(struct pevent *pevent,
+ const char *path,
+ const char *name,
+ void *data),
+ void *data)
+{
+ struct dirent *dent;
+ struct stat st;
+ DIR *dir;
+ int ret;
+
+ ret = stat(path, &st);
+ if (ret < 0)
+ return;
+
+ if (!S_ISDIR(st.st_mode))
+ return;
+
+ dir = opendir(path);
+ if (!dir)
+ return;
+
+ while ((dent = readdir(dir))) {
+ const char *name = dent->d_name;
+
+ if (strcmp(name, ".") == 0 ||
+ strcmp(name, "..") == 0)
+ continue;
+
+ /* Only load plugins that end in suffix (and are long enough to) */
+ if (strlen(name) < strlen(suffix) ||
+ strcmp(name + (strlen(name) - strlen(suffix)), suffix) != 0)
+ continue;
+
+ load_plugin(pevent, path, name, data);
+ }
+
+ closedir(dir);
+}
+
+static void
+load_plugins(struct pevent *pevent, const char *suffix,
+ void (*load_plugin)(struct pevent *pevent,
+ const char *path,
+ const char *name,
+ void *data),
+ void *data)
+{
+ char *home;
+ char *path;
+ char *envdir;
+
+ /*
+ * If a system plugin directory was defined,
+ * check that first.
+ */
+#ifdef PLUGIN_DIR
+ load_plugins_dir(pevent, suffix, PLUGIN_DIR, load_plugin, data);
+#endif
+
+ /*
+ * Next let the environment-set plugin directory
+ * override the system defaults.
+ */
+ envdir = getenv("TRACEEVENT_PLUGIN_DIR");
+ if (envdir)
+ load_plugins_dir(pevent, suffix, envdir, load_plugin, data);
+
+ /*
+ * Now let the home directory override the environment
+ * or system defaults.
+ */
+ home = getenv("HOME");
+ if (!home)
+ return;
+
+ path = malloc(strlen(home) + strlen(LOCAL_PLUGIN_DIR) + 2);
+ if (!path) {
+ warning("could not allocate plugin memory\n");
+ return;
+ }
+
+ strcpy(path, home);
+ strcat(path, "/");
+ strcat(path, LOCAL_PLUGIN_DIR);
+
+ load_plugins_dir(pevent, suffix, path, load_plugin, data);
+
+ free(path);
+}
+
+struct plugin_list*
+traceevent_load_plugins(struct pevent *pevent)
+{
+ struct plugin_list *list = NULL;
+
+ load_plugins(pevent, ".so", load_plugin, &list);
+ return list;
+}
+
+void
+traceevent_unload_plugins(struct plugin_list *plugin_list, struct pevent *pevent)
+{
+ pevent_plugin_unload_func func;
+ struct plugin_list *list;
+
+ while (plugin_list) {
+ list = plugin_list;
+ plugin_list = list->next;
+ func = dlsym(list->handle, PEVENT_PLUGIN_UNLOADER_NAME);
+ if (func)
+ func(pevent);
+ dlclose(list->handle);
+ free(list->name);
+ free(list);
+ }
+}
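A minimal sketch of the intended plugin lifecycle around a handle, using only calls exported by the library (pevent_alloc()/pevent_free() are the existing allocation helpers):

    #include "event-parse.h"

    int main(void)
    {
            struct pevent *pevent = pevent_alloc();
            struct plugin_list *plugins = traceevent_load_plugins(pevent);

            /* ... parse formats and print records here ... */

            traceevent_unload_plugins(plugins, pevent);
            pevent_free(pevent);
            return 0;
    }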
diff --git a/tools/lib/traceevent/event-utils.h b/tools/lib/traceevent/event-utils.h
index e76c9ac..d1dc217 100644
--- a/tools/lib/traceevent/event-utils.h
+++ b/tools/lib/traceevent/event-utils.h
@@ -23,18 +23,14 @@
#include <ctype.h>
/* Can be overridden */
-void die(const char *fmt, ...);
-void *malloc_or_die(unsigned int size);
void warning(const char *fmt, ...);
void pr_stat(const char *fmt, ...);
void vpr_stat(const char *fmt, va_list ap);
/* Always available */
-void __die(const char *fmt, ...);
void __warning(const char *fmt, ...);
void __pr_stat(const char *fmt, ...);
-void __vdie(const char *fmt, ...);
void __vwarning(const char *fmt, ...);
void __vpr_stat(const char *fmt, ...);
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 2500e75..b502344 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -38,41 +38,31 @@ struct event_list {
struct event_format *event;
};
-#define MAX_ERR_STR_SIZE 256
-
-static void show_error(char **error_str, const char *fmt, ...)
+static void show_error(char *error_buf, const char *fmt, ...)
{
unsigned long long index;
const char *input;
- char *error;
va_list ap;
int len;
int i;
- if (!error_str)
- return;
-
input = pevent_get_input_buf();
index = pevent_get_input_buf_ptr();
len = input ? strlen(input) : 0;
- error = malloc_or_die(MAX_ERR_STR_SIZE + (len*2) + 3);
-
if (len) {
- strcpy(error, input);
- error[len] = '\n';
+ strcpy(error_buf, input);
+ error_buf[len] = '\n';
for (i = 1; i < len && i < index; i++)
- error[len+i] = ' ';
- error[len + i] = '^';
- error[len + i + 1] = '\n';
+ error_buf[len+i] = ' ';
+ error_buf[len + i] = '^';
+ error_buf[len + i + 1] = '\n';
len += i+2;
}
va_start(ap, fmt);
- vsnprintf(error + len, MAX_ERR_STR_SIZE, fmt, ap);
+ vsnprintf(error_buf + len, PEVENT_FILTER_ERROR_BUFSZ - len, fmt, ap);
va_end(ap);
-
- *error_str = error;
}
static void free_token(char *token)
@@ -95,7 +85,11 @@ static enum event_type read_token(char **tok)
(strcmp(token, "=") == 0 || strcmp(token, "!") == 0) &&
pevent_peek_char() == '~') {
/* append it */
- *tok = malloc_or_die(3);
+ *tok = malloc(3);
+ if (*tok == NULL) {
+ free_token(token);
+ return EVENT_ERROR;
+ }
sprintf(*tok, "%c%c", *token, '~');
free_token(token);
/* Now remove the '~' from the buffer */
@@ -147,11 +141,13 @@ add_filter_type(struct event_filter *filter, int id)
if (filter_type)
return filter_type;
- filter->event_filters = realloc(filter->event_filters,
- sizeof(*filter->event_filters) *
- (filter->filters + 1));
- if (!filter->event_filters)
- die("Could not allocate filter");
+ filter_type = realloc(filter->event_filters,
+ sizeof(*filter->event_filters) *
+ (filter->filters + 1));
+ if (!filter_type)
+ return NULL;
+
+ filter->event_filters = filter_type;
for (i = 0; i < filter->filters; i++) {
if (filter->event_filters[i].event_id > id)
@@ -182,7 +178,10 @@ struct event_filter *pevent_filter_alloc(struct pevent *pevent)
{
struct event_filter *filter;
- filter = malloc_or_die(sizeof(*filter));
+ filter = malloc(sizeof(*filter));
+ if (filter == NULL)
+ return NULL;
+
memset(filter, 0, sizeof(*filter));
filter->pevent = pevent;
pevent_ref(pevent);
@@ -192,12 +191,7 @@ struct event_filter *pevent_filter_alloc(struct pevent *pevent)
static struct filter_arg *allocate_arg(void)
{
- struct filter_arg *arg;
-
- arg = malloc_or_die(sizeof(*arg));
- memset(arg, 0, sizeof(*arg));
-
- return arg;
+ return calloc(1, sizeof(struct filter_arg));
}
static void free_arg(struct filter_arg *arg)
@@ -242,15 +236,19 @@ static void free_arg(struct filter_arg *arg)
free(arg);
}
-static void add_event(struct event_list **events,
+static int add_event(struct event_list **events,
struct event_format *event)
{
struct event_list *list;
- list = malloc_or_die(sizeof(*list));
+ list = malloc(sizeof(*list));
+ if (list == NULL)
+ return -1;
+
list->next = *events;
*events = list;
list->event = event;
+ return 0;
}
static int event_match(struct event_format *event,
@@ -265,7 +263,7 @@ static int event_match(struct event_format *event,
!regexec(ereg, event->name, 0, NULL, 0);
}
-static int
+static enum pevent_errno
find_event(struct pevent *pevent, struct event_list **events,
char *sys_name, char *event_name)
{
@@ -273,6 +271,7 @@ find_event(struct pevent *pevent, struct event_list **events,
regex_t ereg;
regex_t sreg;
int match = 0;
+ int fail = 0;
char *reg;
int ret;
int i;
@@ -283,23 +282,31 @@ find_event(struct pevent *pevent, struct event_list **events,
sys_name = NULL;
}
- reg = malloc_or_die(strlen(event_name) + 3);
+ reg = malloc(strlen(event_name) + 3);
+ if (reg == NULL)
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+
sprintf(reg, "^%s$", event_name);
ret = regcomp(&ereg, reg, REG_ICASE|REG_NOSUB);
free(reg);
if (ret)
- return -1;
+ return PEVENT_ERRNO__INVALID_EVENT_NAME;
if (sys_name) {
- reg = malloc_or_die(strlen(sys_name) + 3);
+ reg = malloc(strlen(sys_name) + 3);
+ if (reg == NULL) {
+ regfree(&ereg);
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ }
+
sprintf(reg, "^%s$", sys_name);
ret = regcomp(&sreg, reg, REG_ICASE|REG_NOSUB);
free(reg);
if (ret) {
regfree(&ereg);
- return -1;
+ return PEVENT_ERRNO__INVALID_EVENT_NAME;
}
}
@@ -307,7 +314,10 @@ find_event(struct pevent *pevent, struct event_list **events,
event = pevent->events[i];
if (event_match(event, sys_name ? &sreg : NULL, &ereg)) {
match = 1;
- add_event(events, event);
+ if (add_event(events, event) < 0) {
+ fail = 1;
+ break;
+ }
}
}
@@ -316,7 +326,9 @@ find_event(struct pevent *pevent, struct event_list **events,
regfree(&sreg);
if (!match)
- return -1;
+ return PEVENT_ERRNO__EVENT_NOT_FOUND;
+ if (fail)
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
return 0;
}
@@ -332,14 +344,18 @@ static void free_events(struct event_list *events)
}
}
-static struct filter_arg *
+static enum pevent_errno
create_arg_item(struct event_format *event, const char *token,
- enum event_type type, char **error_str)
+ enum event_type type, struct filter_arg **parg, char *error_str)
{
struct format_field *field;
struct filter_arg *arg;
arg = allocate_arg();
+ if (arg == NULL) {
+ show_error(error_str, "failed to allocate filter arg");
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ }
switch (type) {
@@ -349,8 +365,11 @@ create_arg_item(struct event_format *event, const char *token,
arg->value.type =
type == EVENT_DQUOTE ? FILTER_STRING : FILTER_CHAR;
arg->value.str = strdup(token);
- if (!arg->value.str)
- die("malloc string");
+ if (!arg->value.str) {
+ free_arg(arg);
+ show_error(error_str, "failed to allocate string filter arg");
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ }
break;
case EVENT_ITEM:
/* if it is a number, then convert it */
@@ -377,11 +396,11 @@ create_arg_item(struct event_format *event, const char *token,
break;
default:
free_arg(arg);
- show_error(error_str, "expected a value but found %s",
- token);
- return NULL;
+ show_error(error_str, "expected a value but found %s", token);
+ return PEVENT_ERRNO__UNEXPECTED_TYPE;
}
- return arg;
+ *parg = arg;
+ return 0;
}
static struct filter_arg *
@@ -390,6 +409,9 @@ create_arg_op(enum filter_op_type btype)
struct filter_arg *arg;
arg = allocate_arg();
+ if (!arg)
+ return NULL;
+
arg->type = FILTER_ARG_OP;
arg->op.type = btype;
@@ -402,6 +424,9 @@ create_arg_exp(enum filter_exp_type etype)
struct filter_arg *arg;
arg = allocate_arg();
+ if (!arg)
+ return NULL;
+
arg->type = FILTER_ARG_EXP;
arg->op.type = etype;
@@ -414,6 +439,9 @@ create_arg_cmp(enum filter_exp_type etype)
struct filter_arg *arg;
arg = allocate_arg();
+ if (!arg)
+ return NULL;
+
/* Use NUM and change if necessary */
arg->type = FILTER_ARG_NUM;
arg->op.type = etype;
@@ -421,8 +449,8 @@ create_arg_cmp(enum filter_exp_type etype)
return arg;
}
-static int add_right(struct filter_arg *op, struct filter_arg *arg,
- char **error_str)
+static enum pevent_errno
+add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
{
struct filter_arg *left;
char *str;
@@ -453,9 +481,8 @@ static int add_right(struct filter_arg *op, struct filter_arg *arg,
case FILTER_ARG_FIELD:
break;
default:
- show_error(error_str,
- "Illegal rvalue");
- return -1;
+ show_error(error_str, "Illegal rvalue");
+ return PEVENT_ERRNO__ILLEGAL_RVALUE;
}
/*
@@ -502,7 +529,7 @@ static int add_right(struct filter_arg *op, struct filter_arg *arg,
if (left->type != FILTER_ARG_FIELD) {
show_error(error_str,
"Illegal lvalue for string comparison");
- return -1;
+ return PEVENT_ERRNO__ILLEGAL_LVALUE;
}
/* Make sure this is a valid string compare */
@@ -521,25 +548,31 @@ static int add_right(struct filter_arg *op, struct filter_arg *arg,
show_error(error_str,
"RegEx '%s' did not compute",
str);
- return -1;
+ return PEVENT_ERRNO__INVALID_REGEX;
}
break;
default:
show_error(error_str,
"Illegal comparison for string");
- return -1;
+ return PEVENT_ERRNO__ILLEGAL_STRING_CMP;
}
op->type = FILTER_ARG_STR;
op->str.type = op_type;
op->str.field = left->field.field;
op->str.val = strdup(str);
- if (!op->str.val)
- die("malloc string");
+ if (!op->str.val) {
+ show_error(error_str, "Failed to allocate string filter");
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ }
/*
* Need a buffer to copy data for tests
*/
- op->str.buffer = malloc_or_die(op->str.field->size + 1);
+ op->str.buffer = malloc(op->str.field->size + 1);
+ if (!op->str.buffer) {
+ show_error(error_str, "Failed to allocate string filter");
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ }
/* Null terminate this buffer */
op->str.buffer[op->str.field->size] = 0;
@@ -557,7 +590,7 @@ static int add_right(struct filter_arg *op, struct filter_arg *arg,
case FILTER_CMP_NOT_REGEX:
show_error(error_str,
"Op not allowed with integers");
- return -1;
+ return PEVENT_ERRNO__ILLEGAL_INTEGER_CMP;
default:
break;
@@ -577,9 +610,8 @@ static int add_right(struct filter_arg *op, struct filter_arg *arg,
return 0;
out_fail:
- show_error(error_str,
- "Syntax error");
- return -1;
+ show_error(error_str, "Syntax error");
+ return PEVENT_ERRNO__SYNTAX_ERROR;
}
static struct filter_arg *
@@ -592,7 +624,7 @@ rotate_op_right(struct filter_arg *a, struct filter_arg *b)
return arg;
}
-static int add_left(struct filter_arg *op, struct filter_arg *arg)
+static enum pevent_errno add_left(struct filter_arg *op, struct filter_arg *arg)
{
switch (op->type) {
case FILTER_ARG_EXP:
@@ -611,11 +643,11 @@ static int add_left(struct filter_arg *op, struct filter_arg *arg)
/* left arg of compares must be a field */
if (arg->type != FILTER_ARG_FIELD &&
arg->type != FILTER_ARG_BOOLEAN)
- return -1;
+ return PEVENT_ERRNO__INVALID_ARG_TYPE;
op->num.left = arg;
break;
default:
- return -1;
+ return PEVENT_ERRNO__INVALID_ARG_TYPE;
}
return 0;
}
@@ -728,15 +760,18 @@ enum filter_vals {
FILTER_VAL_TRUE,
};
-void reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child,
- struct filter_arg *arg)
+static enum pevent_errno
+reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child,
+ struct filter_arg *arg, char *error_str)
{
struct filter_arg *other_child;
struct filter_arg **ptr;
if (parent->type != FILTER_ARG_OP &&
- arg->type != FILTER_ARG_OP)
- die("can not reparent other than OP");
+ arg->type != FILTER_ARG_OP) {
+ show_error(error_str, "can not reparent other than OP");
+ return PEVENT_ERRNO__REPARENT_NOT_OP;
+ }
/* Get the sibling */
if (old_child->op.right == arg) {
@@ -745,8 +780,10 @@ void reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child,
} else if (old_child->op.left == arg) {
ptr = &old_child->op.left;
other_child = old_child->op.right;
- } else
- die("Error in reparent op, find other child");
+ } else {
+ show_error(error_str, "Error in reparent op, find other child");
+ return PEVENT_ERRNO__REPARENT_FAILED;
+ }
/* Detach arg from old_child */
*ptr = NULL;
@@ -757,23 +794,29 @@ void reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child,
*parent = *arg;
/* Free arg without recursion */
free(arg);
- return;
+ return 0;
}
if (parent->op.right == old_child)
ptr = &parent->op.right;
else if (parent->op.left == old_child)
ptr = &parent->op.left;
- else
- die("Error in reparent op");
+ else {
+ show_error(error_str, "Error in reparent op");
+ return PEVENT_ERRNO__REPARENT_FAILED;
+ }
+
*ptr = arg;
free_arg(old_child);
+ return 0;
}
-enum filter_vals test_arg(struct filter_arg *parent, struct filter_arg *arg)
+/* Returns either filter_vals (success) or pevent_errno (failure) */
+static int test_arg(struct filter_arg *parent, struct filter_arg *arg,
+ char *error_str)
{
- enum filter_vals lval, rval;
+ int lval, rval;
switch (arg->type) {
@@ -788,63 +831,68 @@ enum filter_vals test_arg(struct filter_arg *parent, struct filter_arg *arg)
return FILTER_VAL_NORM;
case FILTER_ARG_EXP:
- lval = test_arg(arg, arg->exp.left);
+ lval = test_arg(arg, arg->exp.left, error_str);
if (lval != FILTER_VAL_NORM)
return lval;
- rval = test_arg(arg, arg->exp.right);
+ rval = test_arg(arg, arg->exp.right, error_str);
if (rval != FILTER_VAL_NORM)
return rval;
return FILTER_VAL_NORM;
case FILTER_ARG_NUM:
- lval = test_arg(arg, arg->num.left);
+ lval = test_arg(arg, arg->num.left, error_str);
if (lval != FILTER_VAL_NORM)
return lval;
- rval = test_arg(arg, arg->num.right);
+ rval = test_arg(arg, arg->num.right, error_str);
if (rval != FILTER_VAL_NORM)
return rval;
return FILTER_VAL_NORM;
case FILTER_ARG_OP:
if (arg->op.type != FILTER_OP_NOT) {
- lval = test_arg(arg, arg->op.left);
+ lval = test_arg(arg, arg->op.left, error_str);
switch (lval) {
case FILTER_VAL_NORM:
break;
case FILTER_VAL_TRUE:
if (arg->op.type == FILTER_OP_OR)
return FILTER_VAL_TRUE;
- rval = test_arg(arg, arg->op.right);
+ rval = test_arg(arg, arg->op.right, error_str);
if (rval != FILTER_VAL_NORM)
return rval;
- reparent_op_arg(parent, arg, arg->op.right);
- return FILTER_VAL_NORM;
+ return reparent_op_arg(parent, arg, arg->op.right,
+ error_str);
case FILTER_VAL_FALSE:
if (arg->op.type == FILTER_OP_AND)
return FILTER_VAL_FALSE;
- rval = test_arg(arg, arg->op.right);
+ rval = test_arg(arg, arg->op.right, error_str);
if (rval != FILTER_VAL_NORM)
return rval;
- reparent_op_arg(parent, arg, arg->op.right);
- return FILTER_VAL_NORM;
+ return reparent_op_arg(parent, arg, arg->op.right,
+ error_str);
+
+ default:
+ return lval;
}
}
- rval = test_arg(arg, arg->op.right);
+ rval = test_arg(arg, arg->op.right, error_str);
switch (rval) {
case FILTER_VAL_NORM:
+ default:
break;
+
case FILTER_VAL_TRUE:
if (arg->op.type == FILTER_OP_OR)
return FILTER_VAL_TRUE;
if (arg->op.type == FILTER_OP_NOT)
return FILTER_VAL_FALSE;
- reparent_op_arg(parent, arg, arg->op.left);
- return FILTER_VAL_NORM;
+ return reparent_op_arg(parent, arg, arg->op.left,
+ error_str);
case FILTER_VAL_FALSE:
if (arg->op.type == FILTER_OP_AND)
@@ -852,41 +900,56 @@ enum filter_vals test_arg(struct filter_arg *parent, struct filter_arg *arg)
if (arg->op.type == FILTER_OP_NOT)
return FILTER_VAL_TRUE;
- reparent_op_arg(parent, arg, arg->op.left);
- return FILTER_VAL_NORM;
+ return reparent_op_arg(parent, arg, arg->op.left,
+ error_str);
}
- return FILTER_VAL_NORM;
+ return rval;
default:
- die("bad arg in filter tree");
+ show_error(error_str, "bad arg in filter tree");
+ return PEVENT_ERRNO__BAD_FILTER_ARG;
}
return FILTER_VAL_NORM;
}
/* Remove any unknown event fields */
-static struct filter_arg *collapse_tree(struct filter_arg *arg)
+static int collapse_tree(struct filter_arg *arg,
+ struct filter_arg **arg_collapsed, char *error_str)
{
- enum filter_vals ret;
+ int ret;
- ret = test_arg(arg, arg);
+ ret = test_arg(arg, arg, error_str);
switch (ret) {
case FILTER_VAL_NORM:
- return arg;
+ break;
case FILTER_VAL_TRUE:
case FILTER_VAL_FALSE:
free_arg(arg);
arg = allocate_arg();
- arg->type = FILTER_ARG_BOOLEAN;
- arg->boolean.value = ret == FILTER_VAL_TRUE;
+ if (arg) {
+ arg->type = FILTER_ARG_BOOLEAN;
+ arg->boolean.value = ret == FILTER_VAL_TRUE;
+ } else {
+ show_error(error_str, "Failed to allocate filter arg");
+ ret = PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ }
+ break;
+
+ default:
+ /* test_arg() already set the error_str */
+ free_arg(arg);
+ arg = NULL;
+ break;
}
- return arg;
+ *arg_collapsed = arg;
+ return ret;
}
-static int
+static enum pevent_errno
process_filter(struct event_format *event, struct filter_arg **parg,
- char **error_str, int not)
+ char *error_str, int not)
{
enum event_type type;
char *token = NULL;
@@ -898,7 +961,7 @@ process_filter(struct event_format *event, struct filter_arg **parg,
enum filter_op_type btype;
enum filter_exp_type etype;
enum filter_cmp_type ctype;
- int ret;
+ enum pevent_errno ret;
*parg = NULL;
@@ -909,8 +972,8 @@ process_filter(struct event_format *event, struct filter_arg **parg,
case EVENT_SQUOTE:
case EVENT_DQUOTE:
case EVENT_ITEM:
- arg = create_arg_item(event, token, type, error_str);
- if (!arg)
+ ret = create_arg_item(event, token, type, &arg, error_str);
+ if (ret < 0)
goto fail;
if (!left_item)
left_item = arg;
@@ -923,20 +986,20 @@ process_filter(struct event_format *event, struct filter_arg **parg,
if (not) {
arg = NULL;
if (current_op)
- goto fail_print;
+ goto fail_syntax;
free(token);
*parg = current_exp;
return 0;
}
} else
- goto fail_print;
+ goto fail_syntax;
arg = NULL;
break;
case EVENT_DELIM:
if (*token == ',') {
- show_error(error_str,
- "Illegal token ','");
+ show_error(error_str, "Illegal token ','");
+ ret = PEVENT_ERRNO__ILLEGAL_TOKEN;
goto fail;
}
@@ -944,19 +1007,23 @@ process_filter(struct event_format *event, struct filter_arg **parg,
if (left_item) {
show_error(error_str,
"Open paren can not come after item");
+ ret = PEVENT_ERRNO__INVALID_PAREN;
goto fail;
}
if (current_exp) {
show_error(error_str,
"Open paren can not come after expression");
+ ret = PEVENT_ERRNO__INVALID_PAREN;
goto fail;
}
ret = process_filter(event, &arg, error_str, 0);
- if (ret != 1) {
- if (ret == 0)
+ if (ret != PEVENT_ERRNO__UNBALANCED_PAREN) {
+ if (ret == 0) {
show_error(error_str,
"Unbalanced number of '('");
+ ret = PEVENT_ERRNO__UNBALANCED_PAREN;
+ }
goto fail;
}
ret = 0;
@@ -964,7 +1031,7 @@ process_filter(struct event_format *event, struct filter_arg **parg,
/* A not wants just one expression */
if (not) {
if (current_op)
- goto fail_print;
+ goto fail_syntax;
*parg = arg;
return 0;
}
@@ -979,19 +1046,19 @@ process_filter(struct event_format *event, struct filter_arg **parg,
} else { /* ')' */
if (!current_op && !current_exp)
- goto fail_print;
+ goto fail_syntax;
/* Make sure everything is finished at this level */
if (current_exp && !check_op_done(current_exp))
- goto fail_print;
+ goto fail_syntax;
if (current_op && !check_op_done(current_op))
- goto fail_print;
+ goto fail_syntax;
if (current_op)
*parg = current_op;
else
*parg = current_exp;
- return 1;
+ return PEVENT_ERRNO__UNBALANCED_PAREN;
}
break;
@@ -1003,21 +1070,22 @@ process_filter(struct event_format *event, struct filter_arg **parg,
case OP_BOOL:
/* Logic ops need a left expression */
if (!current_exp && !current_op)
- goto fail_print;
+ goto fail_syntax;
/* fall through */
case OP_NOT:
/* logic only processes ops and exp */
if (left_item)
- goto fail_print;
+ goto fail_syntax;
break;
case OP_EXP:
case OP_CMP:
if (!left_item)
- goto fail_print;
+ goto fail_syntax;
break;
case OP_NONE:
show_error(error_str,
"Unknown op token %s", token);
+ ret = PEVENT_ERRNO__UNKNOWN_TOKEN;
goto fail;
}
@@ -1025,6 +1093,8 @@ process_filter(struct event_format *event, struct filter_arg **parg,
switch (op_type) {
case OP_BOOL:
arg = create_arg_op(btype);
+ if (arg == NULL)
+ goto fail_alloc;
if (current_op)
ret = add_left(arg, current_op);
else
@@ -1035,6 +1105,8 @@ process_filter(struct event_format *event, struct filter_arg **parg,
case OP_NOT:
arg = create_arg_op(btype);
+ if (arg == NULL)
+ goto fail_alloc;
if (current_op)
ret = add_right(current_op, arg, error_str);
if (ret < 0)
@@ -1054,6 +1126,8 @@ process_filter(struct event_format *event, struct filter_arg **parg,
arg = create_arg_exp(etype);
else
arg = create_arg_cmp(ctype);
+ if (arg == NULL)
+ goto fail_alloc;
if (current_op)
ret = add_right(current_op, arg, error_str);
@@ -1062,7 +1136,7 @@ process_filter(struct event_format *event, struct filter_arg **parg,
ret = add_left(arg, left_item);
if (ret < 0) {
arg = NULL;
- goto fail_print;
+ goto fail_syntax;
}
current_exp = arg;
break;
@@ -1071,57 +1145,64 @@ process_filter(struct event_format *event, struct filter_arg **parg,
}
arg = NULL;
if (ret < 0)
- goto fail_print;
+ goto fail_syntax;
break;
case EVENT_NONE:
break;
+ case EVENT_ERROR:
+ goto fail_alloc;
default:
- goto fail_print;
+ goto fail_syntax;
}
} while (type != EVENT_NONE);
if (!current_op && !current_exp)
- goto fail_print;
+ goto fail_syntax;
if (!current_op)
current_op = current_exp;
- current_op = collapse_tree(current_op);
+ ret = collapse_tree(current_op, parg, error_str);
+ if (ret < 0)
+ goto fail;
*parg = current_op;
return 0;
- fail_print:
+ fail_alloc:
+ show_error(error_str, "failed to allocate filter arg");
+ ret = PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ goto fail;
+ fail_syntax:
show_error(error_str, "Syntax error");
+ ret = PEVENT_ERRNO__SYNTAX_ERROR;
fail:
free_arg(current_op);
free_arg(current_exp);
free_arg(arg);
free(token);
- return -1;
+ return ret;
}
-static int
+static enum pevent_errno
process_event(struct event_format *event, const char *filter_str,
- struct filter_arg **parg, char **error_str)
+ struct filter_arg **parg, char *error_str)
{
int ret;
pevent_buffer_init(filter_str, strlen(filter_str));
ret = process_filter(event, parg, error_str, 0);
- if (ret == 1) {
- show_error(error_str,
- "Unbalanced number of ')'");
- return -1;
- }
if (ret < 0)
return ret;
/* If parg is NULL, then make it into FALSE */
if (!*parg) {
*parg = allocate_arg();
+ if (*parg == NULL)
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+
(*parg)->type = FILTER_ARG_BOOLEAN;
(*parg)->boolean.value = FILTER_FALSE;
}
@@ -1129,13 +1210,13 @@ process_event(struct event_format *event, const char *filter_str,
return 0;
}
-static int filter_event(struct event_filter *filter,
- struct event_format *event,
- const char *filter_str, char **error_str)
+static enum pevent_errno
+filter_event(struct event_filter *filter, struct event_format *event,
+ const char *filter_str, char *error_str)
{
struct filter_type *filter_type;
struct filter_arg *arg;
- int ret;
+ enum pevent_errno ret;
if (filter_str) {
ret = process_event(event, filter_str, &arg, error_str);
@@ -1145,11 +1226,17 @@ static int filter_event(struct event_filter *filter,
} else {
/* just add a TRUE arg */
arg = allocate_arg();
+ if (arg == NULL)
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+
arg->type = FILTER_ARG_BOOLEAN;
arg->boolean.value = FILTER_TRUE;
}
filter_type = add_filter_type(filter, event->id);
+ if (filter_type == NULL)
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+
if (filter_type->filter)
free_arg(filter_type->filter);
filter_type->filter = arg;
@@ -1157,22 +1244,24 @@ static int filter_event(struct event_filter *filter,
return 0;
}
+static void filter_init_error_buf(struct event_filter *filter)
+{
+ /* clear buffer to reset show error */
+ pevent_buffer_init("", 0);
+ filter->error_buffer[0] = '\0';
+}
+
/**
* pevent_filter_add_filter_str - add a new filter
* @filter: the event filter to add to
* @filter_str: the filter string that contains the filter
- * @error_str: string containing reason for failed filter
- *
- * Returns 0 if the filter was successfully added
- * -1 if there was an error.
*
- * On error, if @error_str points to a string pointer,
- * it is set to the reason that the filter failed.
- * This string must be freed with "free".
+ * Returns 0 if the filter was successfully added, or a
+ * negative error code. Use pevent_filter_strerror() to see
+ * the actual error message in case of error.
*/
-int pevent_filter_add_filter_str(struct event_filter *filter,
- const char *filter_str,
- char **error_str)
+enum pevent_errno pevent_filter_add_filter_str(struct event_filter *filter,
+ const char *filter_str)
{
struct pevent *pevent = filter->pevent;
struct event_list *event;
@@ -1183,15 +1272,11 @@ int pevent_filter_add_filter_str(struct event_filter *filter,
char *event_name = NULL;
char *sys_name = NULL;
char *sp;
- int rtn = 0;
+ enum pevent_errno rtn = 0; /* PEVENT_ERRNO__SUCCESS */
int len;
int ret;
- /* clear buffer to reset show error */
- pevent_buffer_init("", 0);
-
- if (error_str)
- *error_str = NULL;
+ filter_init_error_buf(filter);
filter_start = strchr(filter_str, ':');
if (filter_start)
@@ -1199,7 +1284,6 @@ int pevent_filter_add_filter_str(struct event_filter *filter,
else
len = strlen(filter_str);
-
do {
next_event = strchr(filter_str, ',');
if (next_event &&
@@ -1210,7 +1294,12 @@ int pevent_filter_add_filter_str(struct event_filter *filter,
else
len = strlen(filter_str);
- this_event = malloc_or_die(len + 1);
+ this_event = malloc(len + 1);
+ if (this_event == NULL) {
+ /* This can only happen when events is NULL, but still */
+ free_events(events);
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ }
memcpy(this_event, filter_str, len);
this_event[len] = 0;
@@ -1223,27 +1312,18 @@ int pevent_filter_add_filter_str(struct event_filter *filter,
event_name = strtok_r(NULL, "/", &sp);
if (!sys_name) {
- show_error(error_str, "No filter found");
/* This can only happen when events is NULL, but still */
free_events(events);
free(this_event);
- return -1;
+ return PEVENT_ERRNO__FILTER_NOT_FOUND;
}
/* Find this event */
ret = find_event(pevent, &events, strim(sys_name), strim(event_name));
if (ret < 0) {
- if (event_name)
- show_error(error_str,
- "No event found under '%s.%s'",
- sys_name, event_name);
- else
- show_error(error_str,
- "No event found under '%s'",
- sys_name);
free_events(events);
free(this_event);
- return -1;
+ return ret;
}
free(this_event);
} while (filter_str);
@@ -1255,7 +1335,7 @@ int pevent_filter_add_filter_str(struct event_filter *filter,
/* filter starts here */
for (event = events; event; event = event->next) {
ret = filter_event(filter, event->event, filter_start,
- error_str);
+ filter->error_buffer);
/* Failures are returned if a parse error happened */
if (ret < 0)
rtn = ret;
@@ -1263,8 +1343,10 @@ int pevent_filter_add_filter_str(struct event_filter *filter,
if (ret >= 0 && pevent->test_filters) {
char *test;
test = pevent_filter_make_string(filter, event->event->id);
- printf(" '%s: %s'\n", event->event->name, test);
- free(test);
+ if (test) {
+ printf(" '%s: %s'\n", event->event->name, test);
+ free(test);
+ }
}
}
@@ -1282,6 +1364,32 @@ static void free_filter_type(struct filter_type *filter_type)
}
/**
+ * pevent_filter_strerror - fill error message in a buffer
+ * @filter: the event filter that contains the error
+ * @err: the error code
+ * @buf: the buffer to be filled in
+ * @buflen: the size of the buffer
+ *
+ * Returns 0 if the message was filled successfully, -1 on error
+ */
+int pevent_filter_strerror(struct event_filter *filter, enum pevent_errno err,
+ char *buf, size_t buflen)
+{
+ if (err <= __PEVENT_ERRNO__START || err >= __PEVENT_ERRNO__END)
+ return -1;
+
+ if (strlen(filter->error_buffer) > 0) {
+ size_t len = snprintf(buf, buflen, "%s", filter->error_buffer);
+
+ if (len > buflen)
+ return -1;
+ return 0;
+ }
+
+ return pevent_strerror(filter->pevent, err, buf, buflen);
+}
+
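With the new return convention a caller no longer frees an error string; it asks the filter to format one. A sketch, assuming <stdio.h> and event-parse.h are included and using a made-up filter string:

    static void apply_filter(struct event_filter *filter)
    {
            enum pevent_errno err;
            char errbuf[PEVENT_FILTER_ERROR_BUFSZ];

            err = pevent_filter_add_filter_str(filter,
                                               "sched/sched_switch:next_pid==0");
            if (err) {
                    pevent_filter_strerror(filter, err, errbuf, sizeof(errbuf));
                    fprintf(stderr, "bad filter: %s\n", errbuf);
            }
    }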
+/**
* pevent_filter_remove_event - remove a filter for an event
* @filter: the event filter to remove from
* @event_id: the event to remove a filter for
@@ -1374,6 +1482,9 @@ static int copy_filter_type(struct event_filter *filter,
if (strcmp(str, "TRUE") == 0 || strcmp(str, "FALSE") == 0) {
/* Add trivial event */
arg = allocate_arg();
+ if (arg == NULL)
+ return -1;
+
arg->type = FILTER_ARG_BOOLEAN;
if (strcmp(str, "TRUE") == 0)
arg->boolean.value = 1;
@@ -1381,6 +1492,9 @@ static int copy_filter_type(struct event_filter *filter,
arg->boolean.value = 0;
filter_type = add_filter_type(filter, event->id);
+ if (filter_type == NULL)
+ return -1;
+
filter_type->filter = arg;
free(str);
@@ -1482,8 +1596,10 @@ int pevent_update_trivial(struct event_filter *dest, struct event_filter *source
* @type: remove only true, false, or both
*
* Removes filters that only contain a TRUE or FALSE boolean arg.
+ *
+ * Returns 0 on success and -1 if there was a problem.
*/
-void pevent_filter_clear_trivial(struct event_filter *filter,
+int pevent_filter_clear_trivial(struct event_filter *filter,
enum filter_trivial_type type)
{
struct filter_type *filter_type;
@@ -1492,13 +1608,15 @@ void pevent_filter_clear_trivial(struct event_filter *filter,
int i;
if (!filter->filters)
- return;
+ return 0;
/*
* Two steps, first get all ids with trivial filters.
* then remove those ids.
*/
for (i = 0; i < filter->filters; i++) {
+ int *new_ids;
+
filter_type = &filter->event_filters[i];
if (filter_type->filter->type != FILTER_ARG_BOOLEAN)
continue;
@@ -1513,19 +1631,24 @@ void pevent_filter_clear_trivial(struct event_filter *filter,
break;
}
- ids = realloc(ids, sizeof(*ids) * (count + 1));
- if (!ids)
- die("Can't allocate ids");
+ new_ids = realloc(ids, sizeof(*ids) * (count + 1));
+ if (!new_ids) {
+ free(ids);
+ return -1;
+ }
+
+ ids = new_ids;
ids[count++] = filter_type->event_id;
}
if (!count)
- return;
+ return 0;
for (i = 0; i < count; i++)
pevent_filter_remove_event(filter, ids[i]);
free(ids);
+ return 0;
}
/**
@@ -1565,8 +1688,8 @@ int pevent_filter_event_has_trivial(struct event_filter *filter,
}
}
-static int test_filter(struct event_format *event,
- struct filter_arg *arg, struct pevent_record *record);
+static int test_filter(struct event_format *event, struct filter_arg *arg,
+ struct pevent_record *record, enum pevent_errno *err);
static const char *
get_comm(struct event_format *event, struct pevent_record *record)
@@ -1612,15 +1735,24 @@ get_value(struct event_format *event,
}
static unsigned long long
-get_arg_value(struct event_format *event, struct filter_arg *arg, struct pevent_record *record);
+get_arg_value(struct event_format *event, struct filter_arg *arg,
+ struct pevent_record *record, enum pevent_errno *err);
static unsigned long long
-get_exp_value(struct event_format *event, struct filter_arg *arg, struct pevent_record *record)
+get_exp_value(struct event_format *event, struct filter_arg *arg,
+ struct pevent_record *record, enum pevent_errno *err)
{
unsigned long long lval, rval;
- lval = get_arg_value(event, arg->exp.left, record);
- rval = get_arg_value(event, arg->exp.right, record);
+ lval = get_arg_value(event, arg->exp.left, record, err);
+ rval = get_arg_value(event, arg->exp.right, record, err);
+
+ if (*err) {
+ /*
+ * There was an error, no need to process anymore.
+ */
+ return 0;
+ }
switch (arg->exp.type) {
case FILTER_EXP_ADD:
@@ -1655,39 +1787,51 @@ get_exp_value(struct event_format *event, struct filter_arg *arg, struct pevent_
case FILTER_EXP_NOT:
default:
- die("error in exp");
+ if (!*err)
+ *err = PEVENT_ERRNO__INVALID_EXP_TYPE;
}
return 0;
}
static unsigned long long
-get_arg_value(struct event_format *event, struct filter_arg *arg, struct pevent_record *record)
+get_arg_value(struct event_format *event, struct filter_arg *arg,
+ struct pevent_record *record, enum pevent_errno *err)
{
switch (arg->type) {
case FILTER_ARG_FIELD:
return get_value(event, arg->field.field, record);
case FILTER_ARG_VALUE:
- if (arg->value.type != FILTER_NUMBER)
- die("must have number field!");
+ if (arg->value.type != FILTER_NUMBER) {
+ if (!*err)
+ *err = PEVENT_ERRNO__NOT_A_NUMBER;
+ }
return arg->value.val;
case FILTER_ARG_EXP:
- return get_exp_value(event, arg, record);
+ return get_exp_value(event, arg, record, err);
default:
- die("oops in filter");
+ if (!*err)
+ *err = PEVENT_ERRNO__INVALID_ARG_TYPE;
}
return 0;
}
-static int test_num(struct event_format *event,
- struct filter_arg *arg, struct pevent_record *record)
+static int test_num(struct event_format *event, struct filter_arg *arg,
+ struct pevent_record *record, enum pevent_errno *err)
{
unsigned long long lval, rval;
- lval = get_arg_value(event, arg->num.left, record);
- rval = get_arg_value(event, arg->num.right, record);
+ lval = get_arg_value(event, arg->num.left, record, err);
+ rval = get_arg_value(event, arg->num.right, record, err);
+
+ if (*err) {
+ /*
+ * There was an error, no need to process anymore.
+ */
+ return 0;
+ }
switch (arg->num.type) {
case FILTER_CMP_EQ:
@@ -1709,7 +1853,8 @@ static int test_num(struct event_format *event,
return lval <= rval;
default:
- /* ?? */
+ if (!*err)
+ *err = PEVENT_ERRNO__ILLEGAL_INTEGER_CMP;
return 0;
}
}
@@ -1756,8 +1901,8 @@ static const char *get_field_str(struct filter_arg *arg, struct pevent_record *r
return val;
}
-static int test_str(struct event_format *event,
- struct filter_arg *arg, struct pevent_record *record)
+static int test_str(struct event_format *event, struct filter_arg *arg,
+ struct pevent_record *record, enum pevent_errno *err)
{
const char *val;
@@ -1781,48 +1926,57 @@ static int test_str(struct event_format *event,
return regexec(&arg->str.reg, val, 0, NULL, 0);
default:
- /* ?? */
+ if (!*err)
+ *err = PEVENT_ERRNO__ILLEGAL_STRING_CMP;
return 0;
}
}
-static int test_op(struct event_format *event,
- struct filter_arg *arg, struct pevent_record *record)
+static int test_op(struct event_format *event, struct filter_arg *arg,
+ struct pevent_record *record, enum pevent_errno *err)
{
switch (arg->op.type) {
case FILTER_OP_AND:
- return test_filter(event, arg->op.left, record) &&
- test_filter(event, arg->op.right, record);
+ return test_filter(event, arg->op.left, record, err) &&
+ test_filter(event, arg->op.right, record, err);
case FILTER_OP_OR:
- return test_filter(event, arg->op.left, record) ||
- test_filter(event, arg->op.right, record);
+ return test_filter(event, arg->op.left, record, err) ||
+ test_filter(event, arg->op.right, record, err);
case FILTER_OP_NOT:
- return !test_filter(event, arg->op.right, record);
+ return !test_filter(event, arg->op.right, record, err);
default:
- /* ?? */
+ if (!*err)
+ *err = PEVENT_ERRNO__INVALID_OP_TYPE;
return 0;
}
}
-static int test_filter(struct event_format *event,
- struct filter_arg *arg, struct pevent_record *record)
+static int test_filter(struct event_format *event, struct filter_arg *arg,
+ struct pevent_record *record, enum pevent_errno *err)
{
+ if (*err) {
+ /*
+ * There was an error, no need to process any further.
+ */
+ return 0;
+ }
+
switch (arg->type) {
case FILTER_ARG_BOOLEAN:
/* easy case */
return arg->boolean.value;
case FILTER_ARG_OP:
- return test_op(event, arg, record);
+ return test_op(event, arg, record, err);
case FILTER_ARG_NUM:
- return test_num(event, arg, record);
+ return test_num(event, arg, record, err);
case FILTER_ARG_STR:
- return test_str(event, arg, record);
+ return test_str(event, arg, record, err);
case FILTER_ARG_EXP:
case FILTER_ARG_VALUE:
@@ -1831,11 +1985,11 @@ static int test_filter(struct event_format *event,
* Expressions, fields and values evaluate
* to true if they return non-zero
*/
- return !!get_arg_value(event, arg, record);
+ return !!get_arg_value(event, arg, record, err);
default:
- die("oops!");
- /* ?? */
+ if (!*err)
+ *err = PEVENT_ERRNO__INVALID_ARG_TYPE;
return 0;
}
}
@@ -1848,8 +2002,7 @@ static int test_filter(struct event_format *event,
* Returns 1 if a filter was found for @event_id,
* otherwise 0.
*/
-int pevent_event_filtered(struct event_filter *filter,
- int event_id)
+int pevent_event_filtered(struct event_filter *filter, int event_id)
{
struct filter_type *filter_type;
@@ -1866,31 +2019,38 @@ int pevent_event_filtered(struct event_filter *filter,
* @filter: filter struct with filter information
* @record: the record to test against the filter
*
- * Returns:
- * 1 - filter found for event and @record matches
- * 0 - filter found for event and @record does not match
- * -1 - no filter found for @record's event
- * -2 - if no filters exist
+ * Returns: match result or error code (prefixed with PEVENT_ERRNO__)
+ * FILTER_MATCH - filter found for event and @record matches
+ * FILTER_MISS - filter found for event and @record does not match
+ * FILTER_NOT_FOUND - no filter found for @record's event
+ * NO_FILTER - no filters exist
+ * otherwise - an error occurred during the test
*/
-int pevent_filter_match(struct event_filter *filter,
- struct pevent_record *record)
+enum pevent_errno pevent_filter_match(struct event_filter *filter,
+ struct pevent_record *record)
{
struct pevent *pevent = filter->pevent;
struct filter_type *filter_type;
int event_id;
+ int ret;
+ enum pevent_errno err = 0;
+
+ filter_init_error_buf(filter);
if (!filter->filters)
- return FILTER_NONE;
+ return PEVENT_ERRNO__NO_FILTER;
event_id = pevent_data_type(pevent, record);
filter_type = find_filter_type(filter, event_id);
-
if (!filter_type)
- return FILTER_NOEXIST;
+ return PEVENT_ERRNO__FILTER_NOT_FOUND;
+
+ ret = test_filter(filter_type->event, filter_type->filter, record, &err);
+ if (err)
+ return err;
- return test_filter(filter_type->event, filter_type->filter, record) ?
- FILTER_MATCH : FILTER_MISS;
+ return ret ? PEVENT_ERRNO__FILTER_MATCH : PEVENT_ERRNO__FILTER_MISS;
}
static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
@@ -1902,7 +2062,6 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
int left_val = -1;
int right_val = -1;
int val;
- int len;
switch (arg->op.type) {
case FILTER_OP_AND:
@@ -1949,11 +2108,7 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
default:
break;
}
- str = malloc_or_die(6);
- if (val)
- strcpy(str, "TRUE");
- else
- strcpy(str, "FALSE");
+ asprintf(&str, val ? "TRUE" : "FALSE");
break;
}
}
@@ -1971,10 +2126,7 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
break;
}
- len = strlen(left) + strlen(right) + strlen(op) + 10;
- str = malloc_or_die(len);
- snprintf(str, len, "(%s) %s (%s)",
- left, op, right);
+ asprintf(&str, "(%s) %s (%s)", left, op, right);
break;
case FILTER_OP_NOT:
@@ -1990,16 +2142,10 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
right_val = 0;
if (right_val >= 0) {
/* just return the opposite */
- str = malloc_or_die(6);
- if (right_val)
- strcpy(str, "FALSE");
- else
- strcpy(str, "TRUE");
+ asprintf(&str, right_val ? "FALSE" : "TRUE");
break;
}
- len = strlen(right) + strlen(op) + 3;
- str = malloc_or_die(len);
- snprintf(str, len, "%s(%s)", op, right);
+ asprintf(&str, "%s(%s)", op, right);
break;
default:
@@ -2013,11 +2159,9 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
static char *val_to_str(struct event_filter *filter, struct filter_arg *arg)
{
- char *str;
-
- str = malloc_or_die(30);
+ char *str = NULL;
- snprintf(str, 30, "%lld", arg->value.val);
+ asprintf(&str, "%lld", arg->value.val);
return str;
}
@@ -2033,7 +2177,6 @@ static char *exp_to_str(struct event_filter *filter, struct filter_arg *arg)
char *rstr;
char *op;
char *str = NULL;
- int len;
lstr = arg_to_str(filter, arg->exp.left);
rstr = arg_to_str(filter, arg->exp.right);
@@ -2072,12 +2215,11 @@ static char *exp_to_str(struct event_filter *filter, struct filter_arg *arg)
op = "^";
break;
default:
- die("oops in exp");
+ op = "[ERROR IN EXPRESSION TYPE]";
+ break;
}
- len = strlen(op) + strlen(lstr) + strlen(rstr) + 4;
- str = malloc_or_die(len);
- snprintf(str, len, "%s %s %s", lstr, op, rstr);
+ asprintf(&str, "%s %s %s", lstr, op, rstr);
out:
free(lstr);
free(rstr);
@@ -2091,7 +2233,6 @@ static char *num_to_str(struct event_filter *filter, struct filter_arg *arg)
char *rstr;
char *str = NULL;
char *op = NULL;
- int len;
lstr = arg_to_str(filter, arg->num.left);
rstr = arg_to_str(filter, arg->num.right);
@@ -2122,10 +2263,7 @@ static char *num_to_str(struct event_filter *filter, struct filter_arg *arg)
if (!op)
op = "<=";
- len = strlen(lstr) + strlen(op) + strlen(rstr) + 4;
- str = malloc_or_die(len);
- sprintf(str, "%s %s %s", lstr, op, rstr);
-
+ asprintf(&str, "%s %s %s", lstr, op, rstr);
break;
default:
@@ -2143,7 +2281,6 @@ static char *str_to_str(struct event_filter *filter, struct filter_arg *arg)
{
char *str = NULL;
char *op = NULL;
- int len;
switch (arg->str.type) {
case FILTER_CMP_MATCH:
@@ -2161,12 +2298,8 @@ static char *str_to_str(struct event_filter *filter, struct filter_arg *arg)
if (!op)
op = "!~";
- len = strlen(arg->str.field->name) + strlen(op) +
- strlen(arg->str.val) + 6;
- str = malloc_or_die(len);
- snprintf(str, len, "%s %s \"%s\"",
- arg->str.field->name,
- op, arg->str.val);
+ asprintf(&str, "%s %s \"%s\"",
+ arg->str.field->name, op, arg->str.val);
break;
default:
@@ -2178,15 +2311,11 @@ static char *str_to_str(struct event_filter *filter, struct filter_arg *arg)
static char *arg_to_str(struct event_filter *filter, struct filter_arg *arg)
{
- char *str;
+ char *str = NULL;
switch (arg->type) {
case FILTER_ARG_BOOLEAN:
- str = malloc_or_die(6);
- if (arg->boolean.value)
- strcpy(str, "TRUE");
- else
- strcpy(str, "FALSE");
+ asprintf(&str, arg->boolean.value ? "TRUE" : "FALSE");
return str;
case FILTER_ARG_OP:
@@ -2221,7 +2350,7 @@ static char *arg_to_str(struct event_filter *filter, struct filter_arg *arg)
*
* Returns a string that displays the filter contents.
* This string must be freed with free(str).
- * NULL is returned if no filter is found.
+ * NULL is returned if no filter is found or allocation failed.
*/
char *
pevent_filter_make_string(struct event_filter *filter, int event_id)
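A minimal sketch of how a caller might consume pevent_filter_match()'s new enum pevent_errno return convention described above; the filter and record are assumed to have been obtained through the existing filter/record APIs, and record_passes_filter() is only an illustrative name.

#include "event-parse.h"

/* Sketch: decide whether a record passes, treating "no filter" as a pass. */
static int record_passes_filter(struct event_filter *filter,
				struct pevent_record *record)
{
	switch (pevent_filter_match(filter, record)) {
	case PEVENT_ERRNO__FILTER_MATCH:
	case PEVENT_ERRNO__NO_FILTER:
	case PEVENT_ERRNO__FILTER_NOT_FOUND:
		return 1;
	case PEVENT_ERRNO__FILTER_MISS:
		return 0;
	default:
		return -1;	/* a real error, e.g. PEVENT_ERRNO__INVALID_EXP_TYPE */
	}
}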
diff --git a/tools/lib/traceevent/parse-utils.c b/tools/lib/traceevent/parse-utils.c
index bba701c..eda07fa 100644
--- a/tools/lib/traceevent/parse-utils.c
+++ b/tools/lib/traceevent/parse-utils.c
@@ -25,40 +25,6 @@
#define __weak __attribute__((weak))
-void __vdie(const char *fmt, va_list ap)
-{
- int ret = errno;
-
- if (errno)
- perror("trace-cmd");
- else
- ret = -1;
-
- fprintf(stderr, " ");
- vfprintf(stderr, fmt, ap);
-
- fprintf(stderr, "\n");
- exit(ret);
-}
-
-void __die(const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- __vdie(fmt, ap);
- va_end(ap);
-}
-
-void __weak die(const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- __vdie(fmt, ap);
- va_end(ap);
-}
-
void __vwarning(const char *fmt, va_list ap)
{
if (errno)
@@ -117,13 +83,3 @@ void __weak pr_stat(const char *fmt, ...)
__vpr_stat(fmt, ap);
va_end(ap);
}
-
-void __weak *malloc_or_die(unsigned int size)
-{
- void *data;
-
- data = malloc(size);
- if (!data)
- die("malloc");
- return data;
-}
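With die(), __die() and malloc_or_die() removed here, allocation failures now have to be reported back to the caller rather than aborting the process. A minimal sketch of the replacement pattern, assuming warning() from event-utils.h as used elsewhere in the library; dup_name() is only an illustrative helper:

#include <string.h>
#include "event-utils.h"	/* warning() */

/* Sketch: duplicate a string, reporting failure instead of dying. */
static char *dup_name(const char *name)
{
	char *copy = strdup(name);

	if (!copy)
		warning("failed to allocate name buffer");
	return copy;	/* caller must cope with NULL */
}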
diff --git a/tools/lib/traceevent/plugin_cfg80211.c b/tools/lib/traceevent/plugin_cfg80211.c
new file mode 100644
index 0000000..c066b25
--- /dev/null
+++ b/tools/lib/traceevent/plugin_cfg80211.c
@@ -0,0 +1,30 @@
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <endian.h>
+#include "event-parse.h"
+
+static unsigned long long
+process___le16_to_cpup(struct trace_seq *s,
+ unsigned long long *args)
+{
+ uint16_t *val = (uint16_t *) (unsigned long) args[0];
+ return val ? (long long) le16toh(*val) : 0;
+}
+
+int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+{
+ pevent_register_print_function(pevent,
+ process___le16_to_cpup,
+ PEVENT_FUNC_ARG_INT,
+ "__le16_to_cpup",
+ PEVENT_FUNC_ARG_PTR,
+ PEVENT_FUNC_ARG_VOID);
+ return 0;
+}
+
+void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+{
+ pevent_unregister_print_function(pevent, process___le16_to_cpup,
+ "__le16_to_cpup");
+}
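The cfg80211 plugin above is the smallest instance of the print-function hook; the same skeleton spelled out for a purely hypothetical helper name "my_helper" (an assumption for illustration, not a real kernel print helper), with the varargs convention annotated:

#include "event-parse.h"

static unsigned long long
process_my_helper(struct trace_seq *s, unsigned long long *args)
{
	/* args[] holds the already-evaluated print arguments */
	return args[0];
}

int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
{
	pevent_register_print_function(pevent,
				       process_my_helper,
				       PEVENT_FUNC_ARG_INT,	/* return type */
				       "my_helper",		/* name in the print fmt */
				       PEVENT_FUNC_ARG_LONG,	/* one argument */
				       PEVENT_FUNC_ARG_VOID);	/* terminator */
	return 0;
}

void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
{
	pevent_unregister_print_function(pevent, process_my_helper,
					 "my_helper");
}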
diff --git a/tools/lib/traceevent/plugin_function.c b/tools/lib/traceevent/plugin_function.c
new file mode 100644
index 0000000..80ba4ff
--- /dev/null
+++ b/tools/lib/traceevent/plugin_function.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "event-parse.h"
+#include "event-utils.h"
+
+static struct func_stack {
+ int size;
+ char **stack;
+} *fstack;
+
+static int cpus = -1;
+
+#define STK_BLK 10
+
+static void add_child(struct func_stack *stack, const char *child, int pos)
+{
+ int i;
+
+ if (!child)
+ return;
+
+ if (pos < stack->size)
+ free(stack->stack[pos]);
+ else {
+ char **ptr;
+
+ ptr = realloc(stack->stack, sizeof(char *) *
+ (stack->size + STK_BLK));
+ if (!ptr) {
+ warning("could not allocate plugin memory\n");
+ return;
+ }
+
+ stack->stack = ptr;
+
+ for (i = stack->size; i < stack->size + STK_BLK; i++)
+ stack->stack[i] = NULL;
+ stack->size += STK_BLK;
+ }
+
+ stack->stack[pos] = strdup(child);
+}
+
+static int add_and_get_index(const char *parent, const char *child, int cpu)
+{
+ int i;
+
+ if (cpu < 0)
+ return 0;
+
+ if (cpu > cpus) {
+ struct func_stack *ptr;
+
+ ptr = realloc(fstack, sizeof(*fstack) * (cpu + 1));
+ if (!ptr) {
+ warning("could not allocate plugin memory\n");
+ return 0;
+ }
+
+ fstack = ptr;
+
+ /* Account for holes in the cpu count */
+ for (i = cpus + 1; i <= cpu; i++)
+ memset(&fstack[i], 0, sizeof(fstack[i]));
+ cpus = cpu;
+ }
+
+ for (i = 0; i < fstack[cpu].size && fstack[cpu].stack[i]; i++) {
+ if (strcmp(parent, fstack[cpu].stack[i]) == 0) {
+ add_child(&fstack[cpu], child, i+1);
+ return i;
+ }
+ }
+
+ /* Not found */
+ add_child(&fstack[cpu], parent, 0);
+ add_child(&fstack[cpu], child, 1);
+ return 0;
+}
+
+static int function_handler(struct trace_seq *s, struct pevent_record *record,
+ struct event_format *event, void *context)
+{
+ struct pevent *pevent = event->pevent;
+ unsigned long long function;
+ unsigned long long pfunction;
+ const char *func;
+ const char *parent;
+ int index;
+
+ if (pevent_get_field_val(s, event, "ip", record, &function, 1))
+ return trace_seq_putc(s, '!');
+
+ func = pevent_find_function(pevent, function);
+
+ if (pevent_get_field_val(s, event, "parent_ip", record, &pfunction, 1))
+ return trace_seq_putc(s, '!');
+
+ parent = pevent_find_function(pevent, pfunction);
+
+ index = add_and_get_index(parent, func, record->cpu);
+
+ trace_seq_printf(s, "%*s", index*3, "");
+
+ if (func)
+ trace_seq_printf(s, "%s", func);
+ else
+ trace_seq_printf(s, "0x%llx", function);
+
+ trace_seq_printf(s, " <-- ");
+ if (parent)
+ trace_seq_printf(s, "%s", parent);
+ else
+ trace_seq_printf(s, "0x%llx", pfunction);
+
+ return 0;
+}
+
+int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+{
+ pevent_register_event_handler(pevent, -1, "ftrace", "function",
+ function_handler, NULL);
+ return 0;
+}
+
+void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+{
+ int i, x;
+
+ pevent_unregister_event_handler(pevent, -1, "ftrace", "function",
+ function_handler, NULL);
+
+ for (i = 0; i <= cpus; i++) {
+ for (x = 0; x < fstack[i].size && fstack[i].stack[x]; x++)
+ free(fstack[i].stack[x]);
+ free(fstack[i].stack);
+ }
+
+ free(fstack);
+ fstack = NULL;
+ cpus = -1;
+}
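add_child() above grows the per-cpu stack in STK_BLK-sized steps and NULL-fills the new slots so lookups can stop at the first hole. A standalone sketch of that growth pattern, assuming (as above) that the requested index is at most one past the entries already in use:

#include <stdlib.h>

#define BLK 10

/* Sketch: make sure arr[pos] exists, growing by BLK entries at a time. */
static int ensure_slot(char ***arr, int *size, int pos)
{
	char **ptr;
	int i;

	if (pos < *size)
		return 0;

	ptr = realloc(*arr, sizeof(char *) * (*size + BLK));
	if (!ptr)
		return -1;

	for (i = *size; i < *size + BLK; i++)
		ptr[i] = NULL;		/* new slots start empty */

	*arr = ptr;
	*size += BLK;
	return 0;
}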
diff --git a/tools/lib/traceevent/plugin_hrtimer.c b/tools/lib/traceevent/plugin_hrtimer.c
new file mode 100644
index 0000000..12bf14c
--- /dev/null
+++ b/tools/lib/traceevent/plugin_hrtimer.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2009 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "event-parse.h"
+
+static int timer_expire_handler(struct trace_seq *s,
+ struct pevent_record *record,
+ struct event_format *event, void *context)
+{
+ trace_seq_printf(s, "hrtimer=");
+
+ if (pevent_print_num_field(s, "0x%llx", event, "timer",
+ record, 0) == -1)
+ pevent_print_num_field(s, "0x%llx", event, "hrtimer",
+ record, 1);
+
+ trace_seq_printf(s, " now=");
+
+ pevent_print_num_field(s, "%llu", event, "now", record, 1);
+
+ pevent_print_func_field(s, " function=%s", event, "function",
+ record, 0);
+ return 0;
+}
+
+static int timer_start_handler(struct trace_seq *s,
+ struct pevent_record *record,
+ struct event_format *event, void *context)
+{
+ trace_seq_printf(s, "hrtimer=");
+
+ if (pevent_print_num_field(s, "0x%llx", event, "timer",
+ record, 0) == -1)
+ pevent_print_num_field(s, "0x%llx", event, "hrtimer",
+ record, 1);
+
+ pevent_print_func_field(s, " function=%s", event, "function",
+ record, 0);
+
+ trace_seq_printf(s, " expires=");
+ pevent_print_num_field(s, "%llu", event, "expires", record, 1);
+
+ trace_seq_printf(s, " softexpires=");
+ pevent_print_num_field(s, "%llu", event, "softexpires", record, 1);
+ return 0;
+}
+
+int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+{
+ pevent_register_event_handler(pevent, -1,
+ "timer", "hrtimer_expire_entry",
+ timer_expire_handler, NULL);
+
+ pevent_register_event_handler(pevent, -1, "timer", "hrtimer_start",
+ timer_start_handler, NULL);
+ return 0;
+}
+
+void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+{
+ pevent_unregister_event_handler(pevent, -1,
+ "timer", "hrtimer_expire_entry",
+ timer_expire_handler, NULL);
+
+ pevent_unregister_event_handler(pevent, -1, "timer", "hrtimer_start",
+ timer_start_handler, NULL);
+}
diff --git a/tools/lib/traceevent/plugin_jbd2.c b/tools/lib/traceevent/plugin_jbd2.c
new file mode 100644
index 0000000..0db714c
--- /dev/null
+++ b/tools/lib/traceevent/plugin_jbd2.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "event-parse.h"
+
+#define MINORBITS 20
+#define MINORMASK ((1U << MINORBITS) - 1)
+
+#define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS))
+#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK))
+
+static unsigned long long
+process_jbd2_dev_to_name(struct trace_seq *s,
+ unsigned long long *args)
+{
+ unsigned int dev = args[0];
+
+ trace_seq_printf(s, "%d:%d", MAJOR(dev), MINOR(dev));
+ return 0;
+}
+
+static unsigned long long
+process_jiffies_to_msecs(struct trace_seq *s,
+ unsigned long long *args)
+{
+ unsigned long long jiffies = args[0];
+
+ trace_seq_printf(s, "%lld", jiffies);
+ return jiffies;
+}
+
+int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+{
+ pevent_register_print_function(pevent,
+ process_jbd2_dev_to_name,
+ PEVENT_FUNC_ARG_STRING,
+ "jbd2_dev_to_name",
+ PEVENT_FUNC_ARG_INT,
+ PEVENT_FUNC_ARG_VOID);
+
+ pevent_register_print_function(pevent,
+ process_jiffies_to_msecs,
+ PEVENT_FUNC_ARG_LONG,
+ "jiffies_to_msecs",
+ PEVENT_FUNC_ARG_LONG,
+ PEVENT_FUNC_ARG_VOID);
+ return 0;
+}
+
+void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+{
+ pevent_unregister_print_function(pevent, process_jbd2_dev_to_name,
+ "jbd2_dev_to_name");
+
+ pevent_unregister_print_function(pevent, process_jiffies_to_msecs,
+ "jiffies_to_msecs");
+}
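The MAJOR()/MINOR() macros above undo the kernel's dev_t packing (20 minor bits, major in the bits above). A tiny worked example under the same MINORBITS layout:

#include <stdio.h>

#define MINORBITS	20
#define MINORMASK	((1U << MINORBITS) - 1)
#define MAJOR(dev)	((unsigned int) ((dev) >> MINORBITS))
#define MINOR(dev)	((unsigned int) ((dev) & MINORMASK))

int main(void)
{
	unsigned int dev = (8 << MINORBITS) | 1;	/* 8:1, e.g. a first partition */

	printf("%u:%u\n", MAJOR(dev), MINOR(dev));	/* prints 8:1 */
	return 0;
}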
diff --git a/tools/lib/traceevent/plugin_kmem.c b/tools/lib/traceevent/plugin_kmem.c
new file mode 100644
index 0000000..70650ff
--- /dev/null
+++ b/tools/lib/traceevent/plugin_kmem.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "event-parse.h"
+
+static int call_site_handler(struct trace_seq *s, struct pevent_record *record,
+ struct event_format *event, void *context)
+{
+ struct format_field *field;
+ unsigned long long val, addr;
+ void *data = record->data;
+ const char *func;
+
+ field = pevent_find_field(event, "call_site");
+ if (!field)
+ return 1;
+
+ if (pevent_read_number_field(field, data, &val))
+ return 1;
+
+ func = pevent_find_function(event->pevent, val);
+ if (!func)
+ return 1;
+
+ addr = pevent_find_function_address(event->pevent, val);
+
+ trace_seq_printf(s, "(%s+0x%x) ", func, (int)(val - addr));
+ return 1;
+}
+
+int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+{
+ pevent_register_event_handler(pevent, -1, "kmem", "kfree",
+ call_site_handler, NULL);
+
+ pevent_register_event_handler(pevent, -1, "kmem", "kmalloc",
+ call_site_handler, NULL);
+
+ pevent_register_event_handler(pevent, -1, "kmem", "kmalloc_node",
+ call_site_handler, NULL);
+
+ pevent_register_event_handler(pevent, -1, "kmem", "kmem_cache_alloc",
+ call_site_handler, NULL);
+
+ pevent_register_event_handler(pevent, -1, "kmem",
+ "kmem_cache_alloc_node",
+ call_site_handler, NULL);
+
+ pevent_register_event_handler(pevent, -1, "kmem", "kmem_cache_free",
+ call_site_handler, NULL);
+ return 0;
+}
+
+void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+{
+ pevent_unregister_event_handler(pevent, -1, "kmem", "kfree",
+ call_site_handler, NULL);
+
+ pevent_unregister_event_handler(pevent, -1, "kmem", "kmalloc",
+ call_site_handler, NULL);
+
+ pevent_unregister_event_handler(pevent, -1, "kmem", "kmalloc_node",
+ call_site_handler, NULL);
+
+ pevent_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_alloc",
+ call_site_handler, NULL);
+
+ pevent_unregister_event_handler(pevent, -1, "kmem",
+ "kmem_cache_alloc_node",
+ call_site_handler, NULL);
+
+ pevent_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_free",
+ call_site_handler, NULL);
+}
diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugin_kvm.c
new file mode 100644
index 0000000..9e0e8c6
--- /dev/null
+++ b/tools/lib/traceevent/plugin_kvm.c
@@ -0,0 +1,465 @@
+/*
+ * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+
+#include "event-parse.h"
+
+#ifdef HAVE_UDIS86
+
+#include <udis86.h>
+
+static ud_t ud;
+
+static void init_disassembler(void)
+{
+ ud_init(&ud);
+ ud_set_syntax(&ud, UD_SYN_ATT);
+}
+
+static const char *disassemble(unsigned char *insn, int len, uint64_t rip,
+ int cr0_pe, int eflags_vm,
+ int cs_d, int cs_l)
+{
+ int mode;
+
+ if (!cr0_pe)
+ mode = 16;
+ else if (eflags_vm)
+ mode = 16;
+ else if (cs_l)
+ mode = 64;
+ else if (cs_d)
+ mode = 32;
+ else
+ mode = 16;
+
+ ud_set_pc(&ud, rip);
+ ud_set_mode(&ud, mode);
+ ud_set_input_buffer(&ud, insn, len);
+ ud_disassemble(&ud);
+ return ud_insn_asm(&ud);
+}
+
+#else
+
+static void init_disassembler(void)
+{
+}
+
+static const char *disassemble(unsigned char *insn, int len, uint64_t rip,
+ int cr0_pe, int eflags_vm,
+ int cs_d, int cs_l)
+{
+ static char out[15*3+1];
+ int i;
+
+ for (i = 0; i < len; ++i)
+ sprintf(out + i * 3, "%02x ", insn[i]);
+ out[len*3-1] = '\0';
+ return out;
+}
+
+#endif
+
+
+#define VMX_EXIT_REASONS \
+ _ER(EXCEPTION_NMI, 0) \
+ _ER(EXTERNAL_INTERRUPT, 1) \
+ _ER(TRIPLE_FAULT, 2) \
+ _ER(PENDING_INTERRUPT, 7) \
+ _ER(NMI_WINDOW, 8) \
+ _ER(TASK_SWITCH, 9) \
+ _ER(CPUID, 10) \
+ _ER(HLT, 12) \
+ _ER(INVD, 13) \
+ _ER(INVLPG, 14) \
+ _ER(RDPMC, 15) \
+ _ER(RDTSC, 16) \
+ _ER(VMCALL, 18) \
+ _ER(VMCLEAR, 19) \
+ _ER(VMLAUNCH, 20) \
+ _ER(VMPTRLD, 21) \
+ _ER(VMPTRST, 22) \
+ _ER(VMREAD, 23) \
+ _ER(VMRESUME, 24) \
+ _ER(VMWRITE, 25) \
+ _ER(VMOFF, 26) \
+ _ER(VMON, 27) \
+ _ER(CR_ACCESS, 28) \
+ _ER(DR_ACCESS, 29) \
+ _ER(IO_INSTRUCTION, 30) \
+ _ER(MSR_READ, 31) \
+ _ER(MSR_WRITE, 32) \
+ _ER(MWAIT_INSTRUCTION, 36) \
+ _ER(MONITOR_INSTRUCTION, 39) \
+ _ER(PAUSE_INSTRUCTION, 40) \
+ _ER(MCE_DURING_VMENTRY, 41) \
+ _ER(TPR_BELOW_THRESHOLD, 43) \
+ _ER(APIC_ACCESS, 44) \
+ _ER(EOI_INDUCED, 45) \
+ _ER(EPT_VIOLATION, 48) \
+ _ER(EPT_MISCONFIG, 49) \
+ _ER(INVEPT, 50) \
+ _ER(PREEMPTION_TIMER, 52) \
+ _ER(WBINVD, 54) \
+ _ER(XSETBV, 55) \
+ _ER(APIC_WRITE, 56) \
+ _ER(INVPCID, 58)
+
+#define SVM_EXIT_REASONS \
+ _ER(EXIT_READ_CR0, 0x000) \
+ _ER(EXIT_READ_CR3, 0x003) \
+ _ER(EXIT_READ_CR4, 0x004) \
+ _ER(EXIT_READ_CR8, 0x008) \
+ _ER(EXIT_WRITE_CR0, 0x010) \
+ _ER(EXIT_WRITE_CR3, 0x013) \
+ _ER(EXIT_WRITE_CR4, 0x014) \
+ _ER(EXIT_WRITE_CR8, 0x018) \
+ _ER(EXIT_READ_DR0, 0x020) \
+ _ER(EXIT_READ_DR1, 0x021) \
+ _ER(EXIT_READ_DR2, 0x022) \
+ _ER(EXIT_READ_DR3, 0x023) \
+ _ER(EXIT_READ_DR4, 0x024) \
+ _ER(EXIT_READ_DR5, 0x025) \
+ _ER(EXIT_READ_DR6, 0x026) \
+ _ER(EXIT_READ_DR7, 0x027) \
+ _ER(EXIT_WRITE_DR0, 0x030) \
+ _ER(EXIT_WRITE_DR1, 0x031) \
+ _ER(EXIT_WRITE_DR2, 0x032) \
+ _ER(EXIT_WRITE_DR3, 0x033) \
+ _ER(EXIT_WRITE_DR4, 0x034) \
+ _ER(EXIT_WRITE_DR5, 0x035) \
+ _ER(EXIT_WRITE_DR6, 0x036) \
+ _ER(EXIT_WRITE_DR7, 0x037) \
+ _ER(EXIT_EXCP_BASE, 0x040) \
+ _ER(EXIT_INTR, 0x060) \
+ _ER(EXIT_NMI, 0x061) \
+ _ER(EXIT_SMI, 0x062) \
+ _ER(EXIT_INIT, 0x063) \
+ _ER(EXIT_VINTR, 0x064) \
+ _ER(EXIT_CR0_SEL_WRITE, 0x065) \
+ _ER(EXIT_IDTR_READ, 0x066) \
+ _ER(EXIT_GDTR_READ, 0x067) \
+ _ER(EXIT_LDTR_READ, 0x068) \
+ _ER(EXIT_TR_READ, 0x069) \
+ _ER(EXIT_IDTR_WRITE, 0x06a) \
+ _ER(EXIT_GDTR_WRITE, 0x06b) \
+ _ER(EXIT_LDTR_WRITE, 0x06c) \
+ _ER(EXIT_TR_WRITE, 0x06d) \
+ _ER(EXIT_RDTSC, 0x06e) \
+ _ER(EXIT_RDPMC, 0x06f) \
+ _ER(EXIT_PUSHF, 0x070) \
+ _ER(EXIT_POPF, 0x071) \
+ _ER(EXIT_CPUID, 0x072) \
+ _ER(EXIT_RSM, 0x073) \
+ _ER(EXIT_IRET, 0x074) \
+ _ER(EXIT_SWINT, 0x075) \
+ _ER(EXIT_INVD, 0x076) \
+ _ER(EXIT_PAUSE, 0x077) \
+ _ER(EXIT_HLT, 0x078) \
+ _ER(EXIT_INVLPG, 0x079) \
+ _ER(EXIT_INVLPGA, 0x07a) \
+ _ER(EXIT_IOIO, 0x07b) \
+ _ER(EXIT_MSR, 0x07c) \
+ _ER(EXIT_TASK_SWITCH, 0x07d) \
+ _ER(EXIT_FERR_FREEZE, 0x07e) \
+ _ER(EXIT_SHUTDOWN, 0x07f) \
+ _ER(EXIT_VMRUN, 0x080) \
+ _ER(EXIT_VMMCALL, 0x081) \
+ _ER(EXIT_VMLOAD, 0x082) \
+ _ER(EXIT_VMSAVE, 0x083) \
+ _ER(EXIT_STGI, 0x084) \
+ _ER(EXIT_CLGI, 0x085) \
+ _ER(EXIT_SKINIT, 0x086) \
+ _ER(EXIT_RDTSCP, 0x087) \
+ _ER(EXIT_ICEBP, 0x088) \
+ _ER(EXIT_WBINVD, 0x089) \
+ _ER(EXIT_MONITOR, 0x08a) \
+ _ER(EXIT_MWAIT, 0x08b) \
+ _ER(EXIT_MWAIT_COND, 0x08c) \
+ _ER(EXIT_NPF, 0x400) \
+ _ER(EXIT_ERR, -1)
+
+#define _ER(reason, val) { #reason, val },
+struct str_values {
+ const char *str;
+ int val;
+};
+
+static struct str_values vmx_exit_reasons[] = {
+ VMX_EXIT_REASONS
+ { NULL, -1}
+};
+
+static struct str_values svm_exit_reasons[] = {
+ SVM_EXIT_REASONS
+ { NULL, -1}
+};
+
+static struct isa_exit_reasons {
+ unsigned isa;
+ struct str_values *strings;
+} isa_exit_reasons[] = {
+ { .isa = 1, .strings = vmx_exit_reasons },
+ { .isa = 2, .strings = svm_exit_reasons },
+ { }
+};
+
+static const char *find_exit_reason(unsigned isa, int val)
+{
+ struct str_values *strings = NULL;
+ int i;
+
+ for (i = 0; isa_exit_reasons[i].strings; ++i)
+ if (isa_exit_reasons[i].isa == isa) {
+ strings = isa_exit_reasons[i].strings;
+ break;
+ }
+ if (!strings)
+ return "UNKNOWN-ISA";
+ for (i = 0; strings[i].val >= 0; i++)
+ if (strings[i].val == val)
+ break;
+ if (strings[i].str)
+ return strings[i].str;
+ return "UNKNOWN";
+}
+
+static int kvm_exit_handler(struct trace_seq *s, struct pevent_record *record,
+ struct event_format *event, void *context)
+{
+ unsigned long long isa;
+ unsigned long long val;
+ unsigned long long info1 = 0, info2 = 0;
+
+ if (pevent_get_field_val(s, event, "exit_reason", record, &val, 1) < 0)
+ return -1;
+
+ if (pevent_get_field_val(s, event, "isa", record, &isa, 0) < 0)
+ isa = 1;
+
+ trace_seq_printf(s, "reason %s", find_exit_reason(isa, val));
+
+ pevent_print_num_field(s, " rip 0x%lx", event, "guest_rip", record, 1);
+
+ if (pevent_get_field_val(s, event, "info1", record, &info1, 0) >= 0
+ && pevent_get_field_val(s, event, "info2", record, &info2, 0) >= 0)
+ trace_seq_printf(s, " info %llx %llx", info1, info2);
+
+ return 0;
+}
+
+#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
+#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
+#define KVM_EMUL_INSN_F_CS_D (1 << 2)
+#define KVM_EMUL_INSN_F_CS_L (1 << 3)
+
+static int kvm_emulate_insn_handler(struct trace_seq *s,
+ struct pevent_record *record,
+ struct event_format *event, void *context)
+{
+ unsigned long long rip, csbase, len, flags, failed;
+ int llen;
+ uint8_t *insn;
+ const char *disasm;
+
+ if (pevent_get_field_val(s, event, "rip", record, &rip, 1) < 0)
+ return -1;
+
+ if (pevent_get_field_val(s, event, "csbase", record, &csbase, 1) < 0)
+ return -1;
+
+ if (pevent_get_field_val(s, event, "len", record, &len, 1) < 0)
+ return -1;
+
+ if (pevent_get_field_val(s, event, "flags", record, &flags, 1) < 0)
+ return -1;
+
+ if (pevent_get_field_val(s, event, "failed", record, &failed, 1) < 0)
+ return -1;
+
+ insn = pevent_get_field_raw(s, event, "insn", record, &llen, 1);
+ if (!insn)
+ return -1;
+
+ disasm = disassemble(insn, len, rip,
+ flags & KVM_EMUL_INSN_F_CR0_PE,
+ flags & KVM_EMUL_INSN_F_EFL_VM,
+ flags & KVM_EMUL_INSN_F_CS_D,
+ flags & KVM_EMUL_INSN_F_CS_L);
+
+ trace_seq_printf(s, "%llx:%llx: %s%s", csbase, rip, disasm,
+ failed ? " FAIL" : "");
+ return 0;
+}
+
+union kvm_mmu_page_role {
+ unsigned word;
+ struct {
+ unsigned glevels:4;
+ unsigned level:4;
+ unsigned quadrant:2;
+ unsigned pad_for_nice_hex_output:6;
+ unsigned direct:1;
+ unsigned access:3;
+ unsigned invalid:1;
+ unsigned cr4_pge:1;
+ unsigned nxe:1;
+ };
+};
+
+static int kvm_mmu_print_role(struct trace_seq *s, struct pevent_record *record,
+ struct event_format *event, void *context)
+{
+ unsigned long long val;
+ static const char *access_str[] = {
+ "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"
+ };
+ union kvm_mmu_page_role role;
+
+ if (pevent_get_field_val(s, event, "role", record, &val, 1) < 0)
+ return -1;
+
+ role.word = (int)val;
+
+ /*
+ * We can only use the structure if the file is of the same
+ * endianness.
+ */
+ if (pevent_is_file_bigendian(event->pevent) ==
+ pevent_is_host_bigendian(event->pevent)) {
+
+ trace_seq_printf(s, "%u/%u q%u%s %s%s %spge %snxe",
+ role.level,
+ role.glevels,
+ role.quadrant,
+ role.direct ? " direct" : "",
+ access_str[role.access],
+ role.invalid ? " invalid" : "",
+ role.cr4_pge ? "" : "!",
+ role.nxe ? "" : "!");
+ } else
+ trace_seq_printf(s, "WORD: %08x", role.word);
+
+ pevent_print_num_field(s, " root %u ", event,
+ "root_count", record, 1);
+
+ if (pevent_get_field_val(s, event, "unsync", record, &val, 1) < 0)
+ return -1;
+
+ trace_seq_printf(s, "%s%c", val ? "unsync" : "sync", 0);
+ return 0;
+}
+
+static int kvm_mmu_get_page_handler(struct trace_seq *s,
+ struct pevent_record *record,
+ struct event_format *event, void *context)
+{
+ unsigned long long val;
+
+ if (pevent_get_field_val(s, event, "created", record, &val, 1) < 0)
+ return -1;
+
+ trace_seq_printf(s, "%s ", val ? "new" : "existing");
+
+ if (pevent_get_field_val(s, event, "gfn", record, &val, 1) < 0)
+ return -1;
+
+ trace_seq_printf(s, "sp gfn %llx ", val);
+ return kvm_mmu_print_role(s, record, event, context);
+}
+
+#define PT_WRITABLE_SHIFT 1
+#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
+
+static unsigned long long
+process_is_writable_pte(struct trace_seq *s, unsigned long long *args)
+{
+ unsigned long pte = args[0];
+ return pte & PT_WRITABLE_MASK;
+}
+
+int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+{
+ init_disassembler();
+
+ pevent_register_event_handler(pevent, -1, "kvm", "kvm_exit",
+ kvm_exit_handler, NULL);
+
+ pevent_register_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
+ kvm_emulate_insn_handler, NULL);
+
+ pevent_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
+ kvm_mmu_get_page_handler, NULL);
+
+ pevent_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page",
+ kvm_mmu_print_role, NULL);
+
+ pevent_register_event_handler(pevent, -1,
+ "kvmmmu", "kvm_mmu_unsync_page",
+ kvm_mmu_print_role, NULL);
+
+ pevent_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page",
+ kvm_mmu_print_role, NULL);
+
+ pevent_register_event_handler(pevent, -1, "kvmmmu",
+ "kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
+ NULL);
+
+ pevent_register_print_function(pevent,
+ process_is_writable_pte,
+ PEVENT_FUNC_ARG_INT,
+ "is_writable_pte",
+ PEVENT_FUNC_ARG_LONG,
+ PEVENT_FUNC_ARG_VOID);
+ return 0;
+}
+
+void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+{
+ pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_exit",
+ kvm_exit_handler, NULL);
+
+ pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
+ kvm_emulate_insn_handler, NULL);
+
+ pevent_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
+ kvm_mmu_get_page_handler, NULL);
+
+ pevent_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page",
+ kvm_mmu_print_role, NULL);
+
+ pevent_unregister_event_handler(pevent, -1,
+ "kvmmmu", "kvm_mmu_unsync_page",
+ kvm_mmu_print_role, NULL);
+
+ pevent_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page",
+ kvm_mmu_print_role, NULL);
+
+ pevent_unregister_event_handler(pevent, -1, "kvmmmu",
+ "kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
+ NULL);
+
+ pevent_unregister_print_function(pevent, process_is_writable_pte,
+ "is_writable_pte");
+}
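The exit-reason tables above use the X-macro idiom: VMX_EXIT_REASONS and SVM_EXIT_REASONS list every (name, value) pair once, and _ER() turns each entry into a struct initializer. A minimal sketch of the same trick with a two-entry list:

#include <stdio.h>

#define DEMO_REASONS \
	_ER(FIRST,  0)	\
	_ER(SECOND, 1)

/* Expand the list into a name/value lookup table */
#define _ER(reason, val) { #reason, val },
static struct { const char *str; int val; } demo_reasons[] = {
	DEMO_REASONS
	{ NULL, -1 }
};
#undef _ER

int main(void)
{
	int i;

	for (i = 0; demo_reasons[i].str; i++)
		printf("%s = %d\n", demo_reasons[i].str, demo_reasons[i].val);
	return 0;
}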
diff --git a/tools/lib/traceevent/plugin_mac80211.c b/tools/lib/traceevent/plugin_mac80211.c
new file mode 100644
index 0000000..7e15a0f
--- /dev/null
+++ b/tools/lib/traceevent/plugin_mac80211.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2009 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "event-parse.h"
+
+#define INDENT 65
+
+static void print_string(struct trace_seq *s, struct event_format *event,
+ const char *name, const void *data)
+{
+ struct format_field *f = pevent_find_field(event, name);
+ int offset;
+ int length;
+
+ if (!f) {
+ trace_seq_printf(s, "NOTFOUND:%s", name);
+ return;
+ }
+
+ offset = f->offset;
+ length = f->size;
+
+ if (!strncmp(f->type, "__data_loc", 10)) {
+ unsigned long long v;
+ if (pevent_read_number_field(f, data, &v)) {
+ trace_seq_printf(s, "invalid_data_loc");
+ return;
+ }
+ offset = v & 0xffff;
+ length = v >> 16;
+ }
+
+ trace_seq_printf(s, "%.*s", length, (char *)data + offset);
+}
+
+#define SF(fn) pevent_print_num_field(s, fn ":%d", event, fn, record, 0)
+#define SFX(fn) pevent_print_num_field(s, fn ":%#x", event, fn, record, 0)
+#define SP() trace_seq_putc(s, ' ')
+
+static int drv_bss_info_changed(struct trace_seq *s,
+ struct pevent_record *record,
+ struct event_format *event, void *context)
+{
+ void *data = record->data;
+
+ print_string(s, event, "wiphy_name", data);
+ trace_seq_printf(s, " vif:");
+ print_string(s, event, "vif_name", data);
+ pevent_print_num_field(s, "(%d)", event, "vif_type", record, 1);
+
+ trace_seq_printf(s, "\n%*s", INDENT, "");
+ SF("assoc"); SP();
+ SF("aid"); SP();
+ SF("cts"); SP();
+ SF("shortpre"); SP();
+ SF("shortslot"); SP();
+ SF("dtimper"); SP();
+ trace_seq_printf(s, "\n%*s", INDENT, "");
+ SF("bcnint"); SP();
+ SFX("assoc_cap"); SP();
+ SFX("basic_rates"); SP();
+ SF("enable_beacon");
+ trace_seq_printf(s, "\n%*s", INDENT, "");
+ SF("ht_operation_mode");
+
+ return 0;
+}
+
+int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+{
+ pevent_register_event_handler(pevent, -1, "mac80211",
+ "drv_bss_info_changed",
+ drv_bss_info_changed, NULL);
+ return 0;
+}
+
+void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+{
+ pevent_unregister_event_handler(pevent, -1, "mac80211",
+ "drv_bss_info_changed",
+ drv_bss_info_changed, NULL);
+}
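print_string() above also copes with __data_loc fields, whose 32-bit value packs the payload offset in the low 16 bits and its length in the high 16 bits. The unpacking on its own, for a raw field value v (e.g. as filled in by pevent_read_number_field()):

/* Sketch: unpack a __data_loc descriptor read from the record. */
static void data_loc_unpack(unsigned long long v, int *offset, int *length)
{
	*offset = v & 0xffff;	/* byte offset from the start of record data */
	*length = v >> 16;	/* length of the string/array in bytes */
}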
diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c
new file mode 100644
index 0000000..f1ce600
--- /dev/null
+++ b/tools/lib/traceevent/plugin_sched_switch.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "event-parse.h"
+
+static void write_state(struct trace_seq *s, int val)
+{
+ const char states[] = "SDTtZXxW";
+ int found = 0;
+ int i;
+
+ for (i = 0; i < (sizeof(states) - 1); i++) {
+ if (!(val & (1 << i)))
+ continue;
+
+ if (found)
+ trace_seq_putc(s, '|');
+
+ found = 1;
+ trace_seq_putc(s, states[i]);
+ }
+
+ if (!found)
+ trace_seq_putc(s, 'R');
+}
+
+static void write_and_save_comm(struct format_field *field,
+ struct pevent_record *record,
+ struct trace_seq *s, int pid)
+{
+ const char *comm;
+ int len;
+
+ comm = (char *)(record->data + field->offset);
+ len = s->len;
+ trace_seq_printf(s, "%.*s",
+ field->size, comm);
+
+ /* make sure the comm has a \0 at the end. */
+ trace_seq_terminate(s);
+ comm = &s->buffer[len];
+
+ /* Help out the comm-to-pid mapping. This will handle dups. */
+ pevent_register_comm(field->event->pevent, comm, pid);
+}
+
+static int sched_wakeup_handler(struct trace_seq *s,
+ struct pevent_record *record,
+ struct event_format *event, void *context)
+{
+ struct format_field *field;
+ unsigned long long val;
+
+ if (pevent_get_field_val(s, event, "pid", record, &val, 1))
+ return trace_seq_putc(s, '!');
+
+ field = pevent_find_any_field(event, "comm");
+ if (field) {
+ write_and_save_comm(field, record, s, val);
+ trace_seq_putc(s, ':');
+ }
+ trace_seq_printf(s, "%lld", val);
+
+ if (pevent_get_field_val(s, event, "prio", record, &val, 0) == 0)
+ trace_seq_printf(s, " [%lld]", val);
+
+ if (pevent_get_field_val(s, event, "success", record, &val, 1) == 0)
+ trace_seq_printf(s, " success=%lld", val);
+
+ if (pevent_get_field_val(s, event, "target_cpu", record, &val, 0) == 0)
+ trace_seq_printf(s, " CPU:%03llu", val);
+
+ return 0;
+}
+
+static int sched_switch_handler(struct trace_seq *s,
+ struct pevent_record *record,
+ struct event_format *event, void *context)
+{
+ struct format_field *field;
+ unsigned long long val;
+
+ if (pevent_get_field_val(s, event, "prev_pid", record, &val, 1))
+ return trace_seq_putc(s, '!');
+
+ field = pevent_find_any_field(event, "prev_comm");
+ if (field) {
+ write_and_save_comm(field, record, s, val);
+ trace_seq_putc(s, ':');
+ }
+ trace_seq_printf(s, "%lld ", val);
+
+ if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0)
+ trace_seq_printf(s, "[%lld] ", val);
+
+ if (pevent_get_field_val(s, event, "prev_state", record, &val, 0) == 0)
+ write_state(s, val);
+
+ trace_seq_puts(s, " ==> ");
+
+ if (pevent_get_field_val(s, event, "next_pid", record, &val, 1))
+ return trace_seq_putc(s, '!');
+
+ field = pevent_find_any_field(event, "next_comm");
+ if (field) {
+ write_and_save_comm(field, record, s, val);
+ trace_seq_putc(s, ':');
+ }
+ trace_seq_printf(s, "%lld", val);
+
+ if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0)
+ trace_seq_printf(s, " [%lld]", val);
+
+ return 0;
+}
+
+int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+{
+ pevent_register_event_handler(pevent, -1, "sched", "sched_switch",
+ sched_switch_handler, NULL);
+
+ pevent_register_event_handler(pevent, -1, "sched", "sched_wakeup",
+ sched_wakeup_handler, NULL);
+
+ pevent_register_event_handler(pevent, -1, "sched", "sched_wakeup_new",
+ sched_wakeup_handler, NULL);
+ return 0;
+}
+
+void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+{
+ pevent_unregister_event_handler(pevent, -1, "sched", "sched_switch",
+ sched_switch_handler, NULL);
+
+ pevent_unregister_event_handler(pevent, -1, "sched", "sched_wakeup",
+ sched_wakeup_handler, NULL);
+
+ pevent_unregister_event_handler(pevent, -1, "sched", "sched_wakeup_new",
+ sched_wakeup_handler, NULL);
+}
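write_state() above turns the prev_state bitmask into the letter codes of "SDTtZXxW", joined by '|' and defaulting to 'R' for a running task. The same decode as a standalone sketch writing into a plain buffer instead of a trace_seq:

#include <stdio.h>

static void state_to_str(int val, char *buf)
{
	const char states[] = "SDTtZXxW";
	unsigned int i;
	int found = 0;

	for (i = 0; i < sizeof(states) - 1; i++) {
		if (!(val & (1 << i)))
			continue;
		if (found)
			*buf++ = '|';
		found = 1;
		*buf++ = states[i];
	}
	if (!found)
		*buf++ = 'R';	/* running */
	*buf = '\0';
}

int main(void)
{
	char buf[20];

	state_to_str(0x5, buf);		/* bits 0 and 2 set */
	printf("%s\n", buf);		/* prints S|T */
	return 0;
}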
diff --git a/tools/lib/traceevent/plugin_scsi.c b/tools/lib/traceevent/plugin_scsi.c
new file mode 100644
index 0000000..eda326f
--- /dev/null
+++ b/tools/lib/traceevent/plugin_scsi.c
@@ -0,0 +1,429 @@
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include "event-parse.h"
+
+typedef unsigned long sector_t;
+typedef uint64_t u64;
+typedef unsigned int u32;
+
+/*
+ * SCSI opcodes
+ */
+#define TEST_UNIT_READY 0x00
+#define REZERO_UNIT 0x01
+#define REQUEST_SENSE 0x03
+#define FORMAT_UNIT 0x04
+#define READ_BLOCK_LIMITS 0x05
+#define REASSIGN_BLOCKS 0x07
+#define INITIALIZE_ELEMENT_STATUS 0x07
+#define READ_6 0x08
+#define WRITE_6 0x0a
+#define SEEK_6 0x0b
+#define READ_REVERSE 0x0f
+#define WRITE_FILEMARKS 0x10
+#define SPACE 0x11
+#define INQUIRY 0x12
+#define RECOVER_BUFFERED_DATA 0x14
+#define MODE_SELECT 0x15
+#define RESERVE 0x16
+#define RELEASE 0x17
+#define COPY 0x18
+#define ERASE 0x19
+#define MODE_SENSE 0x1a
+#define START_STOP 0x1b
+#define RECEIVE_DIAGNOSTIC 0x1c
+#define SEND_DIAGNOSTIC 0x1d
+#define ALLOW_MEDIUM_REMOVAL 0x1e
+
+#define READ_FORMAT_CAPACITIES 0x23
+#define SET_WINDOW 0x24
+#define READ_CAPACITY 0x25
+#define READ_10 0x28
+#define WRITE_10 0x2a
+#define SEEK_10 0x2b
+#define POSITION_TO_ELEMENT 0x2b
+#define WRITE_VERIFY 0x2e
+#define VERIFY 0x2f
+#define SEARCH_HIGH 0x30
+#define SEARCH_EQUAL 0x31
+#define SEARCH_LOW 0x32
+#define SET_LIMITS 0x33
+#define PRE_FETCH 0x34
+#define READ_POSITION 0x34
+#define SYNCHRONIZE_CACHE 0x35
+#define LOCK_UNLOCK_CACHE 0x36
+#define READ_DEFECT_DATA 0x37
+#define MEDIUM_SCAN 0x38
+#define COMPARE 0x39
+#define COPY_VERIFY 0x3a
+#define WRITE_BUFFER 0x3b
+#define READ_BUFFER 0x3c
+#define UPDATE_BLOCK 0x3d
+#define READ_LONG 0x3e
+#define WRITE_LONG 0x3f
+#define CHANGE_DEFINITION 0x40
+#define WRITE_SAME 0x41
+#define UNMAP 0x42
+#define READ_TOC 0x43
+#define READ_HEADER 0x44
+#define GET_EVENT_STATUS_NOTIFICATION 0x4a
+#define LOG_SELECT 0x4c
+#define LOG_SENSE 0x4d
+#define XDWRITEREAD_10 0x53
+#define MODE_SELECT_10 0x55
+#define RESERVE_10 0x56
+#define RELEASE_10 0x57
+#define MODE_SENSE_10 0x5a
+#define PERSISTENT_RESERVE_IN 0x5e
+#define PERSISTENT_RESERVE_OUT 0x5f
+#define VARIABLE_LENGTH_CMD 0x7f
+#define REPORT_LUNS 0xa0
+#define SECURITY_PROTOCOL_IN 0xa2
+#define MAINTENANCE_IN 0xa3
+#define MAINTENANCE_OUT 0xa4
+#define MOVE_MEDIUM 0xa5
+#define EXCHANGE_MEDIUM 0xa6
+#define READ_12 0xa8
+#define WRITE_12 0xaa
+#define READ_MEDIA_SERIAL_NUMBER 0xab
+#define WRITE_VERIFY_12 0xae
+#define VERIFY_12 0xaf
+#define SEARCH_HIGH_12 0xb0
+#define SEARCH_EQUAL_12 0xb1
+#define SEARCH_LOW_12 0xb2
+#define SECURITY_PROTOCOL_OUT 0xb5
+#define READ_ELEMENT_STATUS 0xb8
+#define SEND_VOLUME_TAG 0xb6
+#define WRITE_LONG_2 0xea
+#define EXTENDED_COPY 0x83
+#define RECEIVE_COPY_RESULTS 0x84
+#define ACCESS_CONTROL_IN 0x86
+#define ACCESS_CONTROL_OUT 0x87
+#define READ_16 0x88
+#define WRITE_16 0x8a
+#define READ_ATTRIBUTE 0x8c
+#define WRITE_ATTRIBUTE 0x8d
+#define VERIFY_16 0x8f
+#define SYNCHRONIZE_CACHE_16 0x91
+#define WRITE_SAME_16 0x93
+#define SERVICE_ACTION_IN 0x9e
+/* values for service action in */
+#define SAI_READ_CAPACITY_16 0x10
+#define SAI_GET_LBA_STATUS 0x12
+/* values for VARIABLE_LENGTH_CMD service action codes
+ * see spc4r17 Section D.3.5, table D.7 and D.8 */
+#define VLC_SA_RECEIVE_CREDENTIAL 0x1800
+/* values for maintenance in */
+#define MI_REPORT_IDENTIFYING_INFORMATION 0x05
+#define MI_REPORT_TARGET_PGS 0x0a
+#define MI_REPORT_ALIASES 0x0b
+#define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c
+#define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d
+#define MI_REPORT_PRIORITY 0x0e
+#define MI_REPORT_TIMESTAMP 0x0f
+#define MI_MANAGEMENT_PROTOCOL_IN 0x10
+/* value for MI_REPORT_TARGET_PGS ext header */
+#define MI_EXT_HDR_PARAM_FMT 0x20
+/* values for maintenance out */
+#define MO_SET_IDENTIFYING_INFORMATION 0x06
+#define MO_SET_TARGET_PGS 0x0a
+#define MO_CHANGE_ALIASES 0x0b
+#define MO_SET_PRIORITY 0x0e
+#define MO_SET_TIMESTAMP 0x0f
+#define MO_MANAGEMENT_PROTOCOL_OUT 0x10
+/* values for variable length command */
+#define XDREAD_32 0x03
+#define XDWRITE_32 0x04
+#define XPWRITE_32 0x06
+#define XDWRITEREAD_32 0x07
+#define READ_32 0x09
+#define VERIFY_32 0x0a
+#define WRITE_32 0x0b
+#define WRITE_SAME_32 0x0d
+
+#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
+#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
+
+static const char *
+scsi_trace_misc(struct trace_seq *, unsigned char *, int);
+
+static const char *
+scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= ((cdb[1] & 0x1F) << 16);
+ lba |= (cdb[2] << 8);
+ lba |= cdb[3];
+ txlen = cdb[4];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu",
+ (unsigned long long)lba, (unsigned long long)txlen);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *
+scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= (cdb[2] << 24);
+ lba |= (cdb[3] << 16);
+ lba |= (cdb[4] << 8);
+ lba |= cdb[5];
+ txlen |= (cdb[7] << 8);
+ txlen |= cdb[8];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+
+ if (cdb[0] == WRITE_SAME)
+ trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
+
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *
+scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= (cdb[2] << 24);
+ lba |= (cdb[3] << 16);
+ lba |= (cdb[4] << 8);
+ lba |= cdb[5];
+ txlen |= (cdb[6] << 24);
+ txlen |= (cdb[7] << 16);
+ txlen |= (cdb[8] << 8);
+ txlen |= cdb[9];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *
+scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= ((u64)cdb[2] << 56);
+ lba |= ((u64)cdb[3] << 48);
+ lba |= ((u64)cdb[4] << 40);
+ lba |= ((u64)cdb[5] << 32);
+ lba |= (cdb[6] << 24);
+ lba |= (cdb[7] << 16);
+ lba |= (cdb[8] << 8);
+ lba |= cdb[9];
+ txlen |= (cdb[10] << 24);
+ txlen |= (cdb[11] << 16);
+ txlen |= (cdb[12] << 8);
+ txlen |= cdb[13];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+
+ if (cdb[0] == WRITE_SAME_16)
+ trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
+
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *
+scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len, *cmd;
+ sector_t lba = 0, txlen = 0;
+ u32 ei_lbrt = 0;
+
+ switch (SERVICE_ACTION32(cdb)) {
+ case READ_32:
+ cmd = "READ";
+ break;
+ case VERIFY_32:
+ cmd = "VERIFY";
+ break;
+ case WRITE_32:
+ cmd = "WRITE";
+ break;
+ case WRITE_SAME_32:
+ cmd = "WRITE_SAME";
+ break;
+ default:
+ trace_seq_printf(p, "UNKNOWN");
+ goto out;
+ }
+
+ lba |= ((u64)cdb[12] << 56);
+ lba |= ((u64)cdb[13] << 48);
+ lba |= ((u64)cdb[14] << 40);
+ lba |= ((u64)cdb[15] << 32);
+ lba |= (cdb[16] << 24);
+ lba |= (cdb[17] << 16);
+ lba |= (cdb[18] << 8);
+ lba |= cdb[19];
+ ei_lbrt |= (cdb[20] << 24);
+ ei_lbrt |= (cdb[21] << 16);
+ ei_lbrt |= (cdb[22] << 8);
+ ei_lbrt |= cdb[23];
+ txlen |= (cdb[28] << 24);
+ txlen |= (cdb[29] << 16);
+ txlen |= (cdb[30] << 8);
+ txlen |= cdb[31];
+
+ trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
+ cmd, (unsigned long long)lba,
+ (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
+
+ if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
+ trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
+
+out:
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *
+scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ unsigned int regions = cdb[7] << 8 | cdb[8];
+
+ trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *
+scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len, *cmd;
+ sector_t lba = 0;
+ u32 alloc_len = 0;
+
+ switch (SERVICE_ACTION16(cdb)) {
+ case SAI_READ_CAPACITY_16:
+ cmd = "READ_CAPACITY_16";
+ break;
+ case SAI_GET_LBA_STATUS:
+ cmd = "GET_LBA_STATUS";
+ break;
+ default:
+ trace_seq_printf(p, "UNKNOWN");
+ goto out;
+ }
+
+ lba |= ((u64)cdb[2] << 56);
+ lba |= ((u64)cdb[3] << 48);
+ lba |= ((u64)cdb[4] << 40);
+ lba |= ((u64)cdb[5] << 32);
+ lba |= (cdb[6] << 24);
+ lba |= (cdb[7] << 16);
+ lba |= (cdb[8] << 8);
+ lba |= cdb[9];
+ alloc_len |= (cdb[10] << 24);
+ alloc_len |= (cdb[11] << 16);
+ alloc_len |= (cdb[12] << 8);
+ alloc_len |= cdb[13];
+
+ trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
+ (unsigned long long)lba, alloc_len);
+
+out:
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *
+scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ switch (SERVICE_ACTION32(cdb)) {
+ case READ_32:
+ case VERIFY_32:
+ case WRITE_32:
+ case WRITE_SAME_32:
+ return scsi_trace_rw32(p, cdb, len);
+ default:
+ return scsi_trace_misc(p, cdb, len);
+ }
+}
+
+static const char *
+scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+
+ trace_seq_printf(p, "-");
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+const char *
+scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ switch (cdb[0]) {
+ case READ_6:
+ case WRITE_6:
+ return scsi_trace_rw6(p, cdb, len);
+ case READ_10:
+ case VERIFY:
+ case WRITE_10:
+ case WRITE_SAME:
+ return scsi_trace_rw10(p, cdb, len);
+ case READ_12:
+ case VERIFY_12:
+ case WRITE_12:
+ return scsi_trace_rw12(p, cdb, len);
+ case READ_16:
+ case VERIFY_16:
+ case WRITE_16:
+ case WRITE_SAME_16:
+ return scsi_trace_rw16(p, cdb, len);
+ case UNMAP:
+ return scsi_trace_unmap(p, cdb, len);
+ case SERVICE_ACTION_IN:
+ return scsi_trace_service_action_in(p, cdb, len);
+ case VARIABLE_LENGTH_CMD:
+ return scsi_trace_varlen(p, cdb, len);
+ default:
+ return scsi_trace_misc(p, cdb, len);
+ }
+}
+
+unsigned long long process_scsi_trace_parse_cdb(struct trace_seq *s,
+ unsigned long long *args)
+{
+ scsi_trace_parse_cdb(s, (unsigned char *) (unsigned long) args[1], args[2]);
+ return 0;
+}
+
+int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+{
+ pevent_register_print_function(pevent,
+ process_scsi_trace_parse_cdb,
+ PEVENT_FUNC_ARG_STRING,
+ "scsi_trace_parse_cdb",
+ PEVENT_FUNC_ARG_PTR,
+ PEVENT_FUNC_ARG_PTR,
+ PEVENT_FUNC_ARG_INT,
+ PEVENT_FUNC_ARG_VOID);
+ return 0;
+}
+
+void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+{
+ pevent_unregister_print_function(pevent, process_scsi_trace_parse_cdb,
+ "scsi_trace_parse_cdb");
+}
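The rw10/rw12/rw16 decoders above assemble each CDB field byte by byte because SCSI CDBs are big-endian regardless of host order. A minimal sketch for the 4-byte LBA of a READ(10):

#include <stdio.h>
#include <stdint.h>

/* Assemble the 32-bit big-endian LBA found at bytes 2..5 of a READ(10) CDB. */
static uint32_t cdb10_lba(const unsigned char *cdb)
{
	return ((uint32_t)cdb[2] << 24) |
	       ((uint32_t)cdb[3] << 16) |
	       ((uint32_t)cdb[4] << 8)  |
		(uint32_t)cdb[5];
}

int main(void)
{
	unsigned char cdb[10] = { 0x28, 0, 0x00, 0x00, 0x10, 0x00 };

	printf("lba=%u\n", cdb10_lba(cdb));	/* prints lba=4096 */
	return 0;
}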
diff --git a/tools/lib/traceevent/plugin_xen.c b/tools/lib/traceevent/plugin_xen.c
new file mode 100644
index 0000000..3a413ea
--- /dev/null
+++ b/tools/lib/traceevent/plugin_xen.c
@@ -0,0 +1,136 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "event-parse.h"
+
+#define __HYPERVISOR_set_trap_table 0
+#define __HYPERVISOR_mmu_update 1
+#define __HYPERVISOR_set_gdt 2
+#define __HYPERVISOR_stack_switch 3
+#define __HYPERVISOR_set_callbacks 4
+#define __HYPERVISOR_fpu_taskswitch 5
+#define __HYPERVISOR_sched_op_compat 6
+#define __HYPERVISOR_dom0_op 7
+#define __HYPERVISOR_set_debugreg 8
+#define __HYPERVISOR_get_debugreg 9
+#define __HYPERVISOR_update_descriptor 10
+#define __HYPERVISOR_memory_op 12
+#define __HYPERVISOR_multicall 13
+#define __HYPERVISOR_update_va_mapping 14
+#define __HYPERVISOR_set_timer_op 15
+#define __HYPERVISOR_event_channel_op_compat 16
+#define __HYPERVISOR_xen_version 17
+#define __HYPERVISOR_console_io 18
+#define __HYPERVISOR_physdev_op_compat 19
+#define __HYPERVISOR_grant_table_op 20
+#define __HYPERVISOR_vm_assist 21
+#define __HYPERVISOR_update_va_mapping_otherdomain 22
+#define __HYPERVISOR_iret 23 /* x86 only */
+#define __HYPERVISOR_vcpu_op 24
+#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
+#define __HYPERVISOR_mmuext_op 26
+#define __HYPERVISOR_acm_op 27
+#define __HYPERVISOR_nmi_op 28
+#define __HYPERVISOR_sched_op 29
+#define __HYPERVISOR_callback_op 30
+#define __HYPERVISOR_xenoprof_op 31
+#define __HYPERVISOR_event_channel_op 32
+#define __HYPERVISOR_physdev_op 33
+#define __HYPERVISOR_hvm_op 34
+#define __HYPERVISOR_tmem_op 38
+
+/* Architecture-specific hypercall definitions. */
+#define __HYPERVISOR_arch_0 48
+#define __HYPERVISOR_arch_1 49
+#define __HYPERVISOR_arch_2 50
+#define __HYPERVISOR_arch_3 51
+#define __HYPERVISOR_arch_4 52
+#define __HYPERVISOR_arch_5 53
+#define __HYPERVISOR_arch_6 54
+#define __HYPERVISOR_arch_7 55
+
+#define N(x) [__HYPERVISOR_##x] = "("#x")"
+static const char *xen_hypercall_names[] = {
+ N(set_trap_table),
+ N(mmu_update),
+ N(set_gdt),
+ N(stack_switch),
+ N(set_callbacks),
+ N(fpu_taskswitch),
+ N(sched_op_compat),
+ N(dom0_op),
+ N(set_debugreg),
+ N(get_debugreg),
+ N(update_descriptor),
+ N(memory_op),
+ N(multicall),
+ N(update_va_mapping),
+ N(set_timer_op),
+ N(event_channel_op_compat),
+ N(xen_version),
+ N(console_io),
+ N(physdev_op_compat),
+ N(grant_table_op),
+ N(vm_assist),
+ N(update_va_mapping_otherdomain),
+ N(iret),
+ N(vcpu_op),
+ N(set_segment_base),
+ N(mmuext_op),
+ N(acm_op),
+ N(nmi_op),
+ N(sched_op),
+ N(callback_op),
+ N(xenoprof_op),
+ N(event_channel_op),
+ N(physdev_op),
+ N(hvm_op),
+
+/* Architecture-specific hypercall definitions. */
+ N(arch_0),
+ N(arch_1),
+ N(arch_2),
+ N(arch_3),
+ N(arch_4),
+ N(arch_5),
+ N(arch_6),
+ N(arch_7),
+};
+#undef N
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+static const char *xen_hypercall_name(unsigned op)
+{
+ if (op < ARRAY_SIZE(xen_hypercall_names) &&
+ xen_hypercall_names[op] != NULL)
+ return xen_hypercall_names[op];
+
+ return "";
+}
+
+unsigned long long process_xen_hypercall_name(struct trace_seq *s,
+ unsigned long long *args)
+{
+ unsigned int op = args[0];
+
+ trace_seq_printf(s, "%s", xen_hypercall_name(op));
+ return 0;
+}
+
+int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+{
+ pevent_register_print_function(pevent,
+ process_xen_hypercall_name,
+ PEVENT_FUNC_ARG_STRING,
+ "xen_hypercall_name",
+ PEVENT_FUNC_ARG_INT,
+ PEVENT_FUNC_ARG_VOID);
+ return 0;
+}
+
+void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+{
+ pevent_unregister_print_function(pevent, process_xen_hypercall_name,
+ "xen_hypercall_name");
+}
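xen_hypercall_name() above depends on designated array initializers, so the table stays sparse yet directly indexable by hypercall number. The same lookup pattern in a tiny standalone sketch:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const char *demo_names[] = {
	[0] = "(zero)",
	[3] = "(three)",	/* gaps stay NULL */
};

static const char *demo_name(unsigned op)
{
	if (op < ARRAY_SIZE(demo_names) && demo_names[op] != NULL)
		return demo_names[op];
	return "";
}

int main(void)
{
	printf("%s %s %s\n", demo_name(0), demo_name(1), demo_name(3));
	/* prints "(zero)  (three)"; the unknown op prints as empty */
	return 0;
}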
diff --git a/tools/lib/traceevent/trace-seq.c b/tools/lib/traceevent/trace-seq.c
index d7f2e68..ec3bd16 100644
--- a/tools/lib/traceevent/trace-seq.c
+++ b/tools/lib/traceevent/trace-seq.c
@@ -22,6 +22,7 @@
#include <string.h>
#include <stdarg.h>
+#include <asm/bug.h>
#include "event-parse.h"
#include "event-utils.h"
@@ -32,10 +33,21 @@
#define TRACE_SEQ_POISON ((void *)0xdeadbeef)
#define TRACE_SEQ_CHECK(s) \
do { \
- if ((s)->buffer == TRACE_SEQ_POISON) \
- die("Usage of trace_seq after it was destroyed"); \
+ if (WARN_ONCE((s)->buffer == TRACE_SEQ_POISON, \
+ "Usage of trace_seq after it was destroyed")) \
+ (s)->state = TRACE_SEQ__BUFFER_POISONED; \
} while (0)
+#define TRACE_SEQ_CHECK_RET_N(s, n) \
+do { \
+ TRACE_SEQ_CHECK(s); \
+ if ((s)->state != TRACE_SEQ__GOOD) \
+ return n; \
+} while (0)
+
+#define TRACE_SEQ_CHECK_RET(s) TRACE_SEQ_CHECK_RET_N(s, )
+#define TRACE_SEQ_CHECK_RET0(s) TRACE_SEQ_CHECK_RET_N(s, 0)
+
/**
* trace_seq_init - initialize the trace_seq structure
* @s: a pointer to the trace_seq structure to initialize
@@ -45,7 +57,11 @@ void trace_seq_init(struct trace_seq *s)
s->len = 0;
s->readpos = 0;
s->buffer_size = TRACE_SEQ_BUF_SIZE;
- s->buffer = malloc_or_die(s->buffer_size);
+ s->buffer = malloc(s->buffer_size);
+ if (s->buffer != NULL)
+ s->state = TRACE_SEQ__GOOD;
+ else
+ s->state = TRACE_SEQ__MEM_ALLOC_FAILED;
}
/**
@@ -71,17 +87,23 @@ void trace_seq_destroy(struct trace_seq *s)
{
if (!s)
return;
- TRACE_SEQ_CHECK(s);
+ TRACE_SEQ_CHECK_RET(s);
free(s->buffer);
s->buffer = TRACE_SEQ_POISON;
}
static void expand_buffer(struct trace_seq *s)
{
+ char *buf;
+
+ buf = realloc(s->buffer, s->buffer_size + TRACE_SEQ_BUF_SIZE);
+ if (WARN_ONCE(!buf, "Can't allocate trace_seq buffer memory")) {
+ s->state = TRACE_SEQ__MEM_ALLOC_FAILED;
+ return;
+ }
+
+ s->buffer = buf;
s->buffer_size += TRACE_SEQ_BUF_SIZE;
- s->buffer = realloc(s->buffer, s->buffer_size);
- if (!s->buffer)
- die("Can't allocate trace_seq buffer memory");
}
/**
@@ -105,9 +127,9 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
int len;
int ret;
- TRACE_SEQ_CHECK(s);
-
try_again:
+ TRACE_SEQ_CHECK_RET0(s);
+
len = (s->buffer_size - 1) - s->len;
va_start(ap, fmt);
@@ -141,9 +163,9 @@ trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
int len;
int ret;
- TRACE_SEQ_CHECK(s);
-
try_again:
+ TRACE_SEQ_CHECK_RET0(s);
+
len = (s->buffer_size - 1) - s->len;
ret = vsnprintf(s->buffer + s->len, len, fmt, args);
@@ -172,13 +194,15 @@ int trace_seq_puts(struct trace_seq *s, const char *str)
{
int len;
- TRACE_SEQ_CHECK(s);
+ TRACE_SEQ_CHECK_RET0(s);
len = strlen(str);
while (len > ((s->buffer_size - 1) - s->len))
expand_buffer(s);
+ TRACE_SEQ_CHECK_RET0(s);
+
memcpy(s->buffer + s->len, str, len);
s->len += len;
@@ -187,11 +211,13 @@ int trace_seq_puts(struct trace_seq *s, const char *str)
int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
- TRACE_SEQ_CHECK(s);
+ TRACE_SEQ_CHECK_RET0(s);
while (s->len >= (s->buffer_size - 1))
expand_buffer(s);
+ TRACE_SEQ_CHECK_RET0(s);
+
s->buffer[s->len++] = c;
return 1;
@@ -199,7 +225,7 @@ int trace_seq_putc(struct trace_seq *s, unsigned char c)
void trace_seq_terminate(struct trace_seq *s)
{
- TRACE_SEQ_CHECK(s);
+ TRACE_SEQ_CHECK_RET(s);
/* There's always one character left on the buffer */
s->buffer[s->len] = 0;
@@ -208,5 +234,16 @@ void trace_seq_terminate(struct trace_seq *s)
int trace_seq_do_printf(struct trace_seq *s)
{
TRACE_SEQ_CHECK(s);
- return printf("%.*s", s->len, s->buffer);
+
+ switch (s->state) {
+ case TRACE_SEQ__GOOD:
+ return printf("%.*s", s->len, s->buffer);
+ case TRACE_SEQ__BUFFER_POISONED:
+ puts("Usage of trace_seq after it was destroyed");
+ break;
+ case TRACE_SEQ__MEM_ALLOC_FAILED:
+ puts("Can't allocate trace_seq buffer memory");
+ break;
+ }
+ return -1;
}
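
The conversion above replaces die() with a sticky error state: an allocation failure or use-after-destroy flips the state once, and the CHECK_RET macros turn every later call into a harmless no-op return. A self-contained sketch of the same pattern; the struct layout, state names and buffer sizes below are invented here, not taken from event-parse.h:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum seq_state { SEQ_GOOD, SEQ_BUFFER_POISONED, SEQ_MEM_ALLOC_FAILED };

struct seq {
	char *buffer;
	size_t len;
	size_t size;
	enum seq_state state;
};

/* Like TRACE_SEQ_CHECK_RET0: callers get a harmless return value instead
 * of dying, and the bad state sticks so later calls stay no-ops. */
#define CHECK_RET0(s)			\
do {					\
	if ((s)->state != SEQ_GOOD)	\
		return 0;		\
} while (0)

static void seq_init(struct seq *s)
{
	s->len = 0;
	s->size = 64;
	s->buffer = malloc(s->size);
	s->state = s->buffer ? SEQ_GOOD : SEQ_MEM_ALLOC_FAILED;
}

static int seq_puts(struct seq *s, const char *str)
{
	size_t len = strlen(str);

	CHECK_RET0(s);
	while (s->len + len + 1 > s->size) {
		char *buf = realloc(s->buffer, s->size + 64);

		if (!buf) {
			s->state = SEQ_MEM_ALLOC_FAILED;
			return 0;
		}
		s->buffer = buf;
		s->size += 64;
	}
	memcpy(s->buffer + s->len, str, len);
	s->len += len;
	s->buffer[s->len] = 0;
	return (int)len;
}

int main(void)
{
	struct seq s;

	seq_init(&s);
	seq_puts(&s, "hello ");
	seq_puts(&s, "world");
	if (s.state == SEQ_GOOD)
		printf("%s\n", s.buffer);
	free(s.buffer);
	return 0;
}
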
diff --git a/tools/perf/Documentation/perf-archive.txt b/tools/perf/Documentation/perf-archive.txt
index 5032a14..ac6ecbb 100644
--- a/tools/perf/Documentation/perf-archive.txt
+++ b/tools/perf/Documentation/perf-archive.txt
@@ -12,9 +12,9 @@ SYNOPSIS
DESCRIPTION
-----------
-This command runs runs perf-buildid-list --with-hits, and collects the files
-with the buildids found so that analysis of perf.data contents can be possible
-on another machine.
+This command runs perf-buildid-list --with-hits, and collects the files with the
+buildids found so that analysis of perf.data contents is possible on another
+machine.
SEE ALSO
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt
index 6a06cef..52276a6 100644
--- a/tools/perf/Documentation/perf-kvm.txt
+++ b/tools/perf/Documentation/perf-kvm.txt
@@ -10,9 +10,9 @@ SYNOPSIS
[verse]
'perf kvm' [--host] [--guest] [--guestmount=<path>
[--guestkallsyms=<path> --guestmodules=<path> | --guestvmlinux=<path>]]
- {top|record|report|diff|buildid-list}
+ {top|record|report|diff|buildid-list} [<options>]
'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path>
- | --guestvmlinux=<path>] {top|record|report|diff|buildid-list|stat}
+ | --guestvmlinux=<path>] {top|record|report|diff|buildid-list|stat} [<options>]
'perf kvm stat' [record|report|live] [<options>]
DESCRIPTION
@@ -24,10 +24,17 @@ There are a couple of variants of perf kvm:
of an arbitrary workload.
'perf kvm record <command>' to record the performance counter profile
- of an arbitrary workload and save it into a perf data file. If both
- --host and --guest are input, the perf data file name is perf.data.kvm.
- If there is no --host but --guest, the file name is perf.data.guest.
- If there is no --guest but --host, the file name is perf.data.host.
+ of an arbitrary workload and save it into a perf data file. The default
+ behavior of perf kvm is --guest, so if neither --host nor --guest is
+ specified, the perf data file name is perf.data.guest. If --host is
+ specified, the file name is perf.data.kvm. To record data into
+ perf.data.host, pass --host --no-guest. The resulting file names are
+ as follows:
+ Default('') -> perf.data.guest
+ --host -> perf.data.kvm
+ --guest -> perf.data.guest
+ --host --guest -> perf.data.kvm
+ --host --no-guest -> perf.data.host
'perf kvm report' to display the performance counter profile information
recorded via perf kvm record.
@@ -37,7 +44,9 @@ There are a couple of variants of perf kvm:
'perf kvm buildid-list' to display the buildids found in a perf data file,
so that other tools can be used to fetch packages with matching symbol tables
- for use by perf report.
+ for use by perf report. Since the buildid is read from /sys/kernel/notes in
+ the guest OS, make sure your perf data file was captured with --guestmount
+ in perf kvm record if you want to list the buildids for a guest.
'perf kvm stat <command>' to run a command and gather performance counter
statistics.
@@ -58,14 +67,14 @@ There are a couple of variants of perf kvm:
OPTIONS
-------
-i::
---input=::
+--input=<path>::
Input file name.
-o::
---output::
+--output=<path>::
Output file name.
---host=::
+--host::
Collect host side performance profile.
---guest=::
+--guest::
Collect guest side performance profile.
--guestmount=<path>::
 Guest os root file system mount directory. Users mount guest os
@@ -84,6 +93,9 @@ OPTIONS
kernel module information. Users copy it out from guest os.
--guestvmlinux=<path>::
Guest os kernel vmlinux.
+-v::
+--verbose::
+ Be more verbose (show counter open errors, etc).
STAT REPORT OPTIONS
-------------------
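
A small sketch of the file-name selection documented in the table above, assuming host/guest flags after option parsing (with the guest side defaulting to on); the series replaces the open-coded strdup() calls in builtin-kvm.c with a get_filename_for_perf_kvm() helper (see the builtin-kvm.c hunk further down), so the code here is illustrative only:

#include <stdio.h>

static const char *kvm_default_filename(int host, int guest)
{
	if (host && guest)
		return "perf.data.kvm";	/* --host (guest stays on), --host --guest */
	if (host)
		return "perf.data.host";	/* --host --no-guest */
	return "perf.data.guest";		/* default and --guest */
}

int main(void)
{
	printf("%s\n", kvm_default_filename(0, 1));	/* perf.data.guest */
	printf("%s\n", kvm_default_filename(1, 1));	/* perf.data.kvm */
	printf("%s\n", kvm_default_filename(1, 0));	/* perf.data.host */
	return 0;
}
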
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 43b42c4..c71b0f3 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -57,6 +57,8 @@ OPTIONS
-t::
--tid=::
Record events on existing thread ID (comma separated list).
+ This option also disables inheritance by default. Enable it by adding
+ --inherit.
-u::
--uid=::
@@ -66,8 +68,7 @@ OPTIONS
--realtime=::
Collect data with this RT SCHED_FIFO priority.
--D::
---no-delay::
+--no-buffering::
Collect data without buffering.
-c::
@@ -201,11 +202,16 @@ abort events and some memory events in precise mode on modern Intel CPUs.
--transaction::
Record transaction flags for transaction related events.
---force-per-cpu::
-Force the use of per-cpu mmaps. By default, when tasks are specified (i.e. -p,
--t or -u options) per-thread mmaps are created. This option overrides that and
-forces per-cpu mmaps. A side-effect of that is that inheritance is
-automatically enabled. Add the -i option also to disable inheritance.
+--per-thread::
+Use per-thread mmaps. By default per-cpu mmaps are created. This option
+overrides that and uses per-thread mmaps. A side-effect of that is that
+inheritance is automatically disabled. --per-thread is ignored with a warning
+if combined with -a or -C options.
+
+-D::
+--delay=::
+After starting the program, wait msecs before measuring. This is useful to
+filter out the startup phase of the program, which is often very different.
SEE ALSO
--------
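
A rough sketch of what the new -D/--delay behaviour described above amounts to, as implemented further down in builtin-record.c (start the workload, sleep opts->initial_delay milliseconds, then enable the counters); the function names below are made up for illustration:

#include <stdio.h>
#include <unistd.h>

static void enable_counters(void)
{
	puts("counters enabled");
}

static void start_measuring(unsigned int delay_msecs)
{
	/* Workload is already running; skip its startup phase. */
	if (delay_msecs)
		usleep(delay_msecs * 1000);
	enable_counters();
}

int main(void)
{
	start_measuring(500);	/* as with: perf record -D 500 ... */
	return 0;
}
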
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 10a2798..8eab8a4 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -237,6 +237,15 @@ OPTIONS
Do not show entries which have an overhead under that percent.
(Default: 0).
+--header::
+ Show header information in the perf.data file. This includes
+ information such as hostname, OS and perf version, cpu/mem
+ info, perf command line, event list and so on. Currently only
+ --stdio output supports this feature.
+
+--header-only::
+ Show only perf.data header (forces --stdio).
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-annotate[1]
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index e9cbfcd..05f9a0a 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -115,7 +115,7 @@ OPTIONS
-f::
--fields::
Comma separated list of fields to print. Options are:
- comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff.
+ comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff, srcline.
Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
e.g., -f sw:comm,tid,time,ip,sym and -f trace:time,cpu,trace
@@ -203,6 +203,18 @@ OPTIONS
--show-kernel-path::
Try to resolve the path of [kernel.kallsyms]
+--show-task-events::
+ Display task-related events (e.g. FORK, COMM, EXIT).
+
+--show-mmap-events::
+ Display mmap-related events (e.g. MMAP, MMAP2).
+
+--header::
+ Show perf.data header.
+
+--header-only::
+ Show only perf.data header.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-script-perl[1],
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 80c7da6..29ee857 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -133,7 +133,7 @@ use --per-core in addition to -a. (system-wide). The output includes the
core number and the number of online logical processors on that physical processor.
-D msecs::
---initial-delay msecs::
+--delay msecs::
After starting the program, wait msecs before measuring. This is useful to
filter out the startup phase of the program, which is often very different.
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index 3ff8bd4..bc5990c 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -8,8 +8,7 @@ perf-timechart - Tool to visualize total system behavior during a workload
SYNOPSIS
--------
[verse]
-'perf timechart' record <command>
-'perf timechart' [<options>]
+'perf timechart' [<timechart options>] {record} [<record options>]
DESCRIPTION
-----------
@@ -21,8 +20,8 @@ There are two variants of perf timechart:
'perf timechart' to turn a trace into a Scalable Vector Graphics file,
that can be viewed with popular SVG viewers such as 'Inkscape'.
-OPTIONS
--------
+TIMECHART OPTIONS
+-----------------
-o::
--output=::
Select the output file (default: output.svg)
@@ -35,6 +34,9 @@ OPTIONS
-P::
--power-only::
Only output the CPU power section of the diagram
+-T::
+--tasks-only::
+ Don't output processor state transitions
-p::
--process::
Select the processes to display, by name or PID
@@ -54,6 +56,38 @@ $ perf timechart
Written 10.2 seconds of trace to output.svg.
+Record system-wide timechart:
+
+ $ perf timechart record
+
+ then generate the timechart and highlight 'gcc' tasks:
+
+ $ perf timechart --highlight gcc
+
+-n::
+--proc-num::
+ Print task info for at least the given number of tasks.
+-t::
+--topology::
+ Sort CPUs according to topology.
+--highlight=<duration_nsecs|task_name>::
+ Highlight tasks (using a different color) that run longer than the given
+ duration, or tasks with the given name. If a number is given, it is
+ interpreted as nanoseconds; if a non-numeric string is given, it is
+ interpreted as a task name.
+
+RECORD OPTIONS
+--------------
+-P::
+--power-only::
+ Record only power-related events
+-T::
+--tasks-only::
+ Record only tasks-related events
+-g::
+--callchain::
+ Do call-graph (stack chain/backtrace) recording
+
SEE ALSO
--------
linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 7de01dd..cdd8d49 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -50,7 +50,6 @@ Default is to monitor all CPUS.
--count-filter=<count>::
Only display functions with more events than this.
--g::
--group::
Put the counters into a counter group.
@@ -143,12 +142,12 @@ Default is to monitor all CPUS.
--asm-raw::
Show raw instruction encoding of assembly instructions.
--G::
+-g::
Enables call-graph (stack chain/backtrace) recording.
--call-graph::
Setup and enable call-graph (stack chain/backtrace) recording,
- implies -G.
+ implies -g.
--max-stack::
Set the stack depth limit when parsing the callchain, anything
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 025de79..f41572d 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -1,7 +1,11 @@
tools/perf
tools/scripts
tools/lib/traceevent
-tools/lib/lk
+tools/lib/api
+tools/lib/symbol/kallsyms.c
+tools/lib/symbol/kallsyms.h
+tools/include/asm/bug.h
+tools/include/linux/compiler.h
include/linux/const.h
include/linux/perf_event.h
include/linux/rbtree.h
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 4835618..cb2e586 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -60,8 +60,11 @@ endef
#
# Needed if no target specified:
+# (Except for the tags and TAGS targets. The reason is that the
+# Makefile does not treat tags/TAGS as targets but as files
+# and thus won't rebuild them once they are in place.)
#
-all:
+all tags TAGS:
$(print_msg)
$(make)
@@ -72,8 +75,16 @@ clean:
$(make)
#
+# The build-test target is not really parallel, don't print the jobs info:
+#
+build-test:
+ @$(MAKE) -f tests/make --no-print-directory
+
+#
# All other targets get passed through:
#
%:
$(print_msg)
$(make)
+
+.PHONY: tags TAGS
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 7fc8f17..7257e7e 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -76,6 +76,7 @@ $(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD
CC = $(CROSS_COMPILE)gcc
AR = $(CROSS_COMPILE)ar
+PKG_CONFIG = $(CROSS_COMPILE)pkg-config
RM = rm -f
LN = ln -f
@@ -86,7 +87,7 @@ FLEX = flex
BISON = bison
STRIP = strip
-LK_DIR = $(srctree)/tools/lib/lk/
+LIB_DIR = $(srctree)/tools/lib/api/
TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
# include config/Makefile by default and rule out
@@ -105,7 +106,7 @@ ifeq ($(config),1)
include config/Makefile
endif
-export prefix bindir sharedir sysconfdir
+export prefix bindir sharedir sysconfdir DESTDIR
# sparse is architecture-neutral, which means that we need to tell it
# explicitly what architecture to check for. Fix this up for yours..
@@ -127,20 +128,20 @@ strip-libs = $(filter-out -l%,$(1))
ifneq ($(OUTPUT),)
TE_PATH=$(OUTPUT)
ifneq ($(subdir),)
- LK_PATH=$(OUTPUT)/../lib/lk/
+ LIB_PATH=$(OUTPUT)/../lib/api/
else
- LK_PATH=$(OUTPUT)
+ LIB_PATH=$(OUTPUT)
endif
else
TE_PATH=$(TRACE_EVENT_DIR)
- LK_PATH=$(LK_DIR)
+ LIB_PATH=$(LIB_DIR)
endif
LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
export LIBTRACEEVENT
-LIBLK = $(LK_PATH)liblk.a
-export LIBLK
+LIBAPIKFS = $(LIB_PATH)libapikfs.a
+export LIBAPIKFS
# python extension build directories
PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/
@@ -151,7 +152,7 @@ export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP
python-clean := $(call QUIET_CLEAN, python) $(RM) -r $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so
PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
-PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBLK)
+PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBAPIKFS)
$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS)
$(QUIET_GEN)CFLAGS='$(CFLAGS)' $(PYTHON_WORD) util/setup.py \
@@ -202,6 +203,7 @@ $(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
LIB_FILE=$(OUTPUT)libperf.a
+LIB_H += ../lib/symbol/kallsyms.h
LIB_H += ../../include/uapi/linux/perf_event.h
LIB_H += ../../include/linux/rbtree.h
LIB_H += ../../include/linux/list.h
@@ -210,7 +212,7 @@ LIB_H += ../../include/linux/hash.h
LIB_H += ../../include/linux/stringify.h
LIB_H += util/include/linux/bitmap.h
LIB_H += util/include/linux/bitops.h
-LIB_H += util/include/linux/compiler.h
+LIB_H += ../include/linux/compiler.h
LIB_H += util/include/linux/const.h
LIB_H += util/include/linux/ctype.h
LIB_H += util/include/linux/kernel.h
@@ -225,7 +227,7 @@ LIB_H += util/include/linux/string.h
LIB_H += util/include/linux/types.h
LIB_H += util/include/linux/linkage.h
LIB_H += util/include/asm/asm-offsets.h
-LIB_H += util/include/asm/bug.h
+LIB_H += ../include/asm/bug.h
LIB_H += util/include/asm/byteorder.h
LIB_H += util/include/asm/hweight.h
LIB_H += util/include/asm/swab.h
@@ -312,6 +314,7 @@ LIB_OBJS += $(OUTPUT)util/evlist.o
LIB_OBJS += $(OUTPUT)util/evsel.o
LIB_OBJS += $(OUTPUT)util/exec_cmd.o
LIB_OBJS += $(OUTPUT)util/help.o
+LIB_OBJS += $(OUTPUT)util/kallsyms.o
LIB_OBJS += $(OUTPUT)util/levenshtein.o
LIB_OBJS += $(OUTPUT)util/parse-options.o
LIB_OBJS += $(OUTPUT)util/parse-events.o
@@ -353,6 +356,7 @@ LIB_OBJS += $(OUTPUT)util/pmu-bison.o
LIB_OBJS += $(OUTPUT)util/trace-event-read.o
LIB_OBJS += $(OUTPUT)util/trace-event-info.o
LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o
+LIB_OBJS += $(OUTPUT)util/trace-event.o
LIB_OBJS += $(OUTPUT)util/svghelper.o
LIB_OBJS += $(OUTPUT)util/sort.o
LIB_OBJS += $(OUTPUT)util/hist.o
@@ -438,7 +442,7 @@ BUILTIN_OBJS += $(OUTPUT)builtin-inject.o
BUILTIN_OBJS += $(OUTPUT)tests/builtin-test.o
BUILTIN_OBJS += $(OUTPUT)builtin-mem.o
-PERFLIBS = $(LIB_FILE) $(LIBLK) $(LIBTRACEEVENT)
+PERFLIBS = $(LIB_FILE) $(LIBAPIKFS) $(LIBTRACEEVENT)
# We choose to avoid "if .. else if .. else .. endif endif"
# because maintaining the nesting to match is a pain. If
@@ -486,6 +490,7 @@ ifndef NO_SLANG
LIB_OBJS += $(OUTPUT)ui/browsers/hists.o
LIB_OBJS += $(OUTPUT)ui/browsers/map.o
LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o
+ LIB_OBJS += $(OUTPUT)ui/browsers/header.o
LIB_OBJS += $(OUTPUT)ui/tui/setup.o
LIB_OBJS += $(OUTPUT)ui/tui/util.o
LIB_OBJS += $(OUTPUT)ui/tui/helpline.o
@@ -671,6 +676,9 @@ $(OUTPUT)ui/browsers/map.o: ui/browsers/map.c $(OUTPUT)PERF-CFLAGS
$(OUTPUT)ui/browsers/scripts.o: ui/browsers/scripts.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
+$(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $<
+
$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
@@ -710,26 +718,33 @@ $(LIB_FILE): $(LIB_OBJS)
# libtraceevent.a
TE_SOURCES = $(wildcard $(TRACE_EVENT_DIR)*.[ch])
-$(LIBTRACEEVENT): $(TE_SOURCES)
- $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) CFLAGS="-g -Wall $(EXTRA_CFLAGS)" libtraceevent.a
+LIBTRACEEVENT_FLAGS = $(QUIET_SUBDIR1) O=$(OUTPUT)
+LIBTRACEEVENT_FLAGS += CFLAGS="-g -Wall $(EXTRA_CFLAGS)"
+LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ)
+
+$(LIBTRACEEVENT): $(TE_SOURCES) $(OUTPUT)PERF-CFLAGS
+ $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) libtraceevent.a plugins
$(LIBTRACEEVENT)-clean:
$(call QUIET_CLEAN, libtraceevent)
@$(MAKE) -C $(TRACE_EVENT_DIR) O=$(OUTPUT) clean >/dev/null
-LIBLK_SOURCES = $(wildcard $(LK_PATH)*.[ch])
+install-traceevent-plugins: $(LIBTRACEEVENT)
+ $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) install_plugins
+
+LIBAPIKFS_SOURCES = $(wildcard $(LIB_PATH)fs/*.[ch])
# if subdir is set, we've been called from above so target has been built
# already
-$(LIBLK): $(LIBLK_SOURCES)
+$(LIBAPIKFS): $(LIBAPIKFS_SOURCES)
ifeq ($(subdir),)
- $(QUIET_SUBDIR0)$(LK_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) liblk.a
+ $(QUIET_SUBDIR0)$(LIB_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) libapikfs.a
endif
-$(LIBLK)-clean:
+$(LIBAPIKFS)-clean:
ifeq ($(subdir),)
- $(call QUIET_CLEAN, liblk)
- @$(MAKE) -C $(LK_DIR) O=$(OUTPUT) clean >/dev/null
+ $(call QUIET_CLEAN, libapikfs)
+ @$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
endif
help:
@@ -785,7 +800,7 @@ cscope:
### Detect prefix changes
TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):\
- $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ)
+ $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ):$(plugindir_SQ)
$(OUTPUT)PERF-CFLAGS: .FORCE-PERF-CFLAGS
@FLAGS='$(TRACK_CFLAGS)'; \
@@ -840,16 +855,16 @@ ifndef NO_LIBPYTHON
$(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \
$(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
endif
- $(call QUIET_INSTALL, bash_completion-script) \
+ $(call QUIET_INSTALL, perf_completion-script) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'; \
- $(INSTALL) bash_completion '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
+ $(INSTALL) perf-completion.sh '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
$(call QUIET_INSTALL, tests) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
$(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
$(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
-install: install-bin try-install-man
+install: install-bin try-install-man install-traceevent-plugins
install-python_ext:
$(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
@@ -868,12 +883,11 @@ config-clean:
$(call QUIET_CLEAN, config)
@$(MAKE) -C config/feature-checks clean >/dev/null
-clean: $(LIBTRACEEVENT)-clean $(LIBLK)-clean config-clean
+clean: $(LIBTRACEEVENT)-clean $(LIBAPIKFS)-clean config-clean
$(call QUIET_CLEAN, core-objs) $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS) $(GTK_OBJS)
$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf
$(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex*
- $(call QUIET_CLEAN, Documentation)
- @$(MAKE) -C Documentation O=$(OUTPUT) clean >/dev/null
+ $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
$(python-clean)
#
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index aacef07..42faf36 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -154,8 +154,7 @@ static int perf_session_env__lookup_binutils_path(struct perf_session_env *env,
}
if (lookup_path(buf))
goto out;
- free(buf);
- buf = NULL;
+ zfree(&buf);
}
if (!strcmp(arch, "arm"))
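
zfree() is used throughout this series to replace the free(p); p = NULL; pairs. A sketch of what such a helper amounts to; the exact macro lives in perf's util headers and may differ in detail:

#include <stdlib.h>

/* Free the buffer and clear the caller's pointer in one step, so later
 * NULL checks stay valid and a repeated call is a harmless free(NULL). */
#define zfree(ptrp)		\
do {				\
	free(*(ptrp));		\
	*(ptrp) = NULL;		\
} while (0)

int main(void)
{
	char *buf = malloc(64);

	zfree(&buf);	/* replaces: free(buf); buf = NULL; */
	zfree(&buf);	/* second call is a no-op */
	return 0;
}
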
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 4087ab1..0da603b 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -69,15 +69,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
if (he == NULL)
return -ENOMEM;
- ret = 0;
- if (he->ms.sym != NULL) {
- struct annotation *notes = symbol__annotation(he->ms.sym);
- if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0)
- return -ENOMEM;
-
- ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
- }
-
+ ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
evsel->hists.stats.total_period += sample->period;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
return ret;
@@ -188,8 +180,7 @@ find_next:
* symbol, free he->ms.sym->src to signal we already
* processed this symbol.
*/
- free(notes->src);
- notes->src = NULL;
+ zfree(&notes->src);
}
}
}
@@ -241,7 +232,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
perf_session__fprintf_dsos(session, stdout);
total_nr_samples = 0;
- list_for_each_entry(pos, &session->evlist->entries, node) {
+ evlist__for_each(session->evlist, pos) {
struct hists *hists = &pos->hists;
u32 nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
@@ -373,7 +364,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
if (argc) {
/*
- * Special case: if there's an argument left then assume tha
+ * Special case: if there's an argument left then assume that
* it's a symbol filter:
*/
if (argc > 1)
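
Several hunks in this series replace open-coded list_for_each_entry(pos, &evlist->entries, node) loops with an evlist__for_each() wrapper, which presumably just wraps the kernel list iterator and hides the list head and node member. The sketch below shows the idea with an invented singly-linked layout and a slightly different macro name:

#include <stdio.h>

struct evsel {
	const char *name;
	struct evsel *next;	/* stand-in for the kernel list node */
};

struct evlist {
	struct evsel *entries;
};

#define evlist_for_each(evlist, evsel) \
	for ((evsel) = (evlist)->entries; (evsel); (evsel) = (evsel)->next)

int main(void)
{
	struct evsel b = { "instructions", NULL };
	struct evsel a = { "cycles", &b };
	struct evlist evlist = { &a };
	struct evsel *pos;

	evlist_for_each(&evlist, pos)
		printf("%s\n", pos->name);
	return 0;
}
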
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 3b67ea2..a77e312 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -356,9 +356,10 @@ static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
{
struct perf_evsel *e;
- list_for_each_entry(e, &evlist->entries, node)
+ evlist__for_each(evlist, e) {
if (perf_evsel__match2(evsel, e))
return e;
+ }
return NULL;
}
@@ -367,7 +368,7 @@ static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
struct hists *hists = &evsel->hists;
hists__collapse_resort(hists, NULL);
@@ -614,7 +615,7 @@ static void data_process(void)
struct perf_evsel *evsel_base;
bool first = true;
- list_for_each_entry(evsel_base, &evlist_base->entries, node) {
+ evlist__for_each(evlist_base, evsel_base) {
struct data__file *d;
int i;
@@ -654,7 +655,7 @@ static void data__free(struct data__file *d)
for (col = 0; col < PERF_HPP_DIFF__MAX_INDEX; col++) {
struct diff_hpp_fmt *fmt = &d->fmt[col];
- free(fmt->header);
+ zfree(&fmt->header);
}
}
@@ -769,6 +770,81 @@ static int hpp__entry_baseline(struct hist_entry *he, char *buf, size_t size)
return ret;
}
+static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp, struct hist_entry *he,
+ int comparison_method)
+{
+ struct diff_hpp_fmt *dfmt =
+ container_of(fmt, struct diff_hpp_fmt, fmt);
+ struct hist_entry *pair = get_pair_fmt(he, dfmt);
+ double diff;
+ s64 wdiff;
+ char pfmt[20] = " ";
+
+ if (!pair)
+ goto dummy_print;
+
+ switch (comparison_method) {
+ case COMPUTE_DELTA:
+ if (pair->diff.computed)
+ diff = pair->diff.period_ratio_delta;
+ else
+ diff = compute_delta(he, pair);
+
+ if (fabs(diff) < 0.01)
+ goto dummy_print;
+ scnprintf(pfmt, 20, "%%%+d.2f%%%%", dfmt->header_width - 1);
+ return percent_color_snprintf(hpp->buf, hpp->size,
+ pfmt, diff);
+ case COMPUTE_RATIO:
+ if (he->dummy)
+ goto dummy_print;
+ if (pair->diff.computed)
+ diff = pair->diff.period_ratio;
+ else
+ diff = compute_ratio(he, pair);
+
+ scnprintf(pfmt, 20, "%%%d.6f", dfmt->header_width);
+ return value_color_snprintf(hpp->buf, hpp->size,
+ pfmt, diff);
+ case COMPUTE_WEIGHTED_DIFF:
+ if (he->dummy)
+ goto dummy_print;
+ if (pair->diff.computed)
+ wdiff = pair->diff.wdiff;
+ else
+ wdiff = compute_wdiff(he, pair);
+
+ scnprintf(pfmt, 20, "%%14ld", dfmt->header_width);
+ return color_snprintf(hpp->buf, hpp->size,
+ get_percent_color(wdiff),
+ pfmt, wdiff);
+ default:
+ BUG_ON(1);
+ }
+dummy_print:
+ return scnprintf(hpp->buf, hpp->size, "%*s",
+ dfmt->header_width, pfmt);
+}
+
+static int hpp__color_delta(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp, struct hist_entry *he)
+{
+ return __hpp__color_compare(fmt, hpp, he, COMPUTE_DELTA);
+}
+
+static int hpp__color_ratio(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp, struct hist_entry *he)
+{
+ return __hpp__color_compare(fmt, hpp, he, COMPUTE_RATIO);
+}
+
+static int hpp__color_wdiff(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp, struct hist_entry *he)
+{
+ return __hpp__color_compare(fmt, hpp, he, COMPUTE_WEIGHTED_DIFF);
+}
+
static void
hpp__entry_unpair(struct hist_entry *he, int idx, char *buf, size_t size)
{
@@ -940,8 +1016,22 @@ static void data__hpp_register(struct data__file *d, int idx)
fmt->entry = hpp__entry_global;
/* TODO more colors */
- if (idx == PERF_HPP_DIFF__BASELINE)
+ switch (idx) {
+ case PERF_HPP_DIFF__BASELINE:
fmt->color = hpp__color_baseline;
+ break;
+ case PERF_HPP_DIFF__DELTA:
+ fmt->color = hpp__color_delta;
+ break;
+ case PERF_HPP_DIFF__RATIO:
+ fmt->color = hpp__color_ratio;
+ break;
+ case PERF_HPP_DIFF__WEIGHTED_DIFF:
+ fmt->color = hpp__color_wdiff;
+ break;
+ default:
+ break;
+ }
init_header(d, dfmt);
perf_hpp__column_register(fmt);
@@ -1000,8 +1090,7 @@ static int data_init(int argc, const char **argv)
data__files_cnt = argc;
use_default = false;
}
- } else if (symbol_conf.default_guest_vmlinux_name ||
- symbol_conf.default_guest_kallsyms) {
+ } else if (perf_guest) {
defaults[0] = "perf.data.host";
defaults[1] = "perf.data.guest";
}
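
The color helpers added above build their printf format string at run time so the numeric field width can follow the column header width. A tiny standalone illustration of the "%%%+d.2f%%%%" construction used for the delta column (the width value here is made up):

#include <stdio.h>

int main(void)
{
	char pfmt[20];
	int header_width = 9;
	double delta = 3.21;

	/* becomes "%+8.2f%%": signed, 8 wide, two decimals, literal '%' */
	snprintf(pfmt, sizeof(pfmt), "%%%+d.2f%%%%", header_width - 1);
	printf(pfmt, delta);
	putchar('\n');
	return 0;
}
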
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 20b0f12..c99e0de 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -29,7 +29,7 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
if (session == NULL)
return -ENOMEM;
- list_for_each_entry(pos, &session->evlist->entries, node)
+ evlist__for_each(session->evlist, pos)
perf_evsel__fprintf(pos, details, stdout);
perf_session__delete(session);
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 6a25085..b346601 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -22,14 +22,13 @@
#include <linux/list.h>
struct perf_inject {
- struct perf_tool tool;
- bool build_ids;
- bool sched_stat;
- const char *input_name;
- int pipe_output,
- output;
- u64 bytes_written;
- struct list_head samples;
+ struct perf_tool tool;
+ bool build_ids;
+ bool sched_stat;
+ const char *input_name;
+ struct perf_data_file output;
+ u64 bytes_written;
+ struct list_head samples;
};
struct event_entry {
@@ -42,21 +41,14 @@ static int perf_event__repipe_synth(struct perf_tool *tool,
union perf_event *event)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
- uint32_t size;
- void *buf = event;
+ ssize_t size;
- size = event->header.size;
-
- while (size) {
- int ret = write(inject->output, buf, size);
- if (ret < 0)
- return -errno;
-
- size -= ret;
- buf += ret;
- inject->bytes_written += ret;
- }
+ size = perf_data_file__write(&inject->output, event,
+ event->header.size);
+ if (size < 0)
+ return -errno;
+ inject->bytes_written += size;
return 0;
}
@@ -80,7 +72,7 @@ static int perf_event__repipe_attr(struct perf_tool *tool,
if (ret)
return ret;
- if (!inject->pipe_output)
+	if (!inject->output.is_pipe)
return 0;
return perf_event__repipe_synth(tool, event);
@@ -355,6 +347,7 @@ static int __cmd_inject(struct perf_inject *inject)
.path = inject->input_name,
.mode = PERF_DATA_MODE_READ,
};
+ struct perf_data_file *file_out = &inject->output;
signal(SIGINT, sig_handler);
@@ -376,7 +369,7 @@ static int __cmd_inject(struct perf_inject *inject)
inject->tool.ordered_samples = true;
- list_for_each_entry(evsel, &session->evlist->entries, node) {
+ evlist__for_each(session->evlist, evsel) {
const char *name = perf_evsel__name(evsel);
if (!strcmp(name, "sched:sched_switch")) {
@@ -391,14 +384,14 @@ static int __cmd_inject(struct perf_inject *inject)
}
}
- if (!inject->pipe_output)
- lseek(inject->output, session->header.data_offset, SEEK_SET);
+ if (!file_out->is_pipe)
+ lseek(file_out->fd, session->header.data_offset, SEEK_SET);
ret = perf_session__process_events(session, &inject->tool);
- if (!inject->pipe_output) {
+ if (!file_out->is_pipe) {
session->header.data_size = inject->bytes_written;
- perf_session__write_header(session, session->evlist, inject->output, true);
+ perf_session__write_header(session, session->evlist, file_out->fd, true);
}
perf_session__delete(session);
@@ -427,14 +420,17 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
},
.input_name = "-",
.samples = LIST_HEAD_INIT(inject.samples),
+ .output = {
+ .path = "-",
+ .mode = PERF_DATA_MODE_WRITE,
+ },
};
- const char *output_name = "-";
const struct option options[] = {
OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
"Inject build-ids into the output stream"),
OPT_STRING('i', "input", &inject.input_name, "file",
"input file name"),
- OPT_STRING('o', "output", &output_name, "file",
+ OPT_STRING('o', "output", &inject.output.path, "file",
"output file name"),
OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
"Merge sched-stat and sched-switch for getting events "
@@ -456,16 +452,9 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
if (argc)
usage_with_options(inject_usage, options);
- if (!strcmp(output_name, "-")) {
- inject.pipe_output = 1;
- inject.output = STDOUT_FILENO;
- } else {
- inject.output = open(output_name, O_CREAT | O_WRONLY | O_TRUNC,
- S_IRUSR | S_IWUSR);
- if (inject.output < 0) {
- perror("failed to create output file");
- return -1;
- }
+ if (perf_data_file__open(&inject.output)) {
+ perror("failed to create output file");
+ return -1;
}
if (symbol__init() < 0)
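
The hunk above drops an open-coded short-write loop in favour of perf_data_file__write(), which presumably centralizes the same retry-until-written logic. A sketch of that loop for reference; write_all() and its error convention are invented here:

#include <errno.h>
#include <unistd.h>

static ssize_t write_all(int fd, const void *buf, size_t size)
{
	size_t left = size;
	const char *p = buf;

	while (left) {
		ssize_t ret = write(fd, p, left);

		if (ret < 0)
			return -errno;	/* caller decides how to report */
		left -= ret;
		p += ret;
	}
	return size;
}

int main(void)
{
	return write_all(1, "hello\n", 6) < 0;
}
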
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index f8bf5f2..a735051 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -13,7 +13,7 @@
#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/debug.h"
-#include <lk/debugfs.h>
+#include <api/fs/debugfs.h>
#include "util/tool.h"
#include "util/stat.h"
#include "util/top.h"
@@ -89,7 +89,7 @@ struct exit_reasons_table {
struct perf_kvm_stat {
struct perf_tool tool;
- struct perf_record_opts opts;
+ struct record_opts opts;
struct perf_evlist *evlist;
struct perf_session *session;
@@ -1158,9 +1158,7 @@ out:
if (kvm->timerfd >= 0)
close(kvm->timerfd);
- if (pollfds)
- free(pollfds);
-
+ free(pollfds);
return err;
}
@@ -1176,7 +1174,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
* Note: exclude_{guest,host} do not apply here.
* This command processes KVM tracepoints from host only
*/
- list_for_each_entry(pos, &evlist->entries, node) {
+ evlist__for_each(evlist, pos) {
struct perf_event_attr *attr = &pos->attr;
/* make sure these *are* set */
@@ -1232,7 +1230,7 @@ static int read_events(struct perf_kvm_stat *kvm)
.ordered_samples = true,
};
struct perf_data_file file = {
- .path = input_name,
+ .path = kvm->file_name,
.mode = PERF_DATA_MODE_READ,
};
@@ -1558,10 +1556,8 @@ out:
if (kvm->session)
perf_session__delete(kvm->session);
kvm->session = NULL;
- if (kvm->evlist) {
- perf_evlist__delete_maps(kvm->evlist);
+ if (kvm->evlist)
perf_evlist__delete(kvm->evlist);
- }
return err;
}
@@ -1690,6 +1686,8 @@ int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
"file", "file saving guest os /proc/kallsyms"),
OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
"file", "file saving guest os /proc/modules"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show counter open errors, etc)"),
OPT_END()
};
@@ -1711,12 +1709,7 @@ int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
perf_guest = 1;
if (!file_name) {
- if (perf_host && !perf_guest)
- file_name = strdup("perf.data.host");
- else if (!perf_host && perf_guest)
- file_name = strdup("perf.data.guest");
- else
- file_name = strdup("perf.data.kvm");
+ file_name = get_filename_for_perf_kvm();
if (!file_name) {
pr_err("Failed to allocate memory for filename\n");
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 31c00f1..2e3ade69 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -62,7 +62,6 @@ static int
dump_raw_samples(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
- struct perf_evsel *evsel __maybe_unused,
struct machine *machine)
{
struct perf_mem *mem = container_of(tool, struct perf_mem, tool);
@@ -112,10 +111,10 @@ dump_raw_samples(struct perf_tool *tool,
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
- struct perf_evsel *evsel,
+ struct perf_evsel *evsel __maybe_unused,
struct machine *machine)
{
- return dump_raw_samples(tool, event, sample, evsel, machine);
+ return dump_raw_samples(tool, event, sample, machine);
}
static int report_raw_events(struct perf_mem *mem)
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 6ea9e85..7894888 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -37,7 +37,7 @@
#include "util/strfilter.h"
#include "util/symbol.h"
#include "util/debug.h"
-#include <lk/debugfs.h>
+#include <api/fs/debugfs.h>
#include "util/parse-options.h"
#include "util/probe-finder.h"
#include "util/probe-event.h"
@@ -59,7 +59,7 @@ static struct {
struct perf_probe_event events[MAX_PROBES];
struct strlist *dellist;
struct line_range line_range;
- const char *target;
+ char *target;
int max_probe_points;
struct strfilter *filter;
} params;
@@ -98,7 +98,10 @@ static int set_target(const char *ptr)
* short module name.
*/
if (!params.target && ptr && *ptr == '/') {
- params.target = ptr;
+ params.target = strdup(ptr);
+ if (!params.target)
+ return -ENOMEM;
+
found = 1;
buf = ptr + (strlen(ptr) - 3);
@@ -116,6 +119,9 @@ static int parse_probe_event_argv(int argc, const char **argv)
char *buf;
found_target = set_target(argv[0]);
+ if (found_target < 0)
+ return found_target;
+
if (found_target && argc == 1)
return 0;
@@ -169,6 +175,7 @@ static int opt_set_target(const struct option *opt, const char *str,
int unset __maybe_unused)
{
int ret = -ENOENT;
+ char *tmp;
if (str && !params.target) {
if (!strcmp(opt->long_name, "exec"))
@@ -180,7 +187,19 @@ static int opt_set_target(const struct option *opt, const char *str,
else
return ret;
- params.target = str;
+ /* Expand given path to absolute path, except for modulename */
+ if (params.uprobes || strchr(str, '/')) {
+ tmp = realpath(str, NULL);
+ if (!tmp) {
+ pr_warning("Failed to get the absolute path of %s: %m\n", str);
+ return ret;
+ }
+ } else {
+ tmp = strdup(str);
+ if (!tmp)
+ return -ENOMEM;
+ }
+ params.target = tmp;
ret = 0;
}
@@ -204,7 +223,6 @@ static int opt_show_lines(const struct option *opt __maybe_unused,
params.show_lines = true;
ret = parse_line_range_desc(str, &params.line_range);
- INIT_LIST_HEAD(&params.line_range.line_list);
return ret;
}
@@ -250,7 +268,28 @@ static int opt_set_filter(const struct option *opt __maybe_unused,
return 0;
}
-int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
+static void init_params(void)
+{
+ line_range__init(&params.line_range);
+}
+
+static void cleanup_params(void)
+{
+ int i;
+
+ for (i = 0; i < params.nevents; i++)
+ clear_perf_probe_event(params.events + i);
+ if (params.dellist)
+ strlist__delete(params.dellist);
+ line_range__clear(&params.line_range);
+ free(params.target);
+ if (params.filter)
+ strfilter__delete(params.filter);
+ memset(&params, 0, sizeof(params));
+}
+
+static int
+__cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
{
const char * const probe_usage[] = {
"perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]",
@@ -404,6 +443,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
ret = show_available_funcs(params.target, params.filter,
params.uprobes);
strfilter__delete(params.filter);
+ params.filter = NULL;
if (ret < 0)
pr_err(" Error: Failed to show functions."
" (%d)\n", ret);
@@ -411,7 +451,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
}
#ifdef HAVE_DWARF_SUPPORT
- if (params.show_lines && !params.uprobes) {
+ if (params.show_lines) {
if (params.mod_events) {
pr_err(" Error: Don't use --line with"
" --add/--del.\n");
@@ -443,6 +483,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
params.filter,
params.show_ext_vars);
strfilter__delete(params.filter);
+ params.filter = NULL;
if (ret < 0)
pr_err(" Error: Failed to show vars. (%d)\n", ret);
return ret;
@@ -451,7 +492,6 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
if (params.dellist) {
ret = del_perf_probe_events(params.dellist);
- strlist__delete(params.dellist);
if (ret < 0) {
pr_err(" Error: Failed to delete events. (%d)\n", ret);
return ret;
@@ -470,3 +510,14 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
}
return 0;
}
+
+int cmd_probe(int argc, const char **argv, const char *prefix)
+{
+ int ret;
+
+ init_params();
+ ret = __cmd_probe(argc, argv, prefix);
+ cleanup_params();
+
+ return ret;
+}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 7c8020a..3c394bf 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -62,9 +62,9 @@ static void __handle_on_exit_funcs(void)
}
#endif
-struct perf_record {
+struct record {
struct perf_tool tool;
- struct perf_record_opts opts;
+ struct record_opts opts;
u64 bytes_written;
struct perf_data_file file;
struct perf_evlist *evlist;
@@ -76,46 +76,27 @@ struct perf_record {
long samples;
};
-static int do_write_output(struct perf_record *rec, void *buf, size_t size)
+static int record__write(struct record *rec, void *bf, size_t size)
{
- struct perf_data_file *file = &rec->file;
-
- while (size) {
- ssize_t ret = write(file->fd, buf, size);
-
- if (ret < 0) {
- pr_err("failed to write perf data, error: %m\n");
- return -1;
- }
-
- size -= ret;
- buf += ret;
-
- rec->bytes_written += ret;
+ if (perf_data_file__write(rec->session->file, bf, size) < 0) {
+ pr_err("failed to write perf data, error: %m\n");
+ return -1;
}
+ rec->bytes_written += size;
return 0;
}
-static int write_output(struct perf_record *rec, void *buf, size_t size)
-{
- return do_write_output(rec, buf, size);
-}
-
static int process_synthesized_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
- struct perf_record *rec = container_of(tool, struct perf_record, tool);
- if (write_output(rec, event, event->header.size) < 0)
- return -1;
-
- return 0;
+ struct record *rec = container_of(tool, struct record, tool);
+ return record__write(rec, event, event->header.size);
}
-static int perf_record__mmap_read(struct perf_record *rec,
- struct perf_mmap *md)
+static int record__mmap_read(struct record *rec, struct perf_mmap *md)
{
unsigned int head = perf_mmap__read_head(md);
unsigned int old = md->prev;
@@ -136,7 +117,7 @@ static int perf_record__mmap_read(struct perf_record *rec,
size = md->mask + 1 - (old & md->mask);
old += size;
- if (write_output(rec, buf, size) < 0) {
+ if (record__write(rec, buf, size) < 0) {
rc = -1;
goto out;
}
@@ -146,7 +127,7 @@ static int perf_record__mmap_read(struct perf_record *rec,
size = head - old;
old += size;
- if (write_output(rec, buf, size) < 0) {
+ if (record__write(rec, buf, size) < 0) {
rc = -1;
goto out;
}
@@ -171,9 +152,9 @@ static void sig_handler(int sig)
signr = sig;
}
-static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
+static void record__sig_exit(int exit_status __maybe_unused, void *arg)
{
- struct perf_record *rec = arg;
+ struct record *rec = arg;
int status;
if (rec->evlist->workload.pid > 0) {
@@ -191,18 +172,18 @@ static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
signal(signr, SIG_DFL);
}
-static int perf_record__open(struct perf_record *rec)
+static int record__open(struct record *rec)
{
char msg[512];
struct perf_evsel *pos;
struct perf_evlist *evlist = rec->evlist;
struct perf_session *session = rec->session;
- struct perf_record_opts *opts = &rec->opts;
+ struct record_opts *opts = &rec->opts;
int rc = 0;
perf_evlist__config(evlist, opts);
- list_for_each_entry(pos, &evlist->entries, node) {
+ evlist__for_each(evlist, pos) {
try_again:
if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
@@ -232,7 +213,7 @@ try_again:
"Consider increasing "
"/proc/sys/kernel/perf_event_mlock_kb,\n"
"or try again with a smaller value of -m/--mmap_pages.\n"
- "(current value: %d)\n", opts->mmap_pages);
+ "(current value: %u)\n", opts->mmap_pages);
rc = -errno;
} else {
pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
@@ -247,7 +228,7 @@ out:
return rc;
}
-static int process_buildids(struct perf_record *rec)
+static int process_buildids(struct record *rec)
{
struct perf_data_file *file = &rec->file;
struct perf_session *session = rec->session;
@@ -262,9 +243,9 @@ static int process_buildids(struct perf_record *rec)
size, &build_id__mark_dso_hit_ops);
}
-static void perf_record__exit(int status, void *arg)
+static void record__exit(int status, void *arg)
{
- struct perf_record *rec = arg;
+ struct record *rec = arg;
struct perf_data_file *file = &rec->file;
if (status != 0)
@@ -320,14 +301,14 @@ static struct perf_event_header finished_round_event = {
.type = PERF_RECORD_FINISHED_ROUND,
};
-static int perf_record__mmap_read_all(struct perf_record *rec)
+static int record__mmap_read_all(struct record *rec)
{
int i;
int rc = 0;
for (i = 0; i < rec->evlist->nr_mmaps; i++) {
if (rec->evlist->mmap[i].base) {
- if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
+ if (record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
rc = -1;
goto out;
}
@@ -335,16 +316,14 @@ static int perf_record__mmap_read_all(struct perf_record *rec)
}
if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
- rc = write_output(rec, &finished_round_event,
- sizeof(finished_round_event));
+ rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
out:
return rc;
}
-static void perf_record__init_features(struct perf_record *rec)
+static void record__init_features(struct record *rec)
{
- struct perf_evlist *evsel_list = rec->evlist;
struct perf_session *session = rec->session;
int feat;
@@ -354,32 +333,46 @@ static void perf_record__init_features(struct perf_record *rec)
if (rec->no_buildid)
perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
- if (!have_tracepoints(&evsel_list->entries))
+ if (!have_tracepoints(&rec->evlist->entries))
perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
if (!rec->opts.branch_stack)
perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}
-static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
+static volatile int workload_exec_errno;
+
+/*
+ * perf_evlist__prepare_workload will send a SIGUSR1
+ * if the fork fails, since we asked by setting its
+ * want_signal to true.
+ */
+static void workload_exec_failed_signal(int signo, siginfo_t *info,
+ void *ucontext __maybe_unused)
+{
+ workload_exec_errno = info->si_value.sival_int;
+ done = 1;
+ signr = signo;
+ child_finished = 1;
+}
+
+static int __cmd_record(struct record *rec, int argc, const char **argv)
{
int err;
unsigned long waking = 0;
const bool forks = argc > 0;
struct machine *machine;
struct perf_tool *tool = &rec->tool;
- struct perf_record_opts *opts = &rec->opts;
- struct perf_evlist *evsel_list = rec->evlist;
+ struct record_opts *opts = &rec->opts;
struct perf_data_file *file = &rec->file;
struct perf_session *session;
bool disabled = false;
rec->progname = argv[0];
- on_exit(perf_record__sig_exit, rec);
+ on_exit(record__sig_exit, rec);
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
- signal(SIGUSR1, sig_handler);
signal(SIGTERM, sig_handler);
session = perf_session__new(file, false, NULL);
@@ -390,37 +383,37 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
rec->session = session;
- perf_record__init_features(rec);
+ record__init_features(rec);
if (forks) {
- err = perf_evlist__prepare_workload(evsel_list, &opts->target,
+ err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
argv, file->is_pipe,
- true);
+ workload_exec_failed_signal);
if (err < 0) {
pr_err("Couldn't run the workload!\n");
goto out_delete_session;
}
}
- if (perf_record__open(rec) != 0) {
+ if (record__open(rec) != 0) {
err = -1;
goto out_delete_session;
}
- if (!evsel_list->nr_groups)
+ if (!rec->evlist->nr_groups)
perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
/*
- * perf_session__delete(session) will be called at perf_record__exit()
+ * perf_session__delete(session) will be called at record__exit()
*/
- on_exit(perf_record__exit, rec);
+ on_exit(record__exit, rec);
if (file->is_pipe) {
err = perf_header__write_pipe(file->fd);
if (err < 0)
goto out_delete_session;
} else {
- err = perf_session__write_header(session, evsel_list,
+ err = perf_session__write_header(session, rec->evlist,
file->fd, false);
if (err < 0)
goto out_delete_session;
@@ -444,7 +437,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
goto out_delete_session;
}
- if (have_tracepoints(&evsel_list->entries)) {
+ if (have_tracepoints(&rec->evlist->entries)) {
/*
* FIXME err <= 0 here actually means that
* there were no tracepoints so its not really
@@ -453,7 +446,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
* return this more properly and also
* propagate errors that now are calling die()
*/
- err = perf_event__synthesize_tracing_data(tool, file->fd, evsel_list,
+ err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
process_synthesized_event);
if (err <= 0) {
pr_err("Couldn't record tracing data.\n");
@@ -485,7 +478,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
perf_event__synthesize_guest_os, tool);
}
- err = __machine__synthesize_threads(machine, tool, &opts->target, evsel_list->threads,
+ err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
process_synthesized_event, opts->sample_address);
if (err != 0)
goto out_delete_session;
@@ -506,19 +499,24 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
* (apart from group members) have enable_on_exec=1 set,
* so don't spoil it by prematurely enabling them.
*/
- if (!target__none(&opts->target))
- perf_evlist__enable(evsel_list);
+ if (!target__none(&opts->target) && !opts->initial_delay)
+ perf_evlist__enable(rec->evlist);
/*
* Let the child rip
*/
if (forks)
- perf_evlist__start_workload(evsel_list);
+ perf_evlist__start_workload(rec->evlist);
+
+ if (opts->initial_delay) {
+ usleep(opts->initial_delay * 1000);
+ perf_evlist__enable(rec->evlist);
+ }
for (;;) {
int hits = rec->samples;
- if (perf_record__mmap_read_all(rec) < 0) {
+ if (record__mmap_read_all(rec) < 0) {
err = -1;
goto out_delete_session;
}
@@ -526,7 +524,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
if (hits == rec->samples) {
if (done)
break;
- err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
+ err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1);
waking++;
}
@@ -536,11 +534,19 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
* disable events in this case.
*/
if (done && !disabled && !target__none(&opts->target)) {
- perf_evlist__disable(evsel_list);
+ perf_evlist__disable(rec->evlist);
disabled = true;
}
}
+ if (forks && workload_exec_errno) {
+ char msg[512];
+ const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
+ pr_err("Workload failed: %s\n", emsg);
+ err = -1;
+ goto out_delete_session;
+ }
+
if (quiet || signr == SIGUSR1)
return 0;
@@ -677,7 +683,7 @@ static int get_stack_size(char *str, unsigned long *_size)
}
#endif /* HAVE_LIBUNWIND_SUPPORT */
-int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
+int record_parse_callchain(const char *arg, struct record_opts *opts)
{
char *tok, *name, *saveptr = NULL;
char *buf;
@@ -733,7 +739,7 @@ int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
return ret;
}
-static void callchain_debug(struct perf_record_opts *opts)
+static void callchain_debug(struct record_opts *opts)
{
pr_debug("callchain: type %d\n", opts->call_graph);
@@ -746,7 +752,7 @@ int record_parse_callchain_opt(const struct option *opt,
const char *arg,
int unset)
{
- struct perf_record_opts *opts = opt->value;
+ struct record_opts *opts = opt->value;
int ret;
/* --no-call-graph */
@@ -767,7 +773,7 @@ int record_callchain_opt(const struct option *opt,
const char *arg __maybe_unused,
int unset __maybe_unused)
{
- struct perf_record_opts *opts = opt->value;
+ struct record_opts *opts = opt->value;
if (opts->call_graph == CALLCHAIN_NONE)
opts->call_graph = CALLCHAIN_FP;
@@ -783,8 +789,8 @@ static const char * const record_usage[] = {
};
/*
- * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
- * because we need to have access to it in perf_record__exit, that is called
+ * XXX Ideally would be local to cmd_record() and passed to a record__new
+ * because we need to have access to it in record__exit, that is called
* after cmd_record() exits, but since record_options need to be accessible to
* builtin-script, leave it here.
*
@@ -792,7 +798,7 @@ static const char * const record_usage[] = {
*
* Just say no to tons of global variables, sigh.
*/
-static struct perf_record record = {
+static struct record record = {
.opts = {
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
@@ -800,6 +806,7 @@ static struct perf_record record = {
.freq = 4000,
.target = {
.uses_mmap = true,
+ .default_per_cpu = true,
},
},
};
@@ -815,7 +822,7 @@ const char record_callchain_help[] = CALLCHAIN_HELP "fp";
/*
* XXX Will stay a global variable till we fix builtin-script.c to stop messing
* with it and switch to use the library functions in perf_evlist that came
- * from builtin-record.c, i.e. use perf_record_opts,
+ * from builtin-record.c, i.e. use record_opts,
* perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
* using pipes, etc.
*/
@@ -831,7 +838,7 @@ const struct option record_options[] = {
"record events on existing thread id"),
OPT_INTEGER('r', "realtime", &record.realtime_prio,
"collect data with this RT SCHED_FIFO priority"),
- OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
+ OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
"collect data without buffering"),
OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
"collect raw sample records from all opened counters"),
@@ -842,8 +849,9 @@ const struct option record_options[] = {
OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
OPT_STRING('o', "output", &record.file.path, "file",
"output file name"),
- OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
- "child tasks do not inherit counters"),
+ OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
+ &record.opts.no_inherit_set,
+ "child tasks do not inherit counters"),
OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
"number of mmap data pages",
@@ -874,6 +882,8 @@ const struct option record_options[] = {
OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
"monitor event in cgroup name only",
parse_cgroups),
+ OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
+ "ms to wait before starting measurement after program start"),
OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
"user to profile"),
@@ -888,24 +898,21 @@ const struct option record_options[] = {
"sample by weight (on special events only)"),
OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
"sample transaction flags (special events only)"),
- OPT_BOOLEAN(0, "force-per-cpu", &record.opts.target.force_per_cpu,
- "force the use of per-cpu mmaps"),
+ OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
+ "use per-thread mmaps"),
OPT_END()
};
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
int err = -ENOMEM;
- struct perf_evlist *evsel_list;
- struct perf_record *rec = &record;
+ struct record *rec = &record;
char errbuf[BUFSIZ];
- evsel_list = perf_evlist__new();
- if (evsel_list == NULL)
+ rec->evlist = perf_evlist__new();
+ if (rec->evlist == NULL)
return -ENOMEM;
- rec->evlist = evsel_list;
-
argc = parse_options(argc, argv, record_options, record_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc && target__none(&rec->opts.target))
@@ -932,12 +939,15 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
if (rec->no_buildid_cache || rec->no_buildid)
disable_buildid_cache();
- if (evsel_list->nr_entries == 0 &&
- perf_evlist__add_default(evsel_list) < 0) {
+ if (rec->evlist->nr_entries == 0 &&
+ perf_evlist__add_default(rec->evlist) < 0) {
pr_err("Not enough memory for event selector list\n");
goto out_symbol_exit;
}
+ if (rec->opts.target.tid && !rec->opts.no_inherit_set)
+ rec->opts.no_inherit = true;
+
err = target__validate(&rec->opts.target);
if (err) {
target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
@@ -956,20 +966,15 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
}
err = -ENOMEM;
- if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
+ if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
usage_with_options(record_usage, record_options);
- if (perf_record_opts__config(&rec->opts)) {
+ if (record_opts__config(&rec->opts)) {
err = -EINVAL;
- goto out_free_fd;
+ goto out_symbol_exit;
}
err = __cmd_record(&record, argc, argv);
-
- perf_evlist__munmap(evsel_list);
- perf_evlist__close(evsel_list);
-out_free_fd:
- perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
symbol__exit();
return err;
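
The builtin-record.c hunks above switch -i/--no-inherit from OPT_BOOLEAN to OPT_BOOLEAN_SET so cmd_record() can tell an explicit user choice apart from the compiled-in default, and only then force no_inherit when a --tid target is given. A minimal stand-alone sketch of that value-plus-was-it-set pattern follows; the struct and option names here are invented for illustration and are not perf's option library.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-ins for the record.opts fields touched above. */
    struct opts {
            bool no_inherit;        /* current value (default: false)    */
            bool no_inherit_set;    /* true only if the user touched it  */
            const char *tid;        /* target thread id, if any          */
    };

    /* Toy parser; the real code gets this from OPT_BOOLEAN_SET(). */
    static void parse_arg(struct opts *o, const char *arg)
    {
            if (!strcmp(arg, "--no-inherit")) {
                    o->no_inherit = true;
                    o->no_inherit_set = true;   /* remember the explicit choice */
            } else if (!strncmp(arg, "--tid=", 6)) {
                    o->tid = arg + 6;
            }
    }

    int main(int argc, char **argv)
    {
            struct opts o = { .no_inherit = false };

            for (int i = 1; i < argc; i++)
                    parse_arg(&o, argv[i]);

            /* Mirrors the new cmd_record() logic: when attaching to an existing
             * thread, default to no inheritance unless the user said otherwise. */
            if (o.tid && !o.no_inherit_set)
                    o.no_inherit = true;

            printf("no_inherit=%d\n", o.no_inherit);
            return 0;
    }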
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 8cf8e66..3c53ec2 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -39,7 +39,7 @@
#include <dlfcn.h>
#include <linux/bitmap.h>
-struct perf_report {
+struct report {
struct perf_tool tool;
struct perf_session *session;
bool force, use_tui, use_gtk, use_stdio;
@@ -49,6 +49,8 @@ struct perf_report {
bool show_threads;
bool inverted_callchain;
bool mem_mode;
+ bool header;
+ bool header_only;
int max_stack;
struct perf_read_values show_threads_values;
const char *pretty_printing_style;
@@ -58,14 +60,14 @@ struct perf_report {
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};
-static int perf_report_config(const char *var, const char *value, void *cb)
+static int report__config(const char *var, const char *value, void *cb)
{
if (!strcmp(var, "report.group")) {
symbol_conf.event_group = perf_config_bool(var, value);
return 0;
}
if (!strcmp(var, "report.percent-limit")) {
- struct perf_report *rep = cb;
+ struct report *rep = cb;
rep->min_percent = strtof(value, NULL);
return 0;
}
@@ -73,31 +75,22 @@ static int perf_report_config(const char *var, const char *value, void *cb)
return perf_default_config(var, value, cb);
}
-static int perf_report__add_mem_hist_entry(struct perf_tool *tool,
- struct addr_location *al,
- struct perf_sample *sample,
- struct perf_evsel *evsel,
- struct machine *machine,
- union perf_event *event)
+static int report__add_mem_hist_entry(struct perf_tool *tool, struct addr_location *al,
+ struct perf_sample *sample, struct perf_evsel *evsel,
+ union perf_event *event)
{
- struct perf_report *rep = container_of(tool, struct perf_report, tool);
+ struct report *rep = container_of(tool, struct report, tool);
struct symbol *parent = NULL;
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- int err = 0;
struct hist_entry *he;
struct mem_info *mi, *mx;
uint64_t cost;
+ int err = sample__resolve_callchain(sample, &parent, evsel, al, rep->max_stack);
- if ((sort__has_parent || symbol_conf.use_callchain) &&
- sample->callchain) {
- err = machine__resolve_callchain(machine, evsel, al->thread,
- sample, &parent, al,
- rep->max_stack);
- if (err)
- return err;
- }
+ if (err)
+ return err;
- mi = machine__resolve_mem(machine, al->thread, sample, cpumode);
+ mi = machine__resolve_mem(al->machine, al->thread, sample, cpumode);
if (!mi)
return -ENOMEM;
@@ -120,77 +113,36 @@ static int perf_report__add_mem_hist_entry(struct perf_tool *tool,
if (!he)
return -ENOMEM;
- /*
- * In the TUI browser, we are doing integrated annotation,
- * so we don't allocate the extra space needed because the stdio
- * code will not use it.
- */
- if (sort__has_sym && he->ms.sym && use_browser > 0) {
- struct annotation *notes = symbol__annotation(he->ms.sym);
-
- assert(evsel != NULL);
-
- if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0)
- goto out;
-
- err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
- if (err)
- goto out;
- }
+ err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+ if (err)
+ goto out;
- if (sort__has_sym && he->mem_info->daddr.sym && use_browser > 0) {
- struct annotation *notes;
-
- mx = he->mem_info;
-
- notes = symbol__annotation(mx->daddr.sym);
- if (notes->src == NULL && symbol__alloc_hist(mx->daddr.sym) < 0)
- goto out;
-
- err = symbol__inc_addr_samples(mx->daddr.sym,
- mx->daddr.map,
- evsel->idx,
- mx->daddr.al_addr);
- if (err)
- goto out;
- }
+ mx = he->mem_info;
+ err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx);
+ if (err)
+ goto out;
evsel->hists.stats.total_period += cost;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
- err = 0;
-
- if (symbol_conf.use_callchain) {
- err = callchain_append(he->callchain,
- &callchain_cursor,
- sample->period);
- }
+ err = hist_entry__append_callchain(he, sample);
out:
return err;
}
-static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
- struct addr_location *al,
- struct perf_sample *sample,
- struct perf_evsel *evsel,
- struct machine *machine)
+static int report__add_branch_hist_entry(struct perf_tool *tool, struct addr_location *al,
+ struct perf_sample *sample, struct perf_evsel *evsel)
{
- struct perf_report *rep = container_of(tool, struct perf_report, tool);
+ struct report *rep = container_of(tool, struct report, tool);
struct symbol *parent = NULL;
- int err = 0;
unsigned i;
struct hist_entry *he;
struct branch_info *bi, *bx;
+ int err = sample__resolve_callchain(sample, &parent, evsel, al, rep->max_stack);
- if ((sort__has_parent || symbol_conf.use_callchain)
- && sample->callchain) {
- err = machine__resolve_callchain(machine, evsel, al->thread,
- sample, &parent, al,
- rep->max_stack);
- if (err)
- return err;
- }
+ if (err)
+ return err;
- bi = machine__resolve_bstack(machine, al->thread,
+ bi = machine__resolve_bstack(al->machine, al->thread,
sample->branch_stack);
if (!bi)
return -ENOMEM;
@@ -212,35 +164,15 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
he = __hists__add_entry(&evsel->hists, al, parent, &bi[i], NULL,
1, 1, 0);
if (he) {
- struct annotation *notes;
bx = he->branch_info;
- if (bx->from.sym && use_browser == 1 && sort__has_sym) {
- notes = symbol__annotation(bx->from.sym);
- if (!notes->src
- && symbol__alloc_hist(bx->from.sym) < 0)
- goto out;
-
- err = symbol__inc_addr_samples(bx->from.sym,
- bx->from.map,
- evsel->idx,
- bx->from.al_addr);
- if (err)
- goto out;
- }
+ err = addr_map_symbol__inc_samples(&bx->from, evsel->idx);
+ if (err)
+ goto out;
+
+ err = addr_map_symbol__inc_samples(&bx->to, evsel->idx);
+ if (err)
+ goto out;
- if (bx->to.sym && use_browser == 1 && sort__has_sym) {
- notes = symbol__annotation(bx->to.sym);
- if (!notes->src
- && symbol__alloc_hist(bx->to.sym) < 0)
- goto out;
-
- err = symbol__inc_addr_samples(bx->to.sym,
- bx->to.map,
- evsel->idx,
- bx->to.al_addr);
- if (err)
- goto out;
- }
evsel->hists.stats.total_period += 1;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
} else
@@ -252,24 +184,16 @@ out:
return err;
}
-static int perf_evsel__add_hist_entry(struct perf_tool *tool,
- struct perf_evsel *evsel,
- struct addr_location *al,
- struct perf_sample *sample,
- struct machine *machine)
+static int report__add_hist_entry(struct perf_tool *tool, struct perf_evsel *evsel,
+ struct addr_location *al, struct perf_sample *sample)
{
- struct perf_report *rep = container_of(tool, struct perf_report, tool);
+ struct report *rep = container_of(tool, struct report, tool);
struct symbol *parent = NULL;
- int err = 0;
struct hist_entry *he;
+ int err = sample__resolve_callchain(sample, &parent, evsel, al, rep->max_stack);
- if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
- err = machine__resolve_callchain(machine, evsel, al->thread,
- sample, &parent, al,
- rep->max_stack);
- if (err)
- return err;
- }
+ if (err)
+ return err;
he = __hists__add_entry(&evsel->hists, al, parent, NULL, NULL,
sample->period, sample->weight,
@@ -277,30 +201,11 @@ static int perf_evsel__add_hist_entry(struct perf_tool *tool,
if (he == NULL)
return -ENOMEM;
- if (symbol_conf.use_callchain) {
- err = callchain_append(he->callchain,
- &callchain_cursor,
- sample->period);
- if (err)
- return err;
- }
- /*
- * Only in the TUI browser we are doing integrated annotation,
- * so we don't allocated the extra space needed because the stdio
- * code will not use it.
- */
- if (he->ms.sym != NULL && use_browser == 1 && sort__has_sym) {
- struct annotation *notes = symbol__annotation(he->ms.sym);
-
- assert(evsel != NULL);
-
- err = -ENOMEM;
- if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0)
- goto out;
-
- err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
- }
+ err = hist_entry__append_callchain(he, sample);
+ if (err)
+ goto out;
+ err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
evsel->hists.stats.total_period += sample->period;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
out:
@@ -314,13 +219,13 @@ static int process_sample_event(struct perf_tool *tool,
struct perf_evsel *evsel,
struct machine *machine)
{
- struct perf_report *rep = container_of(tool, struct perf_report, tool);
+ struct report *rep = container_of(tool, struct report, tool);
struct addr_location al;
int ret;
if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
- fprintf(stderr, "problem processing %d event, skipping it.\n",
- event->header.type);
+ pr_debug("problem processing %d event, skipping it.\n",
+ event->header.type);
return -1;
}
@@ -331,21 +236,18 @@ static int process_sample_event(struct perf_tool *tool,
return 0;
if (sort__mode == SORT_MODE__BRANCH) {
- ret = perf_report__add_branch_hist_entry(tool, &al, sample,
- evsel, machine);
+ ret = report__add_branch_hist_entry(tool, &al, sample, evsel);
if (ret < 0)
pr_debug("problem adding lbr entry, skipping event\n");
} else if (rep->mem_mode == 1) {
- ret = perf_report__add_mem_hist_entry(tool, &al, sample,
- evsel, machine, event);
+ ret = report__add_mem_hist_entry(tool, &al, sample, evsel, event);
if (ret < 0)
pr_debug("problem adding mem entry, skipping event\n");
} else {
if (al.map != NULL)
al.map->dso->hit = 1;
- ret = perf_evsel__add_hist_entry(tool, evsel, &al, sample,
- machine);
+ ret = report__add_hist_entry(tool, evsel, &al, sample);
if (ret < 0)
pr_debug("problem incrementing symbol period, skipping event\n");
}
@@ -358,7 +260,7 @@ static int process_read_event(struct perf_tool *tool,
struct perf_evsel *evsel,
struct machine *machine __maybe_unused)
{
- struct perf_report *rep = container_of(tool, struct perf_report, tool);
+ struct report *rep = container_of(tool, struct report, tool);
if (rep->show_threads) {
const char *name = evsel ? perf_evsel__name(evsel) : "unknown";
@@ -377,7 +279,7 @@ static int process_read_event(struct perf_tool *tool,
}
/* For pipe mode, sample_type is not currently set */
-static int perf_report__setup_sample_type(struct perf_report *rep)
+static int report__setup_sample_type(struct report *rep)
{
struct perf_session *session = rep->session;
u64 sample_type = perf_evlist__combined_sample_type(session->evlist);
@@ -422,8 +324,7 @@ static void sig_handler(int sig __maybe_unused)
session_done = 1;
}
-static size_t hists__fprintf_nr_sample_events(struct perf_report *rep,
- struct hists *hists,
+static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report *rep,
const char *evname, FILE *fp)
{
size_t ret;
@@ -460,12 +361,12 @@ static size_t hists__fprintf_nr_sample_events(struct perf_report *rep,
}
static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
- struct perf_report *rep,
+ struct report *rep,
const char *help)
{
struct perf_evsel *pos;
- list_for_each_entry(pos, &evlist->entries, node) {
+ evlist__for_each(evlist, pos) {
struct hists *hists = &pos->hists;
const char *evname = perf_evsel__name(pos);
@@ -473,7 +374,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
!perf_evsel__is_group_leader(pos))
continue;
- hists__fprintf_nr_sample_events(rep, hists, evname, stdout);
+ hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
hists__fprintf(hists, true, 0, 0, rep->min_percent, stdout);
fprintf(stdout, "\n\n");
}
@@ -493,43 +394,11 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
return 0;
}
-static int __cmd_report(struct perf_report *rep)
+static void report__warn_kptr_restrict(const struct report *rep)
{
- int ret = -EINVAL;
- u64 nr_samples;
- struct perf_session *session = rep->session;
- struct perf_evsel *pos;
- struct map *kernel_map;
- struct kmap *kernel_kmap;
- const char *help = "For a higher level overview, try: perf report --sort comm,dso";
- struct ui_progress prog;
- struct perf_data_file *file = session->file;
-
- signal(SIGINT, sig_handler);
+ struct map *kernel_map = rep->session->machines.host.vmlinux_maps[MAP__FUNCTION];
+ struct kmap *kernel_kmap = map__kmap(kernel_map);
- if (rep->cpu_list) {
- ret = perf_session__cpu_bitmap(session, rep->cpu_list,
- rep->cpu_bitmap);
- if (ret)
- return ret;
- }
-
- if (use_browser <= 0)
- perf_session__fprintf_info(session, stdout, rep->show_full_info);
-
- if (rep->show_threads)
- perf_read_values_init(&rep->show_threads_values);
-
- ret = perf_report__setup_sample_type(rep);
- if (ret)
- return ret;
-
- ret = perf_session__process_events(session, &rep->tool);
- if (ret)
- return ret;
-
- kernel_map = session->machines.host.vmlinux_maps[MAP__FUNCTION];
- kernel_kmap = map__kmap(kernel_map);
if (kernel_map == NULL ||
(kernel_map->dso->hit &&
(kernel_kmap->ref_reloc_sym == NULL ||
@@ -552,26 +421,73 @@ static int __cmd_report(struct perf_report *rep)
"Samples in kernel modules can't be resolved as well.\n\n",
desc);
}
+}
- if (verbose > 3)
- perf_session__fprintf(session, stdout);
+static int report__gtk_browse_hists(struct report *rep, const char *help)
+{
+ int (*hist_browser)(struct perf_evlist *evlist, const char *help,
+ struct hist_browser_timer *timer, float min_pcnt);
- if (verbose > 2)
- perf_session__fprintf_dsos(session, stdout);
+ hist_browser = dlsym(perf_gtk_handle, "perf_evlist__gtk_browse_hists");
- if (dump_trace) {
- perf_session__fprintf_nr_events(session, stdout);
- return 0;
+ if (hist_browser == NULL) {
+ ui__error("GTK browser not found!\n");
+ return -1;
}
- nr_samples = 0;
- list_for_each_entry(pos, &session->evlist->entries, node)
+ return hist_browser(rep->session->evlist, help, NULL, rep->min_percent);
+}
+
+static int report__browse_hists(struct report *rep)
+{
+ int ret;
+ struct perf_session *session = rep->session;
+ struct perf_evlist *evlist = session->evlist;
+ const char *help = "For a higher level overview, try: perf report --sort comm,dso";
+
+ switch (use_browser) {
+ case 1:
+ ret = perf_evlist__tui_browse_hists(evlist, help, NULL,
+ rep->min_percent,
+ &session->header.env);
+ /*
+ * Usually "ret" is the last pressed key, and we only
+ * care if the key notifies us to switch data file.
+ */
+ if (ret != K_SWITCH_INPUT_DATA)
+ ret = 0;
+ break;
+ case 2:
+ ret = report__gtk_browse_hists(rep, help);
+ break;
+ default:
+ ret = perf_evlist__tty_browse_hists(evlist, rep, help);
+ break;
+ }
+
+ return ret;
+}
+
+static u64 report__collapse_hists(struct report *rep)
+{
+ struct ui_progress prog;
+ struct perf_evsel *pos;
+ u64 nr_samples = 0;
+ /*
+ * Count number of histogram entries to use when showing progress,
+ * reusing nr_samples variable.
+ */
+ evlist__for_each(rep->session->evlist, pos)
nr_samples += pos->hists.nr_entries;
ui_progress__init(&prog, nr_samples, "Merging related events...");
-
+ /*
+ * Count total number of samples, will be used to check if this
+ * session had any.
+ */
nr_samples = 0;
- list_for_each_entry(pos, &session->evlist->entries, node) {
+
+ evlist__for_each(rep->session->evlist, pos) {
struct hists *hists = &pos->hists;
if (pos->idx == 0)
@@ -589,8 +505,57 @@ static int __cmd_report(struct perf_report *rep)
hists__link(leader_hists, hists);
}
}
+
ui_progress__finish();
+ return nr_samples;
+}
+
+static int __cmd_report(struct report *rep)
+{
+ int ret;
+ u64 nr_samples;
+ struct perf_session *session = rep->session;
+ struct perf_evsel *pos;
+ struct perf_data_file *file = session->file;
+
+ signal(SIGINT, sig_handler);
+
+ if (rep->cpu_list) {
+ ret = perf_session__cpu_bitmap(session, rep->cpu_list,
+ rep->cpu_bitmap);
+ if (ret)
+ return ret;
+ }
+
+ if (rep->show_threads)
+ perf_read_values_init(&rep->show_threads_values);
+
+ ret = report__setup_sample_type(rep);
+ if (ret)
+ return ret;
+
+ ret = perf_session__process_events(session, &rep->tool);
+ if (ret)
+ return ret;
+
+ report__warn_kptr_restrict(rep);
+
+ if (use_browser == 0) {
+ if (verbose > 3)
+ perf_session__fprintf(session, stdout);
+
+ if (verbose > 2)
+ perf_session__fprintf_dsos(session, stdout);
+
+ if (dump_trace) {
+ perf_session__fprintf_nr_events(session, stdout);
+ return 0;
+ }
+ }
+
+ nr_samples = report__collapse_hists(rep);
+
if (session_done())
return 0;
@@ -599,47 +564,16 @@ static int __cmd_report(struct perf_report *rep)
return 0;
}
- list_for_each_entry(pos, &session->evlist->entries, node)
+ evlist__for_each(session->evlist, pos)
hists__output_resort(&pos->hists);
- if (use_browser > 0) {
- if (use_browser == 1) {
- ret = perf_evlist__tui_browse_hists(session->evlist,
- help, NULL,
- rep->min_percent,
- &session->header.env);
- /*
- * Usually "ret" is the last pressed key, and we only
- * care if the key notifies us to switch data file.
- */
- if (ret != K_SWITCH_INPUT_DATA)
- ret = 0;
-
- } else if (use_browser == 2) {
- int (*hist_browser)(struct perf_evlist *,
- const char *,
- struct hist_browser_timer *,
- float min_pcnt);
-
- hist_browser = dlsym(perf_gtk_handle,
- "perf_evlist__gtk_browse_hists");
- if (hist_browser == NULL) {
- ui__error("GTK browser not found!\n");
- return ret;
- }
- hist_browser(session->evlist, help, NULL,
- rep->min_percent);
- }
- } else
- perf_evlist__tty_browse_hists(session->evlist, rep, help);
-
- return ret;
+ return report__browse_hists(rep);
}
static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
- struct perf_report *rep = (struct perf_report *)opt->value;
+ struct report *rep = (struct report *)opt->value;
char *tok, *tok2;
char *endptr;
@@ -721,7 +655,7 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
return -1;
setup:
if (callchain_register_param(&callchain_param) < 0) {
- fprintf(stderr, "Can't register callchain params\n");
+ pr_err("Can't register callchain params\n");
return -1;
}
return 0;
@@ -759,7 +693,7 @@ static int
parse_percent_limit(const struct option *opt, const char *str,
int unset __maybe_unused)
{
- struct perf_report *rep = opt->value;
+ struct report *rep = opt->value;
rep->min_percent = strtof(str, NULL);
return 0;
@@ -777,7 +711,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
"perf report [<options>]",
NULL
};
- struct perf_report report = {
+ struct report report = {
.tool = {
.sample = process_sample_event,
.mmap = perf_event__process_mmap,
@@ -820,6 +754,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
OPT_BOOLEAN(0, "stdio", &report.use_stdio,
"Use the stdio interface"),
+ OPT_BOOLEAN(0, "header", &report.header, "Show data header."),
+ OPT_BOOLEAN(0, "header-only", &report.header_only,
+ "Show only data header."),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline,"
" dso_to, dso_from, symbol_to, symbol_from, mispredict,"
@@ -890,7 +827,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
.mode = PERF_DATA_MODE_READ,
};
- perf_config(perf_report_config, &report);
+ perf_config(report__config, &report);
argc = parse_options(argc, argv, options, report_usage, 0);
@@ -940,7 +877,7 @@ repeat:
}
if (report.mem_mode) {
if (sort__mode == SORT_MODE__BRANCH) {
- fprintf(stderr, "branch and mem mode incompatible\n");
+ pr_err("branch and mem mode incompatible\n");
goto error;
}
sort__mode = SORT_MODE__MEMORY;
@@ -963,6 +900,10 @@ repeat:
goto error;
}
+ /* Force tty output for header output. */
+ if (report.header || report.header_only)
+ use_browser = 0;
+
if (strcmp(input_name, "-") != 0)
setup_browser(true);
else {
@@ -970,6 +911,16 @@ repeat:
perf_hpp__init();
}
+ if (report.header || report.header_only) {
+ perf_session__fprintf_info(session, stdout,
+ report.show_full_info);
+ if (report.header_only)
+ return 0;
+ } else if (use_browser == 0) {
+ fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
+ stdout);
+ }
+
/*
* Only in the TUI browser we are doing integrated annotation,
* so don't allocate extra space that won't be used in the stdio
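
Every renamed callback in builtin-report.c above recovers its struct report from the embedded perf_tool with container_of(), which is what lets the per-command state move out of globals and into one object. A minimal illustration of that embedding pattern, with struct names invented for the example (not the real perf types):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct tool {                           /* generic callback holder */
            int (*sample)(struct tool *t);
    };

    struct report_like {                    /* command state embedding the tool */
            struct tool tool;
            int min_percent;
    };

    static int sample_cb(struct tool *t)
    {
            /* Walk back from the embedded member to the enclosing object. */
            struct report_like *rep = container_of(t, struct report_like, tool);

            printf("min_percent=%d\n", rep->min_percent);
            return 0;
    }

    int main(void)
    {
            struct report_like rep = { .tool = { .sample = sample_cb },
                                       .min_percent = 5 };

            return rep.tool.sample(&rep.tool);
    }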
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 0f3c6551..6a76a07 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -469,7 +469,7 @@ static void *thread_func(void *ctx)
char comm2[22];
int fd;
- free(parms);
+ zfree(&parms);
sprintf(comm2, ":%s", this_task->comm);
prctl(PR_SET_NAME, comm2);
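
The one-line change above, and the matching hunks in builtin-script.c and builtin-stat.c below, replace free()-then-NULL pairs with zfree(), which takes the address of the pointer so the caller is never left holding a dangling value. A tiny sketch of what such a helper boils down to; this is an illustration, not perf's actual definition:

    #include <stdlib.h>

    /* Free the pointee and clear the pointer, so a stray second free()
     * or use-after-free becomes a harmless no-op / NULL access. */
    #define ZFREE(pp) do { free(*(pp)); *(pp) = NULL; } while (0)

    int main(void)
    {
            char *buf = malloc(32);

            ZFREE(&buf);    /* buf is NULL from here on            */
            ZFREE(&buf);    /* free(NULL) is defined to do nothing */
            return 0;
    }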
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index baf1798..9e9c91f 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -43,6 +43,7 @@ enum perf_output_field {
PERF_OUTPUT_DSO = 1U << 9,
PERF_OUTPUT_ADDR = 1U << 10,
PERF_OUTPUT_SYMOFFSET = 1U << 11,
+ PERF_OUTPUT_SRCLINE = 1U << 12,
};
struct output_option {
@@ -61,6 +62,7 @@ struct output_option {
{.str = "dso", .field = PERF_OUTPUT_DSO},
{.str = "addr", .field = PERF_OUTPUT_ADDR},
{.str = "symoff", .field = PERF_OUTPUT_SYMOFFSET},
+ {.str = "srcline", .field = PERF_OUTPUT_SRCLINE},
};
/* default set to maintain compatibility with current format */
@@ -210,6 +212,11 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
"to DSO.\n");
return -EINVAL;
}
+ if (PRINT_FIELD(SRCLINE) && !PRINT_FIELD(IP)) {
+ pr_err("Display of source line number requested but sample IP is not\n"
+ "selected. Hence, no address to lookup the source line number.\n");
+ return -EINVAL;
+ }
if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) &&
perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID",
@@ -245,6 +252,9 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
if (PRINT_FIELD(SYMOFFSET))
output[type].print_ip_opts |= PRINT_IP_OPT_SYMOFFSET;
+
+ if (PRINT_FIELD(SRCLINE))
+ output[type].print_ip_opts |= PRINT_IP_OPT_SRCLINE;
}
/*
@@ -280,6 +290,30 @@ static int perf_session__check_output_opt(struct perf_session *session)
set_print_ip_opts(&evsel->attr);
}
+ /*
+ * set default for tracepoints to print symbols only
+ * if callchains are present
+ */
+ if (symbol_conf.use_callchain &&
+ !output[PERF_TYPE_TRACEPOINT].user_set) {
+ struct perf_event_attr *attr;
+
+ j = PERF_TYPE_TRACEPOINT;
+ evsel = perf_session__find_first_evtype(session, j);
+ if (evsel == NULL)
+ goto out;
+
+ attr = &evsel->attr;
+
+ if (attr->sample_type & PERF_SAMPLE_CALLCHAIN) {
+ output[j].fields |= PERF_OUTPUT_IP;
+ output[j].fields |= PERF_OUTPUT_SYM;
+ output[j].fields |= PERF_OUTPUT_DSO;
+ set_print_ip_opts(attr);
+ }
+ }
+
+out:
return 0;
}
@@ -288,7 +322,6 @@ static void print_sample_start(struct perf_sample *sample,
struct perf_evsel *evsel)
{
struct perf_event_attr *attr = &evsel->attr;
- const char *evname = NULL;
unsigned long secs;
unsigned long usecs;
unsigned long long nsecs;
@@ -323,11 +356,6 @@ static void print_sample_start(struct perf_sample *sample,
usecs = nsecs / NSECS_PER_USEC;
printf("%5lu.%06lu: ", secs, usecs);
}
-
- if (PRINT_FIELD(EVNAME)) {
- evname = perf_evsel__name(evsel);
- printf("%s: ", evname ? evname : "[unknown]");
- }
}
static bool is_bts_event(struct perf_event_attr *attr)
@@ -395,8 +423,8 @@ static void print_sample_addr(union perf_event *event,
static void print_sample_bts(union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct machine *machine,
- struct thread *thread)
+ struct thread *thread,
+ struct addr_location *al)
{
struct perf_event_attr *attr = &evsel->attr;
@@ -406,7 +434,7 @@ static void print_sample_bts(union perf_event *event,
printf(" ");
else
printf("\n");
- perf_evsel__print_ip(evsel, event, sample, machine,
+ perf_evsel__print_ip(evsel, sample, al,
output[attr->type].print_ip_opts,
PERF_MAX_STACK_DEPTH);
}
@@ -417,15 +445,14 @@ static void print_sample_bts(union perf_event *event,
if (PRINT_FIELD(ADDR) ||
((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
!output[attr->type].user_set))
- print_sample_addr(event, sample, machine, thread, attr);
+ print_sample_addr(event, sample, al->machine, thread, attr);
printf("\n");
}
static void process_event(union perf_event *event, struct perf_sample *sample,
- struct perf_evsel *evsel, struct machine *machine,
- struct thread *thread,
- struct addr_location *al __maybe_unused)
+ struct perf_evsel *evsel, struct thread *thread,
+ struct addr_location *al)
{
struct perf_event_attr *attr = &evsel->attr;
@@ -434,8 +461,13 @@ static void process_event(union perf_event *event, struct perf_sample *sample,
print_sample_start(sample, thread, evsel);
+ if (PRINT_FIELD(EVNAME)) {
+ const char *evname = perf_evsel__name(evsel);
+ printf("%s: ", evname ? evname : "[unknown]");
+ }
+
if (is_bts_event(attr)) {
- print_sample_bts(event, sample, evsel, machine, thread);
+ print_sample_bts(event, sample, evsel, thread, al);
return;
}
@@ -443,7 +475,7 @@ static void process_event(union perf_event *event, struct perf_sample *sample,
event_format__print(evsel->tp_format, sample->cpu,
sample->raw_data, sample->raw_size);
if (PRINT_FIELD(ADDR))
- print_sample_addr(event, sample, machine, thread, attr);
+ print_sample_addr(event, sample, al->machine, thread, attr);
if (PRINT_FIELD(IP)) {
if (!symbol_conf.use_callchain)
@@ -451,7 +483,7 @@ static void process_event(union perf_event *event, struct perf_sample *sample,
else
printf("\n");
- perf_evsel__print_ip(evsel, event, sample, machine,
+ perf_evsel__print_ip(evsel, sample, al,
output[attr->type].print_ip_opts,
PERF_MAX_STACK_DEPTH);
}
@@ -540,7 +572,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
return 0;
- scripting_ops->process_event(event, sample, evsel, machine, thread, &al);
+ scripting_ops->process_event(event, sample, evsel, thread, &al);
evsel->hists.stats.total_period += sample->period;
return 0;
@@ -549,6 +581,8 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
struct perf_script {
struct perf_tool tool;
struct perf_session *session;
+ bool show_task_events;
+ bool show_mmap_events;
};
static int process_attr(struct perf_tool *tool, union perf_event *event,
@@ -569,7 +603,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
if (evsel->attr.type >= PERF_TYPE_MAX)
return 0;
- list_for_each_entry(pos, &evlist->entries, node) {
+ evlist__for_each(evlist, pos) {
if (pos->attr.type == evsel->attr.type && pos != evsel)
return 0;
}
@@ -579,6 +613,163 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
return perf_evsel__check_attr(evsel, scr->session);
}
+static int process_comm_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct thread *thread;
+ struct perf_script *script = container_of(tool, struct perf_script, tool);
+ struct perf_session *session = script->session;
+ struct perf_evsel *evsel = perf_evlist__first(session->evlist);
+ int ret = -1;
+
+ thread = machine__findnew_thread(machine, event->comm.pid, event->comm.tid);
+ if (thread == NULL) {
+ pr_debug("problem processing COMM event, skipping it.\n");
+ return -1;
+ }
+
+ if (perf_event__process_comm(tool, event, sample, machine) < 0)
+ goto out;
+
+ if (!evsel->attr.sample_id_all) {
+ sample->cpu = 0;
+ sample->time = 0;
+ sample->tid = event->comm.tid;
+ sample->pid = event->comm.pid;
+ }
+ print_sample_start(sample, thread, evsel);
+ perf_event__fprintf(event, stdout);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int process_fork_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct thread *thread;
+ struct perf_script *script = container_of(tool, struct perf_script, tool);
+ struct perf_session *session = script->session;
+ struct perf_evsel *evsel = perf_evlist__first(session->evlist);
+
+ if (perf_event__process_fork(tool, event, sample, machine) < 0)
+ return -1;
+
+ thread = machine__findnew_thread(machine, event->fork.pid, event->fork.tid);
+ if (thread == NULL) {
+ pr_debug("problem processing FORK event, skipping it.\n");
+ return -1;
+ }
+
+ if (!evsel->attr.sample_id_all) {
+ sample->cpu = 0;
+ sample->time = event->fork.time;
+ sample->tid = event->fork.tid;
+ sample->pid = event->fork.pid;
+ }
+ print_sample_start(sample, thread, evsel);
+ perf_event__fprintf(event, stdout);
+
+ return 0;
+}
+static int process_exit_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct thread *thread;
+ struct perf_script *script = container_of(tool, struct perf_script, tool);
+ struct perf_session *session = script->session;
+ struct perf_evsel *evsel = perf_evlist__first(session->evlist);
+
+ thread = machine__findnew_thread(machine, event->fork.pid, event->fork.tid);
+ if (thread == NULL) {
+ pr_debug("problem processing EXIT event, skipping it.\n");
+ return -1;
+ }
+
+ if (!evsel->attr.sample_id_all) {
+ sample->cpu = 0;
+ sample->time = 0;
+ sample->tid = event->comm.tid;
+ sample->pid = event->comm.pid;
+ }
+ print_sample_start(sample, thread, evsel);
+ perf_event__fprintf(event, stdout);
+
+ if (perf_event__process_exit(tool, event, sample, machine) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int process_mmap_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct thread *thread;
+ struct perf_script *script = container_of(tool, struct perf_script, tool);
+ struct perf_session *session = script->session;
+ struct perf_evsel *evsel = perf_evlist__first(session->evlist);
+
+ if (perf_event__process_mmap(tool, event, sample, machine) < 0)
+ return -1;
+
+ thread = machine__findnew_thread(machine, event->mmap.pid, event->mmap.tid);
+ if (thread == NULL) {
+ pr_debug("problem processing MMAP event, skipping it.\n");
+ return -1;
+ }
+
+ if (!evsel->attr.sample_id_all) {
+ sample->cpu = 0;
+ sample->time = 0;
+ sample->tid = event->mmap.tid;
+ sample->pid = event->mmap.pid;
+ }
+ print_sample_start(sample, thread, evsel);
+ perf_event__fprintf(event, stdout);
+
+ return 0;
+}
+
+static int process_mmap2_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct thread *thread;
+ struct perf_script *script = container_of(tool, struct perf_script, tool);
+ struct perf_session *session = script->session;
+ struct perf_evsel *evsel = perf_evlist__first(session->evlist);
+
+ if (perf_event__process_mmap2(tool, event, sample, machine) < 0)
+ return -1;
+
+ thread = machine__findnew_thread(machine, event->mmap2.pid, event->mmap2.tid);
+ if (thread == NULL) {
+ pr_debug("problem processing MMAP2 event, skipping it.\n");
+ return -1;
+ }
+
+ if (!evsel->attr.sample_id_all) {
+ sample->cpu = 0;
+ sample->time = 0;
+ sample->tid = event->mmap2.tid;
+ sample->pid = event->mmap2.pid;
+ }
+ print_sample_start(sample, thread, evsel);
+ perf_event__fprintf(event, stdout);
+
+ return 0;
+}
+
static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
@@ -590,6 +781,17 @@ static int __cmd_script(struct perf_script *script)
signal(SIGINT, sig_handler);
+ /* override event processing functions */
+ if (script->show_task_events) {
+ script->tool.comm = process_comm_event;
+ script->tool.fork = process_fork_event;
+ script->tool.exit = process_exit_event;
+ }
+ if (script->show_mmap_events) {
+ script->tool.mmap = process_mmap_event;
+ script->tool.mmap2 = process_mmap2_event;
+ }
+
ret = perf_session__process_events(script->session, &script->tool);
if (debug_mode)
@@ -900,9 +1102,9 @@ static struct script_desc *script_desc__new(const char *name)
static void script_desc__delete(struct script_desc *s)
{
- free(s->name);
- free(s->half_liner);
- free(s->args);
+ zfree(&s->name);
+ zfree(&s->half_liner);
+ zfree(&s->args);
free(s);
}
@@ -1107,8 +1309,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
snprintf(evname, len + 1, "%s", p);
match = 0;
- list_for_each_entry(pos,
- &session->evlist->entries, node) {
+ evlist__for_each(session->evlist, pos) {
if (!strcmp(perf_evsel__name(pos), evname)) {
match = 1;
break;
@@ -1290,6 +1491,8 @@ static int have_cmd(int argc, const char **argv)
int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
{
bool show_full_info = false;
+ bool header = false;
+ bool header_only = false;
char *rec_script_path = NULL;
char *rep_script_path = NULL;
struct perf_session *session;
@@ -1328,6 +1531,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_BOOLEAN('d', "debug-mode", &debug_mode,
"do various checks like samples ordering and lost events"),
+ OPT_BOOLEAN(0, "header", &header, "Show data header."),
+ OPT_BOOLEAN(0, "header-only", &header_only, "Show only data header."),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
@@ -1352,6 +1557,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
"display extended information from perf.data file"),
OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
"Show the path of [kernel.kallsyms]"),
+ OPT_BOOLEAN('\0', "show-task-events", &script.show_task_events,
+ "Show the fork/comm/exit events"),
+ OPT_BOOLEAN('\0', "show-mmap-events", &script.show_mmap_events,
+ "Show the mmap events"),
OPT_END()
};
const char * const script_usage[] = {
@@ -1540,6 +1749,12 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
if (session == NULL)
return -ENOMEM;
+ if (header || header_only) {
+ perf_session__fprintf_info(session, stdout, show_full_info);
+ if (header_only)
+ return 0;
+ }
+
script.session = session;
if (cpu_list) {
@@ -1547,9 +1762,6 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
return -1;
}
- if (!script_name && !generate_script_lang)
- perf_session__fprintf_info(session, stdout, show_full_info);
-
if (!no_callchain)
symbol_conf.use_callchain = true;
else
@@ -1588,7 +1800,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
return -1;
}
- err = scripting_ops->generate_script(session->pevent,
+ err = scripting_ops->generate_script(session->tevent.pevent,
"perf-script");
goto out;
}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index ee0d565..8b0e1c9 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -138,6 +138,7 @@ static const char *post_cmd = NULL;
static bool sync_run = false;
static unsigned int interval = 0;
static unsigned int initial_delay = 0;
+static unsigned int unit_width = 4; /* strlen("unit") */
static bool forever = false;
static struct timespec ref_time;
static struct cpu_map *aggr_map;
@@ -184,8 +185,7 @@ static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
- free(evsel->priv);
- evsel->priv = NULL;
+ zfree(&evsel->priv);
}
static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel)
@@ -207,15 +207,14 @@ static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel)
static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
- free(evsel->prev_raw_counts);
- evsel->prev_raw_counts = NULL;
+ zfree(&evsel->prev_raw_counts);
}
static void perf_evlist__free_stats(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
perf_evsel__free_stat_priv(evsel);
perf_evsel__free_counts(evsel);
perf_evsel__free_prev_raw_counts(evsel);
@@ -226,7 +225,7 @@ static int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
struct perf_evsel *evsel;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
perf_evsel__alloc_counts(evsel, perf_evsel__nr_cpus(evsel)) < 0 ||
(alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel) < 0))
@@ -260,7 +259,7 @@ static void perf_stat__reset_stats(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
perf_evsel__reset_stat_priv(evsel);
perf_evsel__reset_counts(evsel, perf_evsel__nr_cpus(evsel));
}
@@ -327,13 +326,13 @@ static struct perf_evsel *nth_evsel(int n)
/* Assumes this only called when evsel_list does not change anymore. */
if (!array) {
- list_for_each_entry(ev, &evsel_list->entries, node)
+ evlist__for_each(evsel_list, ev)
array_len++;
array = malloc(array_len * sizeof(void *));
if (!array)
exit(ENOMEM);
j = 0;
- list_for_each_entry(ev, &evsel_list->entries, node)
+ evlist__for_each(evsel_list, ev)
array[j++] = ev;
}
if (n < array_len)
@@ -441,13 +440,13 @@ static void print_interval(void)
char prefix[64];
if (aggr_mode == AGGR_GLOBAL) {
- list_for_each_entry(counter, &evsel_list->entries, node) {
+ evlist__for_each(evsel_list, counter) {
ps = counter->priv;
memset(ps->res_stats, 0, sizeof(ps->res_stats));
read_counter_aggr(counter);
}
} else {
- list_for_each_entry(counter, &evsel_list->entries, node) {
+ evlist__for_each(evsel_list, counter) {
ps = counter->priv;
memset(ps->res_stats, 0, sizeof(ps->res_stats));
read_counter(counter);
@@ -461,17 +460,17 @@ static void print_interval(void)
if (num_print_interval == 0 && !csv_output) {
switch (aggr_mode) {
case AGGR_SOCKET:
- fprintf(output, "# time socket cpus counts events\n");
+ fprintf(output, "# time socket cpus counts %*s events\n", unit_width, "unit");
break;
case AGGR_CORE:
- fprintf(output, "# time core cpus counts events\n");
+ fprintf(output, "# time core cpus counts %*s events\n", unit_width, "unit");
break;
case AGGR_NONE:
- fprintf(output, "# time CPU counts events\n");
+ fprintf(output, "# time CPU counts %*s events\n", unit_width, "unit");
break;
case AGGR_GLOBAL:
default:
- fprintf(output, "# time counts events\n");
+ fprintf(output, "# time counts %*s events\n", unit_width, "unit");
}
}
@@ -484,12 +483,12 @@ static void print_interval(void)
print_aggr(prefix);
break;
case AGGR_NONE:
- list_for_each_entry(counter, &evsel_list->entries, node)
+ evlist__for_each(evsel_list, counter)
print_counter(counter, prefix);
break;
case AGGR_GLOBAL:
default:
- list_for_each_entry(counter, &evsel_list->entries, node)
+ evlist__for_each(evsel_list, counter)
print_counter_aggr(counter, prefix);
}
@@ -505,17 +504,31 @@ static void handle_initial_delay(void)
nthreads = thread_map__nr(evsel_list->threads);
usleep(initial_delay * 1000);
- list_for_each_entry(counter, &evsel_list->entries, node)
+ evlist__for_each(evsel_list, counter)
perf_evsel__enable(counter, ncpus, nthreads);
}
}
+static volatile int workload_exec_errno;
+
+/*
+ * perf_evlist__prepare_workload will send a SIGUSR1
+ * if the fork fails, since we asked by setting its
+ * want_signal to true.
+ */
+static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
+ void *ucontext __maybe_unused)
+{
+ workload_exec_errno = info->si_value.sival_int;
+}
+
static int __run_perf_stat(int argc, const char **argv)
{
char msg[512];
unsigned long long t0, t1;
struct perf_evsel *counter;
struct timespec ts;
+ size_t l;
int status = 0;
const bool forks = (argc > 0);
@@ -528,8 +541,8 @@ static int __run_perf_stat(int argc, const char **argv)
}
if (forks) {
- if (perf_evlist__prepare_workload(evsel_list, &target, argv,
- false, false) < 0) {
+ if (perf_evlist__prepare_workload(evsel_list, &target, argv, false,
+ workload_exec_failed_signal) < 0) {
perror("failed to prepare workload");
return -1;
}
@@ -539,7 +552,7 @@ static int __run_perf_stat(int argc, const char **argv)
if (group)
perf_evlist__set_leader(evsel_list);
- list_for_each_entry(counter, &evsel_list->entries, node) {
+ evlist__for_each(evsel_list, counter) {
if (create_perf_stat_counter(counter) < 0) {
/*
* PPC returns ENXIO for HW counters until 2.6.37
@@ -565,6 +578,10 @@ static int __run_perf_stat(int argc, const char **argv)
return -1;
}
counter->supported = true;
+
+ l = strlen(counter->unit);
+ if (l > unit_width)
+ unit_width = l;
}
if (perf_evlist__apply_filters(evsel_list)) {
@@ -590,6 +607,13 @@ static int __run_perf_stat(int argc, const char **argv)
}
}
wait(&status);
+
+ if (workload_exec_errno) {
+ const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
+ pr_err("Workload failed: %s\n", emsg);
+ return -1;
+ }
+
if (WIFSIGNALED(status))
psignal(WTERMSIG(status), argv[0]);
} else {
@@ -606,13 +630,13 @@ static int __run_perf_stat(int argc, const char **argv)
update_stats(&walltime_nsecs_stats, t1 - t0);
if (aggr_mode == AGGR_GLOBAL) {
- list_for_each_entry(counter, &evsel_list->entries, node) {
+ evlist__for_each(evsel_list, counter) {
read_counter_aggr(counter);
perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
thread_map__nr(evsel_list->threads));
}
} else {
- list_for_each_entry(counter, &evsel_list->entries, node) {
+ evlist__for_each(evsel_list, counter) {
read_counter(counter);
perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1);
}
@@ -621,7 +645,7 @@ static int __run_perf_stat(int argc, const char **argv)
return WEXITSTATUS(status);
}
-static int run_perf_stat(int argc __maybe_unused, const char **argv)
+static int run_perf_stat(int argc, const char **argv)
{
int ret;
@@ -704,14 +728,25 @@ static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
static void nsec_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
{
double msecs = avg / 1e6;
- const char *fmt = csv_output ? "%.6f%s%s" : "%18.6f%s%-25s";
+ const char *fmt_v, *fmt_n;
char name[25];
+ fmt_v = csv_output ? "%.6f%s" : "%18.6f%s";
+ fmt_n = csv_output ? "%s" : "%-25s";
+
aggr_printout(evsel, cpu, nr);
scnprintf(name, sizeof(name), "%s%s",
perf_evsel__name(evsel), csv_output ? "" : " (msec)");
- fprintf(output, fmt, msecs, csv_sep, name);
+
+ fprintf(output, fmt_v, msecs, csv_sep);
+
+ if (csv_output)
+ fprintf(output, "%s%s", evsel->unit, csv_sep);
+ else
+ fprintf(output, "%-*s%s", unit_width, evsel->unit, csv_sep);
+
+ fprintf(output, fmt_n, name);
if (evsel->cgrp)
fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
@@ -908,21 +943,31 @@ static void print_ll_cache_misses(int cpu,
static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
{
double total, ratio = 0.0, total2;
+ double sc = evsel->scale;
const char *fmt;
- if (csv_output)
- fmt = "%.0f%s%s";
- else if (big_num)
- fmt = "%'18.0f%s%-25s";
- else
- fmt = "%18.0f%s%-25s";
+ if (csv_output) {
+ fmt = sc != 1.0 ? "%.2f%s" : "%.0f%s";
+ } else {
+ if (big_num)
+ fmt = sc != 1.0 ? "%'18.2f%s" : "%'18.0f%s";
+ else
+ fmt = sc != 1.0 ? "%18.2f%s" : "%18.0f%s";
+ }
aggr_printout(evsel, cpu, nr);
if (aggr_mode == AGGR_GLOBAL)
cpu = 0;
- fprintf(output, fmt, avg, csv_sep, perf_evsel__name(evsel));
+ fprintf(output, fmt, avg, csv_sep);
+
+ if (evsel->unit)
+ fprintf(output, "%-*s%s",
+ csv_output ? 0 : unit_width,
+ evsel->unit, csv_sep);
+
+ fprintf(output, "%-*s", csv_output ? 0 : 25, perf_evsel__name(evsel));
if (evsel->cgrp)
fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
@@ -941,7 +986,10 @@ static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
if (total && avg) {
ratio = total / avg;
- fprintf(output, "\n # %5.2f stalled cycles per insn", ratio);
+ fprintf(output, "\n");
+ if (aggr_mode == AGGR_NONE)
+ fprintf(output, " ");
+ fprintf(output, " # %5.2f stalled cycles per insn", ratio);
}
} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
@@ -1061,6 +1109,7 @@ static void print_aggr(char *prefix)
{
struct perf_evsel *counter;
int cpu, cpu2, s, s2, id, nr;
+ double uval;
u64 ena, run, val;
if (!(aggr_map || aggr_get_id))
@@ -1068,7 +1117,7 @@ static void print_aggr(char *prefix)
for (s = 0; s < aggr_map->nr; s++) {
id = aggr_map->map[s];
- list_for_each_entry(counter, &evsel_list->entries, node) {
+ evlist__for_each(evsel_list, counter) {
val = ena = run = 0;
nr = 0;
for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
@@ -1087,11 +1136,17 @@ static void print_aggr(char *prefix)
if (run == 0 || ena == 0) {
aggr_printout(counter, id, nr);
- fprintf(output, "%*s%s%*s",
+ fprintf(output, "%*s%s",
csv_output ? 0 : 18,
counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
- csv_sep,
- csv_output ? 0 : -24,
+ csv_sep);
+
+ fprintf(output, "%-*s%s",
+ csv_output ? 0 : unit_width,
+ counter->unit, csv_sep);
+
+ fprintf(output, "%*s",
+ csv_output ? 0 : -25,
perf_evsel__name(counter));
if (counter->cgrp)
@@ -1101,11 +1156,12 @@ static void print_aggr(char *prefix)
fputc('\n', output);
continue;
}
+ uval = val * counter->scale;
if (nsec_counter(counter))
- nsec_printout(id, nr, counter, val);
+ nsec_printout(id, nr, counter, uval);
else
- abs_printout(id, nr, counter, val);
+ abs_printout(id, nr, counter, uval);
if (!csv_output) {
print_noise(counter, 1.0);
@@ -1128,16 +1184,21 @@ static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
struct perf_stat *ps = counter->priv;
double avg = avg_stats(&ps->res_stats[0]);
int scaled = counter->counts->scaled;
+ double uval;
if (prefix)
fprintf(output, "%s", prefix);
if (scaled == -1) {
- fprintf(output, "%*s%s%*s",
+ fprintf(output, "%*s%s",
csv_output ? 0 : 18,
counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
- csv_sep,
- csv_output ? 0 : -24,
+ csv_sep);
+ fprintf(output, "%-*s%s",
+ csv_output ? 0 : unit_width,
+ counter->unit, csv_sep);
+ fprintf(output, "%*s",
+ csv_output ? 0 : -25,
perf_evsel__name(counter));
if (counter->cgrp)
@@ -1147,10 +1208,12 @@ static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
return;
}
+ uval = avg * counter->scale;
+
if (nsec_counter(counter))
- nsec_printout(-1, 0, counter, avg);
+ nsec_printout(-1, 0, counter, uval);
else
- abs_printout(-1, 0, counter, avg);
+ abs_printout(-1, 0, counter, uval);
print_noise(counter, avg);
@@ -1177,6 +1240,7 @@ static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
static void print_counter(struct perf_evsel *counter, char *prefix)
{
u64 ena, run, val;
+ double uval;
int cpu;
for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
@@ -1188,14 +1252,20 @@ static void print_counter(struct perf_evsel *counter, char *prefix)
fprintf(output, "%s", prefix);
if (run == 0 || ena == 0) {
- fprintf(output, "CPU%*d%s%*s%s%*s",
+ fprintf(output, "CPU%*d%s%*s%s",
csv_output ? 0 : -4,
perf_evsel__cpus(counter)->map[cpu], csv_sep,
csv_output ? 0 : 18,
counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
- csv_sep,
- csv_output ? 0 : -24,
- perf_evsel__name(counter));
+ csv_sep);
+
+ fprintf(output, "%-*s%s",
+ csv_output ? 0 : unit_width,
+ counter->unit, csv_sep);
+
+ fprintf(output, "%*s",
+ csv_output ? 0 : -25,
+ perf_evsel__name(counter));
if (counter->cgrp)
fprintf(output, "%s%s",
@@ -1205,10 +1275,12 @@ static void print_counter(struct perf_evsel *counter, char *prefix)
continue;
}
+ uval = val * counter->scale;
+
if (nsec_counter(counter))
- nsec_printout(cpu, 0, counter, val);
+ nsec_printout(cpu, 0, counter, uval);
else
- abs_printout(cpu, 0, counter, val);
+ abs_printout(cpu, 0, counter, uval);
if (!csv_output) {
print_noise(counter, 1.0);
@@ -1256,11 +1328,11 @@ static void print_stat(int argc, const char **argv)
print_aggr(NULL);
break;
case AGGR_GLOBAL:
- list_for_each_entry(counter, &evsel_list->entries, node)
+ evlist__for_each(evsel_list, counter)
print_counter_aggr(counter, NULL);
break;
case AGGR_NONE:
- list_for_each_entry(counter, &evsel_list->entries, node)
+ evlist__for_each(evsel_list, counter)
print_counter(counter, NULL);
break;
default:
@@ -1710,14 +1782,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
if (interval && interval < 100) {
pr_err("print interval must be >= 100ms\n");
parse_options_usage(stat_usage, options, "I", 1);
- goto out_free_maps;
+ goto out;
}
if (perf_evlist__alloc_stats(evsel_list, interval))
- goto out_free_maps;
+ goto out;
if (perf_stat_init_aggr_mode())
- goto out_free_maps;
+ goto out;
/*
* We dont want to block the signals - that would cause
@@ -1749,8 +1821,6 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
print_stat(argc, argv);
perf_evlist__free_stats(evsel_list);
-out_free_maps:
- perf_evlist__delete_maps(evsel_list);
out:
perf_evlist__delete(evsel_list);
return status;
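
The unit_width changes in builtin-stat.c above make a first pass over the opened counters to find the widest unit string, then print every unit with "%-*s" so the new column stays aligned in non-CSV output. A small sketch of that two-pass width computation, with sample units and values invented for the example:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *units[]  = { "", "Joules", "ns", "GHz" };
            double      values[] = { 12345.0, 1.75, 980000.0, 2.6 };
            size_t      n = sizeof(units) / sizeof(units[0]);
            int         unit_width = 4;     /* strlen("unit"), as in the patch */

            /* Pass 1: widen the column to the longest unit seen. */
            for (size_t i = 0; i < n; i++) {
                    int l = (int)strlen(units[i]);
                    if (l > unit_width)
                            unit_width = l;
            }

            /* Pass 2: value, then the unit left-justified in a fixed-width column. */
            for (size_t i = 0; i < n; i++)
                    printf("%18.2f %-*s counter%zu\n",
                           values[i], unit_width, units[i], i);

            return 0;
    }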
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 41c9bde2..652af0b 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -41,25 +41,29 @@
#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1
-
-static unsigned int numcpus;
-static u64 min_freq; /* Lowest CPU frequency seen */
-static u64 max_freq; /* Highest CPU frequency seen */
-static u64 turbo_frequency;
-
-static u64 first_time, last_time;
-
-static bool power_only;
-
-
struct per_pid;
-struct per_pidcomm;
-
-struct cpu_sample;
struct power_event;
struct wake_event;
-struct sample_wrapper;
+struct timechart {
+ struct perf_tool tool;
+ struct per_pid *all_data;
+ struct power_event *power_events;
+ struct wake_event *wake_events;
+ int proc_num;
+ unsigned int numcpus;
+ u64 min_freq, /* Lowest CPU frequency seen */
+ max_freq, /* Highest CPU frequency seen */
+ turbo_frequency,
+ first_time, last_time;
+ bool power_only,
+ tasks_only,
+ with_backtrace,
+ topology;
+};
+
+struct per_pidcomm;
+struct cpu_sample;
/*
* Datastructure layout:
@@ -124,10 +128,9 @@ struct cpu_sample {
u64 end_time;
int type;
int cpu;
+ const char *backtrace;
};
-static struct per_pid *all_data;
-
#define CSTATE 1
#define PSTATE 2
@@ -145,12 +148,9 @@ struct wake_event {
int waker;
int wakee;
u64 time;
+ const char *backtrace;
};
-static struct power_event *power_events;
-static struct wake_event *wake_events;
-
-struct process_filter;
struct process_filter {
char *name;
int pid;
@@ -160,9 +160,9 @@ struct process_filter {
static struct process_filter *process_filter;
-static struct per_pid *find_create_pid(int pid)
+static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
{
- struct per_pid *cursor = all_data;
+ struct per_pid *cursor = tchart->all_data;
while (cursor) {
if (cursor->pid == pid)
@@ -172,16 +172,16 @@ static struct per_pid *find_create_pid(int pid)
cursor = zalloc(sizeof(*cursor));
assert(cursor != NULL);
cursor->pid = pid;
- cursor->next = all_data;
- all_data = cursor;
+ cursor->next = tchart->all_data;
+ tchart->all_data = cursor;
return cursor;
}
-static void pid_set_comm(int pid, char *comm)
+static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
{
struct per_pid *p;
struct per_pidcomm *c;
- p = find_create_pid(pid);
+ p = find_create_pid(tchart, pid);
c = p->all;
while (c) {
if (c->comm && strcmp(c->comm, comm) == 0) {
@@ -203,14 +203,14 @@ static void pid_set_comm(int pid, char *comm)
p->all = c;
}
-static void pid_fork(int pid, int ppid, u64 timestamp)
+static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
{
struct per_pid *p, *pp;
- p = find_create_pid(pid);
- pp = find_create_pid(ppid);
+ p = find_create_pid(tchart, pid);
+ pp = find_create_pid(tchart, ppid);
p->ppid = ppid;
if (pp->current && pp->current->comm && !p->current)
- pid_set_comm(pid, pp->current->comm);
+ pid_set_comm(tchart, pid, pp->current->comm);
p->start_time = timestamp;
if (p->current) {
@@ -219,23 +219,24 @@ static void pid_fork(int pid, int ppid, u64 timestamp)
}
}
-static void pid_exit(int pid, u64 timestamp)
+static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
{
struct per_pid *p;
- p = find_create_pid(pid);
+ p = find_create_pid(tchart, pid);
p->end_time = timestamp;
if (p->current)
p->current->end_time = timestamp;
}
-static void
-pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
+static void pid_put_sample(struct timechart *tchart, int pid, int type,
+ unsigned int cpu, u64 start, u64 end,
+ const char *backtrace)
{
struct per_pid *p;
struct per_pidcomm *c;
struct cpu_sample *sample;
- p = find_create_pid(pid);
+ p = find_create_pid(tchart, pid);
c = p->current;
if (!c) {
c = zalloc(sizeof(*c));
@@ -252,6 +253,7 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
sample->type = type;
sample->next = c->samples;
sample->cpu = cpu;
+ sample->backtrace = backtrace;
c->samples = sample;
if (sample->type == TYPE_RUNNING && end > start && start > 0) {
@@ -272,84 +274,47 @@ static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];
-static int process_comm_event(struct perf_tool *tool __maybe_unused,
+static int process_comm_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
- pid_set_comm(event->comm.tid, event->comm.comm);
+ struct timechart *tchart = container_of(tool, struct timechart, tool);
+ pid_set_comm(tchart, event->comm.tid, event->comm.comm);
return 0;
}
-static int process_fork_event(struct perf_tool *tool __maybe_unused,
+static int process_fork_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
- pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
+ struct timechart *tchart = container_of(tool, struct timechart, tool);
+ pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
return 0;
}
-static int process_exit_event(struct perf_tool *tool __maybe_unused,
+static int process_exit_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
- pid_exit(event->fork.pid, event->fork.time);
+ struct timechart *tchart = container_of(tool, struct timechart, tool);
+ pid_exit(tchart, event->fork.pid, event->fork.time);
return 0;
}
-struct trace_entry {
- unsigned short type;
- unsigned char flags;
- unsigned char preempt_count;
- int pid;
- int lock_depth;
-};
-
#ifdef SUPPORT_OLD_POWER_EVENTS
static int use_old_power_events;
-struct power_entry_old {
- struct trace_entry te;
- u64 type;
- u64 value;
- u64 cpu_id;
-};
#endif
-struct power_processor_entry {
- struct trace_entry te;
- u32 state;
- u32 cpu_id;
-};
-
-#define TASK_COMM_LEN 16
-struct wakeup_entry {
- struct trace_entry te;
- char comm[TASK_COMM_LEN];
- int pid;
- int prio;
- int success;
-};
-
-struct sched_switch {
- struct trace_entry te;
- char prev_comm[TASK_COMM_LEN];
- int prev_pid;
- int prev_prio;
- long prev_state; /* Arjan weeps. */
- char next_comm[TASK_COMM_LEN];
- int next_pid;
- int next_prio;
-};
-
static void c_state_start(int cpu, u64 timestamp, int state)
{
cpus_cstate_start_times[cpu] = timestamp;
cpus_cstate_state[cpu] = state;
}
-static void c_state_end(int cpu, u64 timestamp)
+static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
{
struct power_event *pwr = zalloc(sizeof(*pwr));
@@ -361,12 +326,12 @@ static void c_state_end(int cpu, u64 timestamp)
pwr->end_time = timestamp;
pwr->cpu = cpu;
pwr->type = CSTATE;
- pwr->next = power_events;
+ pwr->next = tchart->power_events;
- power_events = pwr;
+ tchart->power_events = pwr;
}
-static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
+static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
{
struct power_event *pwr;
@@ -382,73 +347,78 @@ static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
pwr->end_time = timestamp;
pwr->cpu = cpu;
pwr->type = PSTATE;
- pwr->next = power_events;
+ pwr->next = tchart->power_events;
if (!pwr->start_time)
- pwr->start_time = first_time;
+ pwr->start_time = tchart->first_time;
- power_events = pwr;
+ tchart->power_events = pwr;
cpus_pstate_state[cpu] = new_freq;
cpus_pstate_start_times[cpu] = timestamp;
- if ((u64)new_freq > max_freq)
- max_freq = new_freq;
+ if ((u64)new_freq > tchart->max_freq)
+ tchart->max_freq = new_freq;
- if (new_freq < min_freq || min_freq == 0)
- min_freq = new_freq;
+ if (new_freq < tchart->min_freq || tchart->min_freq == 0)
+ tchart->min_freq = new_freq;
- if (new_freq == max_freq - 1000)
- turbo_frequency = max_freq;
+ if (new_freq == tchart->max_freq - 1000)
+ tchart->turbo_frequency = tchart->max_freq;
}
-static void
-sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
+static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
+ int waker, int wakee, u8 flags, const char *backtrace)
{
struct per_pid *p;
- struct wakeup_entry *wake = (void *)te;
struct wake_event *we = zalloc(sizeof(*we));
if (!we)
return;
we->time = timestamp;
- we->waker = pid;
+ we->waker = waker;
+ we->backtrace = backtrace;
- if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
+ if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
we->waker = -1;
- we->wakee = wake->pid;
- we->next = wake_events;
- wake_events = we;
- p = find_create_pid(we->wakee);
+ we->wakee = wakee;
+ we->next = tchart->wake_events;
+ tchart->wake_events = we;
+ p = find_create_pid(tchart, we->wakee);
if (p && p->current && p->current->state == TYPE_NONE) {
p->current->state_since = timestamp;
p->current->state = TYPE_WAITING;
}
if (p && p->current && p->current->state == TYPE_BLOCKED) {
- pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
+ pid_put_sample(tchart, p->pid, p->current->state, cpu,
+ p->current->state_since, timestamp, NULL);
p->current->state_since = timestamp;
p->current->state = TYPE_WAITING;
}
}
-static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
+static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
+ int prev_pid, int next_pid, u64 prev_state,
+ const char *backtrace)
{
struct per_pid *p = NULL, *prev_p;
- struct sched_switch *sw = (void *)te;
-
- prev_p = find_create_pid(sw->prev_pid);
+ prev_p = find_create_pid(tchart, prev_pid);
- p = find_create_pid(sw->next_pid);
+ p = find_create_pid(tchart, next_pid);
if (prev_p->current && prev_p->current->state != TYPE_NONE)
- pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
+ pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
+ prev_p->current->state_since, timestamp,
+ backtrace);
if (p && p->current) {
if (p->current->state != TYPE_NONE)
- pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);
+ pid_put_sample(tchart, next_pid, p->current->state, cpu,
+ p->current->state_since, timestamp,
+ backtrace);
p->current->state_since = timestamp;
p->current->state = TYPE_RUNNING;
@@ -457,109 +427,211 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
if (prev_p->current) {
prev_p->current->state = TYPE_NONE;
prev_p->current->state_since = timestamp;
- if (sw->prev_state & 2)
+ if (prev_state & 2)
prev_p->current->state = TYPE_BLOCKED;
- if (sw->prev_state == 0)
+ if (prev_state == 0)
prev_p->current->state = TYPE_WAITING;
}
}
-typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
- struct perf_sample *sample);
+static const char *cat_backtrace(union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct addr_location al;
+ unsigned int i;
+ char *p = NULL;
+ size_t p_len;
+ u8 cpumode = PERF_RECORD_MISC_USER;
+ struct addr_location tal;
+ struct ip_callchain *chain = sample->callchain;
+ FILE *f = open_memstream(&p, &p_len);
+
+ if (!f) {
+ perror("open_memstream error");
+ return NULL;
+ }
+
+ if (!chain)
+ goto exit;
-static int process_sample_event(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
+ fprintf(stderr, "problem processing %d event, skipping it.\n",
+ event->header.type);
+ goto exit;
+ }
+
+ for (i = 0; i < chain->nr; i++) {
+ u64 ip;
+
+ if (callchain_param.order == ORDER_CALLEE)
+ ip = chain->ips[i];
+ else
+ ip = chain->ips[chain->nr - i - 1];
+
+ if (ip >= PERF_CONTEXT_MAX) {
+ switch (ip) {
+ case PERF_CONTEXT_HV:
+ cpumode = PERF_RECORD_MISC_HYPERVISOR;
+ break;
+ case PERF_CONTEXT_KERNEL:
+ cpumode = PERF_RECORD_MISC_KERNEL;
+ break;
+ case PERF_CONTEXT_USER:
+ cpumode = PERF_RECORD_MISC_USER;
+ break;
+ default:
+ pr_debug("invalid callchain context: "
+ "%"PRId64"\n", (s64) ip);
+
+ /*
+ * It seems the callchain is corrupted.
+ * Discard all.
+ */
+ zfree(&p);
+ goto exit;
+ }
+ continue;
+ }
+
+ tal.filtered = false;
+ thread__find_addr_location(al.thread, machine, cpumode,
+ MAP__FUNCTION, ip, &tal);
+
+ if (tal.sym)
+ fprintf(f, "..... %016" PRIx64 " %s\n", ip,
+ tal.sym->name);
+ else
+ fprintf(f, "..... %016" PRIx64 "\n", ip);
+ }
+
+exit:
+ fclose(f);
+
+ return p;
+}
+
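cat_backtrace() above formats the callchain into a heap buffer through the POSIX open_memstream() stream; fclose() finalizes the buffer and the caller eventually frees it. A small sketch of that idiom in isolation (the address and symbol name are made up):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

/* Everything fprintf'd to f accumulates in the malloc'd buffer buf;
 * fclose() flushes the stream and makes buf/len valid. */
static char *format_backtrace_line(void)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *f = open_memstream(&buf, &len);

	if (!f)
		return NULL;

	fprintf(f, "..... %016llx %s\n", 0xffffffff81000000ULL, "some_symbol");
	fclose(f);
	return buf;          /* caller frees, just like the backtrace string */
}

int main(void)
{
	char *s = format_backtrace_line();

	if (s)
		fputs(s, stdout);
	free(s);
	return 0;
}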
+typedef int (*tracepoint_handler)(struct timechart *tchart,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ const char *backtrace);
+
+static int process_sample_event(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct machine *machine __maybe_unused)
+ struct machine *machine)
{
+ struct timechart *tchart = container_of(tool, struct timechart, tool);
+
if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
- if (!first_time || first_time > sample->time)
- first_time = sample->time;
- if (last_time < sample->time)
- last_time = sample->time;
+ if (!tchart->first_time || tchart->first_time > sample->time)
+ tchart->first_time = sample->time;
+ if (tchart->last_time < sample->time)
+ tchart->last_time = sample->time;
}
- if (sample->cpu > numcpus)
- numcpus = sample->cpu;
-
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
- return f(evsel, sample);
+ return f(tchart, evsel, sample,
+ cat_backtrace(event, sample, machine));
}
return 0;
}
static int
-process_sample_cpu_idle(struct perf_evsel *evsel __maybe_unused,
- struct perf_sample *sample)
+process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ const char *backtrace __maybe_unused)
{
- struct power_processor_entry *ppe = sample->raw_data;
+ u32 state = perf_evsel__intval(evsel, sample, "state");
+ u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
- if (ppe->state == (u32) PWR_EVENT_EXIT)
- c_state_end(ppe->cpu_id, sample->time);
+ if (state == (u32)PWR_EVENT_EXIT)
+ c_state_end(tchart, cpu_id, sample->time);
else
- c_state_start(ppe->cpu_id, sample->time, ppe->state);
+ c_state_start(cpu_id, sample->time, state);
return 0;
}
static int
-process_sample_cpu_frequency(struct perf_evsel *evsel __maybe_unused,
- struct perf_sample *sample)
+process_sample_cpu_frequency(struct timechart *tchart,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ const char *backtrace __maybe_unused)
{
- struct power_processor_entry *ppe = sample->raw_data;
+ u32 state = perf_evsel__intval(evsel, sample, "state");
+ u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
- p_state_change(ppe->cpu_id, sample->time, ppe->state);
+ p_state_change(tchart, cpu_id, sample->time, state);
return 0;
}
static int
-process_sample_sched_wakeup(struct perf_evsel *evsel __maybe_unused,
- struct perf_sample *sample)
+process_sample_sched_wakeup(struct timechart *tchart,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ const char *backtrace)
{
- struct trace_entry *te = sample->raw_data;
+ u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
+ int waker = perf_evsel__intval(evsel, sample, "common_pid");
+ int wakee = perf_evsel__intval(evsel, sample, "pid");
- sched_wakeup(sample->cpu, sample->time, sample->pid, te);
+ sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
return 0;
}
static int
-process_sample_sched_switch(struct perf_evsel *evsel __maybe_unused,
- struct perf_sample *sample)
+process_sample_sched_switch(struct timechart *tchart,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ const char *backtrace)
{
- struct trace_entry *te = sample->raw_data;
+ int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
+ int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
+ u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
- sched_switch(sample->cpu, sample->time, te);
+ sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
+ prev_state, backtrace);
return 0;
}
#ifdef SUPPORT_OLD_POWER_EVENTS
static int
-process_sample_power_start(struct perf_evsel *evsel __maybe_unused,
- struct perf_sample *sample)
+process_sample_power_start(struct timechart *tchart __maybe_unused,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ const char *backtrace __maybe_unused)
{
- struct power_entry_old *peo = sample->raw_data;
+ u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
+ u64 value = perf_evsel__intval(evsel, sample, "value");
- c_state_start(peo->cpu_id, sample->time, peo->value);
+ c_state_start(cpu_id, sample->time, value);
return 0;
}
static int
-process_sample_power_end(struct perf_evsel *evsel __maybe_unused,
- struct perf_sample *sample)
+process_sample_power_end(struct timechart *tchart,
+ struct perf_evsel *evsel __maybe_unused,
+ struct perf_sample *sample,
+ const char *backtrace __maybe_unused)
{
- c_state_end(sample->cpu, sample->time);
+ c_state_end(tchart, sample->cpu, sample->time);
return 0;
}
static int
-process_sample_power_frequency(struct perf_evsel *evsel __maybe_unused,
- struct perf_sample *sample)
+process_sample_power_frequency(struct timechart *tchart,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ const char *backtrace __maybe_unused)
{
- struct power_entry_old *peo = sample->raw_data;
+ u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
+ u64 value = perf_evsel__intval(evsel, sample, "value");
- p_state_change(peo->cpu_id, sample->time, peo->value);
+ p_state_change(tchart, cpu_id, sample->time, value);
return 0;
}
#endif /* SUPPORT_OLD_POWER_EVENTS */
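The handlers above stop casting sample->raw_data onto hand-rolled structs and instead read tracepoint fields by name via perf_evsel__intval(), so kernel-side layout changes no longer break the tool. The sketch below only illustrates the idea of name-based field lookup; the struct and helper are hypothetical, not perf's actual implementation.

#include <stdint.h>
#include <string.h>

/* Hypothetical field table, roughly what a parsed tracepoint format provides:
 * each field has a name, an offset and a size within the raw record. */
struct tp_field {
	const char *name;
	unsigned int offset;
	unsigned int size;
};

static uint64_t field_intval(const void *raw, const struct tp_field *fields,
			     int nr_fields, const char *name)
{
	for (int i = 0; i < nr_fields; i++) {
		uint64_t val = 0;

		if (strcmp(fields[i].name, name))
			continue;
		/* assumes a little-endian host and size <= 8 */
		memcpy(&val, (const char *)raw + fields[i].offset,
		       fields[i].size);
		return val;
	}
	return 0;       /* unknown field name */
}

int main(void)
{
	struct { uint32_t state; uint32_t cpu_id; } rec = { 3, 1 };
	const struct tp_field fields[] = {
		{ "state",  0, 4 },
		{ "cpu_id", 4, 4 },
	};

	return field_intval(&rec, fields, 2, "cpu_id") == 1 ? 0 : 1;
}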
@@ -568,12 +640,12 @@ process_sample_power_frequency(struct perf_evsel *evsel __maybe_unused,
* After the last sample we need to wrap up the current C/P state
* and close out each CPU for these.
*/
-static void end_sample_processing(void)
+static void end_sample_processing(struct timechart *tchart)
{
u64 cpu;
struct power_event *pwr;
- for (cpu = 0; cpu <= numcpus; cpu++) {
+ for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
/* C state */
#if 0
pwr = zalloc(sizeof(*pwr));
@@ -582,12 +654,12 @@ static void end_sample_processing(void)
pwr->state = cpus_cstate_state[cpu];
pwr->start_time = cpus_cstate_start_times[cpu];
- pwr->end_time = last_time;
+ pwr->end_time = tchart->last_time;
pwr->cpu = cpu;
pwr->type = CSTATE;
- pwr->next = power_events;
+ pwr->next = tchart->power_events;
- power_events = pwr;
+ tchart->power_events = pwr;
#endif
/* P state */
@@ -597,32 +669,32 @@ static void end_sample_processing(void)
pwr->state = cpus_pstate_state[cpu];
pwr->start_time = cpus_pstate_start_times[cpu];
- pwr->end_time = last_time;
+ pwr->end_time = tchart->last_time;
pwr->cpu = cpu;
pwr->type = PSTATE;
- pwr->next = power_events;
+ pwr->next = tchart->power_events;
if (!pwr->start_time)
- pwr->start_time = first_time;
+ pwr->start_time = tchart->first_time;
if (!pwr->state)
- pwr->state = min_freq;
- power_events = pwr;
+ pwr->state = tchart->min_freq;
+ tchart->power_events = pwr;
}
}
/*
* Sort the pid data structure
*/
-static void sort_pids(void)
+static void sort_pids(struct timechart *tchart)
{
struct per_pid *new_list, *p, *cursor, *prev;
/* sort by ppid first, then by pid, lowest to highest */
new_list = NULL;
- while (all_data) {
- p = all_data;
- all_data = p->next;
+ while (tchart->all_data) {
+ p = tchart->all_data;
+ tchart->all_data = p->next;
p->next = NULL;
if (new_list == NULL) {
@@ -655,14 +727,14 @@ static void sort_pids(void)
prev->next = p;
}
}
- all_data = new_list;
+ tchart->all_data = new_list;
}
-static void draw_c_p_states(void)
+static void draw_c_p_states(struct timechart *tchart)
{
struct power_event *pwr;
- pwr = power_events;
+ pwr = tchart->power_events;
/*
* two pass drawing so that the P state bars are on top of the C state blocks
@@ -673,30 +745,30 @@ static void draw_c_p_states(void)
pwr = pwr->next;
}
- pwr = power_events;
+ pwr = tchart->power_events;
while (pwr) {
if (pwr->type == PSTATE) {
if (!pwr->state)
- pwr->state = min_freq;
+ pwr->state = tchart->min_freq;
svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
}
pwr = pwr->next;
}
}
-static void draw_wakeups(void)
+static void draw_wakeups(struct timechart *tchart)
{
struct wake_event *we;
struct per_pid *p;
struct per_pidcomm *c;
- we = wake_events;
+ we = tchart->wake_events;
while (we) {
int from = 0, to = 0;
char *task_from = NULL, *task_to = NULL;
/* locate the column of the waker and wakee */
- p = all_data;
+ p = tchart->all_data;
while (p) {
if (p->pid == we->waker || p->pid == we->wakee) {
c = p->all;
@@ -739,11 +811,12 @@ static void draw_wakeups(void)
}
if (we->waker == -1)
- svg_interrupt(we->time, to);
+ svg_interrupt(we->time, to, we->backtrace);
else if (from && to && abs(from - to) == 1)
- svg_wakeline(we->time, from, to);
+ svg_wakeline(we->time, from, to, we->backtrace);
else
- svg_partial_wakeline(we->time, from, task_from, to, task_to);
+ svg_partial_wakeline(we->time, from, task_from, to,
+ task_to, we->backtrace);
we = we->next;
free(task_from);
@@ -751,19 +824,25 @@ static void draw_wakeups(void)
}
}
-static void draw_cpu_usage(void)
+static void draw_cpu_usage(struct timechart *tchart)
{
struct per_pid *p;
struct per_pidcomm *c;
struct cpu_sample *sample;
- p = all_data;
+ p = tchart->all_data;
while (p) {
c = p->all;
while (c) {
sample = c->samples;
while (sample) {
- if (sample->type == TYPE_RUNNING)
- svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);
+ if (sample->type == TYPE_RUNNING) {
+ svg_process(sample->cpu,
+ sample->start_time,
+ sample->end_time,
+ p->pid,
+ c->comm,
+ sample->backtrace);
+ }
sample = sample->next;
}
@@ -773,16 +852,16 @@ static void draw_cpu_usage(void)
}
}
-static void draw_process_bars(void)
+static void draw_process_bars(struct timechart *tchart)
{
struct per_pid *p;
struct per_pidcomm *c;
struct cpu_sample *sample;
int Y = 0;
- Y = 2 * numcpus + 2;
+ Y = 2 * tchart->numcpus + 2;
- p = all_data;
+ p = tchart->all_data;
while (p) {
c = p->all;
while (c) {
@@ -796,11 +875,20 @@ static void draw_process_bars(void)
sample = c->samples;
while (sample) {
if (sample->type == TYPE_RUNNING)
- svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
+ svg_running(Y, sample->cpu,
+ sample->start_time,
+ sample->end_time,
+ sample->backtrace);
if (sample->type == TYPE_BLOCKED)
- svg_box(Y, sample->start_time, sample->end_time, "blocked");
+ svg_blocked(Y, sample->cpu,
+ sample->start_time,
+ sample->end_time,
+ sample->backtrace);
if (sample->type == TYPE_WAITING)
- svg_waiting(Y, sample->start_time, sample->end_time);
+ svg_waiting(Y, sample->cpu,
+ sample->start_time,
+ sample->end_time,
+ sample->backtrace);
sample = sample->next;
}
@@ -853,21 +941,21 @@ static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
return 0;
}
-static int determine_display_tasks_filtered(void)
+static int determine_display_tasks_filtered(struct timechart *tchart)
{
struct per_pid *p;
struct per_pidcomm *c;
int count = 0;
- p = all_data;
+ p = tchart->all_data;
while (p) {
p->display = 0;
if (p->start_time == 1)
- p->start_time = first_time;
+ p->start_time = tchart->first_time;
/* no exit marker, task kept running to the end */
if (p->end_time == 0)
- p->end_time = last_time;
+ p->end_time = tchart->last_time;
c = p->all;
@@ -875,7 +963,7 @@ static int determine_display_tasks_filtered(void)
c->display = 0;
if (c->start_time == 1)
- c->start_time = first_time;
+ c->start_time = tchart->first_time;
if (passes_filter(p, c)) {
c->display = 1;
@@ -884,7 +972,7 @@ static int determine_display_tasks_filtered(void)
}
if (c->end_time == 0)
- c->end_time = last_time;
+ c->end_time = tchart->last_time;
c = c->next;
}
@@ -893,25 +981,25 @@ static int determine_display_tasks_filtered(void)
return count;
}
-static int determine_display_tasks(u64 threshold)
+static int determine_display_tasks(struct timechart *tchart, u64 threshold)
{
struct per_pid *p;
struct per_pidcomm *c;
int count = 0;
if (process_filter)
- return determine_display_tasks_filtered();
+ return determine_display_tasks_filtered(tchart);
- p = all_data;
+ p = tchart->all_data;
while (p) {
p->display = 0;
if (p->start_time == 1)
- p->start_time = first_time;
+ p->start_time = tchart->first_time;
/* no exit marker, task kept running to the end */
if (p->end_time == 0)
- p->end_time = last_time;
- if (p->total_time >= threshold && !power_only)
+ p->end_time = tchart->last_time;
+ if (p->total_time >= threshold)
p->display = 1;
c = p->all;
@@ -920,15 +1008,15 @@ static int determine_display_tasks(u64 threshold)
c->display = 0;
if (c->start_time == 1)
- c->start_time = first_time;
+ c->start_time = tchart->first_time;
- if (c->total_time >= threshold && !power_only) {
+ if (c->total_time >= threshold) {
c->display = 1;
count++;
}
if (c->end_time == 0)
- c->end_time = last_time;
+ c->end_time = tchart->last_time;
c = c->next;
}
@@ -941,45 +1029,74 @@ static int determine_display_tasks(u64 threshold)
#define TIME_THRESH 10000000
-static void write_svg_file(const char *filename)
+static void write_svg_file(struct timechart *tchart, const char *filename)
{
u64 i;
int count;
+ int thresh = TIME_THRESH;
- numcpus++;
-
+ if (tchart->power_only)
+ tchart->proc_num = 0;
- count = determine_display_tasks(TIME_THRESH);
+ /* We'd like to show at least proc_num tasks;
+ * be less picky if we have fewer */
+ do {
+ count = determine_display_tasks(tchart, thresh);
+ thresh /= 10;
+ } while (!process_filter && thresh && count < tchart->proc_num);
- /* We'd like to show at least 15 tasks; be less picky if we have fewer */
- if (count < 15)
- count = determine_display_tasks(TIME_THRESH / 10);
-
- open_svg(filename, numcpus, count, first_time, last_time);
+ open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
svg_time_grid();
svg_legenda();
- for (i = 0; i < numcpus; i++)
- svg_cpu_box(i, max_freq, turbo_frequency);
+ for (i = 0; i < tchart->numcpus; i++)
+ svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
- draw_cpu_usage();
- draw_process_bars();
- draw_c_p_states();
- draw_wakeups();
+ draw_cpu_usage(tchart);
+ if (tchart->proc_num)
+ draw_process_bars(tchart);
+ if (!tchart->tasks_only)
+ draw_c_p_states(tchart);
+ if (tchart->proc_num)
+ draw_wakeups(tchart);
svg_close();
}
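write_svg_file() now relaxes the time threshold in a loop until at least proc_num tasks survive the filter, replacing the old single retry at TIME_THRESH/10. Below is a standalone sketch of that relaxation loop; count_tasks_over() is a stub standing in for determine_display_tasks(), and the real loop additionally skips relaxation when a process filter is set.

/* Stub standing in for determine_display_tasks(): pretend that more tasks
 * pass the filter as the threshold drops. */
static int count_tasks_over(unsigned long long thresh_ns)
{
	return thresh_ns > 1000000ULL ? 5 : 20;
}

/* Keep dividing the threshold by 10 until enough tasks are displayed or the
 * threshold bottoms out. */
static int pick_display_count(int proc_num)
{
	unsigned long long thresh_ns = 10000000ULL;   /* 10 ms, like TIME_THRESH */
	int count;

	do {
		count = count_tasks_over(thresh_ns);
		thresh_ns /= 10;
	} while (thresh_ns && count < proc_num);

	return count;
}

int main(void)
{
	return pick_display_count(15) >= 15 ? 0 : 1;
}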
-static int __cmd_timechart(const char *output_name)
+static int process_header(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph,
+ int feat,
+ int fd __maybe_unused,
+ void *data)
+{
+ struct timechart *tchart = data;
+
+ switch (feat) {
+ case HEADER_NRCPUS:
+ tchart->numcpus = ph->env.nr_cpus_avail;
+ break;
+
+ case HEADER_CPU_TOPOLOGY:
+ if (!tchart->topology)
+ break;
+
+ if (svg_build_topology_map(ph->env.sibling_cores,
+ ph->env.nr_sibling_cores,
+ ph->env.sibling_threads,
+ ph->env.nr_sibling_threads))
+ fprintf(stderr, "problem building topology\n");
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int __cmd_timechart(struct timechart *tchart, const char *output_name)
{
- struct perf_tool perf_timechart = {
- .comm = process_comm_event,
- .fork = process_fork_event,
- .exit = process_exit_event,
- .sample = process_sample_event,
- .ordered_samples = true,
- };
const struct perf_evsel_str_handler power_tracepoints[] = {
{ "power:cpu_idle", process_sample_cpu_idle },
{ "power:cpu_frequency", process_sample_cpu_frequency },
@@ -997,12 +1114,17 @@ static int __cmd_timechart(const char *output_name)
};
struct perf_session *session = perf_session__new(&file, false,
- &perf_timechart);
+ &tchart->tool);
int ret = -EINVAL;
if (session == NULL)
return -ENOMEM;
+ (void)perf_header__process_sections(&session->header,
+ perf_data_file__fd(session->file),
+ tchart,
+ process_header);
+
if (!perf_session__has_traces(session, "timechart record"))
goto out_delete;
@@ -1012,69 +1134,111 @@ static int __cmd_timechart(const char *output_name)
goto out_delete;
}
- ret = perf_session__process_events(session, &perf_timechart);
+ ret = perf_session__process_events(session, &tchart->tool);
if (ret)
goto out_delete;
- end_sample_processing();
+ end_sample_processing(tchart);
- sort_pids();
+ sort_pids(tchart);
- write_svg_file(output_name);
+ write_svg_file(tchart, output_name);
pr_info("Written %2.1f seconds of trace to %s.\n",
- (last_time - first_time) / 1000000000.0, output_name);
+ (tchart->last_time - tchart->first_time) / 1000000000.0, output_name);
out_delete:
perf_session__delete(session);
return ret;
}
-static int __cmd_record(int argc, const char **argv)
+static int timechart__record(struct timechart *tchart, int argc, const char **argv)
{
-#ifdef SUPPORT_OLD_POWER_EVENTS
- const char * const record_old_args[] = {
+ unsigned int rec_argc, i, j;
+ const char **rec_argv;
+ const char **p;
+ unsigned int record_elems;
+
+ const char * const common_args[] = {
"record", "-a", "-R", "-c", "1",
+ };
+ unsigned int common_args_nr = ARRAY_SIZE(common_args);
+
+ const char * const backtrace_args[] = {
+ "-g",
+ };
+ unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
+
+ const char * const power_args[] = {
+ "-e", "power:cpu_frequency",
+ "-e", "power:cpu_idle",
+ };
+ unsigned int power_args_nr = ARRAY_SIZE(power_args);
+
+ const char * const old_power_args[] = {
+#ifdef SUPPORT_OLD_POWER_EVENTS
"-e", "power:power_start",
"-e", "power:power_end",
"-e", "power:power_frequency",
- "-e", "sched:sched_wakeup",
- "-e", "sched:sched_switch",
- };
#endif
- const char * const record_new_args[] = {
- "record", "-a", "-R", "-c", "1",
- "-e", "power:cpu_frequency",
- "-e", "power:cpu_idle",
+ };
+ unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);
+
+ const char * const tasks_args[] = {
"-e", "sched:sched_wakeup",
"-e", "sched:sched_switch",
};
- unsigned int rec_argc, i, j;
- const char **rec_argv;
- const char * const *record_args = record_new_args;
- unsigned int record_elems = ARRAY_SIZE(record_new_args);
+ unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);
#ifdef SUPPORT_OLD_POWER_EVENTS
if (!is_valid_tracepoint("power:cpu_idle") &&
is_valid_tracepoint("power:power_start")) {
use_old_power_events = 1;
- record_args = record_old_args;
- record_elems = ARRAY_SIZE(record_old_args);
+ power_args_nr = 0;
+ } else {
+ old_power_args_nr = 0;
}
#endif
- rec_argc = record_elems + argc - 1;
+ if (tchart->power_only)
+ tasks_args_nr = 0;
+
+ if (tchart->tasks_only) {
+ power_args_nr = 0;
+ old_power_args_nr = 0;
+ }
+
+ if (!tchart->with_backtrace)
+ backtrace_args_no = 0;
+
+ record_elems = common_args_nr + tasks_args_nr +
+ power_args_nr + old_power_args_nr + backtrace_args_no;
+
+ rec_argc = record_elems + argc;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
return -ENOMEM;
- for (i = 0; i < record_elems; i++)
- rec_argv[i] = strdup(record_args[i]);
+ p = rec_argv;
+ for (i = 0; i < common_args_nr; i++)
+ *p++ = strdup(common_args[i]);
+
+ for (i = 0; i < backtrace_args_no; i++)
+ *p++ = strdup(backtrace_args[i]);
+
+ for (i = 0; i < tasks_args_nr; i++)
+ *p++ = strdup(tasks_args[i]);
+
+ for (i = 0; i < power_args_nr; i++)
+ *p++ = strdup(power_args[i]);
- for (j = 1; j < (unsigned int)argc; j++, i++)
- rec_argv[i] = argv[j];
+ for (i = 0; i < old_power_args_nr; i++)
+ *p++ = strdup(old_power_args[i]);
- return cmd_record(i, rec_argv, NULL);
+ for (j = 1; j < (unsigned int)argc; j++)
+ *p++ = argv[j];
+
+ return cmd_record(rec_argc, rec_argv, NULL);
}
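timechart__record() above now composes the record command line from optional argument groups (common, backtrace, tasks, power, old-power). A minimal sketch of that NULL-terminated argv assembly pattern, with made-up groups standing in for the real ones:

#include <stdlib.h>
#include <string.h>

/* Size the enabled groups up front, allocate argc+1 slots (the extra slot
 * stays NULL so execvp-style consumers see a terminated vector), then append
 * each group in order. */
static const char **build_record_argv(int with_backtrace, unsigned int *argcp)
{
	static const char * const common[] = { "record", "-a", "-R", "-c", "1" };
	static const char * const bt[] = { "-g" };
	unsigned int nr_common = sizeof(common) / sizeof(common[0]);
	unsigned int nr_bt = with_backtrace ? 1 : 0;
	unsigned int argc = nr_common + nr_bt;
	const char **argv = calloc(argc + 1, sizeof(char *));
	const char **p = argv;
	unsigned int i;

	if (!argv)
		return NULL;

	for (i = 0; i < nr_common; i++)
		*p++ = strdup(common[i]);
	for (i = 0; i < nr_bt; i++)
		*p++ = strdup(bt[i]);

	*argcp = argc;
	return argv;
}

int main(void)
{
	unsigned int argc = 0;
	const char **argv = build_record_argv(1, &argc);

	/* leaks ignored in this sketch */
	return (argv && argc == 6 && argv[argc] == NULL) ? 0 : 1;
}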
static int
@@ -1086,20 +1250,56 @@ parse_process(const struct option *opt __maybe_unused, const char *arg,
return 0;
}
+static int
+parse_highlight(const struct option *opt __maybe_unused, const char *arg,
+ int __maybe_unused unset)
+{
+ unsigned long duration = strtoul(arg, NULL, 0);
+
+ if (svg_highlight || svg_highlight_name)
+ return -1;
+
+ if (duration)
+ svg_highlight = duration;
+ else
+ svg_highlight_name = strdup(arg);
+
+ return 0;
+}
+
int cmd_timechart(int argc, const char **argv,
const char *prefix __maybe_unused)
{
+ struct timechart tchart = {
+ .tool = {
+ .comm = process_comm_event,
+ .fork = process_fork_event,
+ .exit = process_exit_event,
+ .sample = process_sample_event,
+ .ordered_samples = true,
+ },
+ .proc_num = 15,
+ };
const char *output_name = "output.svg";
- const struct option options[] = {
+ const struct option timechart_options[] = {
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_STRING('o', "output", &output_name, "file", "output file name"),
OPT_INTEGER('w', "width", &svg_page_width, "page width"),
- OPT_BOOLEAN('P', "power-only", &power_only, "output power data only"),
+ OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
+ "highlight tasks. Pass duration in ns or process name.",
+ parse_highlight),
+ OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
+ OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
+ "output processes data only"),
OPT_CALLBACK('p', "process", NULL, "process",
"process selector. Pass a pid or process name.",
parse_process),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
"Look for files with symbols relative to this directory"),
+ OPT_INTEGER('n', "proc-num", &tchart.proc_num,
+ "min. number of tasks to print"),
+ OPT_BOOLEAN('t', "topology", &tchart.topology,
+ "sort CPUs according to topology"),
OPT_END()
};
const char * const timechart_usage[] = {
@@ -1107,17 +1307,41 @@ int cmd_timechart(int argc, const char **argv,
NULL
};
- argc = parse_options(argc, argv, options, timechart_usage,
+ const struct option record_options[] = {
+ OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
+ OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
+ "output processes data only"),
+ OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
+ OPT_END()
+ };
+ const char * const record_usage[] = {
+ "perf timechart record [<options>]",
+ NULL
+ };
+ argc = parse_options(argc, argv, timechart_options, timechart_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
+ if (tchart.power_only && tchart.tasks_only) {
+ pr_err("-P and -T options cannot be used at the same time.\n");
+ return -1;
+ }
+
symbol__init();
- if (argc && !strncmp(argv[0], "rec", 3))
- return __cmd_record(argc, argv);
- else if (argc)
- usage_with_options(timechart_usage, options);
+ if (argc && !strncmp(argv[0], "rec", 3)) {
+ argc = parse_options(argc, argv, record_options, record_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (tchart.power_only && tchart.tasks_only) {
+ pr_err("-P and -T options cannot be used at the same time.\n");
+ return -1;
+ }
+
+ return timechart__record(&tchart, argc, argv);
+ } else if (argc)
+ usage_with_options(timechart_usage, timechart_options);
setup_pager();
- return __cmd_timechart(output_name);
+ return __cmd_timechart(&tchart, output_name);
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 71e6402..76cd510 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -189,21 +189,18 @@ static void perf_top__record_precise_ip(struct perf_top *top,
if (pthread_mutex_trylock(&notes->lock))
return;
- if (notes->src == NULL && symbol__alloc_hist(sym) < 0) {
- pthread_mutex_unlock(&notes->lock);
- pr_err("Not enough memory for annotating '%s' symbol!\n",
- sym->name);
- sleep(1);
- return;
- }
-
ip = he->ms.map->map_ip(he->ms.map, ip);
- err = symbol__inc_addr_samples(sym, he->ms.map, counter, ip);
+ err = hist_entry__inc_addr_samples(he, counter, ip);
pthread_mutex_unlock(&notes->lock);
if (err == -ERANGE && !he->ms.map->erange_warned)
ui__warn_map_erange(he->ms.map, sym, ip);
+ else if (err == -ENOMEM) {
+ pr_err("Not enough memory for annotating '%s' symbol!\n",
+ sym->name);
+ sleep(1);
+ }
}
static void perf_top__show_details(struct perf_top *top)
@@ -485,7 +482,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
fprintf(stderr, "\nAvailable events:");
- list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
+ evlist__for_each(top->evlist, top->sym_evsel)
fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));
prompt_integer(&counter, "Enter details event counter");
@@ -496,7 +493,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
sleep(1);
break;
}
- list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
+ evlist__for_each(top->evlist, top->sym_evsel)
if (top->sym_evsel->idx == counter)
break;
} else
@@ -578,7 +575,7 @@ static void *display_thread_tui(void *arg)
* Zooming in/out UIDs. For now just use whatever the user passed
* via --uid.
*/
- list_for_each_entry(pos, &top->evlist->entries, node)
+ evlist__for_each(top->evlist, pos)
pos->hists.uid_filter_str = top->record_opts.target.uid_str;
perf_evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
@@ -634,26 +631,9 @@ repeat:
return NULL;
}
-/* Tag samples to be skipped. */
-static const char *skip_symbols[] = {
- "intel_idle",
- "default_idle",
- "native_safe_halt",
- "cpu_idle",
- "enter_idle",
- "exit_idle",
- "mwait_idle",
- "mwait_idle_with_hints",
- "poll_idle",
- "ppc64_runlatch_off",
- "pseries_dedicated_idle_sleep",
- NULL
-};
-
static int symbol_filter(struct map *map __maybe_unused, struct symbol *sym)
{
const char *name = sym->name;
- int i;
/*
* ppc64 uses function descriptors and appends a '.' to the
@@ -671,12 +651,8 @@ static int symbol_filter(struct map *map __maybe_unused, struct symbol *sym)
strstr(name, "_text_end"))
return 1;
- for (i = 0; skip_symbols[i]; i++) {
- if (!strcmp(skip_symbols[i], name)) {
- sym->ignore = true;
- break;
- }
- }
+ if (symbol__is_idle(sym))
+ sym->ignore = true;
return 0;
}
@@ -767,15 +743,10 @@ static void perf_event__process_sample(struct perf_tool *tool,
if (al.sym == NULL || !al.sym->ignore) {
struct hist_entry *he;
- if ((sort__has_parent || symbol_conf.use_callchain) &&
- sample->callchain) {
- err = machine__resolve_callchain(machine, evsel,
- al.thread, sample,
- &parent, &al,
- top->max_stack);
- if (err)
- return;
- }
+ err = sample__resolve_callchain(sample, &parent, evsel, &al,
+ top->max_stack);
+ if (err)
+ return;
he = perf_evsel__add_hist_entry(evsel, &al, sample);
if (he == NULL) {
@@ -783,12 +754,9 @@ static void perf_event__process_sample(struct perf_tool *tool,
return;
}
- if (symbol_conf.use_callchain) {
- err = callchain_append(he->callchain, &callchain_cursor,
- sample->period);
- if (err)
- return;
- }
+ err = hist_entry__append_callchain(he, sample);
+ if (err)
+ return;
if (sort__has_sym)
perf_top__record_precise_ip(top, he, evsel->idx, ip);
@@ -878,11 +846,11 @@ static int perf_top__start_counters(struct perf_top *top)
char msg[512];
struct perf_evsel *counter;
struct perf_evlist *evlist = top->evlist;
- struct perf_record_opts *opts = &top->record_opts;
+ struct record_opts *opts = &top->record_opts;
perf_evlist__config(evlist, opts);
- list_for_each_entry(counter, &evlist->entries, node) {
+ evlist__for_each(evlist, counter) {
try_again:
if (perf_evsel__open(counter, top->evlist->cpus,
top->evlist->threads) < 0) {
@@ -930,7 +898,7 @@ static int perf_top__setup_sample_type(struct perf_top *top __maybe_unused)
static int __cmd_top(struct perf_top *top)
{
- struct perf_record_opts *opts = &top->record_opts;
+ struct record_opts *opts = &top->record_opts;
pthread_t thread;
int ret;
@@ -1052,7 +1020,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
.max_stack = PERF_MAX_STACK_DEPTH,
.sym_pcnt_filter = 5,
};
- struct perf_record_opts *opts = &top.record_opts;
+ struct record_opts *opts = &top.record_opts;
struct target *target = &opts->target;
const struct option options[] = {
OPT_CALLBACK('e', "event", &top.evlist, "event",
@@ -1084,7 +1052,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
"dump the symbol table used for profiling"),
OPT_INTEGER('f', "count-filter", &top.count_filter,
"only display functions with more events than this"),
- OPT_BOOLEAN('g', "group", &opts->group,
+ OPT_BOOLEAN(0, "group", &opts->group,
"put the counters into a counter group"),
OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
"child tasks do not inherit counters"),
@@ -1105,7 +1073,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
" abort, in_tx, transaction"),
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
"Show a column with the number of samples"),
- OPT_CALLBACK_NOOPT('G', NULL, &top.record_opts,
+ OPT_CALLBACK_NOOPT('g', NULL, &top.record_opts,
NULL, "enables call-graph recording",
&callchain_opt),
OPT_CALLBACK(0, "call-graph", &top.record_opts,
@@ -1195,7 +1163,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
if (!top.evlist->nr_entries &&
perf_evlist__add_default(top.evlist) < 0) {
ui__error("Not enough memory for event selector list\n");
- goto out_delete_maps;
+ goto out_delete_evlist;
}
symbol_conf.nr_events = top.evlist->nr_entries;
@@ -1203,9 +1171,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
if (top.delay_secs < 1)
top.delay_secs = 1;
- if (perf_record_opts__config(opts)) {
+ if (record_opts__config(opts)) {
status = -EINVAL;
- goto out_delete_maps;
+ goto out_delete_evlist;
}
top.sym_evsel = perf_evlist__first(top.evlist);
@@ -1230,8 +1198,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
status = __cmd_top(&top);
-out_delete_maps:
- perf_evlist__delete_maps(top.evlist);
out_delete_evlist:
perf_evlist__delete(top.evlist);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 8be17fc..896f270 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -11,6 +11,8 @@
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
+#include "trace-event.h"
+#include "util/parse-events.h"
#include <libaudit.h>
#include <stdlib.h>
@@ -144,8 +146,7 @@ static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel,
static void perf_evsel__delete_priv(struct perf_evsel *evsel)
{
- free(evsel->priv);
- evsel->priv = NULL;
+ zfree(&evsel->priv);
perf_evsel__delete(evsel);
}
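Several hunks in builtin-trace.c replace open-coded free-then-NULL pairs with zfree(). The real macro lives in perf's util headers; the spelling below is an assumption that only illustrates the shape of the helper.

#include <stdlib.h>

/* free() through a pointer-to-pointer and clear it, so the caller cannot
 * accidentally reuse or double-free the stale pointer. */
#define zfree_sketch(pp) do { free(*(pp)); *(pp) = NULL; } while (0)

struct holder { void *priv; };

int main(void)
{
	struct holder h = { .priv = malloc(16) };

	zfree_sketch(&h.priv);          /* replaces: free(h.priv); h.priv = NULL; */
	return h.priv == NULL ? 0 : 1;
}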
@@ -163,8 +164,7 @@ static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler)
return -ENOMEM;
out_delete:
- free(evsel->priv);
- evsel->priv = NULL;
+ zfree(&evsel->priv);
return -ENOENT;
}
@@ -172,6 +172,10 @@ static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void
{
struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
+ /* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
+ if (evsel == NULL)
+ evsel = perf_evsel__newtp("syscalls", direction);
+
if (evsel) {
if (perf_evsel__init_syscall_tp(evsel, handler))
goto out_delete;
@@ -1153,29 +1157,30 @@ struct trace {
int max;
struct syscall *table;
} syscalls;
- struct perf_record_opts opts;
+ struct record_opts opts;
struct machine *host;
u64 base_time;
- bool full_time;
FILE *output;
unsigned long nr_events;
struct strlist *ev_qualifier;
- bool not_ev_qualifier;
- bool live;
const char *last_vfs_getname;
struct intlist *tid_list;
struct intlist *pid_list;
+ double duration_filter;
+ double runtime_ms;
+ struct {
+ u64 vfs_getname,
+ proc_getname;
+ } stats;
+ bool not_ev_qualifier;
+ bool live;
+ bool full_time;
bool sched;
bool multiple_threads;
bool summary;
bool summary_only;
bool show_comm;
bool show_tool_stats;
- double duration_filter;
- double runtime_ms;
- struct {
- u64 vfs_getname, proc_getname;
- } stats;
};
static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
@@ -1272,10 +1277,8 @@ static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
struct thread_trace *ttrace = arg->thread->priv;
- if (ttrace && fd >= 0 && fd <= ttrace->paths.max) {
- free(ttrace->paths.table[fd]);
- ttrace->paths.table[fd] = NULL;
- }
+ if (ttrace && fd >= 0 && fd <= ttrace->paths.max)
+ zfree(&ttrace->paths.table[fd]);
return printed;
}
@@ -1430,11 +1433,11 @@ static int trace__read_syscall_info(struct trace *trace, int id)
sc->fmt = syscall_fmt__find(sc->name);
snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
- sc->tp_format = event_format__new("syscalls", tp_name);
+ sc->tp_format = trace_event__tp_format("syscalls", tp_name);
if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
- sc->tp_format = event_format__new("syscalls", tp_name);
+ sc->tp_format = trace_event__tp_format("syscalls", tp_name);
}
if (sc->tp_format == NULL)
@@ -1764,8 +1767,10 @@ static int trace__process_sample(struct perf_tool *tool,
if (!trace->full_time && trace->base_time == 0)
trace->base_time = sample->time;
- if (handler)
+ if (handler) {
+ ++trace->nr_events;
handler(trace, evsel, sample);
+ }
return err;
}
@@ -1800,10 +1805,11 @@ static int trace__record(int argc, const char **argv)
"-R",
"-m", "1024",
"-c", "1",
- "-e", "raw_syscalls:sys_enter,raw_syscalls:sys_exit",
+ "-e",
};
- rec_argc = ARRAY_SIZE(record_args) + argc;
+ /* +1 is for the event string below */
+ rec_argc = ARRAY_SIZE(record_args) + 1 + argc;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
@@ -1812,6 +1818,17 @@ static int trace__record(int argc, const char **argv)
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = record_args[i];
+ /* event string may be different for older kernels - e.g., RHEL6 */
+ if (is_valid_tracepoint("raw_syscalls:sys_enter"))
+ rec_argv[i] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
+ else if (is_valid_tracepoint("syscalls:sys_enter"))
+ rec_argv[i] = "syscalls:sys_enter,syscalls:sys_exit";
+ else {
+ pr_err("Neither raw_syscalls nor syscalls events exist.\n");
+ return -1;
+ }
+ i++;
+
for (j = 0; j < (unsigned int)argc; j++, i++)
rec_argv[i] = argv[j];
@@ -1869,7 +1886,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
err = trace__symbols_init(trace, evlist);
if (err < 0) {
fprintf(trace->output, "Problems initializing symbol libraries!\n");
- goto out_delete_maps;
+ goto out_delete_evlist;
}
perf_evlist__config(evlist, &trace->opts);
@@ -1879,10 +1896,10 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (forks) {
err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
- argv, false, false);
+ argv, false, NULL);
if (err < 0) {
fprintf(trace->output, "Couldn't run the workload!\n");
- goto out_delete_maps;
+ goto out_delete_evlist;
}
}
@@ -1890,10 +1907,10 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_open;
- err = perf_evlist__mmap(evlist, UINT_MAX, false);
+ err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
if (err < 0) {
fprintf(trace->output, "Couldn't mmap the events: %s\n", strerror(errno));
- goto out_close_evlist;
+ goto out_delete_evlist;
}
perf_evlist__enable(evlist);
@@ -1977,11 +1994,6 @@ out_disable:
}
}
- perf_evlist__munmap(evlist);
-out_close_evlist:
- perf_evlist__close(evlist);
-out_delete_maps:
- perf_evlist__delete_maps(evlist);
out_delete_evlist:
perf_evlist__delete(evlist);
out:
@@ -2047,6 +2059,10 @@ static int trace__replay(struct trace *trace)
evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
"raw_syscalls:sys_enter");
+ /* older kernels have syscalls tp versus raw_syscalls */
+ if (evsel == NULL)
+ evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
+ "syscalls:sys_enter");
if (evsel == NULL) {
pr_err("Data file does not have raw_syscalls:sys_enter event\n");
goto out;
@@ -2060,6 +2076,9 @@ static int trace__replay(struct trace *trace)
evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
"raw_syscalls:sys_exit");
+ if (evsel == NULL)
+ evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
+ "syscalls:sys_exit");
if (evsel == NULL) {
pr_err("Data file does not have raw_syscalls:sys_exit event\n");
goto out;
@@ -2158,7 +2177,6 @@ static int trace__fprintf_one_thread(struct thread *thread, void *priv)
size_t printed = data->printed;
struct trace *trace = data->trace;
struct thread_trace *ttrace = thread->priv;
- const char *color;
double ratio;
if (ttrace == NULL)
@@ -2166,17 +2184,9 @@ static int trace__fprintf_one_thread(struct thread *thread, void *priv)
ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 50.0)
- color = PERF_COLOR_RED;
- else if (ratio > 25.0)
- color = PERF_COLOR_GREEN;
- else if (ratio > 5.0)
- color = PERF_COLOR_YELLOW;
-
- printed += color_fprintf(fp, color, " %s (%d), ", thread__comm_str(thread), thread->tid);
+ printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
- printed += color_fprintf(fp, color, "%.1f%%", ratio);
+ printed += fprintf(fp, "%.1f%%", ratio);
printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
printed += thread__dump_stats(ttrace, trace, fp);
@@ -2248,7 +2258,7 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
},
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
- .no_delay = true,
+ .no_buffering = true,
.mmap_pages = 1024,
},
.output = stdout,
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index f7d11a8..d604e50 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -1,28 +1,26 @@
-uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
- -e s/arm.*/arm/ -e s/sa110/arm/ \
- -e s/s390x/s390/ -e s/parisc64/parisc/ \
- -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
- -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ )
-NO_PERF_REGS := 1
-CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS)
+ifeq ($(src-perf),)
+src-perf := $(srctree)/tools/perf
+endif
-# Additional ARCH settings for x86
-ifeq ($(ARCH),i386)
- override ARCH := x86
- NO_PERF_REGS := 0
- LIBUNWIND_LIBS = -lunwind -lunwind-x86
+ifeq ($(obj-perf),)
+obj-perf := $(OUTPUT)
endif
-ifeq ($(ARCH),x86_64)
- override ARCH := x86
- IS_X86_64 := 0
- ifeq (, $(findstring m32,$(CFLAGS)))
- IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1)
- endif
+ifneq ($(obj-perf),)
+obj-perf := $(abspath $(obj-perf))/
+endif
+
+LIB_INCLUDE := $(srctree)/tools/lib/
+CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS)
+
+include $(src-perf)/config/Makefile.arch
+
+NO_PERF_REGS := 1
+
+# Additional ARCH settings for x86
+ifeq ($(ARCH),x86)
ifeq (${IS_X86_64}, 1)
- RAW_ARCH := x86_64
CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
@@ -36,24 +34,31 @@ ifeq ($(ARCH),arm)
LIBUNWIND_LIBS = -lunwind -lunwind-arm
endif
-ifeq ($(NO_PERF_REGS),0)
- CFLAGS += -DHAVE_PERF_REGS_SUPPORT
-endif
-
-ifeq ($(src-perf),)
-src-perf := $(srctree)/tools/perf
-endif
+ifeq ($(LIBUNWIND_LIBS),)
+ NO_LIBUNWIND := 1
+else
+ #
+ # For linking with debug library, run like:
+ #
+ # make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
+ #
+ ifdef LIBUNWIND_DIR
+ LIBUNWIND_CFLAGS = -I$(LIBUNWIND_DIR)/include
+ LIBUNWIND_LDFLAGS = -L$(LIBUNWIND_DIR)/lib
+ endif
+ LIBUNWIND_LDFLAGS += $(LIBUNWIND_LIBS)
-ifeq ($(obj-perf),)
-obj-perf := $(OUTPUT)
+ # Set per-feature check compilation flags
+ FEATURE_CHECK_CFLAGS-libunwind = $(LIBUNWIND_CFLAGS)
+ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS)
+ FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
+ FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS)
endif
-ifneq ($(obj-perf),)
-obj-perf := $(abspath $(obj-perf))/
+ifeq ($(NO_PERF_REGS),0)
+ CFLAGS += -DHAVE_PERF_REGS_SUPPORT
endif
-LIB_INCLUDE := $(srctree)/tools/lib/
-
# include ARCH specific config
-include $(src-perf)/arch/$(ARCH)/Makefile
@@ -102,7 +107,7 @@ endif
feature_check = $(eval $(feature_check_code))
define feature_check_code
- feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS)" LDFLAGS="$(LDFLAGS)" LIBUNWIND_LIBS="$(LIBUNWIND_LIBS)" -C config/feature-checks test-$1 >/dev/null 2>/dev/null && echo 1 || echo 0)
+ feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C config/feature-checks test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
endef
feature_set = $(eval $(feature_set_code))
@@ -141,16 +146,26 @@ CORE_FEATURE_TESTS = \
libslang \
libunwind \
on-exit \
- stackprotector \
stackprotector-all \
timerfd
+# Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features.
+# If, in the future, we need per-feature checks/flags for features not
+# mentioned in this list, we will need to refactor this ;-).
+set_test_all_flags = $(eval $(set_test_all_flags_code))
+define set_test_all_flags_code
+ FEATURE_CHECK_CFLAGS-all += $(FEATURE_CHECK_CFLAGS-$(1))
+ FEATURE_CHECK_LDFLAGS-all += $(FEATURE_CHECK_LDFLAGS-$(1))
+endef
+
+$(foreach feat,$(CORE_FEATURE_TESTS),$(call set_test_all_flags,$(feat)))
+
#
# So here we detect whether test-all was rebuilt, to be able
# to skip the print-out of the long features list if the file
# existed before and after it was built:
#
-ifeq ($(wildcard $(OUTPUT)config/feature-checks/test-all),)
+ifeq ($(wildcard $(OUTPUT)config/feature-checks/test-all.bin),)
test-all-failed := 1
else
test-all-failed := 0
@@ -180,7 +195,7 @@ ifeq ($(feature-all), 1)
#
$(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_set,$(feat)))
else
- $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS)" LDFLAGS=$(LDFLAGS) -i -j -C config/feature-checks $(CORE_FEATURE_TESTS) >/dev/null 2>&1)
+ $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS)" LDFLAGS=$(LDFLAGS) -i -j -C config/feature-checks $(addsuffix .bin,$(CORE_FEATURE_TESTS)) >/dev/null 2>&1)
$(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_check,$(feat)))
endif
@@ -209,10 +224,6 @@ ifeq ($(feature-stackprotector-all), 1)
CFLAGS += -fstack-protector-all
endif
-ifeq ($(feature-stackprotector), 1)
- CFLAGS += -Wstack-protector
-endif
-
ifeq ($(DEBUG),0)
ifeq ($(feature-fortify-source), 1)
CFLAGS += -D_FORTIFY_SOURCE=2
@@ -221,6 +232,7 @@ endif
CFLAGS += -I$(src-perf)/util/include
CFLAGS += -I$(src-perf)/arch/$(ARCH)/include
+CFLAGS += -I$(srctree)/tools/include/
CFLAGS += -I$(srctree)/arch/$(ARCH)/include/uapi
CFLAGS += -I$(srctree)/arch/$(ARCH)/include
CFLAGS += -I$(srctree)/include/uapi
@@ -310,21 +322,7 @@ ifndef NO_LIBELF
endif # NO_DWARF
endif # NO_LIBELF
-ifeq ($(LIBUNWIND_LIBS),)
- NO_LIBUNWIND := 1
-endif
-
ifndef NO_LIBUNWIND
- #
- # For linking with debug library, run like:
- #
- # make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
- #
- ifdef LIBUNWIND_DIR
- LIBUNWIND_CFLAGS := -I$(LIBUNWIND_DIR)/include
- LIBUNWIND_LDFLAGS := -L$(LIBUNWIND_DIR)/lib
- endif
-
ifneq ($(feature-libunwind), 1)
msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 1.1);
NO_LIBUNWIND := 1
@@ -339,14 +337,12 @@ ifndef NO_LIBUNWIND
# non-ARM has no dwarf_find_debug_frame() function:
CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
endif
- endif
-endif
-ifndef NO_LIBUNWIND
- CFLAGS += -DHAVE_LIBUNWIND_SUPPORT
- EXTLIBS += $(LIBUNWIND_LIBS)
- CFLAGS += $(LIBUNWIND_CFLAGS)
- LDFLAGS += $(LIBUNWIND_LDFLAGS)
+ CFLAGS += -DHAVE_LIBUNWIND_SUPPORT
+ EXTLIBS += $(LIBUNWIND_LIBS)
+ CFLAGS += $(LIBUNWIND_CFLAGS)
+ LDFLAGS += $(LIBUNWIND_LDFLAGS)
+ endif # ifneq ($(feature-libunwind), 1)
endif
ifndef NO_LIBAUDIT
@@ -376,7 +372,7 @@ ifndef NO_SLANG
endif
ifndef NO_GTK2
- FLAGS_GTK2=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null)
+ FLAGS_GTK2=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null)
ifneq ($(feature-gtk2), 1)
msg := $(warning GTK2 not found, disabling GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
NO_GTK2 := 1
@@ -385,8 +381,8 @@ ifndef NO_GTK2
GTK_CFLAGS := -DHAVE_GTK_INFO_BAR_SUPPORT
endif
CFLAGS += -DHAVE_GTK2_SUPPORT
- GTK_CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null)
- GTK_LIBS := $(shell pkg-config --libs gtk+-2.0 2>/dev/null)
+ GTK_CFLAGS += $(shell $(PKG_CONFIG) --cflags gtk+-2.0 2>/dev/null)
+ GTK_LIBS := $(shell $(PKG_CONFIG) --libs gtk+-2.0 2>/dev/null)
EXTLIBS += -ldl
endif
endif
@@ -533,7 +529,7 @@ endif
ifndef NO_LIBNUMA
ifeq ($(feature-libnuma), 0)
- msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numa-libs-devel or libnuma-dev);
+ msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev);
NO_LIBNUMA := 1
else
CFLAGS += -DHAVE_LIBNUMA_SUPPORT
@@ -598,3 +594,11 @@ else
perfexec_instdir = $(prefix)/$(perfexecdir)
endif
perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir))
+
+# If we install to $(HOME) we keep the traceevent default:
+# $(HOME)/.traceevent/plugins
+# Otherwise we install plugins into the global $(libdir).
+ifdef DESTDIR
+plugindir=$(libdir)/traceevent/plugins
+plugindir_SQ= $(subst ','\'',$(prefix)/$(plugindir))
+endif
diff --git a/tools/perf/config/Makefile.arch b/tools/perf/config/Makefile.arch
new file mode 100644
index 0000000..fef8ae9
--- /dev/null
+++ b/tools/perf/config/Makefile.arch
@@ -0,0 +1,22 @@
+
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+
+ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
+ -e s/arm.*/arm/ -e s/sa110/arm/ \
+ -e s/s390x/s390/ -e s/parisc64/parisc/ \
+ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
+ -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ )
+
+# Additional ARCH settings for x86
+ifeq ($(ARCH),i386)
+ override ARCH := x86
+endif
+
+ifeq ($(ARCH),x86_64)
+ override ARCH := x86
+ IS_X86_64 := 0
+ ifeq (, $(findstring m32,$(CFLAGS)))
+ IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1)
+ RAW_ARCH := x86_64
+ endif
+endif
diff --git a/tools/perf/config/feature-checks/.gitignore b/tools/perf/config/feature-checks/.gitignore
new file mode 100644
index 0000000..80f3da0
--- /dev/null
+++ b/tools/perf/config/feature-checks/.gitignore
@@ -0,0 +1,2 @@
+*.d
+*.bin
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile
index 87e7900..12e5513 100644
--- a/tools/perf/config/feature-checks/Makefile
+++ b/tools/perf/config/feature-checks/Makefile
@@ -1,95 +1,92 @@
FILES= \
- test-all \
- test-backtrace \
- test-bionic \
- test-dwarf \
- test-fortify-source \
- test-glibc \
- test-gtk2 \
- test-gtk2-infobar \
- test-hello \
- test-libaudit \
- test-libbfd \
- test-liberty \
- test-liberty-z \
- test-cplus-demangle \
- test-libelf \
- test-libelf-getphdrnum \
- test-libelf-mmap \
- test-libnuma \
- test-libperl \
- test-libpython \
- test-libpython-version \
- test-libslang \
- test-libunwind \
- test-libunwind-debug-frame \
- test-on-exit \
- test-stackprotector-all \
- test-stackprotector \
- test-timerfd
-
-CC := $(CC) -MD
+ test-all.bin \
+ test-backtrace.bin \
+ test-bionic.bin \
+ test-dwarf.bin \
+ test-fortify-source.bin \
+ test-glibc.bin \
+ test-gtk2.bin \
+ test-gtk2-infobar.bin \
+ test-hello.bin \
+ test-libaudit.bin \
+ test-libbfd.bin \
+ test-liberty.bin \
+ test-liberty-z.bin \
+ test-cplus-demangle.bin \
+ test-libelf.bin \
+ test-libelf-getphdrnum.bin \
+ test-libelf-mmap.bin \
+ test-libnuma.bin \
+ test-libperl.bin \
+ test-libpython.bin \
+ test-libpython-version.bin \
+ test-libslang.bin \
+ test-libunwind.bin \
+ test-libunwind-debug-frame.bin \
+ test-on-exit.bin \
+ test-stackprotector-all.bin \
+ test-timerfd.bin
+
+CC := $(CROSS_COMPILE)gcc -MD
+PKG_CONFIG := $(CROSS_COMPILE)pkg-config
all: $(FILES)
-BUILD = $(CC) $(CFLAGS) $(LDFLAGS) -o $(OUTPUT)$@ $@.c
+BUILD = $(CC) $(CFLAGS) -o $(OUTPUT)$@ $(patsubst %.bin,%.c,$@) $(LDFLAGS)
###############################
-test-all:
- $(BUILD) -Werror -fstack-protector -fstack-protector-all -O2 -Werror -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma $(LIBUNWIND_LIBS) -lelf -laudit -I/usr/include/slang -lslang $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl
+test-all.bin:
+ $(BUILD) -Werror -fstack-protector-all -O2 -Werror -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -laudit -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl
-test-hello:
+test-hello.bin:
$(BUILD)
-test-stackprotector-all:
+test-stackprotector-all.bin:
$(BUILD) -Werror -fstack-protector-all
-test-stackprotector:
- $(BUILD) -Werror -fstack-protector -Wstack-protector
-
-test-fortify-source:
+test-fortify-source.bin:
$(BUILD) -O2 -Werror -D_FORTIFY_SOURCE=2
-test-bionic:
+test-bionic.bin:
$(BUILD)
-test-libelf:
+test-libelf.bin:
$(BUILD) -lelf
-test-glibc:
+test-glibc.bin:
$(BUILD)
-test-dwarf:
+test-dwarf.bin:
$(BUILD) -ldw
-test-libelf-mmap:
+test-libelf-mmap.bin:
$(BUILD) -lelf
-test-libelf-getphdrnum:
+test-libelf-getphdrnum.bin:
$(BUILD) -lelf
-test-libnuma:
+test-libnuma.bin:
$(BUILD) -lnuma
-test-libunwind:
- $(BUILD) $(LIBUNWIND_LIBS) -lelf
+test-libunwind.bin:
+ $(BUILD) -lelf
-test-libunwind-debug-frame:
- $(BUILD) $(LIBUNWIND_LIBS) -lelf
+test-libunwind-debug-frame.bin:
+ $(BUILD) -lelf
-test-libaudit:
+test-libaudit.bin:
$(BUILD) -laudit
-test-libslang:
+test-libslang.bin:
$(BUILD) -I/usr/include/slang -lslang
-test-gtk2:
- $(BUILD) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null)
+test-gtk2.bin:
+ $(BUILD) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null)
-test-gtk2-infobar:
- $(BUILD) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null)
+test-gtk2-infobar.bin:
+ $(BUILD) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null)
grep-libs = $(filter -l%,$(1))
strip-libs = $(filter-out -l%,$(1))
@@ -100,7 +97,7 @@ PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
-test-libperl:
+test-libperl.bin:
$(BUILD) $(FLAGS_PERL_EMBED)
override PYTHON := python
@@ -117,31 +114,31 @@ PYTHON_EMBED_LIBADD = $(call grep-libs,$(PYTHON_EMBED_LDOPTS))
PYTHON_EMBED_CCOPTS = $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
FLAGS_PYTHON_EMBED = $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
-test-libpython:
+test-libpython.bin:
$(BUILD) $(FLAGS_PYTHON_EMBED)
-test-libpython-version:
+test-libpython-version.bin:
$(BUILD) $(FLAGS_PYTHON_EMBED)
-test-libbfd:
+test-libbfd.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
-test-liberty:
+test-liberty.bin:
$(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty
-test-liberty-z:
+test-liberty-z.bin:
$(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz
-test-cplus-demangle:
+test-cplus-demangle.bin:
$(BUILD) -liberty
-test-on-exit:
+test-on-exit.bin:
$(BUILD)
-test-backtrace:
+test-backtrace.bin:
$(BUILD)
-test-timerfd:
+test-timerfd.bin:
$(BUILD)
-include *.d
diff --git a/tools/perf/config/feature-checks/test-all.c b/tools/perf/config/feature-checks/test-all.c
index 59e7a70..9b8a544 100644
--- a/tools/perf/config/feature-checks/test-all.c
+++ b/tools/perf/config/feature-checks/test-all.c
@@ -85,6 +85,10 @@
# include "test-timerfd.c"
#undef main
+#define main main_test_stackprotector_all
+# include "test-stackprotector-all.c"
+#undef main
+
int main(int argc, char *argv[])
{
main_test_libpython();
@@ -106,6 +110,7 @@ int main(int argc, char *argv[])
main_test_backtrace();
main_test_libnuma();
main_test_timerfd();
+ main_test_stackprotector_all();
return 0;
}
diff --git a/tools/perf/config/feature-checks/test-stackprotector.c b/tools/perf/config/feature-checks/test-stackprotector.c
deleted file mode 100644
index c9f398d..0000000
--- a/tools/perf/config/feature-checks/test-stackprotector.c
+++ /dev/null
@@ -1,6 +0,0 @@
-#include <stdio.h>
-
-int main(void)
-{
- return puts("hi");
-}
diff --git a/tools/perf/config/feature-checks/test-volatile-register-var.c b/tools/perf/config/feature-checks/test-volatile-register-var.c
deleted file mode 100644
index c9f398d..0000000
--- a/tools/perf/config/feature-checks/test-volatile-register-var.c
+++ /dev/null
@@ -1,6 +0,0 @@
-#include <stdio.h>
-
-int main(void)
-{
- return puts("hi");
-}
diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak
index f168deb..4d985e0 100644
--- a/tools/perf/config/utilities.mak
+++ b/tools/perf/config/utilities.mak
@@ -178,10 +178,3 @@ endef
_ge_attempt = $(if $(get-executable),$(get-executable),$(_gea_warn)$(call _gea_err,$(2)))
_gea_warn = $(warning The path '$(1)' is not executable.)
_gea_err = $(if $(1),$(error Please set '$(1)' appropriately))
-
-ifneq ($(findstring $(MAKEFLAGS),s),s)
- ifneq ($(V),1)
- QUIET_CLEAN = @printf ' CLEAN %s\n' $1;
- QUIET_INSTALL = @printf ' INSTALL %s\n' $1;
- endif
-endif
diff --git a/tools/perf/bash_completion b/tools/perf/perf-completion.sh
index 62e157db..496e2ab 100644
--- a/tools/perf/bash_completion
+++ b/tools/perf/perf-completion.sh
@@ -1,4 +1,4 @@
-# perf completion
+# perf bash and zsh completion
# Taken from git.git's completion script.
__my_reassemble_comp_words_by_ref()
@@ -89,37 +89,117 @@ __ltrim_colon_completions()
fi
}
-type perf &>/dev/null &&
-_perf()
+__perfcomp ()
{
- local cur words cword prev cmd
+ COMPREPLY=( $( compgen -W "$1" -- "$2" ) )
+}
- COMPREPLY=()
- _get_comp_words_by_ref -n =: cur words cword prev
+__perfcomp_colon ()
+{
+ __perfcomp "$1" "$2"
+ __ltrim_colon_completions $cur
+}
+
+__perf_main ()
+{
+ local cmd
cmd=${words[0]}
+ COMPREPLY=()
# List perf subcommands or long options
if [ $cword -eq 1 ]; then
if [[ $cur == --* ]]; then
- COMPREPLY=( $( compgen -W '--help --version \
+ __perfcomp '--help --version \
--exec-path --html-path --paginate --no-pager \
- --perf-dir --work-tree --debugfs-dir' -- "$cur" ) )
+ --perf-dir --work-tree --debugfs-dir' -- "$cur"
else
cmds=$($cmd --list-cmds)
- COMPREPLY=( $( compgen -W '$cmds' -- "$cur" ) )
+ __perfcomp "$cmds" "$cur"
fi
# List possible events for -e option
elif [[ $prev == "-e" && "${words[1]}" == @(record|stat|top) ]]; then
evts=$($cmd list --raw-dump)
- COMPREPLY=( $( compgen -W '$evts' -- "$cur" ) )
- __ltrim_colon_completions $cur
+ __perfcomp_colon "$evts" "$cur"
+ # List subcommands for 'perf kvm'
+ elif [[ $prev == "kvm" ]]; then
+ subcmds="top record report diff buildid-list stat"
+ __perfcomp_colon "$subcmds" "$cur"
# List long option names
elif [[ $cur == --* ]]; then
subcmd=${words[1]}
opts=$($cmd $subcmd --list-opts)
- COMPREPLY=( $( compgen -W '$opts' -- "$cur" ) )
+ __perfcomp "$opts" "$cur"
fi
+}
+
+if [[ -n ${ZSH_VERSION-} ]]; then
+ autoload -U +X compinit && compinit
+
+ __perfcomp ()
+ {
+ emulate -L zsh
+
+ local c IFS=$' \t\n'
+ local -a array
+
+ for c in ${=1}; do
+ case $c in
+ --*=*|*.) ;;
+ *) c="$c " ;;
+ esac
+ array[${#array[@]}+1]="$c"
+ done
+
+ compset -P '*[=:]'
+ compadd -Q -S '' -a -- array && _ret=0
+ }
+
+ __perfcomp_colon ()
+ {
+ emulate -L zsh
+
+ local cur_="${2-$cur}"
+ local c IFS=$' \t\n'
+ local -a array
+
+ if [[ "$cur_" == *:* ]]; then
+ local colon_word=${cur_%"${cur_##*:}"}
+ fi
+
+ for c in ${=1}; do
+ case $c in
+ --*=*|*.) ;;
+ *) c="$c " ;;
+ esac
+ array[$#array+1]=${c#"$colon_word"}
+ done
+
+ compset -P '*[=:]'
+ compadd -Q -S '' -a -- array && _ret=0
+ }
+
+ _perf ()
+ {
+ local _ret=1 cur cword prev
+ cur=${words[CURRENT]}
+ prev=${words[CURRENT-1]}
+ let cword=CURRENT-1
+ emulate ksh -c __perf_main
+ let _ret && _default && _ret=0
+ return _ret
+ }
+
+ compdef _perf perf
+ return
+fi
+
+type perf &>/dev/null &&
+_perf()
+{
+ local cur words cword prev
+ _get_comp_words_by_ref -n =: cur words cword prev
+ __perf_main
} &&
complete -o bashdefault -o default -o nospace -F _perf perf 2>/dev/null \
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 8b38b4e..431798a 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -13,7 +13,7 @@
#include "util/quote.h"
#include "util/run-command.h"
#include "util/parse-events.h"
-#include <lk/debugfs.h>
+#include <api/fs/debugfs.h>
#include <pthread.h>
const char perf_usage_string[] =
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index b079304..3c2f213 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -247,13 +247,14 @@ enum perf_call_graph_mode {
CALLCHAIN_DWARF
};
-struct perf_record_opts {
+struct record_opts {
struct target target;
int call_graph;
bool group;
bool inherit_stat;
- bool no_delay;
+ bool no_buffering;
bool no_inherit;
+ bool no_inherit_set;
bool no_samples;
bool raw_samples;
bool sample_address;
@@ -268,6 +269,7 @@ struct perf_record_opts {
u64 user_interval;
u16 stack_dump_size;
bool sample_transaction;
+ unsigned initial_delay;
};
#endif
diff --git a/tools/perf/tests/attr/test-record-no-inherit b/tools/perf/tests/attr/test-record-no-inherit
index 9079a25..44edcb2 100644
--- a/tools/perf/tests/attr/test-record-no-inherit
+++ b/tools/perf/tests/attr/test-record-no-inherit
@@ -3,5 +3,5 @@ command = record
args = -i kill >/dev/null 2>&1
[event:base-record]
-sample_type=259
+sample_type=263
inherit=0
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 85d4919..653a8fe 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -391,7 +391,7 @@ static int do_test_code_reading(bool try_kcore)
struct machines machines;
struct machine *machine;
struct thread *thread;
- struct perf_record_opts opts = {
+ struct record_opts opts = {
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
@@ -540,14 +540,11 @@ static int do_test_code_reading(bool try_kcore)
err = TEST_CODE_READING_OK;
out_err:
if (evlist) {
- perf_evlist__munmap(evlist);
- perf_evlist__close(evlist);
perf_evlist__delete(evlist);
- }
- if (cpus)
+ } else {
cpu_map__delete(cpus);
- if (threads)
thread_map__delete(threads);
+ }
machines__destroy_kernel_maps(&machines);
machine__delete_threads(machine);
machines__exit(&machines);
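
The error-path rework in this and the following test files follows one idiom: the old ladder of labels (out_munmap, out_close_fd, out_free_evlist, ...) collapses into a single out_delete_evlist, on the assumption implied by the patch that perf_evlist__delete() now also unmaps, closes and frees the maps it owns. A minimal sketch of the shape, with placeholder evlist functions:

/* Sketch of the single-cleanup-label idiom (placeholder evlist API). */
#include <stdio.h>
#include <stdlib.h>

struct evlist { int dummy; };

static struct evlist *evlist_new(void)	{ return calloc(1, sizeof(struct evlist)); }
static int evlist_open(struct evlist *e)	{ (void)e; return 0; }	/* pretend success */
static int evlist_mmap(struct evlist *e)	{ (void)e; return -1; }	/* pretend failure */
static void evlist_delete(struct evlist *e)	{ free(e); }		/* assumed to undo open + mmap too */

static int run_test(void)
{
	int err = -1;
	struct evlist *evlist = evlist_new();

	if (evlist == NULL)
		goto out;

	if (evlist_open(evlist) < 0)
		goto out_delete_evlist;
	if (evlist_mmap(evlist) < 0)
		goto out_delete_evlist;

	err = 0;		/* ... real work ... */
out_delete_evlist:
	evlist_delete(evlist);	/* one label, one teardown call */
out:
	return err;
}

int main(void)
{
	printf("run_test() = %d\n", run_test());
	return 0;
}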
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
index 0197bda..465cdbc 100644
--- a/tools/perf/tests/evsel-roundtrip-name.c
+++ b/tools/perf/tests/evsel-roundtrip-name.c
@@ -79,7 +79,7 @@ static int __perf_evsel__name_array_test(const char *names[], int nr_names)
}
err = 0;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) {
--err;
pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]);
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 173bf42..2b6519e 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -208,7 +208,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
* However the second evsel also has a collapsed entry for
* "bash [libc] malloc" so total 9 entries will be in the tree.
*/
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
const union perf_event event = {
.header = {
@@ -466,7 +466,7 @@ int test__hists_link(void)
if (err < 0)
goto out;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
hists__collapse_resort(&evsel->hists, NULL);
if (verbose > 2)
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index 376c356..497957f 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -51,7 +51,7 @@ static int find_comm(struct perf_evlist *evlist, const char *comm)
*/
int test__keep_tracking(void)
{
- struct perf_record_opts opts = {
+ struct record_opts opts = {
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
@@ -142,14 +142,11 @@ int test__keep_tracking(void)
out_err:
if (evlist) {
perf_evlist__disable(evlist);
- perf_evlist__munmap(evlist);
- perf_evlist__close(evlist);
perf_evlist__delete(evlist);
- }
- if (cpus)
+ } else {
cpu_map__delete(cpus);
- if (threads)
thread_map__delete(threads);
+ }
return err;
}
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 2ca0abf..00544b8 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -1,6 +1,16 @@
PERF := .
MK := Makefile
+include config/Makefile.arch
+
+# FIXME looks like x86 is the only arch running tests ;-)
+# we need some IS_(32/64) flag to make this generic
+ifeq ($(IS_X86_64),1)
+lib = lib64
+else
+lib = lib
+endif
+
has = $(shell which $1 2>/dev/null)
# standard single make variable specified
@@ -106,10 +116,36 @@ test_make_python_perf_so := test -f $(PERF)/python/perf.so
test_make_perf_o := test -f $(PERF)/perf.o
test_make_util_map_o := test -f $(PERF)/util/map.o
-test_make_install := test -x $$TMP_DEST/bin/perf
-test_make_install_O := $(test_make_install)
-test_make_install_bin := $(test_make_install)
-test_make_install_bin_O := $(test_make_install)
+define test_dest_files
+ for file in $(1); do \
+ if [ ! -x $$TMP_DEST/$$file ]; then \
+ echo " failed to find: $$file"; \
+ fi \
+ done
+endef
+
+installed_files_bin := bin/perf
+installed_files_bin += etc/bash_completion.d/perf
+installed_files_bin += libexec/perf-core/perf-archive
+
+installed_files_plugins := $(lib)/traceevent/plugins/plugin_cfg80211.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_scsi.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_xen.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_function.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_sched_switch.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_mac80211.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_kvm.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_kmem.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_hrtimer.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_jbd2.so
+
+installed_files_all := $(installed_files_bin)
+installed_files_all += $(installed_files_plugins)
+
+test_make_install := $(call test_dest_files,$(installed_files_all))
+test_make_install_O := $(call test_dest_files,$(installed_files_all))
+test_make_install_bin := $(call test_dest_files,$(installed_files_bin))
+test_make_install_bin_O := $(call test_dest_files,$(installed_files_bin))
# FIXME nothing gets installed
test_make_install_man := test -f $$TMP_DEST/share/man/man1/perf.1
@@ -162,7 +198,7 @@ $(run):
cmd="cd $(PERF) && make -f $(MK) DESTDIR=$$TMP_DEST $($@)"; \
echo "- $@: $$cmd" && echo $$cmd > $@ && \
( eval $$cmd ) >> $@ 2>&1; \
- echo " test: $(call test,$@)"; \
+ echo " test: $(call test,$@)" >> $@ 2>&1; \
$(call test,$@) && \
rm -f $@ \
rm -rf $$TMP_DEST
@@ -174,16 +210,22 @@ $(run_O):
cmd="cd $(PERF) && make -f $(MK) O=$$TMP_O DESTDIR=$$TMP_DEST $($(patsubst %_O,%,$@))"; \
echo "- $@: $$cmd" && echo $$cmd > $@ && \
( eval $$cmd ) >> $@ 2>&1 && \
- echo " test: $(call test_O,$@)"; \
+ echo " test: $(call test_O,$@)" >> $@ 2>&1; \
$(call test_O,$@) && \
rm -f $@ && \
rm -rf $$TMP_O \
rm -rf $$TMP_DEST
-all: $(run) $(run_O)
+tarpkg:
+ @cmd="$(PERF)/tests/perf-targz-src-pkg $(PERF)"; \
+ echo "- $@: $$cmd" && echo $$cmd > $@ && \
+ ( eval $$cmd ) >> $@ 2>&1
+
+
+all: $(run) $(run_O) tarpkg
@echo OK
out: $(run_O)
@echo OK
-.PHONY: all $(run) $(run_O) clean
+.PHONY: all $(run) $(run_O) tarpkg clean
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index d64ab79..1422634 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -68,7 +68,7 @@ int test__basic_mmap(void)
evsels[i] = perf_evsel__newtp("syscalls", name);
if (evsels[i] == NULL) {
pr_debug("perf_evsel__new\n");
- goto out_free_evlist;
+ goto out_delete_evlist;
}
evsels[i]->attr.wakeup_events = 1;
@@ -80,7 +80,7 @@ int test__basic_mmap(void)
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
- goto out_close_fd;
+ goto out_delete_evlist;
}
nr_events[i] = 0;
@@ -90,7 +90,7 @@ int test__basic_mmap(void)
if (perf_evlist__mmap(evlist, 128, true) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
strerror(errno));
- goto out_close_fd;
+ goto out_delete_evlist;
}
for (i = 0; i < nsyscalls; ++i)
@@ -105,13 +105,13 @@ int test__basic_mmap(void)
if (event->header.type != PERF_RECORD_SAMPLE) {
pr_debug("unexpected %s event\n",
perf_event__name(event->header.type));
- goto out_munmap;
+ goto out_delete_evlist;
}
err = perf_evlist__parse_sample(evlist, event, &sample);
if (err) {
pr_err("Can't parse sample, err = %d\n", err);
- goto out_munmap;
+ goto out_delete_evlist;
}
err = -1;
@@ -119,30 +119,27 @@ int test__basic_mmap(void)
if (evsel == NULL) {
pr_debug("event with id %" PRIu64
" doesn't map to an evsel\n", sample.id);
- goto out_munmap;
+ goto out_delete_evlist;
}
nr_events[evsel->idx]++;
perf_evlist__mmap_consume(evlist, 0);
}
err = 0;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
pr_debug("expected %d %s events, got %d\n",
expected_nr_events[evsel->idx],
perf_evsel__name(evsel), nr_events[evsel->idx]);
err = -1;
- goto out_munmap;
+ goto out_delete_evlist;
}
}
-out_munmap:
- perf_evlist__munmap(evlist);
-out_close_fd:
- for (i = 0; i < nsyscalls; ++i)
- perf_evsel__close_fd(evsels[i], 1, threads->nr);
-out_free_evlist:
+out_delete_evlist:
perf_evlist__delete(evlist);
+ cpus = NULL;
+ threads = NULL;
out_free_cpus:
cpu_map__delete(cpus);
out_free_threads:
diff --git a/tools/perf/tests/open-syscall-tp-fields.c b/tools/perf/tests/open-syscall-tp-fields.c
index 41cc0ba..c505ef2 100644
--- a/tools/perf/tests/open-syscall-tp-fields.c
+++ b/tools/perf/tests/open-syscall-tp-fields.c
@@ -6,15 +6,15 @@
int test__syscall_open_tp_fields(void)
{
- struct perf_record_opts opts = {
+ struct record_opts opts = {
.target = {
.uid = UINT_MAX,
.uses_mmap = true,
},
- .no_delay = true,
- .freq = 1,
- .mmap_pages = 256,
- .raw_samples = true,
+ .no_buffering = true,
+ .freq = 1,
+ .mmap_pages = 256,
+ .raw_samples = true,
};
const char *filename = "/etc/passwd";
int flags = O_RDONLY | O_DIRECTORY;
@@ -48,13 +48,13 @@ int test__syscall_open_tp_fields(void)
err = perf_evlist__open(evlist);
if (err < 0) {
pr_debug("perf_evlist__open: %s\n", strerror(errno));
- goto out_delete_maps;
+ goto out_delete_evlist;
}
err = perf_evlist__mmap(evlist, UINT_MAX, false);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
- goto out_close_evlist;
+ goto out_delete_evlist;
}
perf_evlist__enable(evlist);
@@ -85,7 +85,7 @@ int test__syscall_open_tp_fields(void)
err = perf_evsel__parse_sample(evsel, event, &sample);
if (err) {
pr_err("Can't parse sample, err = %d\n", err);
- goto out_munmap;
+ goto out_delete_evlist;
}
tp_flags = perf_evsel__intval(evsel, &sample, "flags");
@@ -93,7 +93,7 @@ int test__syscall_open_tp_fields(void)
if (flags != tp_flags) {
pr_debug("%s: Expected flags=%#x, got %#x\n",
__func__, flags, tp_flags);
- goto out_munmap;
+ goto out_delete_evlist;
}
goto out_ok;
@@ -105,17 +105,11 @@ int test__syscall_open_tp_fields(void)
if (++nr_polls > 5) {
pr_debug("%s: no events!\n", __func__);
- goto out_munmap;
+ goto out_delete_evlist;
}
}
out_ok:
err = 0;
-out_munmap:
- perf_evlist__munmap(evlist);
-out_close_evlist:
- perf_evlist__close(evlist);
-out_delete_maps:
- perf_evlist__delete_maps(evlist);
out_delete_evlist:
perf_evlist__delete(evlist);
out:
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 3cbd104..4db0ae6 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -3,7 +3,7 @@
#include "evsel.h"
#include "evlist.h"
#include "fs.h"
-#include <lk/debugfs.h>
+#include <api/fs/debugfs.h>
#include "tests.h"
#include <linux/hw_breakpoint.h>
@@ -30,7 +30,7 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);
TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups);
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
TEST_ASSERT_VAL("wrong type",
PERF_TYPE_TRACEPOINT == evsel->attr.type);
TEST_ASSERT_VAL("wrong sample_type",
@@ -201,7 +201,7 @@ test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
TEST_ASSERT_VAL("wrong exclude_user",
!evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel",
@@ -1385,10 +1385,10 @@ static int test_event(struct evlist_test *e)
if (ret) {
pr_debug("failed to parse event '%s', err %d\n",
e->name, ret);
- return ret;
+ } else {
+ ret = e->check(evlist);
}
-
- ret = e->check(evlist);
+
perf_evlist__delete(evlist);
return ret;
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 93a62b0..aca1a83 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -34,14 +34,14 @@ realloc:
int test__PERF_RECORD(void)
{
- struct perf_record_opts opts = {
+ struct record_opts opts = {
.target = {
.uid = UINT_MAX,
.uses_mmap = true,
},
- .no_delay = true,
- .freq = 10,
- .mmap_pages = 256,
+ .no_buffering = true,
+ .freq = 10,
+ .mmap_pages = 256,
};
cpu_set_t cpu_mask;
size_t cpu_mask_size = sizeof(cpu_mask);
@@ -83,11 +83,10 @@ int test__PERF_RECORD(void)
* so that we have time to open the evlist (calling sys_perf_event_open
* on all the fds) and then mmap them.
*/
- err = perf_evlist__prepare_workload(evlist, &opts.target, argv,
- false, false);
+ err = perf_evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
if (err < 0) {
pr_debug("Couldn't run the workload!\n");
- goto out_delete_maps;
+ goto out_delete_evlist;
}
/*
@@ -102,7 +101,7 @@ int test__PERF_RECORD(void)
err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
if (err < 0) {
pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
- goto out_delete_maps;
+ goto out_delete_evlist;
}
cpu = err;
@@ -112,7 +111,7 @@ int test__PERF_RECORD(void)
*/
if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
pr_debug("sched_setaffinity: %s\n", strerror(errno));
- goto out_delete_maps;
+ goto out_delete_evlist;
}
/*
@@ -122,7 +121,7 @@ int test__PERF_RECORD(void)
err = perf_evlist__open(evlist);
if (err < 0) {
pr_debug("perf_evlist__open: %s\n", strerror(errno));
- goto out_delete_maps;
+ goto out_delete_evlist;
}
/*
@@ -133,7 +132,7 @@ int test__PERF_RECORD(void)
err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
- goto out_close_evlist;
+ goto out_delete_evlist;
}
/*
@@ -166,7 +165,7 @@ int test__PERF_RECORD(void)
if (verbose)
perf_event__fprintf(event, stderr);
pr_debug("Couldn't parse sample\n");
- goto out_err;
+ goto out_delete_evlist;
}
if (verbose) {
@@ -303,12 +302,6 @@ found_exit:
pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
++errs;
}
-out_err:
- perf_evlist__munmap(evlist);
-out_close_evlist:
- perf_evlist__close(evlist);
-out_delete_maps:
- perf_evlist__delete_maps(evlist);
out_delete_evlist:
perf_evlist__delete(evlist);
out:
diff --git a/tools/perf/tests/perf-targz-src-pkg b/tools/perf/tests/perf-targz-src-pkg
new file mode 100755
index 0000000..238aa39
--- /dev/null
+++ b/tools/perf/tests/perf-targz-src-pkg
@@ -0,0 +1,21 @@
+#!/bin/sh
+# Test one of the main kernel Makefile targets to generate a perf sources tarball
+# suitable for build outside the full kernel sources.
+#
+# This is to test that the tools/perf/MANIFEST file lists all the files needed to
+# be in such tarball, which sometimes gets broken when we move files around,
+# like when we made some files that were in tools/perf/ available to other tools/
+# codebases by moving it to tools/include/, etc.
+
+PERF=$1
+cd ${PERF}/../..
+make perf-targz-src-pkg > /dev/null
+TARBALL=$(ls -rt perf-*.tar.gz)
+TMP_DEST=$(mktemp -d)
+tar xf ${TARBALL} -C $TMP_DEST
+rm -f ${TARBALL}
+cd - > /dev/null
+make -C $TMP_DEST/perf*/tools/perf > /dev/null 2>&1
+RC=$?
+rm -rf ${TMP_DEST}
+exit $RC
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
index 4ca1b93..47146d3 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -46,7 +46,7 @@ static u64 rdtsc(void)
*/
int test__perf_time_to_tsc(void)
{
- struct perf_record_opts opts = {
+ struct record_opts opts = {
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
@@ -166,14 +166,8 @@ next_event:
out_err:
if (evlist) {
perf_evlist__disable(evlist);
- perf_evlist__munmap(evlist);
- perf_evlist__close(evlist);
perf_evlist__delete(evlist);
}
- if (cpus)
- cpu_map__delete(cpus);
- if (threads)
- thread_map__delete(threads);
return err;
}
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 6664a7c..983d6b8 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -45,7 +45,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
evsel = perf_evsel__new(&attr);
if (evsel == NULL) {
pr_debug("perf_evsel__new\n");
- goto out_free_evlist;
+ goto out_delete_evlist;
}
perf_evlist__add(evlist, evsel);
@@ -54,7 +54,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
if (!evlist->cpus || !evlist->threads) {
err = -ENOMEM;
pr_debug("Not enough memory to create thread/cpu maps\n");
- goto out_delete_maps;
+ goto out_delete_evlist;
}
if (perf_evlist__open(evlist)) {
@@ -63,14 +63,14 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
err = -errno;
pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n",
strerror(errno), knob, (u64)attr.sample_freq);
- goto out_delete_maps;
+ goto out_delete_evlist;
}
err = perf_evlist__mmap(evlist, 128, true);
if (err < 0) {
pr_debug("failed to mmap event: %d (%s)\n", errno,
strerror(errno));
- goto out_close_evlist;
+ goto out_delete_evlist;
}
perf_evlist__enable(evlist);
@@ -90,7 +90,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
err = perf_evlist__parse_sample(evlist, event, &sample);
if (err < 0) {
pr_debug("Error during parse sample\n");
- goto out_unmap_evlist;
+ goto out_delete_evlist;
}
total_periods += sample.period;
@@ -105,13 +105,7 @@ next_event:
err = -1;
}
-out_unmap_evlist:
- perf_evlist__munmap(evlist);
-out_close_evlist:
- perf_evlist__close(evlist);
-out_delete_maps:
- perf_evlist__delete_maps(evlist);
-out_free_evlist:
+out_delete_evlist:
perf_evlist__delete(evlist);
return err;
}
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index d09ab57..5ff3db3 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -9,12 +9,21 @@
static int exited;
static int nr_exit;
-static void sig_handler(int sig)
+static void sig_handler(int sig __maybe_unused)
{
exited = 1;
+}
- if (sig == SIGUSR1)
- nr_exit = -1;
+/*
+ * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
+ * we asked by setting its exec_error to this handler.
+ */
+static void workload_exec_failed_signal(int signo __maybe_unused,
+ siginfo_t *info __maybe_unused,
+ void *ucontext __maybe_unused)
+{
+ exited = 1;
+ nr_exit = -1;
}
/*
@@ -35,7 +44,6 @@ int test__task_exit(void)
const char *argv[] = { "true", NULL };
signal(SIGCHLD, sig_handler);
- signal(SIGUSR1, sig_handler);
evlist = perf_evlist__new_default();
if (evlist == NULL) {
@@ -54,13 +62,14 @@ int test__task_exit(void)
if (!evlist->cpus || !evlist->threads) {
err = -ENOMEM;
pr_debug("Not enough memory to create thread/cpu maps\n");
- goto out_delete_maps;
+ goto out_delete_evlist;
}
- err = perf_evlist__prepare_workload(evlist, &target, argv, false, true);
+ err = perf_evlist__prepare_workload(evlist, &target, argv, false,
+ workload_exec_failed_signal);
if (err < 0) {
pr_debug("Couldn't run the workload!\n");
- goto out_delete_maps;
+ goto out_delete_evlist;
}
evsel = perf_evlist__first(evlist);
@@ -74,13 +83,13 @@ int test__task_exit(void)
err = perf_evlist__open(evlist);
if (err < 0) {
pr_debug("Couldn't open the evlist: %s\n", strerror(-err));
- goto out_delete_maps;
+ goto out_delete_evlist;
}
if (perf_evlist__mmap(evlist, 128, true) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
strerror(errno));
- goto out_close_evlist;
+ goto out_delete_evlist;
}
perf_evlist__start_workload(evlist);
@@ -103,11 +112,7 @@ retry:
err = -1;
}
- perf_evlist__munmap(evlist);
-out_close_evlist:
- perf_evlist__close(evlist);
-out_delete_maps:
- perf_evlist__delete_maps(evlist);
+out_delete_evlist:
perf_evlist__delete(evlist);
return err;
}
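
The new workload_exec_failed_signal() above has the three-argument sigaction signature, i.e. it is meant to be installed with SA_SIGINFO rather than plain signal(). A hedged sketch of how such an exec-error hook is typically wired up (the SIGUSR1 choice mirrors the comment in the hunk; the rest is illustrative, not perf code):

#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t exec_failed;

/* Three-argument handler, same shape as in the hunk above. */
static void workload_exec_failed_signal(int signo, siginfo_t *info, void *ucontext)
{
	(void)signo; (void)info; (void)ucontext;
	exec_failed = 1;
}

/* Install it with SA_SIGINFO so the siginfo_t argument is delivered. */
static int install_exec_error_handler(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_flags = SA_SIGINFO;
	act.sa_sigaction = workload_exec_failed_signal;
	return sigaction(SIGUSR1, &act, NULL);
}

int main(void)
{
	if (install_exec_error_handler() < 0) {
		perror("sigaction");
		return 1;
	}
	printf("exec-error handler installed (exec_failed=%d)\n", (int)exec_failed);
	return 0;
}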
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index cbaa7af..d11541d 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -256,8 +256,7 @@ int ui_browser__show(struct ui_browser *browser, const char *title,
__ui_browser__show_title(browser, title);
browser->title = title;
- free(browser->helpline);
- browser->helpline = NULL;
+ zfree(&browser->helpline);
va_start(ap, helpline);
err = vasprintf(&browser->helpline, helpline, ap);
@@ -268,12 +267,11 @@ int ui_browser__show(struct ui_browser *browser, const char *title,
return err ? 0 : -1;
}
-void ui_browser__hide(struct ui_browser *browser __maybe_unused)
+void ui_browser__hide(struct ui_browser *browser)
{
pthread_mutex_lock(&ui__lock);
ui_helpline__pop();
- free(browser->helpline);
- browser->helpline = NULL;
+ zfree(&browser->helpline);
pthread_mutex_unlock(&ui__lock);
}
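
The free(x); x = NULL pairs that this and many following hunks collapse into zfree(&x) point at a small free-and-clear helper: it frees the pointee and resets the pointer in one step, so a later double free degrades to free(NULL) and a use-after-free to an obvious NULL dereference instead of silent corruption. A sketch of such a helper (assumed definition, not copied from perf's headers):

#include <stdio.h>
#include <stdlib.h>

/* Free what the pointer points to, then clear the pointer itself. */
#define zfree(pptr)			\
	do {				\
		free(*(pptr));		\
		*(pptr) = NULL;		\
	} while (0)

struct browser { char *helpline; };

int main(void)
{
	struct browser b = { .helpline = malloc(32) };

	zfree(&b.helpline);
	zfree(&b.helpline);	/* second call is free(NULL): a no-op */
	printf("helpline = %p\n", (void *)b.helpline);
	return 0;
}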
diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
index 7d45d2f..118cca2 100644
--- a/tools/perf/ui/browser.h
+++ b/tools/perf/ui/browser.h
@@ -59,6 +59,8 @@ int ui_browser__help_window(struct ui_browser *browser, const char *text);
bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text);
int ui_browser__input_window(const char *title, const char *text, char *input,
const char *exit_msg, int delay_sec);
+struct perf_session_env;
+int tui__header_window(struct perf_session_env *env);
void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence);
unsigned int ui_browser__argv_refresh(struct ui_browser *browser);
diff --git a/tools/perf/ui/browsers/header.c b/tools/perf/ui/browsers/header.c
new file mode 100644
index 0000000..89c16b9
--- /dev/null
+++ b/tools/perf/ui/browsers/header.c
@@ -0,0 +1,127 @@
+#include "util/cache.h"
+#include "util/debug.h"
+#include "ui/browser.h"
+#include "ui/ui.h"
+#include "ui/util.h"
+#include "ui/libslang.h"
+#include "util/header.h"
+#include "util/session.h"
+
+static void ui_browser__argv_write(struct ui_browser *browser,
+ void *entry, int row)
+{
+ char **arg = entry;
+ char *str = *arg;
+ char empty[] = " ";
+ bool current_entry = ui_browser__is_current_entry(browser, row);
+ unsigned long offset = (unsigned long)browser->priv;
+
+ if (offset >= strlen(str))
+ str = empty;
+ else
+ str = str + offset;
+
+ ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
+ HE_COLORSET_NORMAL);
+
+ slsmg_write_nstring(str, browser->width);
+}
+
+static int list_menu__run(struct ui_browser *menu)
+{
+ int key;
+ unsigned long offset;
+ const char help[] =
+ "h/?/F1 Show this window\n"
+ "UP/DOWN/PGUP\n"
+ "PGDN/SPACE\n"
+ "LEFT/RIGHT Navigate\n"
+ "q/ESC/CTRL+C Exit browser";
+
+ if (ui_browser__show(menu, "Header information", "Press 'q' to exit") < 0)
+ return -1;
+
+ while (1) {
+ key = ui_browser__run(menu, 0);
+
+ switch (key) {
+ case K_RIGHT:
+ offset = (unsigned long)menu->priv;
+ offset += 10;
+ menu->priv = (void *)offset;
+ continue;
+ case K_LEFT:
+ offset = (unsigned long)menu->priv;
+ if (offset >= 10)
+ offset -= 10;
+ menu->priv = (void *)offset;
+ continue;
+ case K_F1:
+ case 'h':
+ case '?':
+ ui_browser__help_window(menu, help);
+ continue;
+ case K_ESC:
+ case 'q':
+ case CTRL('c'):
+ key = -1;
+ break;
+ default:
+ continue;
+ }
+
+ break;
+ }
+
+ ui_browser__hide(menu);
+ return key;
+}
+
+static int ui__list_menu(int argc, char * const argv[])
+{
+ struct ui_browser menu = {
+ .entries = (void *)argv,
+ .refresh = ui_browser__argv_refresh,
+ .seek = ui_browser__argv_seek,
+ .write = ui_browser__argv_write,
+ .nr_entries = argc,
+ };
+
+ return list_menu__run(&menu);
+}
+
+int tui__header_window(struct perf_session_env *env)
+{
+ int i, argc = 0;
+ char **argv;
+ struct perf_session *session;
+ char *ptr, *pos;
+ size_t size;
+ FILE *fp = open_memstream(&ptr, &size);
+
+ session = container_of(env, struct perf_session, header.env);
+ perf_header__fprintf_info(session, fp, true);
+ fclose(fp);
+
+ for (pos = ptr, argc = 0; (pos = strchr(pos, '\n')) != NULL; pos++)
+ argc++;
+
+ argv = calloc(argc + 1, sizeof(*argv));
+ if (argv == NULL)
+ goto out;
+
+ argv[0] = pos = ptr;
+ for (i = 1; (pos = strchr(pos, '\n')) != NULL; i++) {
+ *pos++ = '\0';
+ argv[i] = pos;
+ }
+
+ BUG_ON(i != argc + 1);
+
+ ui__list_menu(argc, argv);
+
+out:
+ free(argv);
+ free(ptr);
+ return 0;
+}
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index a440e03..b720b92 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1267,10 +1267,8 @@ static inline void free_popup_options(char **options, int n)
{
int i;
- for (i = 0; i < n; ++i) {
- free(options[i]);
- options[i] = NULL;
- }
+ for (i = 0; i < n; ++i)
+ zfree(&options[i]);
}
/* Check whether the browser is for 'top' or 'report' */
@@ -1329,7 +1327,7 @@ static int switch_data_file(void)
abs_path[nr_options] = strdup(path);
if (!abs_path[nr_options]) {
- free(options[nr_options]);
+ zfree(&options[nr_options]);
ui__warning("Can't search all data files due to memory shortage.\n");
fclose(file);
break;
@@ -1400,6 +1398,36 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
char script_opt[64];
int delay_secs = hbt ? hbt->refresh : 0;
+#define HIST_BROWSER_HELP_COMMON \
+ "h/?/F1 Show this window\n" \
+ "UP/DOWN/PGUP\n" \
+ "PGDN/SPACE Navigate\n" \
+ "q/ESC/CTRL+C Exit browser\n\n" \
+ "For multiple event sessions:\n\n" \
+ "TAB/UNTAB Switch events\n\n" \
+ "For symbolic views (--sort has sym):\n\n" \
+ "-> Zoom into DSO/Threads & Annotate current symbol\n" \
+ "<- Zoom out\n" \
+ "a Annotate current symbol\n" \
+ "C Collapse all callchains\n" \
+ "d Zoom into current DSO\n" \
+ "E Expand all callchains\n" \
+
+ /* help messages are sorted by lexical order of the hotkey */
+ const char report_help[] = HIST_BROWSER_HELP_COMMON
+ "i Show header information\n"
+ "P Print histograms to perf.hist.N\n"
+ "r Run available scripts\n"
+ "s Switch to another data file in PWD\n"
+ "t Zoom into current Thread\n"
+ "V Verbose (DSO names in callchains, etc)\n"
+ "/ Filter symbol by name";
+ const char top_help[] = HIST_BROWSER_HELP_COMMON
+ "P Print histograms to perf.hist.N\n"
+ "t Zoom into current Thread\n"
+ "V Verbose (DSO names in callchains, etc)\n"
+ "/ Filter symbol by name";
+
if (browser == NULL)
return -1;
@@ -1484,29 +1512,16 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
if (is_report_browser(hbt))
goto do_data_switch;
continue;
+ case 'i':
+ /* env->arch is NULL for live-mode (i.e. perf top) */
+ if (env->arch)
+ tui__header_window(env);
+ continue;
case K_F1:
case 'h':
case '?':
ui_browser__help_window(&browser->b,
- "h/?/F1 Show this window\n"
- "UP/DOWN/PGUP\n"
- "PGDN/SPACE Navigate\n"
- "q/ESC/CTRL+C Exit browser\n\n"
- "For multiple event sessions:\n\n"
- "TAB/UNTAB Switch events\n\n"
- "For symbolic views (--sort has sym):\n\n"
- "-> Zoom into DSO/Threads & Annotate current symbol\n"
- "<- Zoom out\n"
- "a Annotate current symbol\n"
- "C Collapse all callchains\n"
- "E Expand all callchains\n"
- "d Zoom into current DSO\n"
- "t Zoom into current Thread\n"
- "r Run available scripts('perf report' only)\n"
- "s Switch to another data file in PWD ('perf report' only)\n"
- "P Print histograms to perf.hist.N\n"
- "V Verbose (DSO names in callchains, etc)\n"
- "/ Filter symbol by name");
+ is_report_browser(hbt) ? report_help : top_help);
continue;
case K_ENTER:
case K_RIGHT:
@@ -1923,7 +1938,7 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
ui_helpline__push("Press ESC to exit");
- list_for_each_entry(pos, &evlist->entries, node) {
+ evlist__for_each(evlist, pos) {
const char *ev_name = perf_evsel__name(pos);
size_t line_len = strlen(ev_name) + 7;
@@ -1955,9 +1970,10 @@ single_entry:
struct perf_evsel *pos;
nr_entries = 0;
- list_for_each_entry(pos, &evlist->entries, node)
+ evlist__for_each(evlist, pos) {
if (perf_evsel__is_group_leader(pos))
nr_entries++;
+ }
if (nr_entries == 1)
goto single_entry;
diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c
index d63c68e..402d2bd 100644
--- a/tools/perf/ui/browsers/scripts.c
+++ b/tools/perf/ui/browsers/scripts.c
@@ -173,8 +173,7 @@ int script_browse(const char *script_opt)
if (script.b.width > AVERAGE_LINE_LEN)
script.b.width = AVERAGE_LINE_LEN;
- if (line)
- free(line);
+ free(line);
pclose(fp);
script.nr_lines = nr_entries;
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 2ca66cc..5b95c44 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -375,7 +375,7 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
gtk_container_add(GTK_CONTAINER(window), vbox);
- list_for_each_entry(pos, &evlist->entries, node) {
+ evlist__for_each(evlist, pos) {
struct hists *hists = &pos->hists;
const char *evname = perf_evsel__name(pos);
GtkWidget *scrolled_window;
diff --git a/tools/perf/ui/gtk/util.c b/tools/perf/ui/gtk/util.c
index 696c1fb..52e7fc4 100644
--- a/tools/perf/ui/gtk/util.c
+++ b/tools/perf/ui/gtk/util.c
@@ -23,8 +23,7 @@ int perf_gtk__deactivate_context(struct perf_gtk_context **ctx)
if (!perf_gtk__is_active_context(*ctx))
return -1;
- free(*ctx);
- *ctx = NULL;
+ zfree(ctx);
return 0;
}
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index c244cb5..831fbb7 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -510,7 +510,7 @@ print_entries:
free(line);
out:
- free(rem_sq_bracket);
+ zfree(&rem_sq_bracket);
return ret;
}
diff --git a/tools/perf/ui/tui/util.c b/tools/perf/ui/tui/util.c
index 092902e..bf890f7 100644
--- a/tools/perf/ui/tui/util.c
+++ b/tools/perf/ui/tui/util.c
@@ -92,6 +92,8 @@ int ui_browser__input_window(const char *title, const char *text, char *input,
t = sep + 1;
}
+ pthread_mutex_lock(&ui__lock);
+
max_len += 2;
nr_lines += 8;
y = SLtt_Screen_Rows / 2 - nr_lines / 2;
@@ -120,13 +122,19 @@ int ui_browser__input_window(const char *title, const char *text, char *input,
SLsmg_write_nstring((char *)exit_msg, max_len);
SLsmg_refresh();
+ pthread_mutex_unlock(&ui__lock);
+
x += 2;
len = 0;
key = ui__getch(delay_secs);
while (key != K_TIMER && key != K_ENTER && key != K_ESC) {
+ pthread_mutex_lock(&ui__lock);
+
if (key == K_BKSPC) {
- if (len == 0)
+ if (len == 0) {
+ pthread_mutex_unlock(&ui__lock);
goto next_key;
+ }
SLsmg_gotorc(y, x + --len);
SLsmg_write_char(' ');
} else {
@@ -136,6 +144,8 @@ int ui_browser__input_window(const char *title, const char *text, char *input,
}
SLsmg_refresh();
+ pthread_mutex_unlock(&ui__lock);
+
/* XXX more graceful overflow handling needed */
if (len == sizeof(buf) - 1) {
ui_helpline__push("maximum size of symbol name reached!");
@@ -174,6 +184,8 @@ int ui__question_window(const char *title, const char *text,
t = sep + 1;
}
+ pthread_mutex_lock(&ui__lock);
+
max_len += 2;
nr_lines += 4;
y = SLtt_Screen_Rows / 2 - nr_lines / 2,
@@ -195,6 +207,9 @@ int ui__question_window(const char *title, const char *text,
SLsmg_gotorc(y + nr_lines - 1, x);
SLsmg_write_nstring((char *)exit_msg, max_len);
SLsmg_refresh();
+
+ pthread_mutex_unlock(&ui__lock);
+
return ui__getch(delay_secs);
}
@@ -215,9 +230,7 @@ static int __ui__warning(const char *title, const char *format, va_list args)
if (vasprintf(&s, format, args) > 0) {
int key;
- pthread_mutex_lock(&ui__lock);
key = ui__question_window(title, s, "Press any key...", 0);
- pthread_mutex_unlock(&ui__lock);
free(s);
return key;
}
diff --git a/tools/perf/util/alias.c b/tools/perf/util/alias.c
index e6d1347..c0b43ee 100644
--- a/tools/perf/util/alias.c
+++ b/tools/perf/util/alias.c
@@ -55,8 +55,7 @@ int split_cmdline(char *cmdline, const char ***argv)
src++;
c = cmdline[src];
if (!c) {
- free(*argv);
- *argv = NULL;
+ zfree(argv);
return error("cmdline ends with \\");
}
}
@@ -68,8 +67,7 @@ int split_cmdline(char *cmdline, const char ***argv)
cmdline[dst] = 0;
if (quoted) {
- free(*argv);
- *argv = NULL;
+ zfree(argv);
return error("unclosed quote");
}
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index cf6242c..469eb67 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -26,10 +26,10 @@ static int disasm_line__parse(char *line, char **namep, char **rawp);
static void ins__delete(struct ins_operands *ops)
{
- free(ops->source.raw);
- free(ops->source.name);
- free(ops->target.raw);
- free(ops->target.name);
+ zfree(&ops->source.raw);
+ zfree(&ops->source.name);
+ zfree(&ops->target.raw);
+ zfree(&ops->target.name);
}
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
@@ -185,8 +185,7 @@ static int lock__parse(struct ins_operands *ops)
return 0;
out_free_ops:
- free(ops->locked.ops);
- ops->locked.ops = NULL;
+ zfree(&ops->locked.ops);
return 0;
}
@@ -205,9 +204,9 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
static void lock__delete(struct ins_operands *ops)
{
- free(ops->locked.ops);
- free(ops->target.raw);
- free(ops->target.name);
+ zfree(&ops->locked.ops);
+ zfree(&ops->target.raw);
+ zfree(&ops->target.name);
}
static struct ins_ops lock_ops = {
@@ -256,8 +255,7 @@ static int mov__parse(struct ins_operands *ops)
return 0;
out_free_source:
- free(ops->source.raw);
- ops->source.raw = NULL;
+ zfree(&ops->source.raw);
return -1;
}
@@ -464,17 +462,12 @@ void symbol__annotate_zero_histograms(struct symbol *sym)
pthread_mutex_unlock(&notes->lock);
}
-int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
- int evidx, u64 addr)
+static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+ struct annotation *notes, int evidx, u64 addr)
{
unsigned offset;
- struct annotation *notes;
struct sym_hist *h;
- notes = symbol__annotation(sym);
- if (notes->src == NULL)
- return -ENOMEM;
-
pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
if (addr < sym->start || addr > sym->end)
@@ -491,6 +484,33 @@ int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
return 0;
}
+static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+ int evidx, u64 addr)
+{
+ struct annotation *notes;
+
+ if (sym == NULL || use_browser != 1 || !sort__has_sym)
+ return 0;
+
+ notes = symbol__annotation(sym);
+ if (notes->src == NULL) {
+ if (symbol__alloc_hist(sym) < 0)
+ return -ENOMEM;
+ }
+
+ return __symbol__inc_addr_samples(sym, map, notes, evidx, addr);
+}
+
+int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx)
+{
+ return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr);
+}
+
+int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
+{
+ return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
+}
+
static void disasm_line__init_ins(struct disasm_line *dl)
{
dl->ins = ins__find(dl->name);
@@ -538,8 +558,7 @@ static int disasm_line__parse(char *line, char **namep, char **rawp)
return 0;
out_free_name:
- free(*namep);
- *namep = NULL;
+ zfree(namep);
return -1;
}
@@ -564,7 +583,7 @@ static struct disasm_line *disasm_line__new(s64 offset, char *line, size_t privs
return dl;
out_free_line:
- free(dl->line);
+ zfree(&dl->line);
out_delete:
free(dl);
return NULL;
@@ -572,8 +591,8 @@ out_delete:
void disasm_line__free(struct disasm_line *dl)
{
- free(dl->line);
- free(dl->name);
+ zfree(&dl->line);
+ zfree(&dl->name);
if (dl->ins && dl->ins->ops->free)
dl->ins->ops->free(&dl->ops);
else
@@ -900,7 +919,7 @@ fallback:
* cache, or is just a kallsyms file, well, lets hope that this
* DSO is the same as when 'perf record' ran.
*/
- filename = dso->long_name;
+ filename = (char *)dso->long_name;
snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
symbol_conf.symfs, filename);
free_filename = false;
@@ -1091,8 +1110,7 @@ static void symbol__free_source_line(struct symbol *sym, int len)
src_line = (void *)src_line + sizeof_src_line;
}
- free(notes->src->lines);
- notes->src->lines = NULL;
+ zfree(&notes->src->lines);
}
/* Get the filename:line for the colored entries */
@@ -1376,3 +1394,8 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map,
return 0;
}
+
+int hist_entry__annotate(struct hist_entry *he, size_t privsize)
+{
+ return symbol__annotate(he->ms.sym, he->ms.map, privsize);
+}
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 834b7b5..b2aef59 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -132,12 +132,17 @@ static inline struct annotation *symbol__annotation(struct symbol *sym)
return &a->annotation;
}
-int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
- int evidx, u64 addr);
+int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx);
+
+int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 addr);
+
int symbol__alloc_hist(struct symbol *sym);
void symbol__annotate_zero_histograms(struct symbol *sym);
int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
+
+int hist_entry__annotate(struct hist_entry *he, size_t privsize);
+
int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym);
int symbol__annotate_printf(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool full_paths,
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index a92770c..6baabe6 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -89,7 +89,7 @@ int build_id__sprintf(const u8 *build_id, int len, char *bf)
return raw - build_id;
}
-char *dso__build_id_filename(struct dso *dso, char *bf, size_t size)
+char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
{
char build_id_hex[BUILD_ID_SIZE * 2 + 1];
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 929f28a..845ef86 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -10,7 +10,7 @@ extern struct perf_tool build_id__mark_dso_hit_ops;
struct dso;
int build_id__sprintf(const u8 *build_id, int len, char *bf);
-char *dso__build_id_filename(struct dso *dso, char *bf, size_t size);
+char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct perf_evsel *evsel,
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index e3970e3..8d9db45 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -15,8 +15,12 @@
#include <errno.h>
#include <math.h>
+#include "asm/bug.h"
+
#include "hist.h"
#include "util.h"
+#include "sort.h"
+#include "machine.h"
#include "callchain.h"
__thread struct callchain_cursor callchain_cursor;
@@ -356,19 +360,14 @@ append_chain_children(struct callchain_node *root,
/* lookup in childrens */
while (*p) {
s64 ret;
- struct callchain_list *cnode;
parent = *p;
rnode = rb_entry(parent, struct callchain_node, rb_node_in);
- cnode = list_first_entry(&rnode->val, struct callchain_list,
- list);
- /* just check first entry */
- ret = match_chain(node, cnode);
- if (ret == 0) {
- append_chain(rnode, cursor, period);
+ /* If at least first entry matches, rely to children */
+ ret = append_chain(rnode, cursor, period);
+ if (ret == 0)
goto inc_children_hit;
- }
if (ret < 0)
p = &parent->rb_left;
@@ -389,11 +388,11 @@ append_chain(struct callchain_node *root,
struct callchain_cursor *cursor,
u64 period)
{
- struct callchain_cursor_node *curr_snap = cursor->curr;
struct callchain_list *cnode;
u64 start = cursor->pos;
bool found = false;
u64 matches;
+ int cmp = 0;
/*
* Lookup in the current node
@@ -408,7 +407,8 @@ append_chain(struct callchain_node *root,
if (!node)
break;
- if (match_chain(node, cnode) != 0)
+ cmp = match_chain(node, cnode);
+ if (cmp)
break;
found = true;
@@ -418,9 +418,8 @@ append_chain(struct callchain_node *root,
/* matches not, relay no the parent */
if (!found) {
- cursor->curr = curr_snap;
- cursor->pos = start;
- return -1;
+ WARN_ONCE(!cmp, "Chain comparison error\n");
+ return cmp;
}
matches = cursor->pos - start;
@@ -531,3 +530,24 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
return 0;
}
+
+int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent,
+ struct perf_evsel *evsel, struct addr_location *al,
+ int max_stack)
+{
+ if (sample->callchain == NULL)
+ return 0;
+
+ if (symbol_conf.use_callchain || sort__has_parent) {
+ return machine__resolve_callchain(al->machine, evsel, al->thread,
+ sample, parent, al, max_stack);
+ }
+ return 0;
+}
+
+int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
+{
+ if (!symbol_conf.use_callchain)
+ return 0;
+ return callchain_append(he->callchain, &callchain_cursor, sample->period);
+}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 4f7f989..8ad97e9 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -145,10 +145,16 @@ static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
}
struct option;
+struct hist_entry;
-int record_parse_callchain(const char *arg, struct perf_record_opts *opts);
+int record_parse_callchain(const char *arg, struct record_opts *opts);
int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
int record_callchain_opt(const struct option *opt, const char *arg, int unset);
+int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent,
+ struct perf_evsel *evsel, struct addr_location *al,
+ int max_stack);
+int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample);
+
extern const char record_callchain_help[];
#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index 96bbda1..88f7be3 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -81,7 +81,7 @@ static int add_cgroup(struct perf_evlist *evlist, char *str)
/*
* check if cgrp is already defined, if so we reuse it
*/
- list_for_each_entry(counter, &evlist->entries, node) {
+ evlist__for_each(evlist, counter) {
cgrp = counter->cgrp;
if (!cgrp)
continue;
@@ -110,7 +110,7 @@ static int add_cgroup(struct perf_evlist *evlist, char *str)
* if add cgroup N, then need to find event N
*/
n = 0;
- list_for_each_entry(counter, &evlist->entries, node) {
+ evlist__for_each(evlist, counter) {
if (n == nr_cgroups)
goto found;
n++;
@@ -133,7 +133,7 @@ void close_cgroup(struct cgroup_sel *cgrp)
/* XXX: not reentrant */
if (--cgrp->refcnt == 0) {
close(cgrp->fd);
- free(cgrp->name);
+ zfree(&cgrp->name);
free(cgrp);
}
}
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index 66e44a5..87b8672 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -1,6 +1,7 @@
#include <linux/kernel.h>
#include "cache.h"
#include "color.h"
+#include <math.h>
int perf_use_color_default = -1;
@@ -298,10 +299,10 @@ const char *get_percent_color(double percent)
* entries in green - and keep the low overhead places
* normal:
*/
- if (percent >= MIN_RED)
+ if (fabs(percent) >= MIN_RED)
color = PERF_COLOR_RED;
else {
- if (percent > MIN_GREEN)
+ if (fabs(percent) > MIN_GREEN)
color = PERF_COLOR_GREEN;
}
return color;
@@ -318,15 +319,19 @@ int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
return r;
}
+int value_color_snprintf(char *bf, size_t size, const char *fmt, double value)
+{
+ const char *color = get_percent_color(value);
+ return color_snprintf(bf, size, color, fmt, value);
+}
+
int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...)
{
va_list args;
double percent;
- const char *color;
va_start(args, fmt);
percent = va_arg(args, double);
va_end(args);
- color = get_percent_color(percent);
- return color_snprintf(bf, size, color, fmt, percent);
+ return value_color_snprintf(bf, size, fmt, percent);
}
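
With the fabs() calls added above, the color of a percentage now depends on its magnitude, so a negative delta (as printed by perf diff, for example) is highlighted the same way as a positive one of equal size. A tiny stand-alone version of that threshold logic (the MIN_GREEN/MIN_RED values are assumed for the sketch):

#include <math.h>
#include <stdio.h>

#define MIN_GREEN 0.5
#define MIN_RED   5.0

/* Magnitude-based coloring, as in the hunk above. */
static const char *get_percent_color(double percent)
{
	if (fabs(percent) >= MIN_RED)
		return "red";
	if (fabs(percent) > MIN_GREEN)
		return "green";
	return "normal";
}

int main(void)
{
	double samples[] = { 7.0, -7.0, 0.3, -1.2 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%+5.1f%% -> %s\n", samples[i], get_percent_color(samples[i]));
	return 0;
}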
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index fced384..7ff30a6 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -39,6 +39,7 @@ int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...);
int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
+int value_color_snprintf(char *bf, size_t size, const char *fmt, double value);
int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...);
int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
const char *get_percent_color(double percent);
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
index ee0df0e..f9e7776 100644
--- a/tools/perf/util/comm.c
+++ b/tools/perf/util/comm.c
@@ -21,7 +21,7 @@ static void comm_str__put(struct comm_str *cs)
{
if (!--cs->ref) {
rb_erase(&cs->rb_node, &comm_str_root);
- free(cs->str);
+ zfree(&cs->str);
free(cs);
}
}
@@ -94,19 +94,20 @@ struct comm *comm__new(const char *str, u64 timestamp)
return comm;
}
-void comm__override(struct comm *comm, const char *str, u64 timestamp)
+int comm__override(struct comm *comm, const char *str, u64 timestamp)
{
- struct comm_str *old = comm->comm_str;
+ struct comm_str *new, *old = comm->comm_str;
- comm->comm_str = comm_str__findnew(str, &comm_str_root);
- if (!comm->comm_str) {
- comm->comm_str = old;
- return;
- }
+ new = comm_str__findnew(str, &comm_str_root);
+ if (!new)
+ return -ENOMEM;
- comm->start = timestamp;
- comm_str__get(comm->comm_str);
+ comm_str__get(new);
comm_str__put(old);
+ comm->comm_str = new;
+ comm->start = timestamp;
+
+ return 0;
}
void comm__free(struct comm *comm)
diff --git a/tools/perf/util/comm.h b/tools/perf/util/comm.h
index 7a86e56..fac5bd5 100644
--- a/tools/perf/util/comm.h
+++ b/tools/perf/util/comm.h
@@ -16,6 +16,6 @@ struct comm {
void comm__free(struct comm *comm);
struct comm *comm__new(const char *str, u64 timestamp);
const char *comm__str(const struct comm *comm);
-void comm__override(struct comm *comm, const char *str, u64 timestamp);
+int comm__override(struct comm *comm, const char *str, u64 timestamp);
#endif /* __PERF_COMM_H */
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 7d09faf..1fbcd8b 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -118,3 +118,9 @@ void perf_data_file__close(struct perf_data_file *file)
{
close(file->fd);
}
+
+ssize_t perf_data_file__write(struct perf_data_file *file,
+ void *buf, size_t size)
+{
+ return writen(file->fd, buf, size);
+}
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 8c2df80..2b15d0c 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -9,12 +9,12 @@ enum perf_data_mode {
};
struct perf_data_file {
- const char *path;
- int fd;
- bool is_pipe;
- bool force;
- unsigned long size;
- enum perf_data_mode mode;
+ const char *path;
+ int fd;
+ bool is_pipe;
+ bool force;
+ unsigned long size;
+ enum perf_data_mode mode;
};
static inline bool perf_data_file__is_read(struct perf_data_file *file)
@@ -44,5 +44,7 @@ static inline unsigned long perf_data_file__size(struct perf_data_file *file)
int perf_data_file__open(struct perf_data_file *file);
void perf_data_file__close(struct perf_data_file *file);
+ssize_t perf_data_file__write(struct perf_data_file *file,
+ void *buf, size_t size);
#endif /* __PERF_DATA_H */
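
perf_data_file__write() above simply forwards to writen(); the value of such a wrapper is that write(2) may return short counts, so the helper keeps writing until the whole buffer is out. A stand-alone sketch of that pattern (this writen() is re-implemented for illustration, not taken from perf):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Write all of buf, retrying on short writes and EINTR. */
static ssize_t writen(int fd, const void *buf, size_t n)
{
	const char *p = buf;
	size_t left = n;

	while (left) {
		ssize_t ret = write(fd, p, left);

		if (ret < 0) {
			if (errno == EINTR)
				continue;
			return -1;
		}
		p += ret;
		left -= ret;
	}
	return n;
}

int main(void)
{
	int fd = open("/tmp/perf-write-demo.data", O_CREAT | O_TRUNC | O_WRONLY, 0644);
	const char msg[] = "demo payload";	/* arbitrary bytes for the demo */

	if (fd < 0 || writen(fd, msg, sizeof(msg) - 1) < 0) {
		perror("writen");
		return 1;
	}
	close(fd);
	return 0;
}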
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 399e74c..299b555 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -16,23 +16,46 @@
int verbose;
bool dump_trace = false, quiet = false;
-int eprintf(int level, const char *fmt, ...)
+static int _eprintf(int level, const char *fmt, va_list args)
{
- va_list args;
int ret = 0;
if (verbose >= level) {
- va_start(args, fmt);
if (use_browser >= 1)
ui_helpline__vshow(fmt, args);
else
ret = vfprintf(stderr, fmt, args);
- va_end(args);
}
return ret;
}
+int eprintf(int level, const char *fmt, ...)
+{
+ va_list args;
+ int ret;
+
+ va_start(args, fmt);
+ ret = _eprintf(level, fmt, args);
+ va_end(args);
+
+ return ret;
+}
+
+/*
+ * Overloading libtraceevent standard info print
+ * function, display with -v in perf.
+ */
+void pr_stat(const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ _eprintf(1, fmt, args);
+ va_end(args);
+ eprintf(1, "\n");
+}
+
int dump_printf(const char *fmt, ...)
{
va_list args;
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index efbd988..443694c 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -17,4 +17,6 @@ void trace_event(union perf_event *event);
int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
+void pr_stat(const char *fmt, ...);
+
#endif /* __PERF_DEBUG_H */
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index af4c687c..4045d08 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -28,8 +28,9 @@ char dso__symtab_origin(const struct dso *dso)
return origin[dso->symtab_type];
}
-int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
- char *root_dir, char *file, size_t size)
+int dso__read_binary_type_filename(const struct dso *dso,
+ enum dso_binary_type type,
+ char *root_dir, char *filename, size_t size)
{
char build_id_hex[BUILD_ID_SIZE * 2 + 1];
int ret = 0;
@@ -38,36 +39,36 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
case DSO_BINARY_TYPE__DEBUGLINK: {
char *debuglink;
- strncpy(file, dso->long_name, size);
- debuglink = file + dso->long_name_len;
- while (debuglink != file && *debuglink != '/')
+ strncpy(filename, dso->long_name, size);
+ debuglink = filename + dso->long_name_len;
+ while (debuglink != filename && *debuglink != '/')
debuglink--;
if (*debuglink == '/')
debuglink++;
filename__read_debuglink(dso->long_name, debuglink,
- size - (debuglink - file));
+ size - (debuglink - filename));
}
break;
case DSO_BINARY_TYPE__BUILD_ID_CACHE:
/* skip the locally configured cache if a symfs is given */
if (symbol_conf.symfs[0] ||
- (dso__build_id_filename(dso, file, size) == NULL))
+ (dso__build_id_filename(dso, filename, size) == NULL))
ret = -1;
break;
case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
- snprintf(file, size, "%s/usr/lib/debug%s.debug",
+ snprintf(filename, size, "%s/usr/lib/debug%s.debug",
symbol_conf.symfs, dso->long_name);
break;
case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
- snprintf(file, size, "%s/usr/lib/debug%s",
+ snprintf(filename, size, "%s/usr/lib/debug%s",
symbol_conf.symfs, dso->long_name);
break;
case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
{
- char *last_slash;
+ const char *last_slash;
size_t len;
size_t dir_size;
@@ -75,14 +76,14 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
while (last_slash != dso->long_name && *last_slash != '/')
last_slash--;
- len = scnprintf(file, size, "%s", symbol_conf.symfs);
+ len = scnprintf(filename, size, "%s", symbol_conf.symfs);
dir_size = last_slash - dso->long_name + 2;
if (dir_size > (size - len)) {
ret = -1;
break;
}
- len += scnprintf(file + len, dir_size, "%s", dso->long_name);
- len += scnprintf(file + len , size - len, ".debug%s",
+ len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
+ len += scnprintf(filename + len , size - len, ".debug%s",
last_slash);
break;
}
@@ -96,7 +97,7 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
build_id__sprintf(dso->build_id,
sizeof(dso->build_id),
build_id_hex);
- snprintf(file, size,
+ snprintf(filename, size,
"%s/usr/lib/debug/.build-id/%.2s/%s.debug",
symbol_conf.symfs, build_id_hex, build_id_hex + 2);
break;
@@ -104,23 +105,23 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
case DSO_BINARY_TYPE__VMLINUX:
case DSO_BINARY_TYPE__GUEST_VMLINUX:
case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
- snprintf(file, size, "%s%s",
+ snprintf(filename, size, "%s%s",
symbol_conf.symfs, dso->long_name);
break;
case DSO_BINARY_TYPE__GUEST_KMODULE:
- snprintf(file, size, "%s%s%s", symbol_conf.symfs,
+ snprintf(filename, size, "%s%s%s", symbol_conf.symfs,
root_dir, dso->long_name);
break;
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
- snprintf(file, size, "%s%s", symbol_conf.symfs,
+ snprintf(filename, size, "%s%s", symbol_conf.symfs,
dso->long_name);
break;
case DSO_BINARY_TYPE__KCORE:
case DSO_BINARY_TYPE__GUEST_KCORE:
- snprintf(file, size, "%s", dso->long_name);
+ snprintf(filename, size, "%s", dso->long_name);
break;
default:
@@ -137,19 +138,18 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
static int open_dso(struct dso *dso, struct machine *machine)
{
- char *root_dir = (char *) "";
- char *name;
int fd;
+ char *root_dir = (char *)"";
+ char *name = malloc(PATH_MAX);
- name = malloc(PATH_MAX);
if (!name)
return -ENOMEM;
if (machine)
root_dir = machine->root_dir;
- if (dso__binary_type_file(dso, dso->data_type,
- root_dir, name, PATH_MAX)) {
+ if (dso__read_binary_type_filename(dso, dso->binary_type,
+ root_dir, name, PATH_MAX)) {
free(name);
return -EINVAL;
}
@@ -161,26 +161,26 @@ static int open_dso(struct dso *dso, struct machine *machine)
int dso__data_fd(struct dso *dso, struct machine *machine)
{
- static enum dso_binary_type binary_type_data[] = {
+ enum dso_binary_type binary_type_data[] = {
DSO_BINARY_TYPE__BUILD_ID_CACHE,
DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
DSO_BINARY_TYPE__NOT_FOUND,
};
int i = 0;
- if (dso->data_type != DSO_BINARY_TYPE__NOT_FOUND)
+ if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND)
return open_dso(dso, machine);
do {
int fd;
- dso->data_type = binary_type_data[i++];
+ dso->binary_type = binary_type_data[i++];
fd = open_dso(dso, machine);
if (fd >= 0)
return fd;
- } while (dso->data_type != DSO_BINARY_TYPE__NOT_FOUND);
+ } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
return -EINVAL;
}
@@ -200,11 +200,10 @@ dso_cache__free(struct rb_root *root)
}
}
-static struct dso_cache*
-dso_cache__find(struct rb_root *root, u64 offset)
+static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
+ struct rb_node * const *p = &root->rb_node;
+ const struct rb_node *parent = NULL;
struct dso_cache *cache;
while (*p != NULL) {
@@ -379,32 +378,63 @@ struct dso *dso__kernel_findnew(struct machine *machine, const char *name,
* processing we had no idea this was the kernel dso.
*/
if (dso != NULL) {
- dso__set_short_name(dso, short_name);
+ dso__set_short_name(dso, short_name, false);
dso->kernel = dso_type;
}
return dso;
}
-void dso__set_long_name(struct dso *dso, char *name)
+void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
if (name == NULL)
return;
- dso->long_name = name;
- dso->long_name_len = strlen(name);
+
+ if (dso->long_name_allocated)
+ free((char *)dso->long_name);
+
+ dso->long_name = name;
+ dso->long_name_len = strlen(name);
+ dso->long_name_allocated = name_allocated;
}
-void dso__set_short_name(struct dso *dso, const char *name)
+void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
if (name == NULL)
return;
- dso->short_name = name;
- dso->short_name_len = strlen(name);
+
+ if (dso->short_name_allocated)
+ free((char *)dso->short_name);
+
+ dso->short_name = name;
+ dso->short_name_len = strlen(name);
+ dso->short_name_allocated = name_allocated;
}
static void dso__set_basename(struct dso *dso)
{
- dso__set_short_name(dso, basename(dso->long_name));
+ /*
+	 * basename() may modify the path buffer, so we must
+	 * pass a copy.
+ */
+ char *base, *lname = strdup(dso->long_name);
+
+ if (!lname)
+ return;
+
+ /*
+ * basename() may return a pointer to internal
+ * storage which is reused in subsequent calls
+ * so copy the result.
+ */
+ base = strdup(basename(lname));
+
+ free(lname);
+
+ if (!base)
+ return;
+
+ dso__set_short_name(dso, base, true);
}
int dso__name_len(const struct dso *dso)
@@ -439,18 +469,19 @@ struct dso *dso__new(const char *name)
if (dso != NULL) {
int i;
strcpy(dso->name, name);
- dso__set_long_name(dso, dso->name);
- dso__set_short_name(dso, dso->name);
+ dso__set_long_name(dso, dso->name, false);
+ dso__set_short_name(dso, dso->name, false);
for (i = 0; i < MAP__NR_TYPES; ++i)
dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
dso->cache = RB_ROOT;
dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
- dso->data_type = DSO_BINARY_TYPE__NOT_FOUND;
+ dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
dso->loaded = 0;
dso->rel = 0;
dso->sorted_by_name = 0;
dso->has_build_id = 0;
dso->has_srcline = 1;
+ dso->a2l_fails = 1;
dso->kernel = DSO_TYPE_USER;
dso->needs_swap = DSO_SWAP__UNSET;
INIT_LIST_HEAD(&dso->node);
@@ -464,11 +495,20 @@ void dso__delete(struct dso *dso)
int i;
for (i = 0; i < MAP__NR_TYPES; ++i)
symbols__delete(&dso->symbols[i]);
- if (dso->sname_alloc)
- free((char *)dso->short_name);
- if (dso->lname_alloc)
- free(dso->long_name);
+
+ if (dso->short_name_allocated) {
+ zfree((char **)&dso->short_name);
+ dso->short_name_allocated = false;
+ }
+
+ if (dso->long_name_allocated) {
+ zfree((char **)&dso->long_name);
+ dso->long_name_allocated = false;
+ }
+
dso_cache__free(&dso->cache);
+ dso__free_a2l(dso);
+ zfree(&dso->symsrc_filename);
free(dso);
}
@@ -543,7 +583,7 @@ void dsos__add(struct list_head *head, struct dso *dso)
list_add_tail(&dso->node, head);
}
-struct dso *dsos__find(struct list_head *head, const char *name, bool cmp_short)
+struct dso *dsos__find(const struct list_head *head, const char *name, bool cmp_short)
{
struct dso *pos;
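
The dso.c hunks above funnel every name assignment through dso__set_short_name()/dso__set_long_name() and replace the old sname_alloc/lname_alloc bits with *_allocated flags that travel with the pointer, so the previous string is freed only when it was actually owned. A minimal standalone sketch of that ownership pattern, not the perf code itself (the struct and helper names below are illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct obj {
        const char *name;
        bool        name_allocated;     /* do we own (and must free) name? */
    };

    /* take over 'name'; free the previous one only if we owned it */
    static void obj__set_name(struct obj *o, const char *name, bool allocated)
    {
        if (name == NULL)
            return;

        if (o->name_allocated)
            free((char *)o->name);

        o->name = name;
        o->name_allocated = allocated;
    }

    static void obj__exit(struct obj *o)
    {
        if (o->name_allocated) {
            free((char *)o->name);
            o->name = NULL;
            o->name_allocated = false;
        }
    }

    int main(void)
    {
        struct obj o = { .name = "builtin-default" };   /* not owned */

        obj__set_name(&o, strdup("heap copy"), true);   /* owned */
        obj__set_name(&o, "another builtin", false);    /* frees "heap copy" */
        printf("%s\n", o.name);
        obj__exit(&o);
        return 0;
    }

Because the flag records who owns the storage, dso__set_basename() can hand over strdup()ed memory while dso__new() keeps pointing both names at the embedded name buffer without the destructor ever freeing it.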
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 9ac666a..cd7d6f0 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -77,23 +77,26 @@ struct dso {
struct rb_root symbols[MAP__NR_TYPES];
struct rb_root symbol_names[MAP__NR_TYPES];
struct rb_root cache;
+ void *a2l;
+ char *symsrc_filename;
+ unsigned int a2l_fails;
enum dso_kernel_type kernel;
enum dso_swap_type needs_swap;
enum dso_binary_type symtab_type;
- enum dso_binary_type data_type;
+ enum dso_binary_type binary_type;
u8 adjust_symbols:1;
u8 has_build_id:1;
u8 has_srcline:1;
u8 hit:1;
u8 annotate_warned:1;
- u8 sname_alloc:1;
- u8 lname_alloc:1;
+ u8 short_name_allocated:1;
+ u8 long_name_allocated:1;
u8 sorted_by_name;
u8 loaded;
u8 rel;
u8 build_id[BUILD_ID_SIZE];
const char *short_name;
- char *long_name;
+ const char *long_name;
u16 long_name_len;
u16 short_name_len;
char name[0];
@@ -107,8 +110,8 @@ static inline void dso__set_loaded(struct dso *dso, enum map_type type)
struct dso *dso__new(const char *name);
void dso__delete(struct dso *dso);
-void dso__set_short_name(struct dso *dso, const char *name);
-void dso__set_long_name(struct dso *dso, char *name);
+void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated);
+void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated);
int dso__name_len(const struct dso *dso);
@@ -125,8 +128,8 @@ void dso__read_running_kernel_build_id(struct dso *dso,
int dso__kernel_module_get_build_id(struct dso *dso, const char *root_dir);
char dso__symtab_origin(const struct dso *dso);
-int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
- char *root_dir, char *file, size_t size);
+int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type,
+ char *root_dir, char *filename, size_t size);
int dso__data_fd(struct dso *dso, struct machine *machine);
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
@@ -140,7 +143,7 @@ struct dso *dso__kernel_findnew(struct machine *machine, const char *name,
const char *short_name, int dso_type);
void dsos__add(struct list_head *head, struct dso *dso);
-struct dso *dsos__find(struct list_head *head, const char *name,
+struct dso *dsos__find(const struct list_head *head, const char *name,
bool cmp_short);
struct dso *__dsos__findnew(struct list_head *head, const char *name);
bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
@@ -156,14 +159,16 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp);
static inline bool dso__is_vmlinux(struct dso *dso)
{
- return dso->data_type == DSO_BINARY_TYPE__VMLINUX ||
- dso->data_type == DSO_BINARY_TYPE__GUEST_VMLINUX;
+ return dso->binary_type == DSO_BINARY_TYPE__VMLINUX ||
+ dso->binary_type == DSO_BINARY_TYPE__GUEST_VMLINUX;
}
static inline bool dso__is_kcore(struct dso *dso)
{
- return dso->data_type == DSO_BINARY_TYPE__KCORE ||
- dso->data_type == DSO_BINARY_TYPE__GUEST_KCORE;
+ return dso->binary_type == DSO_BINARY_TYPE__KCORE ||
+ dso->binary_type == DSO_BINARY_TYPE__GUEST_KCORE;
}
+void dso__free_a2l(struct dso *dso);
+
#endif /* __PERF_DSO */
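
struct dso still ends with a zero-length name[0] array that dso__new() fills in place, so the object and its primary name come from a single allocation. A small sketch of that idiom, written with the C99 flexible array member spelling and illustrative names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct node {
        size_t name_len;
        char   name[];          /* storage allocated together with the struct */
    };

    static struct node *node__new(const char *name)
    {
        size_t       len = strlen(name);
        struct node *n   = calloc(1, sizeof(*n) + len + 1);

        if (n) {
            n->name_len = len;
            memcpy(n->name, name, len + 1);
        }
        return n;
    }

    int main(void)
    {
        struct node *n = node__new("vmlinux");

        if (n) {
            printf("%zu %s\n", n->name_len, n->name);
            free(n);            /* one free releases both struct and string */
        }
        return 0;
    }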
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index bb788c1..1fc1c2f 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -7,6 +7,7 @@
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
+#include "symbol/kallsyms.h"
static const char *perf_event__names[] = {
[0] = "TOTAL",
@@ -105,8 +106,12 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
memset(&event->comm, 0, sizeof(event->comm));
- tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
- sizeof(event->comm.comm));
+ if (machine__is_host(machine))
+ tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
+ sizeof(event->comm.comm));
+ else
+ tgid = machine->pid;
+
if (tgid < 0)
goto out;
@@ -128,7 +133,11 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
goto out;
}
- snprintf(filename, sizeof(filename), "/proc/%d/task", pid);
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ snprintf(filename, sizeof(filename), "%s/proc/%d/task",
+ machine->root_dir, pid);
tasks = opendir(filename);
if (tasks == NULL) {
@@ -166,18 +175,22 @@ out:
return tgid;
}
-static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
- union perf_event *event,
- pid_t pid, pid_t tgid,
- perf_event__handler_t process,
- struct machine *machine,
- bool mmap_data)
+int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ union perf_event *event,
+ pid_t pid, pid_t tgid,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data)
{
char filename[PATH_MAX];
FILE *fp;
int rc = 0;
- snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
+ machine->root_dir, pid);
fp = fopen(filename, "r");
if (fp == NULL) {
@@ -217,7 +230,10 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
/*
* Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
*/
- event->header.misc = PERF_RECORD_MISC_USER;
+ if (machine__is_host(machine))
+ event->header.misc = PERF_RECORD_MISC_USER;
+ else
+ event->header.misc = PERF_RECORD_MISC_GUEST_USER;
if (prot[2] != 'x') {
if (!mmap_data || prot[0] != 'r')
@@ -386,6 +402,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
struct machine *machine, bool mmap_data)
{
DIR *proc;
+ char proc_path[PATH_MAX];
struct dirent dirent, *next;
union perf_event *comm_event, *mmap_event;
int err = -1;
@@ -398,7 +415,12 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
if (mmap_event == NULL)
goto out_free_comm;
- proc = opendir("/proc");
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
+ proc = opendir(proc_path);
+
if (proc == NULL)
goto out_free_mmap;
@@ -637,6 +659,7 @@ void thread__find_addr_map(struct thread *thread,
struct map_groups *mg = &thread->mg;
bool load_map = false;
+ al->machine = machine;
al->thread = thread;
al->addr = addr;
al->cpumode = cpumode;
@@ -657,15 +680,10 @@ void thread__find_addr_map(struct thread *thread,
al->level = 'g';
mg = &machine->kmaps;
load_map = true;
+ } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
+ al->level = 'u';
} else {
- /*
- * 'u' means guest os user space.
- * TODO: We don't support guest user space. Might support late.
- */
- if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
- al->level = 'u';
- else
- al->level = 'H';
+ al->level = 'H';
al->map = NULL;
if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
@@ -732,8 +750,7 @@ int perf_event__preprocess_sample(const union perf_event *event,
if (thread == NULL)
return -1;
- if (symbol_conf.comm_list &&
- !strlist__has_entry(symbol_conf.comm_list, thread__comm_str(thread)))
+ if (thread__is_filtered(thread))
goto out_filtered;
dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
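
The comm/mmap/thread synthesizers above stop hard-coding /proc and instead prefix paths with machine->root_dir, returning early for the default guest, which has no procfs of its own to scan. A sketch of that path selection, assuming a stripped-down machine that carries only a root_dir (empty string for the host; the stand-in guest test below is illustrative):

    #include <limits.h>
    #include <stdio.h>

    struct machine {
        const char *root_dir;   /* "" for the host, a mount point for a guest */
    };

    /* illustrative stand-in for the real machine__is_default_guest() test */
    static int is_default_guest(const struct machine *m)
    {
        return m->root_dir == NULL;
    }

    static int build_maps_path(const struct machine *m, int pid,
                               char *buf, size_t size)
    {
        if (is_default_guest(m))
            return 0;           /* nothing to scan, mirror the early return */

        snprintf(buf, size, "%s/proc/%d/maps", m->root_dir, pid);
        return 1;
    }

    int main(void)
    {
        char path[PATH_MAX];
        struct machine host = { .root_dir = "" };

        if (build_maps_path(&host, 1, path, sizeof(path)))
            printf("%s\n", path);       /* prints /proc/1/maps */
        return 0;
    }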
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 30fec99..faf6e21 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -266,6 +266,13 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
const struct perf_sample *sample,
bool swapped);
+int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ union perf_event *event,
+ pid_t pid, pid_t tgid,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data);
+
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index bbc746a..40bd2c0 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -7,7 +7,7 @@
* Released under the GPL v2. (and only v2, not any later version)
*/
#include "util.h"
-#include <lk/debugfs.h>
+#include <api/fs/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
@@ -81,7 +81,7 @@ static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
- list_for_each_entry(evsel, &evlist->entries, node)
+ evlist__for_each(evlist, evsel)
perf_evsel__calc_id_pos(evsel);
perf_evlist__set_id_pos(evlist);
@@ -91,7 +91,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
{
struct perf_evsel *pos, *n;
- list_for_each_entry_safe(pos, n, &evlist->entries, node) {
+ evlist__for_each_safe(evlist, n, pos) {
list_del_init(&pos->node);
perf_evsel__delete(pos);
}
@@ -101,14 +101,18 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
void perf_evlist__exit(struct perf_evlist *evlist)
{
- free(evlist->mmap);
- free(evlist->pollfd);
- evlist->mmap = NULL;
- evlist->pollfd = NULL;
+ zfree(&evlist->mmap);
+ zfree(&evlist->pollfd);
}
void perf_evlist__delete(struct perf_evlist *evlist)
{
+ perf_evlist__munmap(evlist);
+ perf_evlist__close(evlist);
+ cpu_map__delete(evlist->cpus);
+ thread_map__delete(evlist->threads);
+ evlist->cpus = NULL;
+ evlist->threads = NULL;
perf_evlist__purge(evlist);
perf_evlist__exit(evlist);
free(evlist);
@@ -144,7 +148,7 @@ void __perf_evlist__set_leader(struct list_head *list)
leader->nr_members = evsel->idx - leader->idx + 1;
- list_for_each_entry(evsel, list, node) {
+ __evlist__for_each(list, evsel) {
evsel->leader = leader;
}
}
@@ -203,7 +207,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist,
return 0;
out_delete_partial_list:
- list_for_each_entry_safe(evsel, n, &head, node)
+ __evlist__for_each_safe(&head, n, evsel)
perf_evsel__delete(evsel);
return -1;
}
@@ -224,7 +228,7 @@ perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
struct perf_evsel *evsel;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
(int)evsel->attr.config == id)
return evsel;
@@ -239,7 +243,7 @@ perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
{
struct perf_evsel *evsel;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
(strcmp(evsel->name, name) == 0))
return evsel;
@@ -269,7 +273,7 @@ void perf_evlist__disable(struct perf_evlist *evlist)
int nr_threads = thread_map__nr(evlist->threads);
for (cpu = 0; cpu < nr_cpus; cpu++) {
- list_for_each_entry(pos, &evlist->entries, node) {
+ evlist__for_each(evlist, pos) {
if (!perf_evsel__is_group_leader(pos) || !pos->fd)
continue;
for (thread = 0; thread < nr_threads; thread++)
@@ -287,7 +291,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
int nr_threads = thread_map__nr(evlist->threads);
for (cpu = 0; cpu < nr_cpus; cpu++) {
- list_for_each_entry(pos, &evlist->entries, node) {
+ evlist__for_each(evlist, pos) {
if (!perf_evsel__is_group_leader(pos) || !pos->fd)
continue;
for (thread = 0; thread < nr_threads; thread++)
@@ -584,11 +588,13 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
{
int i;
+ if (evlist->mmap == NULL)
+ return;
+
for (i = 0; i < evlist->nr_mmaps; i++)
__perf_evlist__munmap(evlist, i);
- free(evlist->mmap);
- evlist->mmap = NULL;
+ zfree(&evlist->mmap);
}
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
@@ -624,7 +630,7 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
{
struct perf_evsel *evsel;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
int fd = FD(evsel, cpu, thread);
if (*output == -1) {
@@ -732,11 +738,13 @@ static long parse_pages_arg(const char *str, unsigned long min,
return -EINVAL;
}
- if ((pages == 0) && (min == 0)) {
+ if (pages == 0 && min == 0) {
/* leave number of pages at 0 */
- } else if (pages < (1UL << 31) && !is_power_of_2(pages)) {
+ } else if (!is_power_of_2(pages)) {
/* round pages up to next power of 2 */
- pages = next_pow2(pages);
+ pages = next_pow2_l(pages);
+ if (!pages)
+ return -EINVAL;
pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
pages * page_size, pages);
}
@@ -754,7 +762,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
unsigned long max = UINT_MAX;
long pages;
- if (max < SIZE_MAX / page_size)
+ if (max > SIZE_MAX / page_size)
max = SIZE_MAX / page_size;
pages = parse_pages_arg(str, 1, max);
@@ -798,7 +806,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
pr_debug("mmap size %zuB\n", evlist->mmap_len);
mask = evlist->mmap_len - page_size - 1;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
evsel->sample_id == NULL &&
perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
@@ -819,11 +827,7 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
if (evlist->threads == NULL)
return -1;
- if (target->force_per_cpu)
- evlist->cpus = cpu_map__new(target->cpu_list);
- else if (target__has_task(target))
- evlist->cpus = cpu_map__dummy_new();
- else if (!target__has_cpu(target) && !target->uses_mmap)
+ if (target__uses_dummy_map(target))
evlist->cpus = cpu_map__dummy_new();
else
evlist->cpus = cpu_map__new(target->cpu_list);
@@ -838,14 +842,6 @@ out_delete_threads:
return -1;
}
-void perf_evlist__delete_maps(struct perf_evlist *evlist)
-{
- cpu_map__delete(evlist->cpus);
- thread_map__delete(evlist->threads);
- evlist->cpus = NULL;
- evlist->threads = NULL;
-}
-
int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
@@ -853,7 +849,7 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist)
const int ncpus = cpu_map__nr(evlist->cpus),
nthreads = thread_map__nr(evlist->threads);
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
if (evsel->filter == NULL)
continue;
@@ -872,7 +868,7 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
const int ncpus = cpu_map__nr(evlist->cpus),
nthreads = thread_map__nr(evlist->threads);
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
if (err)
break;
@@ -891,7 +887,7 @@ bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
if (evlist->id_pos < 0 || evlist->is_pos < 0)
return false;
- list_for_each_entry(pos, &evlist->entries, node) {
+ evlist__for_each(evlist, pos) {
if (pos->id_pos != evlist->id_pos ||
pos->is_pos != evlist->is_pos)
return false;
@@ -907,7 +903,7 @@ u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
if (evlist->combined_sample_type)
return evlist->combined_sample_type;
- list_for_each_entry(evsel, &evlist->entries, node)
+ evlist__for_each(evlist, evsel)
evlist->combined_sample_type |= evsel->attr.sample_type;
return evlist->combined_sample_type;
@@ -925,7 +921,7 @@ bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
u64 read_format = first->attr.read_format;
u64 sample_type = first->attr.sample_type;
- list_for_each_entry_continue(pos, &evlist->entries, node) {
+ evlist__for_each(evlist, pos) {
if (read_format != pos->attr.read_format)
return false;
}
@@ -982,7 +978,7 @@ bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
- list_for_each_entry_continue(pos, &evlist->entries, node) {
+ evlist__for_each_continue(evlist, pos) {
if (first->attr.sample_id_all != pos->attr.sample_id_all)
return false;
}
@@ -1008,7 +1004,7 @@ void perf_evlist__close(struct perf_evlist *evlist)
int ncpus = cpu_map__nr(evlist->cpus);
int nthreads = thread_map__nr(evlist->threads);
- list_for_each_entry_reverse(evsel, &evlist->entries, node)
+ evlist__for_each_reverse(evlist, evsel)
perf_evsel__close(evsel, ncpus, nthreads);
}
@@ -1019,7 +1015,7 @@ int perf_evlist__open(struct perf_evlist *evlist)
perf_evlist__update_id_pos(evlist);
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
if (err < 0)
goto out_err;
@@ -1034,7 +1030,7 @@ out_err:
int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
const char *argv[], bool pipe_output,
- bool want_signal)
+ void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
int child_ready_pipe[2], go_pipe[2];
char bf;
@@ -1078,12 +1074,25 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *tar
execvp(argv[0], (char **)argv);
- perror(argv[0]);
- if (want_signal)
- kill(getppid(), SIGUSR1);
+ if (exec_error) {
+ union sigval val;
+
+ val.sival_int = errno;
+ if (sigqueue(getppid(), SIGUSR1, val))
+ perror(argv[0]);
+ } else
+ perror(argv[0]);
exit(-1);
}
+ if (exec_error) {
+ struct sigaction act = {
+ .sa_flags = SA_SIGINFO,
+ .sa_sigaction = exec_error,
+ };
+ sigaction(SIGUSR1, &act, NULL);
+ }
+
if (target__none(target))
evlist->threads->map[0] = evlist->workload.pid;
@@ -1145,7 +1154,7 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
struct perf_evsel *evsel;
size_t printed = 0;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
perf_evsel__name(evsel));
}
@@ -1193,8 +1202,7 @@ int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
"Error:\t%s.\n"
"Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
- if (filename__read_int("/proc/sys/kernel/perf_event_paranoid", &value))
- break;
+ value = perf_event_paranoid();
printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
@@ -1215,3 +1223,20 @@ int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
return 0;
}
+
+void perf_evlist__to_front(struct perf_evlist *evlist,
+ struct perf_evsel *move_evsel)
+{
+ struct perf_evsel *evsel, *n;
+ LIST_HEAD(move);
+
+ if (move_evsel == perf_evlist__first(evlist))
+ return;
+
+ evlist__for_each_safe(evlist, n, evsel) {
+ if (evsel->leader == move_evsel->leader)
+ list_move_tail(&evsel->node, &move);
+ }
+
+ list_splice(&move, &evlist->entries);
+}
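
perf_evlist__prepare_workload() now reports a failed exec by queuing SIGUSR1 with the child's errno as the signal payload, and the caller-supplied handler is installed with SA_SIGINFO to read it back. A stripped-down sketch of that mechanism; unlike the patch, the handler here is installed before fork() purely to keep the toy race-free, and error checking is trimmed:

    #define _POSIX_C_SOURCE 200809L
    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static volatile sig_atomic_t exec_errno;

    static void exec_error(int signo, siginfo_t *info, void *ucontext)
    {
        (void)signo;
        (void)ucontext;
        exec_errno = info->si_value.sival_int;  /* errno shipped by the child */
    }

    int main(void)
    {
        struct sigaction act = {
            .sa_flags = SA_SIGINFO,
            .sa_sigaction = exec_error,
        };
        pid_t pid;

        sigaction(SIGUSR1, &act, NULL);

        pid = fork();
        if (pid == 0) {
            char *argv[] = { (char *)"/no/such/binary", NULL };
            union sigval val;

            execvp(argv[0], argv);
            /* only reached when exec failed: send errno to the parent */
            val.sival_int = errno;
            sigqueue(getppid(), SIGUSR1, val);
            _exit(127);
        }

        waitpid(pid, NULL, 0);
        if (exec_errno)
            fprintf(stderr, "exec failed: %s\n", strerror(exec_errno));
        return 0;
    }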
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 649d6ea..f5173cd 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -12,7 +12,7 @@
struct pollfd;
struct thread_map;
struct cpu_map;
-struct perf_record_opts;
+struct record_opts;
#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
@@ -97,14 +97,14 @@ void perf_evlist__close(struct perf_evlist *evlist);
void perf_evlist__set_id_pos(struct perf_evlist *evlist);
bool perf_can_sample_identifier(void);
-void perf_evlist__config(struct perf_evlist *evlist,
- struct perf_record_opts *opts);
-int perf_record_opts__config(struct perf_record_opts *opts);
+void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts);
+int record_opts__config(struct record_opts *opts);
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
struct target *target,
const char *argv[], bool pipe_output,
- bool want_signal);
+ void (*exec_error)(int signo, siginfo_t *info,
+ void *ucontext));
int perf_evlist__start_workload(struct perf_evlist *evlist);
int perf_evlist__parse_mmap_pages(const struct option *opt,
@@ -135,7 +135,6 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
-void perf_evlist__delete_maps(struct perf_evlist *evlist);
int perf_evlist__apply_filters(struct perf_evlist *evlist);
void __perf_evlist__set_leader(struct list_head *list);
@@ -193,4 +192,74 @@ static inline void perf_mmap__write_tail(struct perf_mmap *md,
pc->data_tail = tail;
}
+bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
+void perf_evlist__to_front(struct perf_evlist *evlist,
+ struct perf_evsel *move_evsel);
+
+/**
+ * __evlist__for_each - iterate thru all the evsels
+ * @list: list_head instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define __evlist__for_each(list, evsel) \
+ list_for_each_entry(evsel, list, node)
+
+/**
+ * evlist__for_each - iterate thru all the evsels
+ * @evlist: evlist instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define evlist__for_each(evlist, evsel) \
+ __evlist__for_each(&(evlist)->entries, evsel)
+
+/**
+ * __evlist__for_each_continue - continue iteration thru all the evsels
+ * @list: list_head instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define __evlist__for_each_continue(list, evsel) \
+ list_for_each_entry_continue(evsel, list, node)
+
+/**
+ * evlist__for_each_continue - continue iteration thru all the evsels
+ * @evlist: evlist instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define evlist__for_each_continue(evlist, evsel) \
+ __evlist__for_each_continue(&(evlist)->entries, evsel)
+
+/**
+ * __evlist__for_each_reverse - iterate thru all the evsels in reverse order
+ * @list: list_head instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define __evlist__for_each_reverse(list, evsel) \
+ list_for_each_entry_reverse(evsel, list, node)
+
+/**
+ * evlist__for_each_reverse - iterate thru all the evsels in reverse order
+ * @evlist: evlist instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define evlist__for_each_reverse(evlist, evsel) \
+ __evlist__for_each_reverse(&(evlist)->entries, evsel)
+
+/**
+ * __evlist__for_each_safe - safely iterate thru all the evsels
+ * @list: list_head instance to iterate
+ * @tmp: struct evsel temp iterator
+ * @evsel: struct evsel iterator
+ */
+#define __evlist__for_each_safe(list, tmp, evsel) \
+ list_for_each_entry_safe(evsel, tmp, list, node)
+
+/**
+ * evlist__for_each_safe - safely iterate thru all the evsels
+ * @evlist: evlist instance to iterate
+ * @evsel: struct evsel iterator
+ * @tmp: struct evsel temp iterator
+ */
+#define evlist__for_each_safe(evlist, tmp, evsel) \
+ __evlist__for_each_safe(&(evlist)->entries, tmp, evsel)
+
#endif /* __PERF_EVLIST_H */
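
The evlist__for_each*() family is a thin veneer over list_for_each_entry() and friends so call sites name the evlist rather than reaching into its entries field. A tiny self-contained analogue of that wrapper pattern, using a hand-rolled singly linked list instead of the kernel's list.h (all names illustrative):

    #include <stdio.h>

    struct item {
        int          val;
        struct item *next;
    };

    struct box {
        struct item *entries;   /* head of a singly linked list */
    };

    /* low-level iterator over a raw list head ... */
    #define __box__for_each(head, pos) \
        for (pos = (head); pos != NULL; pos = pos->next)

    /* ... and the wrapper call sites actually use, naming the container */
    #define box__for_each(box, pos) \
        __box__for_each((box)->entries, pos)

    int main(void)
    {
        struct item c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct box box = { .entries = &a };
        struct item *pos;

        box__for_each(&box, pos)
            printf("%d\n", pos->val);
        return 0;
    }

Keeping the container's layout out of every loop is what lets later refactors change how entries are stored without touching the call sites.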
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 46dd4c2..22e18a2 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -9,7 +9,7 @@
#include <byteswap.h>
#include <linux/bitops.h>
-#include <lk/debugfs.h>
+#include <api/fs/debugfs.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
@@ -23,6 +23,7 @@
#include "target.h"
#include "perf_regs.h"
#include "debug.h"
+#include "trace-event.h"
static struct {
bool sample_id_all;
@@ -162,6 +163,8 @@ void perf_evsel__init(struct perf_evsel *evsel,
evsel->idx = idx;
evsel->attr = *attr;
evsel->leader = evsel;
+ evsel->unit = "";
+ evsel->scale = 1.0;
INIT_LIST_HEAD(&evsel->node);
hists__init(&evsel->hists);
evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
@@ -178,47 +181,6 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
return evsel;
}
-struct event_format *event_format__new(const char *sys, const char *name)
-{
- int fd, n;
- char *filename;
- void *bf = NULL, *nbf;
- size_t size = 0, alloc_size = 0;
- struct event_format *format = NULL;
-
- if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
- goto out;
-
- fd = open(filename, O_RDONLY);
- if (fd < 0)
- goto out_free_filename;
-
- do {
- if (size == alloc_size) {
- alloc_size += BUFSIZ;
- nbf = realloc(bf, alloc_size);
- if (nbf == NULL)
- goto out_free_bf;
- bf = nbf;
- }
-
- n = read(fd, bf + size, alloc_size - size);
- if (n < 0)
- goto out_free_bf;
- size += n;
- } while (n > 0);
-
- pevent_parse_format(&format, bf, size, sys);
-
-out_free_bf:
- free(bf);
- close(fd);
-out_free_filename:
- free(filename);
-out:
- return format;
-}
-
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
struct perf_evsel *evsel = zalloc(sizeof(*evsel));
@@ -233,7 +195,7 @@ struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int
if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
goto out_free;
- evsel->tp_format = event_format__new(sys, name);
+ evsel->tp_format = trace_event__tp_format(sys, name);
if (evsel->tp_format == NULL)
goto out_free;
@@ -246,7 +208,7 @@ struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int
return evsel;
out_free:
- free(evsel->name);
+ zfree(&evsel->name);
free(evsel);
return NULL;
}
@@ -566,12 +528,12 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
* enable/disable events specifically, as there's no
* initial traced exec call.
*/
-void perf_evsel__config(struct perf_evsel *evsel,
- struct perf_record_opts *opts)
+void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
{
struct perf_evsel *leader = evsel->leader;
struct perf_event_attr *attr = &evsel->attr;
int track = !evsel->idx; /* only the first counter needs these */
+ bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
attr->inherit = !opts->no_inherit;
@@ -645,7 +607,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
}
}
- if (target__has_cpu(&opts->target) || opts->target.force_per_cpu)
+ if (target__has_cpu(&opts->target))
perf_evsel__set_sample_bit(evsel, CPU);
if (opts->period)
@@ -653,7 +615,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
if (!perf_missing_features.sample_id_all &&
(opts->sample_time || !opts->no_inherit ||
- target__has_cpu(&opts->target) || opts->target.force_per_cpu))
+ target__has_cpu(&opts->target) || per_cpu))
perf_evsel__set_sample_bit(evsel, TIME);
if (opts->raw_samples) {
@@ -665,7 +627,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
if (opts->sample_address)
perf_evsel__set_sample_bit(evsel, DATA_SRC);
- if (opts->no_delay) {
+ if (opts->no_buffering) {
attr->watermark = 0;
attr->wakeup_events = 1;
}
@@ -696,7 +658,8 @@ void perf_evsel__config(struct perf_evsel *evsel,
* Setting enable_on_exec for independent events and
 * group leaders for workloads executed by perf.
*/
- if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
+ if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
+ !opts->initial_delay)
attr->enable_on_exec = 1;
}
@@ -788,8 +751,7 @@ void perf_evsel__free_id(struct perf_evsel *evsel)
{
xyarray__delete(evsel->sample_id);
evsel->sample_id = NULL;
- free(evsel->id);
- evsel->id = NULL;
+ zfree(&evsel->id);
}
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -805,7 +767,7 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
void perf_evsel__free_counts(struct perf_evsel *evsel)
{
- free(evsel->counts);
+ zfree(&evsel->counts);
}
void perf_evsel__exit(struct perf_evsel *evsel)
@@ -819,10 +781,10 @@ void perf_evsel__delete(struct perf_evsel *evsel)
{
perf_evsel__exit(evsel);
close_cgroup(evsel->cgrp);
- free(evsel->group_name);
+ zfree(&evsel->group_name);
if (evsel->tp_format)
pevent_free_format(evsel->tp_format);
- free(evsel->name);
+ zfree(&evsel->name);
free(evsel);
}
@@ -1998,8 +1960,7 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
evsel->attr.type = PERF_TYPE_SOFTWARE;
evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
- free(evsel->name);
- evsel->name = NULL;
+ zfree(&evsel->name);
return true;
}
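
Many hunks in evsel.c (and earlier files) collapse the free(p); p = NULL; pair into zfree(&p). One plausible way to write such a helper, sketched here as a plain macro rather than the exact definition used in tools/perf:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* free the pointee and clear the pointer so stale uses fail loudly */
    #define zfree(pptr)                 \
        do {                            \
            free(*(pptr));              \
            *(pptr) = NULL;             \
        } while (0)

    int main(void)
    {
        char *name = strdup("cycles");

        zfree(&name);                   /* name is NULL afterwards */
        printf("%s\n", name ? name : "(null)");
        return 0;
    }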
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 1ea7c92..f1b3256 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -68,6 +68,8 @@ struct perf_evsel {
u32 ids;
struct hists hists;
char *name;
+ double scale;
+ const char *unit;
struct event_format *tp_format;
union {
void *priv;
@@ -94,7 +96,7 @@ struct perf_evsel {
struct cpu_map;
struct thread_map;
struct perf_evlist;
-struct perf_record_opts;
+struct record_opts;
struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);
@@ -118,7 +120,7 @@ void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);
void perf_evsel__config(struct perf_evsel *evsel,
- struct perf_record_opts *opts);
+ struct record_opts *opts);
int __perf_evsel__sample_size(u64 sample_type);
void perf_evsel__calc_id_pos(struct perf_evsel *evsel);
@@ -138,6 +140,7 @@ extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
char *bf, size_t size);
const char *perf_evsel__name(struct perf_evsel *evsel);
+
const char *perf_evsel__group_name(struct perf_evsel *evsel);
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 1cd0357..bb3e0ed 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -177,7 +177,7 @@ perf_header__set_cmdline(int argc, const char **argv)
continue; \
else
-static int write_buildid(char *name, size_t name_len, u8 *build_id,
+static int write_buildid(const char *name, size_t name_len, u8 *build_id,
pid_t pid, u16 misc, int fd)
{
int err;
@@ -209,7 +209,7 @@ static int __dsos__write_buildid_table(struct list_head *head,
dsos__for_each_with_build_id(pos, head) {
int err;
- char *name;
+ const char *name;
size_t name_len;
if (!pos->hit)
@@ -387,7 +387,7 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine,
{
bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
bool is_vdso = is_vdso_map(dso->short_name);
- char *name = dso->long_name;
+ const char *name = dso->long_name;
char nm[PATH_MAX];
if (dso__is_kcore(dso)) {
@@ -643,8 +643,7 @@ static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
if (ret < 0)
return ret;
- list_for_each_entry(evsel, &evlist->entries, node) {
-
+ evlist__for_each(evlist, evsel) {
ret = do_write(fd, &evsel->attr, sz);
if (ret < 0)
return ret;
@@ -800,10 +799,10 @@ static void free_cpu_topo(struct cpu_topo *tp)
return;
for (i = 0 ; i < tp->core_sib; i++)
- free(tp->core_siblings[i]);
+ zfree(&tp->core_siblings[i]);
for (i = 0 ; i < tp->thread_sib; i++)
- free(tp->thread_siblings[i]);
+ zfree(&tp->thread_siblings[i]);
free(tp);
}
@@ -1092,7 +1091,7 @@ static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
if (ret < 0)
return ret;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
if (perf_evsel__is_group_leader(evsel) &&
evsel->nr_members > 1) {
const char *name = evsel->group_name ?: "{anon_group}";
@@ -1232,10 +1231,8 @@ static void free_event_desc(struct perf_evsel *events)
return;
for (evsel = events; evsel->attr.size; evsel++) {
- if (evsel->name)
- free(evsel->name);
- if (evsel->id)
- free(evsel->id);
+ zfree(&evsel->name);
+ zfree(&evsel->id);
}
free(events);
@@ -1326,8 +1323,7 @@ read_event_desc(struct perf_header *ph, int fd)
}
}
out:
- if (buf)
- free(buf);
+ free(buf);
return events;
error:
if (events)
@@ -1490,7 +1486,7 @@ static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
session = container_of(ph, struct perf_session, header);
- list_for_each_entry(evsel, &session->evlist->entries, node) {
+ evlist__for_each(session->evlist, evsel) {
if (perf_evsel__is_group_leader(evsel) &&
evsel->nr_members > 1) {
fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
@@ -1709,7 +1705,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
struct perf_header *ph, int fd,
void *data __maybe_unused)
{
- size_t ret;
+ ssize_t ret;
u32 nr;
ret = readn(fd, &nr, sizeof(nr));
@@ -1753,7 +1749,7 @@ static int process_total_mem(struct perf_file_section *section __maybe_unused,
void *data __maybe_unused)
{
uint64_t mem;
- size_t ret;
+ ssize_t ret;
ret = readn(fd, &mem, sizeof(mem));
if (ret != sizeof(mem))
@@ -1771,7 +1767,7 @@ perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
{
struct perf_evsel *evsel;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
if (evsel->idx == idx)
return evsel;
}
@@ -1822,7 +1818,7 @@ static int process_cmdline(struct perf_file_section *section __maybe_unused,
struct perf_header *ph, int fd,
void *data __maybe_unused)
{
- size_t ret;
+ ssize_t ret;
char *str;
u32 nr, i;
struct strbuf sb;
@@ -1858,7 +1854,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
struct perf_header *ph, int fd,
void *data __maybe_unused)
{
- size_t ret;
+ ssize_t ret;
u32 nr, i;
char *str;
struct strbuf sb;
@@ -1914,7 +1910,7 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
struct perf_header *ph, int fd,
void *data __maybe_unused)
{
- size_t ret;
+ ssize_t ret;
u32 nr, node, i;
char *str;
uint64_t mem_total, mem_free;
@@ -1974,7 +1970,7 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused
struct perf_header *ph, int fd,
void *data __maybe_unused)
{
- size_t ret;
+ ssize_t ret;
char *name;
u32 pmu_num;
u32 type;
@@ -2074,7 +2070,7 @@ static int process_group_desc(struct perf_file_section *section __maybe_unused,
session->evlist->nr_groups = nr_groups;
i = nr = 0;
- list_for_each_entry(evsel, &session->evlist->entries, node) {
+ evlist__for_each(session->evlist, evsel) {
if (evsel->idx == (int) desc[i].leader_idx) {
evsel->leader = evsel;
/* {anon_group} is a dummy name */
@@ -2108,7 +2104,7 @@ static int process_group_desc(struct perf_file_section *section __maybe_unused,
ret = 0;
out_free:
for (i = 0; i < nr_groups; i++)
- free(desc[i].name);
+ zfree(&desc[i].name);
free(desc);
return ret;
@@ -2301,7 +2297,7 @@ int perf_session__write_header(struct perf_session *session,
lseek(fd, sizeof(f_header), SEEK_SET);
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(session->evlist, evsel) {
evsel->id_offset = lseek(fd, 0, SEEK_CUR);
err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
if (err < 0) {
@@ -2312,7 +2308,7 @@ int perf_session__write_header(struct perf_session *session,
attr_offset = lseek(fd, 0, SEEK_CUR);
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
f_attr = (struct perf_file_attr){
.attr = evsel->attr,
.ids = {
@@ -2327,7 +2323,8 @@ int perf_session__write_header(struct perf_session *session,
}
}
- header->data_offset = lseek(fd, 0, SEEK_CUR);
+ if (!header->data_offset)
+ header->data_offset = lseek(fd, 0, SEEK_CUR);
header->feat_offset = header->data_offset + header->data_size;
if (at_exit) {
@@ -2534,7 +2531,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
int perf_file_header__read(struct perf_file_header *header,
struct perf_header *ph, int fd)
{
- int ret;
+ ssize_t ret;
lseek(fd, 0, SEEK_SET);
@@ -2628,7 +2625,7 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
struct perf_header *ph, int fd,
bool repipe)
{
- int ret;
+ ssize_t ret;
ret = readn(fd, header, sizeof(*header));
if (ret <= 0)
@@ -2669,7 +2666,7 @@ static int read_attr(int fd, struct perf_header *ph,
struct perf_event_attr *attr = &f_attr->attr;
size_t sz, left;
size_t our_sz = sizeof(f_attr->attr);
- int ret;
+ ssize_t ret;
memset(f_attr, 0, sizeof(*f_attr));
@@ -2744,7 +2741,7 @@ static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
{
struct perf_evsel *pos;
- list_for_each_entry(pos, &evlist->entries, node) {
+ evlist__for_each(evlist, pos) {
if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
perf_evsel__prepare_tracepoint_event(pos, pevent))
return -1;
@@ -2834,11 +2831,11 @@ int perf_session__read_header(struct perf_session *session)
symbol_conf.nr_events = nr_attrs;
- perf_header__process_sections(header, fd, &session->pevent,
+ perf_header__process_sections(header, fd, &session->tevent,
perf_file_section__process);
if (perf_evlist__prepare_tracepoint_events(session->evlist,
- session->pevent))
+ session->tevent.pevent))
goto out_delete_evlist;
return 0;
@@ -2892,7 +2889,7 @@ int perf_event__synthesize_attrs(struct perf_tool *tool,
struct perf_evsel *evsel;
int err = 0;
- list_for_each_entry(evsel, &session->evlist->entries, node) {
+ evlist__for_each(session->evlist, evsel) {
err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
evsel->id, process);
if (err) {
@@ -3003,7 +3000,7 @@ int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
lseek(fd, offset + sizeof(struct tracing_data_event),
SEEK_SET);
- size_read = trace_report(fd, &session->pevent,
+ size_read = trace_report(fd, &session->tevent,
session->repipe);
padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
@@ -3025,7 +3022,7 @@ int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
}
perf_evlist__prepare_tracepoint_events(session->evlist,
- session->pevent);
+ session->tevent.pevent);
return size_read + padding;
}
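
Several process_*() readers in header.c switch ret from size_t to ssize_t. readn() can return a negative value, and storing that in an unsigned variable turns every ret < 0 test into dead code; only the ret != sizeof(x) comparison was saving the old version. A tiny illustration, with read_stub() standing in for readn():

    #include <stdio.h>
    #include <sys/types.h>

    static ssize_t read_stub(void)
    {
        return -1;              /* simulate an I/O error from readn() */
    }

    int main(void)
    {
        size_t  bad  = read_stub();     /* -1 wraps to a huge unsigned value */
        ssize_t good = read_stub();

        /* the first test is always false; that is exactly the bug */
        printf("unsigned ret < 0? %s\n", bad  < 0 ? "yes" : "no");
        printf("signed   ret < 0? %s\n", good < 0 ? "yes" : "no");
        return 0;
    }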
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 307c9ae..a2d047b 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -77,16 +77,16 @@ struct perf_session_env {
unsigned long long total_mem;
int nr_cmdline;
- char *cmdline;
int nr_sibling_cores;
- char *sibling_cores;
int nr_sibling_threads;
- char *sibling_threads;
int nr_numa_nodes;
- char *numa_nodes;
int nr_pmu_mappings;
- char *pmu_mappings;
int nr_groups;
+ char *cmdline;
+ char *sibling_cores;
+ char *sibling_threads;
+ char *numa_nodes;
+ char *pmu_mappings;
};
struct perf_header {
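
The perf_session_env reshuffle groups the int nr_* counters ahead of the char * pointers; assuming the point is layout rather than behaviour, that removes the 4 bytes of padding each interleaved int/pointer pair costs on LP64 targets. A quick way to see the effect:

    #include <stdio.h>

    struct interleaved {
        int   a; char *pa;
        int   b; char *pb;
        int   c; char *pc;
    };

    struct grouped {
        int   a, b, c;
        char *pa, *pb, *pc;
    };

    int main(void)
    {
        /* on a typical LP64 ABI: 48 vs 40 bytes */
        printf("interleaved: %zu\n", sizeof(struct interleaved));
        printf("grouped:     %zu\n", sizeof(struct grouped));
        return 0;
    }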
diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c
index 8b1f6e8..86c37c4 100644
--- a/tools/perf/util/help.c
+++ b/tools/perf/util/help.c
@@ -22,8 +22,8 @@ static void clean_cmdnames(struct cmdnames *cmds)
unsigned int i;
for (i = 0; i < cmds->cnt; ++i)
- free(cmds->names[i]);
- free(cmds->names);
+ zfree(&cmds->names[i]);
+ zfree(&cmds->names);
cmds->cnt = 0;
cmds->alloc = 0;
}
@@ -263,9 +263,8 @@ static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
for (i = 0; i < old->cnt; i++)
cmds->names[cmds->cnt++] = old->names[i];
- free(old->names);
+ zfree(&old->names);
old->cnt = 0;
- old->names = NULL;
}
const char *help_unknown_cmd(const char *cmd)
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 822903e..e4e6249 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1,4 +1,3 @@
-#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
@@ -182,21 +181,21 @@ void hists__output_recalc_col_len(struct hists *hists, int max_rows)
}
}
-static void hist_entry__add_cpumode_period(struct hist_entry *he,
- unsigned int cpumode, u64 period)
+static void he_stat__add_cpumode_period(struct he_stat *he_stat,
+ unsigned int cpumode, u64 period)
{
switch (cpumode) {
case PERF_RECORD_MISC_KERNEL:
- he->stat.period_sys += period;
+ he_stat->period_sys += period;
break;
case PERF_RECORD_MISC_USER:
- he->stat.period_us += period;
+ he_stat->period_us += period;
break;
case PERF_RECORD_MISC_GUEST_KERNEL:
- he->stat.period_guest_sys += period;
+ he_stat->period_guest_sys += period;
break;
case PERF_RECORD_MISC_GUEST_USER:
- he->stat.period_guest_us += period;
+ he_stat->period_guest_us += period;
break;
default:
break;
@@ -223,10 +222,10 @@ static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
dest->weight += src->weight;
}
-static void hist_entry__decay(struct hist_entry *he)
+static void he_stat__decay(struct he_stat *he_stat)
{
- he->stat.period = (he->stat.period * 7) / 8;
- he->stat.nr_events = (he->stat.nr_events * 7) / 8;
+ he_stat->period = (he_stat->period * 7) / 8;
+ he_stat->nr_events = (he_stat->nr_events * 7) / 8;
/* XXX need decay for weight too? */
}
@@ -237,7 +236,7 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
if (prev_period == 0)
return true;
- hist_entry__decay(he);
+ he_stat__decay(&he->stat);
if (!he->filtered)
hists->stats.total_period -= prev_period - he->stat.period;
@@ -342,15 +341,15 @@ static u8 symbol__parent_filter(const struct symbol *parent)
}
static struct hist_entry *add_hist_entry(struct hists *hists,
- struct hist_entry *entry,
- struct addr_location *al,
- u64 period,
- u64 weight)
+ struct hist_entry *entry,
+ struct addr_location *al)
{
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
int64_t cmp;
+ u64 period = entry->stat.period;
+ u64 weight = entry->stat.weight;
p = &hists->entries_in->rb_node;
@@ -373,7 +372,7 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
* This mem info was allocated from machine__resolve_mem
* and will not be used anymore.
*/
- free(entry->mem_info);
+ zfree(&entry->mem_info);
/* If the map of an existing hist_entry has
* become out-of-date due to an exec() or
@@ -403,7 +402,7 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
- hist_entry__add_cpumode_period(he, al->cpumode, period);
+ he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
return he;
}
@@ -437,7 +436,7 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
.transaction = transaction,
};
- return add_hist_entry(hists, &entry, al, period, weight);
+ return add_hist_entry(hists, &entry, al);
}
int64_t
@@ -476,8 +475,8 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
void hist_entry__free(struct hist_entry *he)
{
- free(he->branch_info);
- free(he->mem_info);
+ zfree(&he->branch_info);
+ zfree(&he->mem_info);
free_srcline(he->srcline);
free(he);
}
@@ -807,16 +806,6 @@ void hists__filter_by_symbol(struct hists *hists)
}
}
-int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
-{
- return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
-}
-
-int hist_entry__annotate(struct hist_entry *he, size_t privsize)
-{
- return symbol__annotate(he->ms.sym, he->ms.map, privsize);
-}
-
void events_stats__inc(struct events_stats *stats, u32 type)
{
++stats->nr_events[0];
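
he_stat__decay() keeps the existing 7/8 integer decay, merely operating on a he_stat instead of the whole hist_entry, so N passes leave roughly (7/8)^N of the original period. A trivial check of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t decay(uint64_t period)
    {
        return (period * 7) / 8;  /* same integer arithmetic as he_stat__decay() */
    }

    int main(void)
    {
        uint64_t period = 1000000;

        for (int i = 1; i <= 4; i++) {
            period = decay(period);
            printf("after %d passes: %llu\n", i, (unsigned long long)period);
        }
        /* prints 875000, 765625, 669921, 586180 */
        return 0;
    }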
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index b621347a..a59743f 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -111,9 +111,6 @@ size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
int max_cols, float min_pcnt, FILE *fp);
-int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 addr);
-int hist_entry__annotate(struct hist_entry *he, size_t privsize);
-
void hists__filter_by_dso(struct hists *hists);
void hists__filter_by_thread(struct hists *hists);
void hists__filter_by_symbol(struct hists *hists);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 84cdb07..ded7459 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -9,6 +9,7 @@
#include "strlist.h"
#include "thread.h"
#include <stdbool.h>
+#include <symbol/kallsyms.h>
#include "unwind.h"
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
@@ -26,6 +27,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
machine->pid = pid;
machine->symbol_filter = NULL;
+ machine->id_hdr_size = 0;
machine->root_dir = strdup(root_dir);
if (machine->root_dir == NULL)
@@ -101,8 +103,7 @@ void machine__exit(struct machine *machine)
map_groups__exit(&machine->kmaps);
dsos__delete(&machine->user_dsos);
dsos__delete(&machine->kernel_dsos);
- free(machine->root_dir);
- machine->root_dir = NULL;
+ zfree(&machine->root_dir);
}
void machine__delete(struct machine *machine)
@@ -502,15 +503,11 @@ static u64 machine__get_kernel_start_addr(struct machine *machine)
char path[PATH_MAX];
struct process_args args;
- if (machine__is_host(machine)) {
- filename = "/proc/kallsyms";
- } else {
- if (machine__is_default_guest(machine))
- filename = (char *)symbol_conf.default_guest_kallsyms;
- else {
- sprintf(path, "%s/proc/kallsyms", machine->root_dir);
- filename = path;
- }
+ if (machine__is_default_guest(machine))
+ filename = (char *)symbol_conf.default_guest_kallsyms;
+ else {
+ sprintf(path, "%s/proc/kallsyms", machine->root_dir);
+ filename = path;
}
if (symbol__restricted_filename(filename, "/proc/kallsyms"))
@@ -565,11 +562,10 @@ void machine__destroy_kernel_maps(struct machine *machine)
* on one of them.
*/
if (type == MAP__FUNCTION) {
- free((char *)kmap->ref_reloc_sym->name);
- kmap->ref_reloc_sym->name = NULL;
- free(kmap->ref_reloc_sym);
- }
- kmap->ref_reloc_sym = NULL;
+ zfree((char **)&kmap->ref_reloc_sym->name);
+ zfree(&kmap->ref_reloc_sym);
+ } else
+ kmap->ref_reloc_sym = NULL;
}
map__delete(machine->vmlinux_maps[type]);
@@ -767,8 +763,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
ret = -1;
goto out;
}
- dso__set_long_name(map->dso, long_name);
- map->dso->lname_alloc = 1;
+ dso__set_long_name(map->dso, long_name, true);
dso__kernel_module_get_build_id(map->dso, "");
}
}
@@ -939,8 +934,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
if (name == NULL)
goto out_problem;
- map->dso->short_name = name;
- map->dso->sname_alloc = 1;
+ dso__set_short_name(map->dso, name, true);
map->end = map->start + event->mmap.len;
} else if (is_kernel_mmap) {
const char *symbol_name = (event->mmap.filename +
@@ -1320,8 +1314,6 @@ static int machine__resolve_callchain_sample(struct machine *machine,
*root_al = al;
callchain_cursor_reset(&callchain_cursor);
}
- if (!symbol_conf.use_callchain)
- break;
}
err = callchain_cursor_append(&callchain_cursor,
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index ef5bc91..9b9bd71 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -11,6 +11,7 @@
#include "strlist.h"
#include "vdso.h"
#include "build-id.h"
+#include "util.h"
#include <linux/string.h>
const char *map_type__name[MAP__NR_TYPES] = {
@@ -252,6 +253,22 @@ size_t map__fprintf_dsoname(struct map *map, FILE *fp)
return fprintf(fp, "%s", dsoname);
}
+int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
+ FILE *fp)
+{
+ char *srcline;
+ int ret = 0;
+
+ if (map && map->dso) {
+ srcline = get_srcline(map->dso,
+ map__rip_2objdump(map, addr));
+ if (srcline != SRCLINE_UNKNOWN)
+ ret = fprintf(fp, "%s%s", prefix, srcline);
+ free_srcline(srcline);
+ }
+ return ret;
+}
+
/**
* map__rip_2objdump - convert symbol start address to objdump address.
* @map: memory map
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index e4e259c..18068c6 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -103,6 +103,8 @@ struct map *map__clone(struct map *map);
int map__overlap(struct map *l, struct map *r);
size_t map__fprintf(struct map *map, FILE *fp);
size_t map__fprintf_dsoname(struct map *map, FILE *fp);
+int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
+ FILE *fp);
int map__load(struct map *map, symbol_filter_t filter);
struct symbol *map__find_symbol(struct map *map,
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 6de6f89..a7f1b6a 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -10,7 +10,7 @@
#include "symbol.h"
#include "cache.h"
#include "header.h"
-#include <lk/debugfs.h>
+#include <api/fs/debugfs.h>
#include "parse-events-bison.h"
#define YY_EXTRA_TYPE int
#include "parse-events-flex.h"
@@ -204,7 +204,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
}
path->name = malloc(MAX_EVENT_LENGTH);
if (!path->name) {
- free(path->system);
+ zfree(&path->system);
free(path);
return NULL;
}
@@ -236,8 +236,8 @@ struct tracepoint_path *tracepoint_name_to_path(const char *name)
path->name = strdup(str+1);
if (path->system == NULL || path->name == NULL) {
- free(path->system);
- free(path->name);
+ zfree(&path->system);
+ zfree(&path->name);
free(path);
path = NULL;
}
@@ -269,9 +269,10 @@ const char *event_type(int type)
-static int __add_event(struct list_head *list, int *idx,
- struct perf_event_attr *attr,
- char *name, struct cpu_map *cpus)
+static struct perf_evsel *
+__add_event(struct list_head *list, int *idx,
+ struct perf_event_attr *attr,
+ char *name, struct cpu_map *cpus)
{
struct perf_evsel *evsel;
@@ -279,19 +280,19 @@ static int __add_event(struct list_head *list, int *idx,
evsel = perf_evsel__new_idx(attr, (*idx)++);
if (!evsel)
- return -ENOMEM;
+ return NULL;
evsel->cpus = cpus;
if (name)
evsel->name = strdup(name);
list_add_tail(&evsel->node, list);
- return 0;
+ return evsel;
}
static int add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr, char *name)
{
- return __add_event(list, idx, attr, name, NULL);
+ return __add_event(list, idx, attr, name, NULL) ? 0 : -ENOMEM;
}
static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
@@ -633,6 +634,9 @@ int parse_events_add_pmu(struct list_head *list, int *idx,
{
struct perf_event_attr attr;
struct perf_pmu *pmu;
+ struct perf_evsel *evsel;
+ char *unit;
+ double scale;
pmu = perf_pmu__find(name);
if (!pmu)
@@ -640,7 +644,7 @@ int parse_events_add_pmu(struct list_head *list, int *idx,
memset(&attr, 0, sizeof(attr));
- if (perf_pmu__check_alias(pmu, head_config))
+ if (perf_pmu__check_alias(pmu, head_config, &unit, &scale))
return -EINVAL;
/*
@@ -652,8 +656,14 @@ int parse_events_add_pmu(struct list_head *list, int *idx,
if (perf_pmu__config(pmu, &attr, head_config))
return -EINVAL;
- return __add_event(list, idx, &attr, pmu_event_name(head_config),
- pmu->cpus);
+ evsel = __add_event(list, idx, &attr, pmu_event_name(head_config),
+ pmu->cpus);
+ if (evsel) {
+ evsel->unit = unit;
+ evsel->scale = scale;
+ }
+
+ return evsel ? 0 : -ENOMEM;
}
int parse_events__modifier_group(struct list_head *list,
@@ -810,8 +820,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
if (!add && get_event_modifier(&mod, str, NULL))
return -EINVAL;
- list_for_each_entry(evsel, list, node) {
-
+ __evlist__for_each(list, evsel) {
if (add && get_event_modifier(&mod, str, evsel))
return -EINVAL;
@@ -835,7 +844,7 @@ int parse_events_name(struct list_head *list, char *name)
{
struct perf_evsel *evsel;
- list_for_each_entry(evsel, list, node) {
+ __evlist__for_each(list, evsel) {
if (!evsel->name)
evsel->name = strdup(name);
}
@@ -907,7 +916,7 @@ int parse_events_terms(struct list_head *terms, const char *str)
ret = parse_events__scanner(str, &data, PE_START_TERMS);
if (!ret) {
list_splice(data.terms, terms);
- free(data.terms);
+ zfree(&data.terms);
return 0;
}
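
__add_event() now returns the freshly built evsel (or NULL) instead of 0/-ENOMEM, so parse_events_add_pmu() can attach the unit and scale read from sysfs, while add_event() keeps the old error-code contract as a one-line wrapper. A sketch of that refactor shape, with illustrative types and values:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct evsel {
        const char *name;
        const char *unit;
        double      scale;
    };

    /* the builder returns the object so callers can decorate it ... */
    static struct evsel *__add_event(const char *name)
    {
        struct evsel *e = calloc(1, sizeof(*e));

        if (e) {
            e->name  = name;
            e->unit  = "";
            e->scale = 1.0;
        }
        return e;
    }

    /* ... while the old int-returning entry point stays a thin wrapper */
    static int add_event(const char *name)
    {
        return __add_event(name) ? 0 : -ENOMEM;
    }

    int main(void)
    {
        struct evsel *e = __add_event("some_pmu/some_event/");

        if (e) {
            e->unit  = "MiB";       /* what the PMU alias path would fill in */
            e->scale = 6.0e-5;      /* illustrative value, not a real scale */
            printf("%s unit=%s scale=%g\n", e->name, e->unit, e->scale);
            free(e);
        }
        return add_event("cycles") ? 1 : 0;     /* toy: this one is leaked */
    }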
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index 31f404a..d22e3f80 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -78,6 +78,8 @@ static int get_value(struct parse_opt_ctx_t *p,
case OPTION_BOOLEAN:
*(bool *)opt->value = unset ? false : true;
+ if (opt->set)
+ *(bool *)opt->set = true;
return 0;
case OPTION_INCR:
@@ -224,6 +226,24 @@ static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg,
return 0;
}
if (!rest) {
+ if (!prefixcmp(options->long_name, "no-")) {
+ /*
+ * The long name itself starts with "no-", so
+ * accept the option without "no-" so that users
+ * do not have to enter "no-no-" to get the
+ * negation.
+ */
+ rest = skip_prefix(arg, options->long_name + 3);
+ if (rest) {
+ flags |= OPT_UNSET;
+ goto match;
+ }
+ /* Abbreviated case */
+ if (!prefixcmp(options->long_name + 3, arg)) {
+ flags |= OPT_UNSET;
+ goto is_abbreviated;
+ }
+ }
/* abbreviated? */
if (!strncmp(options->long_name, arg, arg_end - arg)) {
is_abbreviated:
@@ -259,6 +279,7 @@ is_abbreviated:
if (!rest)
continue;
}
+match:
if (*rest) {
if (*rest != '=')
continue;
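
parse_long_opt() above also accepts the positive spelling of an option whose long name itself starts with "no-", treating it as the negation, so users never have to type a double no- prefix. A compact sketch of that matching rule, with a local skip_prefix() standing in for the perf helper:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* return the tail of str after prefix, or NULL if it does not match */
    static const char *skip_prefix(const char *str, const char *prefix)
    {
        size_t len = strlen(prefix);

        return strncmp(str, prefix, len) ? NULL : str + len;
    }

    /* does 'arg' select option 'long_name', and does it negate it? */
    static bool match_long_opt(const char *long_name, const char *arg, bool *unset)
    {
        *unset = false;

        if (!strcmp(arg, long_name))
            return true;                            /* exact match */

        if (skip_prefix(arg, "no-") && !strcmp(arg + 3, long_name)) {
            *unset = true;                          /* --no-<name> */
            return true;
        }

        /* long name itself starts with "no-": accept the positive form */
        if (skip_prefix(long_name, "no-") && !strcmp(arg, long_name + 3)) {
            *unset = true;
            return true;
        }

        return false;
    }

    int main(void)
    {
        bool unset;
        bool hit = match_long_opt("no-buffering", "buffering", &unset);

        printf("hit=%d unset=%d\n", hit, unset);    /* hit=1 unset=1 */
        hit = match_long_opt("inherit", "no-inherit", &unset);
        printf("hit=%d unset=%d\n", hit, unset);    /* hit=1 unset=1 */
        return 0;
    }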
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
index b0241e2..cbf0149 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/perf/util/parse-options.h
@@ -82,6 +82,9 @@ typedef int parse_opt_cb(const struct option *, const char *arg, int unset);
* OPTION_{BIT,SET_UINT,SET_PTR} store the {mask,integer,pointer} to put in
* the value when met.
* CALLBACKS can use it like they want.
+ *
+ * `set`::
+ * whether an option was set by the user
*/
struct option {
enum parse_opt_type type;
@@ -94,6 +97,7 @@ struct option {
int flags;
parse_opt_cb *callback;
intptr_t defval;
+ bool *set;
};
#define check_vtype(v, type) ( BUILD_BUG_ON_ZERO(!__builtin_types_compatible_p(typeof(v), type)) + v )
@@ -103,6 +107,10 @@ struct option {
#define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) }
#define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h), .defval = (b) }
#define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h) }
+#define OPT_BOOLEAN_SET(s, l, v, os, h) \
+ { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), \
+ .value = check_vtype(v, bool *), .help = (h), \
+ .set = check_vtype(os, bool *)}
#define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) }
#define OPT_SET_UINT(s, l, v, h, i) { .type = OPTION_SET_UINT, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h), .defval = (i) }
#define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) }
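
The new OPT_BOOLEAN_SET() records not only the boolean value but also whether the user touched the option at all, which lets a tool distinguish "default" from "explicitly disabled". A minimal sketch; variable names and the fallback helper are illustrative:

#include <stdbool.h>
#include "parse-options.h"

static bool inherit;
static bool inherit_set;	/* true only if the option was given */

static const struct option options[] = {
	OPT_BOOLEAN_SET('i', "inherit", &inherit, &inherit_set,
			"child tasks inherit counters"),
	OPT_END()
};

/* later, after parse_options(): */
/*
 *	if (!inherit_set)
 *		inherit = default_from_config();   // hypothetical fallback
 */
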
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index c232d8d..d9cab4d 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -1,19 +1,23 @@
#include <linux/list.h>
#include <sys/types.h>
-#include <sys/stat.h>
#include <unistd.h>
#include <stdio.h>
#include <dirent.h>
#include "fs.h"
+#include <locale.h>
#include "util.h"
#include "pmu.h"
#include "parse-events.h"
#include "cpumap.h"
+#define UNIT_MAX_LEN 31 /* max length for event unit name */
+
struct perf_pmu_alias {
char *name;
struct list_head terms;
struct list_head list;
+ char unit[UNIT_MAX_LEN+1];
+ double scale;
};
struct perf_pmu_format {
@@ -94,7 +98,80 @@ static int pmu_format(const char *name, struct list_head *format)
return 0;
}
-static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file)
+static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *name)
+{
+ struct stat st;
+ ssize_t sret;
+ char scale[128];
+ int fd, ret = -1;
+ char path[PATH_MAX];
+ char *lc;
+
+ snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1)
+ return -1;
+
+ if (fstat(fd, &st) < 0)
+ goto error;
+
+ sret = read(fd, scale, sizeof(scale)-1);
+ if (sret < 0)
+ goto error;
+
+ scale[sret] = '\0';
+ /*
+ * save current locale
+ */
+ lc = setlocale(LC_NUMERIC, NULL);
+
+ /*
+ * force to C locale to ensure kernel
+ * scale string is converted correctly.
+ * kernel uses default C locale.
+ */
+ setlocale(LC_NUMERIC, "C");
+
+ alias->scale = strtod(scale, NULL);
+
+ /* restore locale */
+ setlocale(LC_NUMERIC, lc);
+
+ ret = 0;
+error:
+ close(fd);
+ return ret;
+}
+
+static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *name)
+{
+ char path[PATH_MAX];
+ ssize_t sret;
+ int fd;
+
+ snprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1)
+ return -1;
+
+ sret = read(fd, alias->unit, UNIT_MAX_LEN);
+ if (sret < 0)
+ goto error;
+
+ close(fd);
+
+ alias->unit[sret] = '\0';
+
+ return 0;
+error:
+ close(fd);
+ alias->unit[0] = '\0';
+ return -1;
+}
+
+static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FILE *file)
{
struct perf_pmu_alias *alias;
char buf[256];
@@ -110,6 +187,9 @@ static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file)
return -ENOMEM;
INIT_LIST_HEAD(&alias->terms);
+ alias->scale = 1.0;
+ alias->unit[0] = '\0';
+
ret = parse_events_terms(&alias->terms, buf);
if (ret) {
free(alias);
@@ -117,7 +197,14 @@ static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file)
}
alias->name = strdup(name);
+ /*
+ * load unit name and scale if available
+ */
+ perf_pmu__parse_unit(alias, dir, name);
+ perf_pmu__parse_scale(alias, dir, name);
+
list_add_tail(&alias->list, list);
+
return 0;
}
@@ -129,6 +216,7 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
{
struct dirent *evt_ent;
DIR *event_dir;
+ size_t len;
int ret = 0;
event_dir = opendir(dir);
@@ -143,13 +231,24 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
if (!strcmp(name, ".") || !strcmp(name, ".."))
continue;
+ /*
+ * skip .unit and .scale info files
+ * parsed in perf_pmu__new_alias()
+ */
+ len = strlen(name);
+ if (len > 5 && !strcmp(name + len - 5, ".unit"))
+ continue;
+ if (len > 6 && !strcmp(name + len - 6, ".scale"))
+ continue;
+
snprintf(path, PATH_MAX, "%s/%s", dir, name);
ret = -EINVAL;
file = fopen(path, "r");
if (!file)
break;
- ret = perf_pmu__new_alias(head, name, file);
+
+ ret = perf_pmu__new_alias(head, dir, name, file);
fclose(file);
}
@@ -406,7 +505,7 @@ static __u64 pmu_format_value(unsigned long *format, __u64 value)
/*
* Setup one of config[12] attr members based on the
- * user input data - temr parameter.
+ * user input data - term parameter.
*/
static int pmu_config_term(struct list_head *formats,
struct perf_event_attr *attr,
@@ -508,16 +607,42 @@ static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
return NULL;
}
+
+static int check_unit_scale(struct perf_pmu_alias *alias,
+ char **unit, double *scale)
+{
+ /*
+ * Only one term in event definition can
+ * define unit and scale, fail if there's
+ * more than one.
+ */
+ if ((*unit && alias->unit) ||
+ (*scale && alias->scale))
+ return -EINVAL;
+
+ if (alias->unit)
+ *unit = alias->unit;
+
+ if (alias->scale)
+ *scale = alias->scale;
+
+ return 0;
+}
+
/*
* Find alias in the terms list and replace it with the terms
* defined for the alias
*/
-int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms)
+int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
+ char **unit, double *scale)
{
struct parse_events_term *term, *h;
struct perf_pmu_alias *alias;
int ret;
+ *unit = NULL;
+ *scale = 0;
+
list_for_each_entry_safe(term, h, head_terms, list) {
alias = pmu_find_alias(pmu, term);
if (!alias)
@@ -525,6 +650,11 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms)
ret = pmu_alias_terms(alias, &term->list);
if (ret)
return ret;
+
+ ret = check_unit_scale(alias, unit, scale);
+ if (ret)
+ return ret;
+
list_del(&term->list);
free(term);
}
@@ -625,7 +755,7 @@ void print_pmu_events(const char *event_glob, bool name_only)
continue;
}
printf(" %-50s [Kernel PMU event]\n", aliases[j]);
- free(aliases[j]);
+ zfree(&aliases[j]);
printed++;
}
if (printed)
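
perf_pmu__parse_scale() above forces LC_NUMERIC to "C" because the kernel always writes the sysfs scale value with a '.' decimal separator, while strtod() honours the user's locale. The same save/force/restore pattern in isolation (a sketch, not the patch code; it additionally copies the saved locale string, which the hunk above does not do):

#include <locale.h>
#include <stdlib.h>
#include <string.h>

static double parse_kernel_double(const char *buf)
{
	char *saved, *copy;
	double val;

	/* setlocale() returns a pointer to static storage, so duplicate it
	 * before switching locales. */
	saved = setlocale(LC_NUMERIC, NULL);
	copy = saved ? strdup(saved) : NULL;

	setlocale(LC_NUMERIC, "C");	/* sysfs values use the C locale */
	val = strtod(buf, NULL);

	if (copy) {
		setlocale(LC_NUMERIC, copy);	/* restore user locale */
		free(copy);
	}
	return val;
}
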
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 1179b26..9183380 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -28,7 +28,8 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
int perf_pmu__config_terms(struct list_head *formats,
struct perf_event_attr *attr,
struct list_head *head_terms);
-int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms);
+int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
+ char **unit, double *scale);
struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
struct list_head *head_terms);
int perf_pmu_wrap(void);
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 9c6989c..a8a9b6c 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -40,7 +40,7 @@
#include "color.h"
#include "symbol.h"
#include "thread.h"
-#include <lk/debugfs.h>
+#include <api/fs/debugfs.h>
#include "trace-event.h" /* For __maybe_unused */
#include "probe-event.h"
#include "probe-finder.h"
@@ -72,6 +72,7 @@ static int e_snprintf(char *str, size_t size, const char *format, ...)
static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
static int convert_name_to_addr(struct perf_probe_event *pev,
const char *exec);
+static void clear_probe_trace_event(struct probe_trace_event *tev);
static struct machine machine;
/* Initialize symbol maps and path of vmlinux/modules */
@@ -154,7 +155,7 @@ static struct dso *kernel_get_module_dso(const char *module)
vmlinux_name = symbol_conf.vmlinux_name;
if (vmlinux_name) {
- if (dso__load_vmlinux(dso, map, vmlinux_name, NULL) <= 0)
+ if (dso__load_vmlinux(dso, map, vmlinux_name, false, NULL) <= 0)
return NULL;
} else {
if (dso__load_vmlinux_path(dso, map, NULL) <= 0) {
@@ -186,6 +187,37 @@ static int init_user_exec(void)
return ret;
}
+static int convert_exec_to_group(const char *exec, char **result)
+{
+ char *ptr1, *ptr2, *exec_copy;
+ char buf[64];
+ int ret;
+
+ exec_copy = strdup(exec);
+ if (!exec_copy)
+ return -ENOMEM;
+
+ ptr1 = basename(exec_copy);
+ if (!ptr1) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ptr2 = strpbrk(ptr1, "-._");
+ if (ptr2)
+ *ptr2 = '\0';
+ ret = e_snprintf(buf, 64, "%s_%s", PERFPROBE_GROUP, ptr1);
+ if (ret < 0)
+ goto out;
+
+ *result = strdup(buf);
+ ret = *result ? 0 : -ENOMEM;
+
+out:
+ free(exec_copy);
+ return ret;
+}
+
static int convert_to_perf_probe_point(struct probe_trace_point *tp,
struct perf_probe_point *pp)
{
@@ -261,6 +293,68 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
return 0;
}
+static int get_text_start_address(const char *exec, unsigned long *address)
+{
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+ int fd, ret = -ENOENT;
+
+ fd = open(exec, O_RDONLY);
+ if (fd < 0)
+ return -errno;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL)
+ return -EINVAL;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ goto out;
+
+ if (!elf_section_by_name(elf, &ehdr, &shdr, ".text", NULL))
+ goto out;
+
+ *address = shdr.sh_addr - shdr.sh_offset;
+ ret = 0;
+out:
+ elf_end(elf);
+ return ret;
+}
+
+static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
+ int ntevs, const char *exec)
+{
+ int i, ret = 0;
+ unsigned long offset, stext = 0;
+ char buf[32];
+
+ if (!exec)
+ return 0;
+
+ ret = get_text_start_address(exec, &stext);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ntevs && ret >= 0; i++) {
+ offset = tevs[i].point.address - stext;
+ offset += tevs[i].point.offset;
+ tevs[i].point.offset = 0;
+ zfree(&tevs[i].point.symbol);
+ ret = e_snprintf(buf, 32, "0x%lx", offset);
+ if (ret < 0)
+ break;
+ tevs[i].point.module = strdup(exec);
+ tevs[i].point.symbol = strdup(buf);
+ if (!tevs[i].point.symbol || !tevs[i].point.module) {
+ ret = -ENOMEM;
+ break;
+ }
+ tevs[i].uprobes = true;
+ }
+
+ return ret;
+}
+
static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
int ntevs, const char *module)
{
@@ -290,12 +384,18 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
}
}
- if (tmp)
- free(tmp);
-
+ free(tmp);
return ret;
}
+static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
+{
+ int i;
+
+ for (i = 0; i < ntevs; i++)
+ clear_probe_trace_event(tevs + i);
+}
+
/* Try to find perf_probe_event with debuginfo */
static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs,
@@ -305,15 +405,6 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
struct debuginfo *dinfo;
int ntevs, ret = 0;
- if (pev->uprobes) {
- if (need_dwarf) {
- pr_warning("Debuginfo-analysis is not yet supported"
- " with -x/--exec option.\n");
- return -ENOSYS;
- }
- return convert_name_to_addr(pev, target);
- }
-
dinfo = open_debuginfo(target);
if (!dinfo) {
@@ -332,9 +423,18 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
if (ntevs > 0) { /* Succeeded to find trace events */
pr_debug("find %d probe_trace_events.\n", ntevs);
- if (target)
- ret = add_module_to_probe_trace_events(*tevs, ntevs,
- target);
+ if (target) {
+ if (pev->uprobes)
+ ret = add_exec_to_probe_trace_events(*tevs,
+ ntevs, target);
+ else
+ ret = add_module_to_probe_trace_events(*tevs,
+ ntevs, target);
+ }
+ if (ret < 0) {
+ clear_probe_trace_events(*tevs, ntevs);
+ zfree(tevs);
+ }
return ret < 0 ? ret : ntevs;
}
@@ -401,15 +501,13 @@ static int get_real_path(const char *raw_path, const char *comp_dir,
case EFAULT:
raw_path = strchr(++raw_path, '/');
if (!raw_path) {
- free(*new_path);
- *new_path = NULL;
+ zfree(new_path);
return -ENOENT;
}
continue;
default:
- free(*new_path);
- *new_path = NULL;
+ zfree(new_path);
return -errno;
}
}
@@ -580,7 +678,7 @@ static int show_available_vars_at(struct debuginfo *dinfo,
*/
fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol,
vl->point.offset);
- free(vl->point.symbol);
+ zfree(&vl->point.symbol);
nvars = 0;
if (vl->vars) {
strlist__for_each(node, vl->vars) {
@@ -647,16 +745,14 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs __maybe_unused,
- int max_tevs __maybe_unused, const char *target)
+ int max_tevs __maybe_unused,
+ const char *target __maybe_unused)
{
if (perf_probe_event_need_dwarf(pev)) {
pr_warning("Debuginfo-analysis is not supported.\n");
return -ENOSYS;
}
- if (pev->uprobes)
- return convert_name_to_addr(pev, target);
-
return 0;
}
@@ -678,6 +774,28 @@ int show_available_vars(struct perf_probe_event *pevs __maybe_unused,
}
#endif
+void line_range__clear(struct line_range *lr)
+{
+ struct line_node *ln;
+
+ free(lr->function);
+ free(lr->file);
+ free(lr->path);
+ free(lr->comp_dir);
+ while (!list_empty(&lr->line_list)) {
+ ln = list_first_entry(&lr->line_list, struct line_node, list);
+ list_del(&ln->list);
+ free(ln);
+ }
+ memset(lr, 0, sizeof(*lr));
+}
+
+void line_range__init(struct line_range *lr)
+{
+ memset(lr, 0, sizeof(*lr));
+ INIT_LIST_HEAD(&lr->line_list);
+}
+
static int parse_line_num(char **ptr, int *val, const char *what)
{
const char *start = *ptr;
@@ -1278,8 +1396,7 @@ static char *synthesize_perf_probe_point(struct perf_probe_point *pp)
error:
pr_debug("Failed to synthesize perf probe point: %s\n",
strerror(-ret));
- if (buf)
- free(buf);
+ free(buf);
return NULL;
}
@@ -1480,34 +1597,25 @@ void clear_perf_probe_event(struct perf_probe_event *pev)
struct perf_probe_arg_field *field, *next;
int i;
- if (pev->event)
- free(pev->event);
- if (pev->group)
- free(pev->group);
- if (pp->file)
- free(pp->file);
- if (pp->function)
- free(pp->function);
- if (pp->lazy_line)
- free(pp->lazy_line);
+ free(pev->event);
+ free(pev->group);
+ free(pp->file);
+ free(pp->function);
+ free(pp->lazy_line);
+
for (i = 0; i < pev->nargs; i++) {
- if (pev->args[i].name)
- free(pev->args[i].name);
- if (pev->args[i].var)
- free(pev->args[i].var);
- if (pev->args[i].type)
- free(pev->args[i].type);
+ free(pev->args[i].name);
+ free(pev->args[i].var);
+ free(pev->args[i].type);
field = pev->args[i].field;
while (field) {
next = field->next;
- if (field->name)
- free(field->name);
+ zfree(&field->name);
free(field);
field = next;
}
}
- if (pev->args)
- free(pev->args);
+ free(pev->args);
memset(pev, 0, sizeof(*pev));
}
@@ -1516,21 +1624,14 @@ static void clear_probe_trace_event(struct probe_trace_event *tev)
struct probe_trace_arg_ref *ref, *next;
int i;
- if (tev->event)
- free(tev->event);
- if (tev->group)
- free(tev->group);
- if (tev->point.symbol)
- free(tev->point.symbol);
- if (tev->point.module)
- free(tev->point.module);
+ free(tev->event);
+ free(tev->group);
+ free(tev->point.symbol);
+ free(tev->point.module);
for (i = 0; i < tev->nargs; i++) {
- if (tev->args[i].name)
- free(tev->args[i].name);
- if (tev->args[i].value)
- free(tev->args[i].value);
- if (tev->args[i].type)
- free(tev->args[i].type);
+ free(tev->args[i].name);
+ free(tev->args[i].value);
+ free(tev->args[i].type);
ref = tev->args[i].ref;
while (ref) {
next = ref->next;
@@ -1538,8 +1639,7 @@ static void clear_probe_trace_event(struct probe_trace_event *tev)
ref = next;
}
}
- if (tev->args)
- free(tev->args);
+ free(tev->args);
memset(tev, 0, sizeof(*tev));
}
@@ -1913,14 +2013,29 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
int max_tevs, const char *target)
{
struct symbol *sym;
- int ret = 0, i;
+ int ret, i;
struct probe_trace_event *tev;
+ if (pev->uprobes && !pev->group) {
+ /* Replace group name if not given */
+ ret = convert_exec_to_group(target, &pev->group);
+ if (ret != 0) {
+ pr_warning("Failed to make a group name.\n");
+ return ret;
+ }
+ }
+
/* Convert perf_probe_event with debuginfo */
ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, target);
if (ret != 0)
return ret; /* Found in debuginfo or got an error */
+ if (pev->uprobes) {
+ ret = convert_name_to_addr(pev, target);
+ if (ret < 0)
+ return ret;
+ }
+
/* Allocate trace event buffer */
tev = *tevs = zalloc(sizeof(struct probe_trace_event));
if (tev == NULL)
@@ -2056,7 +2171,7 @@ end:
for (i = 0; i < npevs; i++) {
for (j = 0; j < pkgs[i].ntevs; j++)
clear_probe_trace_event(&pkgs[i].tevs[j]);
- free(pkgs[i].tevs);
+ zfree(&pkgs[i].tevs);
}
free(pkgs);
@@ -2281,7 +2396,7 @@ static int convert_name_to_addr(struct perf_probe_event *pev, const char *exec)
struct perf_probe_point *pp = &pev->point;
struct symbol *sym;
struct map *map = NULL;
- char *function = NULL, *name = NULL;
+ char *function = NULL;
int ret = -EINVAL;
unsigned long long vaddr = 0;
@@ -2297,12 +2412,7 @@ static int convert_name_to_addr(struct perf_probe_event *pev, const char *exec)
goto out;
}
- name = realpath(exec, NULL);
- if (!name) {
- pr_warning("Cannot find realpath for %s.\n", exec);
- goto out;
- }
- map = dso__new_map(name);
+ map = dso__new_map(exec);
if (!map) {
pr_warning("Cannot find appropriate DSO for %s.\n", exec);
goto out;
@@ -2367,7 +2477,5 @@ out:
}
if (function)
free(function);
- if (name)
- free(name);
return ret;
}
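
get_text_start_address() above computes the bias between the .text section's link-time address and its file offset (sh_addr - sh_offset), so that addresses found via debuginfo can be rebased into file offsets for uprobe definitions. A self-contained sketch of the same lookup using plain libelf, without perf's elf_section_by_name() helper and with error handling trimmed:

#include <fcntl.h>
#include <gelf.h>
#include <libelf.h>
#include <string.h>
#include <unistd.h>

static int text_start_address(const char *path, unsigned long *bias)
{
	Elf *elf;
	Elf_Scn *scn = NULL;
	GElf_Shdr shdr;
	size_t shstrndx;
	int fd, ret = -1;

	if (elf_version(EV_CURRENT) == EV_NONE)
		return -1;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf) {
		close(fd);
		return -1;
	}

	if (elf_getshdrstrndx(elf, &shstrndx))
		goto out;

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		const char *name;

		if (gelf_getshdr(scn, &shdr) == NULL)
			continue;
		name = elf_strptr(elf, shstrndx, shdr.sh_name);
		if (name && !strcmp(name, ".text")) {
			/* link-time address minus file offset of .text */
			*bias = shdr.sh_addr - shdr.sh_offset;
			ret = 0;
			break;
		}
	}
out:
	elf_end(elf);
	close(fd);
	return ret;
}
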
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index f9f3de8..fcaf727 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -12,6 +12,7 @@ struct probe_trace_point {
char *symbol; /* Base symbol */
char *module; /* Module name */
unsigned long offset; /* Offset from symbol */
+ unsigned long address; /* Actual address of the trace point */
bool retprobe; /* Return probe flag */
};
@@ -119,6 +120,12 @@ extern void clear_perf_probe_event(struct perf_probe_event *pev);
/* Command string to line-range */
extern int parse_line_range_desc(const char *cmd, struct line_range *lr);
+/* Release line range members */
+extern void line_range__clear(struct line_range *lr);
+
+/* Initialize line range */
+extern void line_range__init(struct line_range *lr);
+
/* Internal use: Return kernel/module path */
extern const char *kernel_get_module_path(const char *module);
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index ffb657f..061edb1 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -226,10 +226,8 @@ struct debuginfo *debuginfo__new(const char *path)
if (!dbg)
return NULL;
- if (debuginfo__init_offline_dwarf(dbg, path) < 0) {
- free(dbg);
- dbg = NULL;
- }
+ if (debuginfo__init_offline_dwarf(dbg, path) < 0)
+ zfree(&dbg);
return dbg;
}
@@ -241,10 +239,8 @@ struct debuginfo *debuginfo__new_online_kernel(unsigned long addr)
if (!dbg)
return NULL;
- if (debuginfo__init_online_kernel_dwarf(dbg, (Dwarf_Addr)addr) < 0) {
- free(dbg);
- dbg = NULL;
- }
+ if (debuginfo__init_online_kernel_dwarf(dbg, (Dwarf_Addr)addr) < 0)
+ zfree(&dbg);
return dbg;
}
@@ -729,6 +725,7 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
return -ENOENT;
}
tp->offset = (unsigned long)(paddr - sym.st_value);
+ tp->address = (unsigned long)paddr;
tp->symbol = strdup(symbol);
if (!tp->symbol)
return -ENOMEM;
@@ -1301,8 +1298,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
ret = debuginfo__find_probes(dbg, &tf.pf);
if (ret < 0) {
- free(*tevs);
- *tevs = NULL;
+ zfree(tevs);
return ret;
}
@@ -1413,13 +1409,10 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
if (ret < 0) {
/* Free vlist for error */
while (af.nvls--) {
- if (af.vls[af.nvls].point.symbol)
- free(af.vls[af.nvls].point.symbol);
- if (af.vls[af.nvls].vars)
- strlist__delete(af.vls[af.nvls].vars);
+ zfree(&af.vls[af.nvls].point.symbol);
+ strlist__delete(af.vls[af.nvls].vars);
}
- free(af.vls);
- *vls = NULL;
+ zfree(vls);
return ret;
}
@@ -1523,10 +1516,7 @@ post:
if (fname) {
ppt->file = strdup(fname);
if (ppt->file == NULL) {
- if (ppt->function) {
- free(ppt->function);
- ppt->function = NULL;
- }
+ zfree(&ppt->function);
ret = -ENOMEM;
goto end;
}
@@ -1580,8 +1570,7 @@ static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
else
ret = 0; /* Lines are not found */
else {
- free(lf->lr->path);
- lf->lr->path = NULL;
+ zfree(&lf->lr->path);
}
return ret;
}
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 239036f..595bfc7 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -18,4 +18,5 @@ util/cgroup.c
util/rblist.c
util/strlist.c
util/fs.c
+util/trace-event.c
../../lib/rbtree.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 4bf8ace..122669c 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -908,9 +908,10 @@ static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
if (i >= pevlist->evlist.nr_entries)
return NULL;
- list_for_each_entry(pos, &pevlist->evlist.entries, node)
+ evlist__for_each(&pevlist->evlist, pos) {
if (i-- == 0)
break;
+ }
return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
}
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index c8845b1..3737625 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -74,8 +74,7 @@ bool perf_can_sample_identifier(void)
return perf_probe_api(perf_probe_sample_identifier);
}
-void perf_evlist__config(struct perf_evlist *evlist,
- struct perf_record_opts *opts)
+void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
{
struct perf_evsel *evsel;
bool use_sample_identifier = false;
@@ -90,19 +89,19 @@ void perf_evlist__config(struct perf_evlist *evlist,
if (evlist->cpus->map[0] < 0)
opts->no_inherit = true;
- list_for_each_entry(evsel, &evlist->entries, node)
+ evlist__for_each(evlist, evsel)
perf_evsel__config(evsel, opts);
if (evlist->nr_entries > 1) {
struct perf_evsel *first = perf_evlist__first(evlist);
- list_for_each_entry(evsel, &evlist->entries, node) {
+ evlist__for_each(evlist, evsel) {
if (evsel->attr.sample_type == first->attr.sample_type)
continue;
use_sample_identifier = perf_can_sample_identifier();
break;
}
- list_for_each_entry(evsel, &evlist->entries, node)
+ evlist__for_each(evlist, evsel)
perf_evsel__set_sample_id(evsel, use_sample_identifier);
}
@@ -123,7 +122,7 @@ static int get_max_rate(unsigned int *rate)
return filename__read_int(path, (int *) rate);
}
-static int perf_record_opts__config_freq(struct perf_record_opts *opts)
+static int record_opts__config_freq(struct record_opts *opts)
{
bool user_freq = opts->user_freq != UINT_MAX;
unsigned int max_rate;
@@ -173,7 +172,44 @@ static int perf_record_opts__config_freq(struct perf_record_opts *opts)
return 0;
}
-int perf_record_opts__config(struct perf_record_opts *opts)
+int record_opts__config(struct record_opts *opts)
{
- return perf_record_opts__config_freq(opts);
+ return record_opts__config_freq(opts);
+}
+
+bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
+{
+ struct perf_evlist *temp_evlist;
+ struct perf_evsel *evsel;
+ int err, fd, cpu;
+ bool ret = false;
+
+ temp_evlist = perf_evlist__new();
+ if (!temp_evlist)
+ return false;
+
+ err = parse_events(temp_evlist, str);
+ if (err)
+ goto out_delete;
+
+ evsel = perf_evlist__last(temp_evlist);
+
+ if (!evlist || cpu_map__empty(evlist->cpus)) {
+ struct cpu_map *cpus = cpu_map__new(NULL);
+
+ cpu = cpus ? cpus->map[0] : 0;
+ cpu_map__delete(cpus);
+ } else {
+ cpu = evlist->cpus->map[0];
+ }
+
+ fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
+ if (fd >= 0) {
+ close(fd);
+ ret = true;
+ }
+
+out_delete:
+ perf_evlist__delete(temp_evlist);
+ return ret;
}
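
perf_evlist__can_select_event() above answers "would this event actually open on this machine?" by parsing the string into a throwaway evlist and attempting a real perf_event_open() on one CPU. The core probe, stripped of the evlist plumbing, looks roughly like this (raw syscall; depending on perf_event_paranoid it may require elevated privileges):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int can_open_event(__u32 type, __u64 config)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;	/* e.g. PERF_TYPE_HARDWARE */
	attr.config = config;	/* e.g. PERF_COUNT_HW_CPU_CYCLES */
	attr.disabled = 1;

	/* pid = -1, cpu = 0: any task on CPU 0, like the helper above */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 0;
	close(fd);
	return 1;
}
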
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index d5e5969..e108207 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -194,8 +194,7 @@ static void define_event_symbols(struct event_format *event,
zero_flag_atom = 0;
break;
case PRINT_FIELD:
- if (cur_field_name)
- free(cur_field_name);
+ free(cur_field_name);
cur_field_name = strdup(args->field.name);
break;
case PRINT_FLAGS:
@@ -257,12 +256,9 @@ static inline struct event_format *find_cache_event(struct perf_evsel *evsel)
return event;
}
-static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
- struct perf_sample *sample,
+static void perl_process_tracepoint(struct perf_sample *sample,
struct perf_evsel *evsel,
- struct machine *machine __maybe_unused,
- struct thread *thread,
- struct addr_location *al)
+ struct thread *thread)
{
struct format_field *field;
static char handler[256];
@@ -349,10 +345,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
static void perl_process_event_generic(union perf_event *event,
struct perf_sample *sample,
- struct perf_evsel *evsel,
- struct machine *machine __maybe_unused,
- struct thread *thread __maybe_unused,
- struct addr_location *al __maybe_unused)
+ struct perf_evsel *evsel)
{
dSP;
@@ -377,12 +370,11 @@ static void perl_process_event_generic(union perf_event *event,
static void perl_process_event(union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct machine *machine,
struct thread *thread,
- struct addr_location *al)
+ struct addr_location *al __maybe_unused)
{
- perl_process_tracepoint(event, sample, evsel, machine, thread, al);
- perl_process_event_generic(event, sample, evsel, machine, thread, al);
+ perl_process_tracepoint(sample, evsel, thread);
+ perl_process_event_generic(event, sample, evsel);
}
static void run_start_sub(void)
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 53c20e7..cd9774d 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -161,8 +161,7 @@ static void define_event_symbols(struct event_format *event,
zero_flag_atom = 0;
break;
case PRINT_FIELD:
- if (cur_field_name)
- free(cur_field_name);
+ free(cur_field_name);
cur_field_name = strdup(args->field.name);
break;
case PRINT_FLAGS:
@@ -231,13 +230,10 @@ static inline struct event_format *find_cache_event(struct perf_evsel *evsel)
return event;
}
-static void python_process_tracepoint(union perf_event *perf_event
- __maybe_unused,
- struct perf_sample *sample,
- struct perf_evsel *evsel,
- struct machine *machine __maybe_unused,
- struct thread *thread,
- struct addr_location *al)
+static void python_process_tracepoint(struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct thread *thread,
+ struct addr_location *al)
{
PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
static char handler_name[256];
@@ -351,11 +347,8 @@ static void python_process_tracepoint(union perf_event *perf_event
Py_DECREF(t);
}
-static void python_process_general_event(union perf_event *perf_event
- __maybe_unused,
- struct perf_sample *sample,
+static void python_process_general_event(struct perf_sample *sample,
struct perf_evsel *evsel,
- struct machine *machine __maybe_unused,
struct thread *thread,
struct addr_location *al)
{
@@ -411,22 +404,19 @@ exit:
Py_DECREF(t);
}
-static void python_process_event(union perf_event *perf_event,
+static void python_process_event(union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct machine *machine,
struct thread *thread,
struct addr_location *al)
{
switch (evsel->attr.type) {
case PERF_TYPE_TRACEPOINT:
- python_process_tracepoint(perf_event, sample, evsel,
- machine, thread, al);
+ python_process_tracepoint(sample, evsel, thread, al);
break;
/* Reserve for future process_hw/sw/raw APIs */
default:
- python_process_general_event(perf_event, sample, evsel,
- machine, thread, al);
+ python_process_general_event(sample, evsel, thread, al);
}
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index f36d24a..7acc03e 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -132,18 +132,18 @@ static void perf_session__delete_threads(struct perf_session *session)
static void perf_session_env__delete(struct perf_session_env *env)
{
- free(env->hostname);
- free(env->os_release);
- free(env->version);
- free(env->arch);
- free(env->cpu_desc);
- free(env->cpuid);
+ zfree(&env->hostname);
+ zfree(&env->os_release);
+ zfree(&env->version);
+ zfree(&env->arch);
+ zfree(&env->cpu_desc);
+ zfree(&env->cpuid);
- free(env->cmdline);
- free(env->sibling_cores);
- free(env->sibling_threads);
- free(env->numa_nodes);
- free(env->pmu_mappings);
+ zfree(&env->cmdline);
+ zfree(&env->sibling_cores);
+ zfree(&env->sibling_threads);
+ zfree(&env->numa_nodes);
+ zfree(&env->pmu_mappings);
}
void perf_session__delete(struct perf_session *session)
@@ -247,27 +247,6 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
}
}
-void mem_bswap_32(void *src, int byte_size)
-{
- u32 *m = src;
- while (byte_size > 0) {
- *m = bswap_32(*m);
- byte_size -= sizeof(u32);
- ++m;
- }
-}
-
-void mem_bswap_64(void *src, int byte_size)
-{
- u64 *m = src;
-
- while (byte_size > 0) {
- *m = bswap_64(*m);
- byte_size -= sizeof(u64);
- ++m;
- }
-}
-
static void swap_sample_id_all(union perf_event *event, void *data)
{
void *end = (void *) event + event->header.size;
@@ -851,6 +830,7 @@ static struct machine *
struct perf_sample *sample)
{
const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct machine *machine;
if (perf_guest &&
((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
@@ -863,7 +843,11 @@ static struct machine *
else
pid = sample->pid;
- return perf_session__findnew_machine(session, pid);
+ machine = perf_session__find_machine(session, pid);
+ if (!machine)
+ machine = perf_session__findnew_machine(session,
+ DEFAULT_GUEST_KERNEL_ID);
+ return machine;
}
return &session->machines.host;
@@ -1158,7 +1142,7 @@ static int __perf_session__process_pipe_events(struct perf_session *session,
void *buf = NULL;
int skip = 0;
u64 head;
- int err;
+ ssize_t err;
void *p;
perf_tool__fill_defaults(tool);
@@ -1400,7 +1384,7 @@ bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
struct perf_evsel *evsel;
- list_for_each_entry(evsel, &session->evlist->entries, node) {
+ evlist__for_each(session->evlist, evsel) {
if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
return true;
}
@@ -1458,7 +1442,7 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
ret += events_stats__fprintf(&session->stats, fp);
- list_for_each_entry(pos, &session->evlist->entries, node) {
+ evlist__for_each(session->evlist, pos) {
ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
ret += events_stats__fprintf(&pos->hists.stats, fp);
}
@@ -1480,35 +1464,30 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
{
struct perf_evsel *pos;
- list_for_each_entry(pos, &session->evlist->entries, node) {
+ evlist__for_each(session->evlist, pos) {
if (pos->attr.type == type)
return pos;
}
return NULL;
}
-void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
- struct perf_sample *sample, struct machine *machine,
+void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
+ struct addr_location *al,
unsigned int print_opts, unsigned int stack_depth)
{
- struct addr_location al;
struct callchain_cursor_node *node;
int print_ip = print_opts & PRINT_IP_OPT_IP;
int print_sym = print_opts & PRINT_IP_OPT_SYM;
int print_dso = print_opts & PRINT_IP_OPT_DSO;
int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
+ int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
char s = print_oneline ? ' ' : '\t';
- if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
- error("problem processing %d event, skipping it.\n",
- event->header.type);
- return;
- }
-
if (symbol_conf.use_callchain && sample->callchain) {
+ struct addr_location node_al;
- if (machine__resolve_callchain(machine, evsel, al.thread,
+ if (machine__resolve_callchain(al->machine, evsel, al->thread,
sample, NULL, NULL,
PERF_MAX_STACK_DEPTH) != 0) {
if (verbose)
@@ -1517,20 +1496,31 @@ void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
}
callchain_cursor_commit(&callchain_cursor);
+ if (print_symoffset)
+ node_al = *al;
+
while (stack_depth) {
+ u64 addr = 0;
+
node = callchain_cursor_current(&callchain_cursor);
if (!node)
break;
+ if (node->sym && node->sym->ignore)
+ goto next;
+
if (print_ip)
printf("%c%16" PRIx64, s, node->ip);
+ if (node->map)
+ addr = node->map->map_ip(node->map, node->ip);
+
if (print_sym) {
printf(" ");
if (print_symoffset) {
- al.addr = node->ip;
- al.map = node->map;
- symbol__fprintf_symname_offs(node->sym, &al, stdout);
+ node_al.addr = addr;
+ node_al.map = node->map;
+ symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
} else
symbol__fprintf_symname(node->sym, stdout);
}
@@ -1541,32 +1531,42 @@ void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
printf(")");
}
+ if (print_srcline)
+ map__fprintf_srcline(node->map, addr, "\n ",
+ stdout);
+
if (!print_oneline)
printf("\n");
- callchain_cursor_advance(&callchain_cursor);
-
stack_depth--;
+next:
+ callchain_cursor_advance(&callchain_cursor);
}
} else {
+ if (al->sym && al->sym->ignore)
+ return;
+
if (print_ip)
printf("%16" PRIx64, sample->ip);
if (print_sym) {
printf(" ");
if (print_symoffset)
- symbol__fprintf_symname_offs(al.sym, &al,
+ symbol__fprintf_symname_offs(al->sym, al,
stdout);
else
- symbol__fprintf_symname(al.sym, stdout);
+ symbol__fprintf_symname(al->sym, stdout);
}
if (print_dso) {
printf(" (");
- map__fprintf_dsoname(al.map, stdout);
+ map__fprintf_dsoname(al->map, stdout);
printf(")");
}
+
+ if (print_srcline)
+ map__fprintf_srcline(al->map, al->addr, "\n ", stdout);
}
}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 50f6409..3140f8a 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -1,6 +1,7 @@
#ifndef __PERF_SESSION_H
#define __PERF_SESSION_H
+#include "trace-event.h"
#include "hist.h"
#include "event.h"
#include "header.h"
@@ -32,7 +33,7 @@ struct perf_session {
struct perf_header header;
struct machines machines;
struct perf_evlist *evlist;
- struct pevent *pevent;
+ struct trace_event tevent;
struct events_stats stats;
bool repipe;
struct ordered_samples ordered_samples;
@@ -44,6 +45,7 @@ struct perf_session {
#define PRINT_IP_OPT_DSO (1<<2)
#define PRINT_IP_OPT_SYMOFFSET (1<<3)
#define PRINT_IP_OPT_ONELINE (1<<4)
+#define PRINT_IP_OPT_SRCLINE (1<<5)
struct perf_tool;
@@ -72,8 +74,6 @@ int perf_session__resolve_callchain(struct perf_session *session,
bool perf_session__has_traces(struct perf_session *session, const char *msg);
-void mem_bswap_64(void *src, int byte_size);
-void mem_bswap_32(void *src, int byte_size);
void perf_event__attr_swap(struct perf_event_attr *attr);
int perf_session__create_kernel_maps(struct perf_session *session);
@@ -105,8 +105,8 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type);
-void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
- struct perf_sample *sample, struct machine *machine,
+void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
+ struct addr_location *al,
unsigned int print_opts, unsigned int stack_depth);
int perf_session__cpu_bitmap(struct perf_session *session,
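
PRINT_IP_OPT_SRCLINE joins the existing bit mask of print options; callers OR together the bits they want and pass the result to the reworked perf_evsel__print_ip(). A trivial sketch of a caller, assuming the sample has already been resolved into an addr_location:

#include "session.h"

static void print_sample(struct perf_evsel *evsel, struct perf_sample *sample,
			 struct addr_location *al)
{
	unsigned int print_opts = PRINT_IP_OPT_IP  |
				  PRINT_IP_OPT_SYM |
				  PRINT_IP_OPT_DSO |
				  PRINT_IP_OPT_SRCLINE;	/* new bit */

	perf_evsel__print_ip(evsel, sample, al, print_opts,
			     PERF_MAX_STACK_DEPTH);
}
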
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 58ea5ca..d0aee4b 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -25,7 +25,7 @@ cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter'
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
-liblk = getenv('LIBLK')
+libapikfs = getenv('LIBAPIKFS')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
@@ -34,7 +34,7 @@ perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
- extra_objects = [libtraceevent, liblk],
+ extra_objects = [libtraceevent, libapikfs],
)
setup(name='perf',
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 8b0bb1f..635cd8f 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -13,6 +13,7 @@ int have_ignore_callees = 0;
int sort__need_collapse = 0;
int sort__has_parent = 0;
int sort__has_sym = 0;
+int sort__has_dso = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
enum sort_type sort__first_dimension;
@@ -161,6 +162,11 @@ struct sort_entry sort_dso = {
/* --sort symbol */
+static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
+{
+ return (int64_t)(right_ip - left_ip);
+}
+
static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
u64 ip_l, ip_r;
@@ -183,15 +189,17 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
int64_t ret;
if (!left->ms.sym && !right->ms.sym)
- return right->level - left->level;
+ return _sort__addr_cmp(left->ip, right->ip);
/*
* comparing symbol address alone is not enough since it's a
* relative address within a dso.
*/
- ret = sort__dso_cmp(left, right);
- if (ret != 0)
- return ret;
+ if (!sort__has_dso) {
+ ret = sort__dso_cmp(left, right);
+ if (ret != 0)
+ return ret;
+ }
return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}
@@ -372,7 +380,7 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
struct addr_map_symbol *from_r = &right->branch_info->from;
if (!from_l->sym && !from_r->sym)
- return right->level - left->level;
+ return _sort__addr_cmp(from_l->addr, from_r->addr);
return _sort__sym_cmp(from_l->sym, from_r->sym);
}
@@ -384,7 +392,7 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
struct addr_map_symbol *to_r = &right->branch_info->to;
if (!to_l->sym && !to_r->sym)
- return right->level - left->level;
+ return _sort__addr_cmp(to_l->addr, to_r->addr);
return _sort__sym_cmp(to_l->sym, to_r->sym);
}
@@ -1056,6 +1064,8 @@ int sort_dimension__add(const char *tok)
sort__has_parent = 1;
} else if (sd->entry == &sort_sym) {
sort__has_sym = 1;
+ } else if (sd->entry == &sort_dso) {
+ sort__has_dso = 1;
}
__sort_dimension__add(sd, i);
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index d11aefb..f3e4bc5 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -129,7 +129,7 @@ static struct a2l_data *addr2line_init(const char *path)
out:
if (a2l) {
- free((void *)a2l->input);
+ zfree((char **)&a2l->input);
free(a2l);
}
bfd_close(abfd);
@@ -140,24 +140,30 @@ static void addr2line_cleanup(struct a2l_data *a2l)
{
if (a2l->abfd)
bfd_close(a2l->abfd);
- free((void *)a2l->input);
- free(a2l->syms);
+ zfree((char **)&a2l->input);
+ zfree(&a2l->syms);
free(a2l);
}
static int addr2line(const char *dso_name, unsigned long addr,
- char **file, unsigned int *line)
+ char **file, unsigned int *line, struct dso *dso)
{
int ret = 0;
- struct a2l_data *a2l;
+ struct a2l_data *a2l = dso->a2l;
+
+ if (!a2l) {
+ dso->a2l = addr2line_init(dso_name);
+ a2l = dso->a2l;
+ }
- a2l = addr2line_init(dso_name);
if (a2l == NULL) {
pr_warning("addr2line_init failed for %s\n", dso_name);
return 0;
}
a2l->addr = addr;
+ a2l->found = false;
+
bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l);
if (a2l->found && a2l->filename) {
@@ -168,14 +174,26 @@ static int addr2line(const char *dso_name, unsigned long addr,
ret = 1;
}
- addr2line_cleanup(a2l);
return ret;
}
+void dso__free_a2l(struct dso *dso)
+{
+ struct a2l_data *a2l = dso->a2l;
+
+ if (!a2l)
+ return;
+
+ addr2line_cleanup(a2l);
+
+ dso->a2l = NULL;
+}
+
#else /* HAVE_LIBBFD_SUPPORT */
static int addr2line(const char *dso_name, unsigned long addr,
- char **file, unsigned int *line_nr)
+ char **file, unsigned int *line_nr,
+ struct dso *dso __maybe_unused)
{
FILE *fp;
char cmd[PATH_MAX];
@@ -219,42 +237,58 @@ out:
pclose(fp);
return ret;
}
+
+void dso__free_a2l(struct dso *dso __maybe_unused)
+{
+}
+
#endif /* HAVE_LIBBFD_SUPPORT */
+/*
+ * Number of addr2line failures (without success) before disabling it for that
+ * dso.
+ */
+#define A2L_FAIL_LIMIT 123
+
char *get_srcline(struct dso *dso, unsigned long addr)
{
char *file = NULL;
unsigned line = 0;
char *srcline;
- char *dso_name = dso->long_name;
- size_t size;
+ const char *dso_name;
if (!dso->has_srcline)
return SRCLINE_UNKNOWN;
+ if (dso->symsrc_filename)
+ dso_name = dso->symsrc_filename;
+ else
+ dso_name = dso->long_name;
+
if (dso_name[0] == '[')
goto out;
if (!strncmp(dso_name, "/tmp/perf-", 10))
goto out;
- if (!addr2line(dso_name, addr, &file, &line))
+ if (!addr2line(dso_name, addr, &file, &line, dso))
goto out;
- /* just calculate actual length */
- size = snprintf(NULL, 0, "%s:%u", file, line) + 1;
+ if (asprintf(&srcline, "%s:%u", file, line) < 0) {
+ free(file);
+ goto out;
+ }
- srcline = malloc(size);
- if (srcline)
- snprintf(srcline, size, "%s:%u", file, line);
- else
- srcline = SRCLINE_UNKNOWN;
+ dso->a2l_fails = 0;
free(file);
return srcline;
out:
- dso->has_srcline = 0;
+ if (dso->a2l_fails && ++dso->a2l_fails > A2L_FAIL_LIMIT) {
+ dso->has_srcline = 0;
+ dso__free_a2l(dso);
+ }
return SRCLINE_UNKNOWN;
}
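
Two behavioural changes land in srcline.c: the libbfd addr2line state is cached on the dso (one bfd open per DSO instead of one per address), and repeated failures eventually disable srcline resolution for that DSO. The gating pattern in isolation, with hypothetical names and a hypothetical backend call:

#define FAIL_LIMIT 123	/* same spirit as A2L_FAIL_LIMIT above */

struct cache {
	void *state;		/* lazily-initialised resolver handle */
	unsigned int fails;	/* consecutive failures, reset on success */
	int enabled;
};

const char *do_resolve(struct cache *c, unsigned long addr); /* hypothetical */

static const char *resolve(struct cache *c, unsigned long addr)
{
	const char *line;

	if (!c->enabled)
		return "??";

	line = do_resolve(c, addr);
	if (line) {
		c->fails = 0;
		return line;
	}

	if (++c->fails > FAIL_LIMIT)
		c->enabled = 0;	/* stop paying for a hopeless DSO */
	return "??";
}
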
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index cfa9068..4abe235 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -28,7 +28,7 @@ void strbuf_init(struct strbuf *sb, ssize_t hint)
void strbuf_release(struct strbuf *sb)
{
if (sb->alloc) {
- free(sb->buf);
+ zfree(&sb->buf);
strbuf_init(sb, 0);
}
}
diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c
index 3edd053..79a757a 100644
--- a/tools/perf/util/strfilter.c
+++ b/tools/perf/util/strfilter.c
@@ -14,7 +14,7 @@ static void strfilter_node__delete(struct strfilter_node *node)
{
if (node) {
if (node->p && !is_operator(*node->p))
- free((char *)node->p);
+ zfree((char **)&node->p);
strfilter_node__delete(node->l);
strfilter_node__delete(node->r);
free(node);
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index f0b0c00..2553e5b 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -128,7 +128,7 @@ void argv_free(char **argv)
{
char **p;
for (p = argv; *p; p++)
- free(*p);
+ zfree(p);
free(argv);
}
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index eabdce0..71f9d10 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -5,6 +5,7 @@
*/
#include "strlist.h"
+#include "util.h"
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
@@ -38,7 +39,7 @@ out_delete:
static void str_node__delete(struct str_node *snode, bool dupstr)
{
if (dupstr)
- free((void *)snode->s);
+ zfree((char **)&snode->s);
free(snode);
}
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 96c8660..43262b8 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -17,8 +17,12 @@
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
+#include <linux/bitops.h>
+#include "perf.h"
#include "svghelper.h"
+#include "util.h"
+#include "cpumap.h"
static u64 first_time, last_time;
static u64 turbo_frequency, max_freq;
@@ -28,6 +32,8 @@ static u64 turbo_frequency, max_freq;
#define SLOT_HEIGHT 25.0
int svg_page_width = 1000;
+u64 svg_highlight;
+const char *svg_highlight_name;
#define MIN_TEXT_SIZE 0.01
@@ -39,9 +45,14 @@ static double cpu2slot(int cpu)
return 2 * cpu + 1;
}
+static int *topology_map;
+
static double cpu2y(int cpu)
{
- return cpu2slot(cpu) * SLOT_MULT;
+ if (topology_map)
+ return cpu2slot(topology_map[cpu]) * SLOT_MULT;
+ else
+ return cpu2slot(cpu) * SLOT_MULT;
}
static double time2pixels(u64 __time)
@@ -95,6 +106,7 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT;
fprintf(svgfile, "<?xml version=\"1.0\" standalone=\"no\"?> \n");
+ fprintf(svgfile, "<!DOCTYPE svg SYSTEM \"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n");
fprintf(svgfile, "<svg width=\"%i\" height=\"%" PRIu64 "\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
fprintf(svgfile, "<defs>\n <style type=\"text/css\">\n <![CDATA[\n");
@@ -103,6 +115,7 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
fprintf(svgfile, " rect.process { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:1; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.process2 { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.sample { fill:rgb( 0, 0,255); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.sample_hi{ fill:rgb(255,128, 0); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.blocked { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.waiting { fill:rgb(224,214, 0); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.WAITING { fill:rgb(255,214, 48); fill-opacity:0.6; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
@@ -128,14 +141,42 @@ void svg_box(int Yslot, u64 start, u64 end, const char *type)
time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT, type);
}
-void svg_sample(int Yslot, int cpu, u64 start, u64 end)
+static char *time_to_string(u64 duration);
+void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
+{
+ if (!svgfile)
+ return;
+
+ fprintf(svgfile, "<g>\n");
+ fprintf(svgfile, "<title>#%d blocked %s</title>\n", cpu,
+ time_to_string(end - start));
+ if (backtrace)
+ fprintf(svgfile, "<desc>Blocked on:\n%s</desc>\n", backtrace);
+ svg_box(Yslot, start, end, "blocked");
+ fprintf(svgfile, "</g>\n");
+}
+
+void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
{
double text_size;
+ const char *type;
+
if (!svgfile)
return;
- fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"sample\"/>\n",
- time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT);
+ if (svg_highlight && end - start > svg_highlight)
+ type = "sample_hi";
+ else
+ type = "sample";
+ fprintf(svgfile, "<g>\n");
+
+ fprintf(svgfile, "<title>#%d running %s</title>\n",
+ cpu, time_to_string(end - start));
+ if (backtrace)
+ fprintf(svgfile, "<desc>Switched because:\n%s</desc>\n", backtrace);
+ fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"%s\"/>\n",
+ time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT,
+ type);
text_size = (time2pixels(end)-time2pixels(start));
if (cpu > 9)
@@ -148,6 +189,7 @@ void svg_sample(int Yslot, int cpu, u64 start, u64 end)
fprintf(svgfile, "<text x=\"%1.8f\" y=\"%1.8f\" font-size=\"%1.8fpt\">%i</text>\n",
time2pixels(start), Yslot * SLOT_MULT + SLOT_HEIGHT - 1, text_size, cpu + 1);
+ fprintf(svgfile, "</g>\n");
}
static char *time_to_string(u64 duration)
@@ -168,7 +210,7 @@ static char *time_to_string(u64 duration)
return text;
}
-void svg_waiting(int Yslot, u64 start, u64 end)
+void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
{
char *text;
const char *style;
@@ -192,6 +234,9 @@ void svg_waiting(int Yslot, u64 start, u64 end)
font_size = round_text_size(font_size);
fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), Yslot * SLOT_MULT);
+ fprintf(svgfile, "<title>#%d waiting %s</title>\n", cpu, time_to_string(end - start));
+ if (backtrace)
+ fprintf(svgfile, "<desc>Waiting on:\n%s</desc>\n", backtrace);
fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
time2pixels(end)-time2pixels(start), SLOT_HEIGHT, style);
if (font_size > MIN_TEXT_SIZE)
@@ -242,28 +287,42 @@ void svg_cpu_box(int cpu, u64 __max_freq, u64 __turbo_freq)
max_freq = __max_freq;
turbo_frequency = __turbo_freq;
+ fprintf(svgfile, "<g>\n");
+
fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"cpu\"/>\n",
time2pixels(first_time),
time2pixels(last_time)-time2pixels(first_time),
cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
- sprintf(cpu_string, "CPU %i", (int)cpu+1);
+ sprintf(cpu_string, "CPU %i", (int)cpu);
fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\">%s</text>\n",
10+time2pixels(first_time), cpu2y(cpu) + SLOT_HEIGHT/2, cpu_string);
fprintf(svgfile, "<text transform=\"translate(%4.8f,%4.8f)\" font-size=\"1.25pt\">%s</text>\n",
10+time2pixels(first_time), cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - 4, cpu_model());
+
+ fprintf(svgfile, "</g>\n");
}
-void svg_process(int cpu, u64 start, u64 end, const char *type, const char *name)
+void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace)
{
double width;
+ const char *type;
if (!svgfile)
return;
+ if (svg_highlight && end - start >= svg_highlight)
+ type = "sample_hi";
+ else if (svg_highlight_name && strstr(name, svg_highlight_name))
+ type = "sample_hi";
+ else
+ type = "sample";
fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), cpu2y(cpu));
+ fprintf(svgfile, "<title>%d %s running %s</title>\n", pid, name, time_to_string(end - start));
+ if (backtrace)
+ fprintf(svgfile, "<desc>Switched because:\n%s</desc>\n", backtrace);
fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
time2pixels(end)-time2pixels(start), SLOT_MULT+SLOT_HEIGHT, type);
width = time2pixels(end)-time2pixels(start);
@@ -288,6 +347,8 @@ void svg_cstate(int cpu, u64 start, u64 end, int type)
return;
+ fprintf(svgfile, "<g>\n");
+
if (type > 6)
type = 6;
sprintf(style, "c%i", type);
@@ -306,6 +367,8 @@ void svg_cstate(int cpu, u64 start, u64 end, int type)
if (width > MIN_TEXT_SIZE)
fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"%3.8fpt\">C%i</text>\n",
time2pixels(start), cpu2y(cpu)+width, width, type);
+
+ fprintf(svgfile, "</g>\n");
}
static char *HzToHuman(unsigned long hz)
@@ -339,6 +402,8 @@ void svg_pstate(int cpu, u64 start, u64 end, u64 freq)
if (!svgfile)
return;
+ fprintf(svgfile, "<g>\n");
+
if (max_freq)
height = freq * 1.0 / max_freq * (SLOT_HEIGHT + SLOT_MULT);
height = 1 + cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - height;
@@ -347,10 +412,11 @@ void svg_pstate(int cpu, u64 start, u64 end, u64 freq)
fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"0.25pt\">%s</text>\n",
time2pixels(start), height+0.9, HzToHuman(freq));
+ fprintf(svgfile, "</g>\n");
}
-void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2)
+void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2, const char *backtrace)
{
double height;
@@ -358,6 +424,15 @@ void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc
return;
+ fprintf(svgfile, "<g>\n");
+
+ fprintf(svgfile, "<title>%s wakes up %s</title>\n",
+ desc1 ? desc1 : "?",
+ desc2 ? desc2 : "?");
+
+ if (backtrace)
+ fprintf(svgfile, "<desc>%s</desc>\n", backtrace);
+
if (row1 < row2) {
if (row1) {
fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
@@ -395,9 +470,11 @@ void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc
if (row1)
fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(32,255,32)\"/>\n",
time2pixels(start), height);
+
+ fprintf(svgfile, "</g>\n");
}
-void svg_wakeline(u64 start, int row1, int row2)
+void svg_wakeline(u64 start, int row1, int row2, const char *backtrace)
{
double height;
@@ -405,6 +482,11 @@ void svg_wakeline(u64 start, int row1, int row2)
return;
+ fprintf(svgfile, "<g>\n");
+
+ if (backtrace)
+ fprintf(svgfile, "<desc>%s</desc>\n", backtrace);
+
if (row1 < row2)
fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT);
@@ -417,17 +499,28 @@ void svg_wakeline(u64 start, int row1, int row2)
height += SLOT_HEIGHT;
fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(32,255,32)\"/>\n",
time2pixels(start), height);
+
+ fprintf(svgfile, "</g>\n");
}
-void svg_interrupt(u64 start, int row)
+void svg_interrupt(u64 start, int row, const char *backtrace)
{
if (!svgfile)
return;
+ fprintf(svgfile, "<g>\n");
+
+ fprintf(svgfile, "<title>Wakeup from interrupt</title>\n");
+
+ if (backtrace)
+ fprintf(svgfile, "<desc>%s</desc>\n", backtrace);
+
fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(255,128,128)\"/>\n",
time2pixels(start), row * SLOT_MULT);
fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(255,128,128)\"/>\n",
time2pixels(start), row * SLOT_MULT + SLOT_HEIGHT);
+
+ fprintf(svgfile, "</g>\n");
}
void svg_text(int Yslot, u64 start, const char *text)
@@ -455,6 +548,7 @@ void svg_legenda(void)
if (!svgfile)
return;
+ fprintf(svgfile, "<g>\n");
svg_legenda_box(0, "Running", "sample");
svg_legenda_box(100, "Idle","c1");
svg_legenda_box(200, "Deeper Idle", "c3");
@@ -462,6 +556,7 @@ void svg_legenda(void)
svg_legenda_box(550, "Sleeping", "process2");
svg_legenda_box(650, "Waiting for cpu", "waiting");
svg_legenda_box(800, "Blocked on IO", "blocked");
+ fprintf(svgfile, "</g>\n");
}
void svg_time_grid(void)
@@ -499,3 +594,123 @@ void svg_close(void)
svgfile = NULL;
}
}
+
+#define cpumask_bits(maskp) ((maskp)->bits)
+typedef struct { DECLARE_BITMAP(bits, MAX_NR_CPUS); } cpumask_t;
+
+struct topology {
+ cpumask_t *sib_core;
+ int sib_core_nr;
+ cpumask_t *sib_thr;
+ int sib_thr_nr;
+};
+
+static void scan_thread_topology(int *map, struct topology *t, int cpu, int *pos)
+{
+ int i;
+ int thr;
+
+ for (i = 0; i < t->sib_thr_nr; i++) {
+ if (!test_bit(cpu, cpumask_bits(&t->sib_thr[i])))
+ continue;
+
+ for_each_set_bit(thr,
+ cpumask_bits(&t->sib_thr[i]),
+ MAX_NR_CPUS)
+ if (map[thr] == -1)
+ map[thr] = (*pos)++;
+ }
+}
+
+static void scan_core_topology(int *map, struct topology *t)
+{
+ int pos = 0;
+ int i;
+ int cpu;
+
+ for (i = 0; i < t->sib_core_nr; i++)
+ for_each_set_bit(cpu,
+ cpumask_bits(&t->sib_core[i]),
+ MAX_NR_CPUS)
+ scan_thread_topology(map, t, cpu, &pos);
+}
+
+static int str_to_bitmap(char *s, cpumask_t *b)
+{
+ int i;
+ int ret = 0;
+ struct cpu_map *m;
+ int c;
+
+ m = cpu_map__new(s);
+ if (!m)
+ return -1;
+
+ for (i = 0; i < m->nr; i++) {
+ c = m->map[i];
+ if (c >= MAX_NR_CPUS) {
+ ret = -1;
+ break;
+ }
+
+ set_bit(c, cpumask_bits(b));
+ }
+
+ cpu_map__delete(m);
+
+ return ret;
+}
+
+int svg_build_topology_map(char *sib_core, int sib_core_nr,
+ char *sib_thr, int sib_thr_nr)
+{
+ int i;
+ struct topology t;
+
+ t.sib_core_nr = sib_core_nr;
+ t.sib_thr_nr = sib_thr_nr;
+ t.sib_core = calloc(sib_core_nr, sizeof(cpumask_t));
+ t.sib_thr = calloc(sib_thr_nr, sizeof(cpumask_t));
+
+ if (!t.sib_core || !t.sib_thr) {
+ fprintf(stderr, "topology: no memory\n");
+ goto exit;
+ }
+
+ for (i = 0; i < sib_core_nr; i++) {
+ if (str_to_bitmap(sib_core, &t.sib_core[i])) {
+ fprintf(stderr, "topology: can't parse siblings map\n");
+ goto exit;
+ }
+
+ sib_core += strlen(sib_core) + 1;
+ }
+
+ for (i = 0; i < sib_thr_nr; i++) {
+ if (str_to_bitmap(sib_thr, &t.sib_thr[i])) {
+ fprintf(stderr, "topology: can't parse siblings map\n");
+ goto exit;
+ }
+
+ sib_thr += strlen(sib_thr) + 1;
+ }
+
+ topology_map = malloc(sizeof(int) * MAX_NR_CPUS);
+ if (!topology_map) {
+ fprintf(stderr, "topology: no memory\n");
+ goto exit;
+ }
+
+ for (i = 0; i < MAX_NR_CPUS; i++)
+ topology_map[i] = -1;
+
+ scan_core_topology(topology_map, &t);
+
+ return 0;
+
+exit:
+ zfree(&t.sib_core);
+ zfree(&t.sib_thr);
+
+ return -1;
+}
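
svg_build_topology_map() consumes the core- and thread-sibling lists recorded in the perf.data header (each entry is a cpulist string, and consecutive entries are NUL-separated), and fills topology_map[] so that cpu2y() places SMT siblings on adjacent rows. A hedged usage sketch with hand-written sibling lists:

#include "svghelper.h"

static int build_example_topology(void)
{
	/* Two cores with two SMT siblings each: {0,2} and {1,3}. */
	static char cores[]   = "0-3";			/* one entry  */
	static char threads[] = "0,2\0" "1,3";		/* two entries */

	if (svg_build_topology_map(cores, 1, threads, 2) < 0)
		return -1;

	/* The internal topology_map is now {0, 2, 1, 3}: CPU0 and CPU2
	 * share a core, so cpu2y() draws them on neighbouring rows. */
	return 0;
}
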
diff --git a/tools/perf/util/svghelper.h b/tools/perf/util/svghelper.h
index e078198..f7b4d6e 100644
--- a/tools/perf/util/svghelper.h
+++ b/tools/perf/util/svghelper.h
@@ -5,24 +5,29 @@
extern void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end);
extern void svg_box(int Yslot, u64 start, u64 end, const char *type);
-extern void svg_sample(int Yslot, int cpu, u64 start, u64 end);
-extern void svg_waiting(int Yslot, u64 start, u64 end);
+extern void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
+extern void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
+extern void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
extern void svg_cpu_box(int cpu, u64 max_frequency, u64 turbo_frequency);
-extern void svg_process(int cpu, u64 start, u64 end, const char *type, const char *name);
+extern void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace);
extern void svg_cstate(int cpu, u64 start, u64 end, int type);
extern void svg_pstate(int cpu, u64 start, u64 end, u64 freq);
extern void svg_time_grid(void);
extern void svg_legenda(void);
-extern void svg_wakeline(u64 start, int row1, int row2);
-extern void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2);
-extern void svg_interrupt(u64 start, int row);
+extern void svg_wakeline(u64 start, int row1, int row2, const char *backtrace);
+extern void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2, const char *backtrace);
+extern void svg_interrupt(u64 start, int row, const char *backtrace);
extern void svg_text(int Yslot, u64 start, const char *text);
extern void svg_close(void);
+extern int svg_build_topology_map(char *sib_core, int sib_core_nr,
+ char *sib_thr, int sib_thr_nr);
extern int svg_page_width;
+extern u64 svg_highlight;
+extern const char *svg_highlight_name;
#endif /* __PERF_SVGHELPER_H */
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index eed0b96..7594567 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -6,6 +6,7 @@
#include <inttypes.h>
#include "symbol.h"
+#include <symbol/kallsyms.h>
#include "debug.h"
#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
@@ -135,9 +136,8 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
return -1;
}
-static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
- GElf_Shdr *shp, const char *name,
- size_t *idx)
+Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
+ GElf_Shdr *shp, const char *name, size_t *idx)
{
Elf_Scn *sec = NULL;
size_t cnt = 1;
@@ -553,7 +553,7 @@ bool symsrc__has_symtab(struct symsrc *ss)
void symsrc__destroy(struct symsrc *ss)
{
- free(ss->name);
+ zfree(&ss->name);
elf_end(ss->elf);
close(ss->fd);
}
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index 2d2dd05..bd15f49 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -1,4 +1,5 @@
#include "symbol.h"
+#include "util.h"
#include <stdio.h>
#include <fcntl.h>
@@ -253,6 +254,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso __maybe_unused,
if (!ss->name)
goto out_close;
+ ss->fd = fd;
ss->type = type;
return 0;
@@ -274,7 +276,7 @@ bool symsrc__has_symtab(struct symsrc *ss __maybe_unused)
void symsrc__destroy(struct symsrc *ss)
{
- free(ss->name);
+ zfree(&ss->name);
close(ss->fd);
}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index c0c3696..39ce9ad 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -18,12 +18,9 @@
#include <elf.h>
#include <limits.h>
+#include <symbol/kallsyms.h>
#include <sys/utsname.h>
-#ifndef KSYM_NAME_LEN
-#define KSYM_NAME_LEN 256
-#endif
-
static int dso__load_kernel_sym(struct dso *dso, struct map *map,
symbol_filter_t filter);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
@@ -446,62 +443,6 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
return ret;
}
-int kallsyms__parse(const char *filename, void *arg,
- int (*process_symbol)(void *arg, const char *name,
- char type, u64 start))
-{
- char *line = NULL;
- size_t n;
- int err = -1;
- FILE *file = fopen(filename, "r");
-
- if (file == NULL)
- goto out_failure;
-
- err = 0;
-
- while (!feof(file)) {
- u64 start;
- int line_len, len;
- char symbol_type;
- char *symbol_name;
-
- line_len = getline(&line, &n, file);
- if (line_len < 0 || !line)
- break;
-
- line[--line_len] = '\0'; /* \n */
-
- len = hex2u64(line, &start);
-
- len++;
- if (len + 2 >= line_len)
- continue;
-
- symbol_type = line[len];
- len += 2;
- symbol_name = line + len;
- len = line_len - len;
-
- if (len >= KSYM_NAME_LEN) {
- err = -1;
- break;
- }
-
- err = process_symbol(arg, symbol_name,
- symbol_type, start);
- if (err)
- break;
- }
-
- free(line);
- fclose(file);
- return err;
-
-out_failure:
- return -1;
-}
-
int modules__parse(const char *filename, void *arg,
int (*process_module)(void *arg, const char *name,
u64 start))
@@ -565,12 +506,34 @@ struct process_kallsyms_args {
struct dso *dso;
};
-static u8 kallsyms2elf_type(char type)
+bool symbol__is_idle(struct symbol *sym)
{
- if (type == 'W')
- return STB_WEAK;
+ const char * const idle_symbols[] = {
+ "cpu_idle",
+ "intel_idle",
+ "default_idle",
+ "native_safe_halt",
+ "enter_idle",
+ "exit_idle",
+ "mwait_idle",
+ "mwait_idle_with_hints",
+ "poll_idle",
+ "ppc64_runlatch_off",
+ "pseries_dedicated_idle_sleep",
+ NULL
+ };
+
+ int i;
+
+ if (!sym)
+ return false;
+
+ for (i = 0; idle_symbols[i]; i++) {
+ if (!strcmp(idle_symbols[i], sym->name))
+ return true;
+ }
- return isupper(type) ? STB_GLOBAL : STB_LOCAL;
+ return false;
}
static int map__process_kallsym_symbol(void *arg, const char *name,
@@ -833,7 +796,7 @@ static void delete_modules(struct rb_root *modules)
mi = rb_entry(next, struct module_info, rb_node);
next = rb_next(&mi->rb_node);
rb_erase(&mi->rb_node, modules);
- free(mi->name);
+ zfree(&mi->name);
free(mi);
}
}
@@ -1126,10 +1089,10 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
* dso__data_read_addr().
*/
if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
- dso->data_type = DSO_BINARY_TYPE__GUEST_KCORE;
+ dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
else
- dso->data_type = DSO_BINARY_TYPE__KCORE;
- dso__set_long_name(dso, strdup(kcore_filename));
+ dso->binary_type = DSO_BINARY_TYPE__KCORE;
+ dso__set_long_name(dso, strdup(kcore_filename), true);
close(fd);
@@ -1295,8 +1258,8 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
enum dso_binary_type symtab_type = binary_type_symtab[i];
- if (dso__binary_type_file(dso, symtab_type,
- root_dir, name, PATH_MAX))
+ if (dso__read_binary_type_filename(dso, symtab_type,
+ root_dir, name, PATH_MAX))
continue;
/* Name is now the name of the next image to try */
@@ -1306,6 +1269,8 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
if (!syms_ss && symsrc__has_symtab(ss)) {
syms_ss = ss;
next_slot = true;
+ if (!dso->symsrc_filename)
+ dso->symsrc_filename = strdup(name);
}
if (!runtime_ss && symsrc__possibly_runtime(ss)) {
@@ -1376,7 +1341,8 @@ struct map *map_groups__find_by_name(struct map_groups *mg,
}
int dso__load_vmlinux(struct dso *dso, struct map *map,
- const char *vmlinux, symbol_filter_t filter)
+ const char *vmlinux, bool vmlinux_allocated,
+ symbol_filter_t filter)
{
int err = -1;
struct symsrc ss;
@@ -1402,10 +1368,10 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
if (err > 0) {
if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
- dso->data_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
+ dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
else
- dso->data_type = DSO_BINARY_TYPE__VMLINUX;
- dso__set_long_name(dso, (char *)vmlinux);
+ dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
+ dso__set_long_name(dso, vmlinux, vmlinux_allocated);
dso__set_loaded(dso, map->type);
pr_debug("Using %s for symbols\n", symfs_vmlinux);
}
@@ -1424,21 +1390,16 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
filename = dso__build_id_filename(dso, NULL, 0);
if (filename != NULL) {
- err = dso__load_vmlinux(dso, map, filename, filter);
- if (err > 0) {
- dso->lname_alloc = 1;
+ err = dso__load_vmlinux(dso, map, filename, true, filter);
+ if (err > 0)
goto out;
- }
free(filename);
}
for (i = 0; i < vmlinux_path__nr_entries; ++i) {
- err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter);
- if (err > 0) {
- dso__set_long_name(dso, strdup(vmlinux_path[i]));
- dso->lname_alloc = 1;
+ err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
+ if (err > 0)
break;
- }
}
out:
return err;
@@ -1496,14 +1457,15 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
+ scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
+ sbuild_id);
+
/* Use /proc/kallsyms if possible */
if (is_host) {
DIR *d;
int fd;
/* If no cached kcore go with /proc/kallsyms */
- scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s",
- buildid_dir, sbuild_id);
d = opendir(path);
if (!d)
goto proc_kallsyms;
@@ -1528,6 +1490,10 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
goto proc_kallsyms;
}
+ /* Find kallsyms in build-id cache with kcore */
+ if (!find_matching_kcore(map, path, sizeof(path)))
+ return strdup(path);
+
scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
buildid_dir, sbuild_id);
@@ -1570,15 +1536,8 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
}
if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
- err = dso__load_vmlinux(dso, map,
- symbol_conf.vmlinux_name, filter);
- if (err > 0) {
- dso__set_long_name(dso,
- strdup(symbol_conf.vmlinux_name));
- dso->lname_alloc = 1;
- return err;
- }
- return err;
+ return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name,
+ false, filter);
}
if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
@@ -1604,7 +1563,7 @@ do_kallsyms:
free(kallsyms_allocated_filename);
if (err > 0 && !dso__is_kcore(dso)) {
- dso__set_long_name(dso, strdup("[kernel.kallsyms]"));
+ dso__set_long_name(dso, "[kernel.kallsyms]", false);
map__fixup_start(map);
map__fixup_end(map);
}
@@ -1634,7 +1593,8 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
*/
if (symbol_conf.default_guest_vmlinux_name != NULL) {
err = dso__load_vmlinux(dso, map,
- symbol_conf.default_guest_vmlinux_name, filter);
+ symbol_conf.default_guest_vmlinux_name,
+ false, filter);
return err;
}
@@ -1651,7 +1611,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
pr_debug("Using %s for symbols\n", kallsyms_filename);
if (err > 0 && !dso__is_kcore(dso)) {
machine__mmap_name(machine, path, sizeof(path));
- dso__set_long_name(dso, strdup(path));
+ dso__set_long_name(dso, strdup(path), true);
map__fixup_start(map);
map__fixup_end(map);
}
@@ -1661,13 +1621,10 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
static void vmlinux_path__exit(void)
{
- while (--vmlinux_path__nr_entries >= 0) {
- free(vmlinux_path[vmlinux_path__nr_entries]);
- vmlinux_path[vmlinux_path__nr_entries] = NULL;
- }
+ while (--vmlinux_path__nr_entries >= 0)
+ zfree(&vmlinux_path[vmlinux_path__nr_entries]);
- free(vmlinux_path);
- vmlinux_path = NULL;
+ zfree(&vmlinux_path);
}
static int vmlinux_path__init(void)
@@ -1719,7 +1676,7 @@ out_fail:
return -1;
}
-static int setup_list(struct strlist **list, const char *list_str,
+int setup_list(struct strlist **list, const char *list_str,
const char *list_name)
{
if (list_str == NULL)
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 07de8fe..fffe288 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -52,6 +52,11 @@ static inline char *bfd_demangle(void __maybe_unused *v,
# define PERF_ELF_C_READ_MMAP ELF_C_READ
#endif
+#ifdef HAVE_LIBELF_SUPPORT
+extern Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
+ GElf_Shdr *shp, const char *name, size_t *idx);
+#endif
+
#ifndef DMGL_PARAMS
#define DMGL_PARAMS (1 << 0) /* Include function args */
#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
@@ -164,6 +169,7 @@ struct mem_info {
};
struct addr_location {
+ struct machine *machine;
struct thread *thread;
struct map *map;
struct symbol *sym;
@@ -206,7 +212,8 @@ bool symsrc__possibly_runtime(struct symsrc *ss);
int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter);
int dso__load_vmlinux(struct dso *dso, struct map *map,
- const char *vmlinux, symbol_filter_t filter);
+ const char *vmlinux, bool vmlinux_allocated,
+ symbol_filter_t filter);
int dso__load_vmlinux_path(struct dso *dso, struct map *map,
symbol_filter_t filter);
int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map,
@@ -220,9 +227,6 @@ struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
int filename__read_build_id(const char *filename, void *bf, size_t size);
int sysfs__read_build_id(const char *filename, void *bf, size_t size);
-int kallsyms__parse(const char *filename, void *arg,
- int (*process_symbol)(void *arg, const char *name,
- char type, u64 start));
int modules__parse(const char *filename, void *arg,
int (*process_module)(void *arg, const char *name,
u64 start));
@@ -240,6 +244,7 @@ size_t symbol__fprintf(struct symbol *sym, FILE *fp);
bool symbol_type__is_a(char symbol_type, enum map_type map_type);
bool symbol__restricted_filename(const char *filename,
const char *restricted_filename);
+bool symbol__is_idle(struct symbol *sym);
int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
struct symsrc *runtime_ss, symbol_filter_t filter,
@@ -273,4 +278,7 @@ void kcore_extract__delete(struct kcore_extract *kce);
int kcore_copy(const char *from_dir, const char *to_dir);
int compare_proc_modules(const char *from, const char *to);
+int setup_list(struct strlist **list, const char *list_str,
+ const char *list_name);
+
#endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index 3c778a0..e74c596 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -55,6 +55,13 @@ enum target_errno target__validate(struct target *target)
ret = TARGET_ERRNO__UID_OVERRIDE_SYSTEM;
}
+ /* THREAD and SYSTEM/CPU are mutually exclusive */
+ if (target->per_thread && (target->system_wide || target->cpu_list)) {
+ target->per_thread = false;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD;
+ }
+
return ret;
}
@@ -100,6 +107,7 @@ static const char *target__error_str[] = {
"UID switch overriding CPU",
"PID/TID switch overriding SYSTEM",
"UID switch overriding SYSTEM",
+ "SYSTEM/CPU switch overriding PER-THREAD",
"Invalid User: %s",
"Problems obtaining information for user %s",
};
@@ -131,7 +139,8 @@ int target__strerror(struct target *target, int errnum,
msg = target__error_str[idx];
switch (errnum) {
- case TARGET_ERRNO__PID_OVERRIDE_CPU ... TARGET_ERRNO__UID_OVERRIDE_SYSTEM:
+ case TARGET_ERRNO__PID_OVERRIDE_CPU ...
+ TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD:
snprintf(buf, buflen, "%s", msg);
break;
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
index 2d0c506..7381b1c 100644
--- a/tools/perf/util/target.h
+++ b/tools/perf/util/target.h
@@ -12,7 +12,8 @@ struct target {
uid_t uid;
bool system_wide;
bool uses_mmap;
- bool force_per_cpu;
+ bool default_per_cpu;
+ bool per_thread;
};
enum target_errno {
@@ -33,6 +34,7 @@ enum target_errno {
TARGET_ERRNO__UID_OVERRIDE_CPU,
TARGET_ERRNO__PID_OVERRIDE_SYSTEM,
TARGET_ERRNO__UID_OVERRIDE_SYSTEM,
+ TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD,
/* for target__parse_uid() */
TARGET_ERRNO__INVALID_UID,
@@ -61,4 +63,17 @@ static inline bool target__none(struct target *target)
return !target__has_task(target) && !target__has_cpu(target);
}
+static inline bool target__uses_dummy_map(struct target *target)
+{
+ bool use_dummy = false;
+
+ if (target->default_per_cpu)
+ use_dummy = target->per_thread ? true : false;
+ else if (target__has_task(target) ||
+ (!target__has_cpu(target) && !target->uses_mmap))
+ use_dummy = true;
+
+ return use_dummy;
+}
+
#endif /* _PERF_TARGET_H */
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 49eaf1d..0358882 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -66,10 +66,13 @@ struct comm *thread__comm(const struct thread *thread)
int thread__set_comm(struct thread *thread, const char *str, u64 timestamp)
{
struct comm *new, *curr = thread__comm(thread);
+ int err;
/* Override latest entry if it had no specific time coverage */
if (!curr->start) {
- comm__override(curr, str, timestamp);
+ err = comm__override(curr, str, timestamp);
+ if (err)
+ return err;
} else {
new = comm__new(str, timestamp);
if (!new)
@@ -126,7 +129,7 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
if (!comm)
return -ENOMEM;
err = thread__set_comm(thread, comm, timestamp);
- if (!err)
+ if (err)
return err;
thread->comm_set = true;
}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 897c1b2..5b856bf 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -6,6 +6,7 @@
#include <unistd.h>
#include <sys/types.h>
#include "symbol.h"
+#include <strlist.h>
struct thread {
union {
@@ -66,4 +67,15 @@ static inline void thread__set_priv(struct thread *thread, void *p)
{
thread->priv = p;
}
+
+static inline bool thread__is_filtered(struct thread *thread)
+{
+ if (symbol_conf.comm_list &&
+ !strlist__has_entry(symbol_conf.comm_list, thread__comm_str(thread))) {
+ return true;
+ }
+
+ return false;
+}
+
#endif /* __PERF_THREAD_H */
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index 9b5f856..5d32159 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -9,6 +9,7 @@
#include "strlist.h"
#include <string.h>
#include "thread_map.h"
+#include "util.h"
/* Skip "." and ".." directories */
static int filter(const struct dirent *dir)
@@ -40,7 +41,7 @@ struct thread_map *thread_map__new_by_pid(pid_t pid)
}
for (i=0; i<items; i++)
- free(namelist[i]);
+ zfree(&namelist[i]);
free(namelist);
return threads;
@@ -117,7 +118,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
threads->map[threads->nr + i] = atoi(namelist[i]->d_name);
for (i = 0; i < items; i++)
- free(namelist[i]);
+ zfree(&namelist[i]);
free(namelist);
threads->nr += items;
@@ -134,12 +135,11 @@ out_free_threads:
out_free_namelist:
for (i = 0; i < items; i++)
- free(namelist[i]);
+ zfree(&namelist[i]);
free(namelist);
out_free_closedir:
- free(threads);
- threads = NULL;
+ zfree(&threads);
goto out_closedir;
}
@@ -194,7 +194,7 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
for (i = 0; i < items; i++) {
threads->map[j++] = atoi(namelist[i]->d_name);
- free(namelist[i]);
+ zfree(&namelist[i]);
}
threads->nr = total_tasks;
free(namelist);
@@ -206,12 +206,11 @@ out:
out_free_namelist:
for (i = 0; i < items; i++)
- free(namelist[i]);
+ zfree(&namelist[i]);
free(namelist);
out_free_threads:
- free(threads);
- threads = NULL;
+ zfree(&threads);
goto out;
}
@@ -262,8 +261,7 @@ out:
return threads;
out_free_threads:
- free(threads);
- threads = NULL;
+ zfree(&threads);
goto out;
}
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index ce793c7..8e517de 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -26,7 +26,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
float samples_per_sec;
float ksamples_per_sec;
float esamples_percent;
- struct perf_record_opts *opts = &top->record_opts;
+ struct record_opts *opts = &top->record_opts;
struct target *target = &opts->target;
size_t ret = 0;
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 88cfeaf..dab14d0 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -14,7 +14,7 @@ struct perf_session;
struct perf_top {
struct perf_tool tool;
struct perf_evlist *evlist;
- struct perf_record_opts record_opts;
+ struct record_opts record_opts;
/*
* Symbols will be added here in perf_event__process_sample and will
* get out after decayed.
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index f3c9e55..7e6fcfe 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -38,7 +38,7 @@
#include "../perf.h"
#include "trace-event.h"
-#include <lk/debugfs.h>
+#include <api/fs/debugfs.h>
#include "evsel.h"
#define VERSION "0.5"
@@ -397,8 +397,8 @@ put_tracepoints_path(struct tracepoint_path *tps)
struct tracepoint_path *t = tps;
tps = tps->next;
- free(t->name);
- free(t->system);
+ zfree(&t->name);
+ zfree(&t->system);
free(t);
}
}
@@ -562,10 +562,8 @@ out:
output_fd = fd;
}
- if (err) {
- free(tdata);
- tdata = NULL;
- }
+ if (err)
+ zfree(&tdata);
put_tracepoints_path(tps);
return tdata;
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 6681f71..e0d6d07f 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -28,19 +28,6 @@
#include "util.h"
#include "trace-event.h"
-struct pevent *read_trace_init(int file_bigendian, int host_bigendian)
-{
- struct pevent *pevent = pevent_alloc();
-
- if (pevent != NULL) {
- pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
- pevent_set_file_bigendian(pevent, file_bigendian);
- pevent_set_host_bigendian(pevent, host_bigendian);
- }
-
- return pevent;
-}
-
static int get_common_field(struct scripting_context *context,
int *offset, int *size, const char *type)
{
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index f211227..e113e18 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -343,7 +343,7 @@ static int read_event_files(struct pevent *pevent)
return 0;
}
-ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe)
+ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe)
{
char buf[BUFSIZ];
char test[] = { 23, 8, 68 };
@@ -356,11 +356,9 @@ ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe)
int host_bigendian;
int file_long_size;
int file_page_size;
- struct pevent *pevent;
+ struct pevent *pevent = NULL;
int err;
- *ppevent = NULL;
-
repipe = __repipe;
input_fd = fd;
@@ -390,12 +388,17 @@ ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe)
file_bigendian = buf[0];
host_bigendian = bigendian();
- pevent = read_trace_init(file_bigendian, host_bigendian);
- if (pevent == NULL) {
- pr_debug("read_trace_init failed");
+ if (trace_event__init(tevent)) {
+ pr_debug("trace_event__init failed");
goto out;
}
+ pevent = tevent->pevent;
+
+ pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
+ pevent_set_file_bigendian(pevent, file_bigendian);
+ pevent_set_host_bigendian(pevent, host_bigendian);
+
if (do_read(buf, 1) < 0)
goto out;
file_long_size = buf[0];
@@ -432,11 +435,10 @@ ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe)
pevent_print_printk(pevent);
}
- *ppevent = pevent;
pevent = NULL;
out:
if (pevent)
- pevent_free(pevent);
+ trace_event__cleanup(tevent);
return size;
}
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index 95199e4..57aaccc 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -38,9 +38,8 @@ static int stop_script_unsupported(void)
static void process_event_unsupported(union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct perf_evsel *evsel __maybe_unused,
- struct machine *machine __maybe_unused,
struct thread *thread __maybe_unused,
- struct addr_location *al __maybe_unused)
+ struct addr_location *al __maybe_unused)
{
}
diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c
new file mode 100644
index 0000000..6322d37
--- /dev/null
+++ b/tools/perf/util/trace-event.c
@@ -0,0 +1,82 @@
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <linux/kernel.h>
+#include <traceevent/event-parse.h>
+#include "trace-event.h"
+#include "util.h"
+
+/*
+ * global trace_event object used by trace_event__tp_format
+ *
+ * TODO There's no cleanup call for this. Add some sort of
+ * __exit function support and call trace_event__cleanup
+ * there.
+ */
+static struct trace_event tevent;
+
+int trace_event__init(struct trace_event *t)
+{
+ struct pevent *pevent = pevent_alloc();
+
+ if (pevent) {
+ t->plugin_list = traceevent_load_plugins(pevent);
+ t->pevent = pevent;
+ }
+
+ return pevent ? 0 : -1;
+}
+
+void trace_event__cleanup(struct trace_event *t)
+{
+ traceevent_unload_plugins(t->plugin_list, t->pevent);
+ pevent_free(t->pevent);
+}
+
+static struct event_format*
+tp_format(const char *sys, const char *name)
+{
+ struct pevent *pevent = tevent.pevent;
+ struct event_format *event = NULL;
+ char path[PATH_MAX];
+ size_t size;
+ char *data;
+
+ scnprintf(path, PATH_MAX, "%s/%s/%s/format",
+ tracing_events_path, sys, name);
+
+ if (filename__read_str(path, &data, &size))
+ return NULL;
+
+ pevent_parse_format(pevent, &event, data, size, sys);
+
+ free(data);
+ return event;
+}
+
+struct event_format*
+trace_event__tp_format(const char *sys, const char *name)
+{
+ static bool initialized;
+
+ if (!initialized) {
+ int be = traceevent_host_bigendian();
+ struct pevent *pevent;
+
+ if (trace_event__init(&tevent))
+ return NULL;
+
+ pevent = tevent.pevent;
+ pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
+ pevent_set_file_bigendian(pevent, be);
+ pevent_set_host_bigendian(pevent, be);
+ initialized = true;
+ }
+
+ return tp_format(sys, name);
+}
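+
+/*
+ * Hypothetical usage sketch: after the lazy initialization above, a caller
+ * can look up a tracepoint's parsed format, e.g.
+ *
+ *	struct event_format *fmt = trace_event__tp_format("sched", "sched_switch");
+ *
+ *	if (fmt == NULL)
+ *		return -1;
+ *
+ * and then use fmt for field lookups and printing via libtraceevent.
+ */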
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 04df631..7b6d686 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -3,17 +3,26 @@
#include <traceevent/event-parse.h>
#include "parse-events.h"
-#include "session.h"
struct machine;
struct perf_sample;
union perf_event;
struct perf_tool;
struct thread;
+struct plugin_list;
+
+struct trace_event {
+ struct pevent *pevent;
+ struct plugin_list *plugin_list;
+};
+
+int trace_event__init(struct trace_event *t);
+void trace_event__cleanup(struct trace_event *t);
+struct event_format*
+trace_event__tp_format(const char *sys, const char *name);
int bigendian(void);
-struct pevent *read_trace_init(int file_bigendian, int host_bigendian);
void event_format__print(struct event_format *event,
int cpu, void *data, int size);
@@ -27,7 +36,7 @@ raw_field_value(struct event_format *event, const char *name, void *data);
void parse_proc_kallsyms(struct pevent *pevent, char *file, unsigned int size);
void parse_ftrace_printk(struct pevent *pevent, char *file, unsigned int size);
-ssize_t trace_report(int fd, struct pevent **pevent, bool repipe);
+ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe);
struct event_format *trace_find_next_event(struct pevent *pevent,
struct event_format *event);
@@ -59,7 +68,6 @@ struct scripting_ops {
void (*process_event) (union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct machine *machine,
struct thread *thread,
struct addr_location *al);
int (*generate_script) (struct pevent *pevent, const char *outfile);
diff --git a/tools/perf/util/unwind.c b/tools/perf/util/unwind.c
index 0efd539..742f23b 100644
--- a/tools/perf/util/unwind.c
+++ b/tools/perf/util/unwind.c
@@ -28,6 +28,7 @@
#include "session.h"
#include "perf_regs.h"
#include "unwind.h"
+#include "symbol.h"
#include "util.h"
extern int
@@ -158,23 +159,6 @@ static int __dw_read_encoded_value(u8 **p, u8 *end, u64 *val,
__v; \
})
-static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
- GElf_Shdr *shp, const char *name)
-{
- Elf_Scn *sec = NULL;
-
- while ((sec = elf_nextscn(elf, sec)) != NULL) {
- char *str;
-
- gelf_getshdr(sec, shp);
- str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
- if (!strcmp(name, str))
- break;
- }
-
- return sec;
-}
-
static u64 elf_section_offset(int fd, const char *name)
{
Elf *elf;
@@ -190,7 +174,7 @@ static u64 elf_section_offset(int fd, const char *name)
if (gelf_getehdr(elf, &ehdr) == NULL)
break;
- if (!elf_section_by_name(elf, &ehdr, &shdr, name))
+ if (!elf_section_by_name(elf, &ehdr, &shdr, name, NULL))
break;
offset = shdr.sh_offset;
@@ -340,10 +324,10 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
/* Check the .debug_frame section for unwinding info */
if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
memset(&di, 0, sizeof(di));
- dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name,
- map->start, map->end);
- return dwarf_search_unwind_table(as, ip, &di, pi,
- need_unwind_info, arg);
+ if (dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name,
+ map->start, map->end))
+ return dwarf_search_unwind_table(as, ip, &di, pi,
+ need_unwind_info, arg);
}
#endif
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 28a0a89..42ad667 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -1,11 +1,17 @@
#include "../perf.h"
#include "util.h"
+#include "fs.h"
#include <sys/mman.h>
#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
#endif
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <limits.h>
+#include <byteswap.h>
+#include <linux/kernel.h>
/*
* XXX We need to find a better place for these things...
@@ -151,21 +157,40 @@ unsigned long convert_unit(unsigned long value, char *unit)
return value;
}
-int readn(int fd, void *buf, size_t n)
+static ssize_t ion(bool is_read, int fd, void *buf, size_t n)
{
void *buf_start = buf;
+ size_t left = n;
- while (n) {
- int ret = read(fd, buf, n);
+ while (left) {
+ ssize_t ret = is_read ? read(fd, buf, left) :
+ write(fd, buf, left);
if (ret <= 0)
return ret;
- n -= ret;
- buf += ret;
+ left -= ret;
+ buf += ret;
}
- return buf - buf_start;
+ BUG_ON((size_t)(buf - buf_start) != n);
+ return n;
+}
+
+/*
+ * Read exactly 'n' bytes or return an error.
+ */
+ssize_t readn(int fd, void *buf, size_t n)
+{
+ return ion(true, fd, buf, n);
+}
+
+/*
+ * Write exactly 'n' bytes or return an error.
+ */
+ssize_t writen(int fd, void *buf, size_t n)
+{
+ return ion(false, fd, buf, n);
}
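+/*
+ * Hypothetical caller illustrating the contract: both helpers loop until
+ * all 'n' bytes have been transferred, so anything other than 'n' on
+ * return indicates an error or EOF, e.g.
+ *
+ *	u64 val = 42;
+ *	if (writen(fd, &val, sizeof(val)) != (ssize_t)sizeof(val))
+ *		return -1;
+ */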
size_t hex_width(u64 v)
@@ -413,3 +438,102 @@ int filename__read_int(const char *filename, int *value)
close(fd);
return err;
}
+
+int filename__read_str(const char *filename, char **buf, size_t *sizep)
+{
+ size_t size = 0, alloc_size = 0;
+ void *bf = NULL, *nbf;
+ int fd, n, err = 0;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ return -errno;
+
+ do {
+ if (size == alloc_size) {
+ alloc_size += BUFSIZ;
+ nbf = realloc(bf, alloc_size);
+ if (!nbf) {
+ err = -ENOMEM;
+ break;
+ }
+
+ bf = nbf;
+ }
+
+ n = read(fd, bf + size, alloc_size - size);
+ if (n < 0) {
+ if (size) {
+ pr_warning("read failed %d: %s\n",
+ errno, strerror(errno));
+ err = 0;
+ } else
+ err = -errno;
+
+ break;
+ }
+
+ size += n;
+ } while (n > 0);
+
+ if (!err) {
+ *sizep = size;
+ *buf = bf;
+ } else
+ free(bf);
+
+ close(fd);
+ return err;
+}
+
+const char *get_filename_for_perf_kvm(void)
+{
+ const char *filename;
+
+ if (perf_host && !perf_guest)
+ filename = strdup("perf.data.host");
+ else if (!perf_host && perf_guest)
+ filename = strdup("perf.data.guest");
+ else
+ filename = strdup("perf.data.kvm");
+
+ return filename;
+}
+
+int perf_event_paranoid(void)
+{
+ char path[PATH_MAX];
+ const char *procfs = procfs__mountpoint();
+ int value;
+
+ if (!procfs)
+ return INT_MAX;
+
+ scnprintf(path, PATH_MAX, "%s/sys/kernel/perf_event_paranoid", procfs);
+
+ if (filename__read_int(path, &value))
+ return INT_MAX;
+
+ return value;
+}
+
+void mem_bswap_32(void *src, int byte_size)
+{
+ u32 *m = src;
+ while (byte_size > 0) {
+ *m = bswap_32(*m);
+ byte_size -= sizeof(u32);
+ ++m;
+ }
+}
+
+void mem_bswap_64(void *src, int byte_size)
+{
+ u64 *m = src;
+
+ while (byte_size > 0) {
+ *m = bswap_64(*m);
+ byte_size -= sizeof(u64);
+ ++m;
+ }
+}
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index c8f362d..6995d66 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -71,8 +71,9 @@
#include <linux/magic.h>
#include "types.h"
#include <sys/ttydefaults.h>
-#include <lk/debugfs.h>
+#include <api/fs/debugfs.h>
#include <termios.h>
+#include <linux/bitops.h>
extern const char *graph_line;
extern const char *graph_dotted_line;
@@ -185,6 +186,8 @@ static inline void *zalloc(size_t size)
return calloc(1, size);
}
+#define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
+
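+/*
+ * Illustrative (hypothetical) use: zfree() frees the buffer and NULLs the
+ * pointer in one step, which is the pattern the free(x); x = NULL; call
+ * sites throughout this series are converted to:
+ *
+ *	char *name = strdup("foo");
+ *	zfree(&name);
+ *
+ * name is NULL afterwards, so calling zfree(&name) again is harmless.
+ */
+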
static inline int has_extension(const char *filename, const char *ext)
{
size_t len = strlen(filename);
@@ -253,7 +256,8 @@ bool strlazymatch(const char *str, const char *pat);
int strtailcmp(const char *s1, const char *s2);
char *strxfrchar(char *s, char from, char to);
unsigned long convert_unit(unsigned long value, char *unit);
-int readn(int fd, void *buf, size_t size);
+ssize_t readn(int fd, void *buf, size_t n);
+ssize_t writen(int fd, void *buf, size_t n);
struct perf_event_attr;
@@ -280,6 +284,17 @@ static inline unsigned next_pow2(unsigned x)
return 1ULL << (32 - __builtin_clz(x - 1));
}
+static inline unsigned long next_pow2_l(unsigned long x)
+{
+#if BITS_PER_LONG == 64
+ if (x <= (1UL << 31))
+ return next_pow2(x);
+ return (unsigned long)next_pow2(x >> 32) << 32;
+#else
+ return next_pow2(x);
+#endif
+}
+
size_t hex_width(u64 v);
int hex2u64(const char *ptr, u64 *val);
@@ -307,4 +322,11 @@ char *get_srcline(struct dso *dso, unsigned long addr);
void free_srcline(char *srcline);
int filename__read_int(const char *filename, int *value);
+int filename__read_str(const char *filename, char **buf, size_t *sizep);
+int perf_event_paranoid(void);
+
+void mem_bswap_64(void *src, int byte_size);
+void mem_bswap_32(void *src, int byte_size);
+
+const char *get_filename_for_perf_kvm(void);
#endif /* GIT_COMPAT_UTIL_H */
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
index 697c8b4..0fb3c1f 100644
--- a/tools/perf/util/values.c
+++ b/tools/perf/util/values.c
@@ -31,14 +31,14 @@ void perf_read_values_destroy(struct perf_read_values *values)
return;
for (i = 0; i < values->threads; i++)
- free(values->value[i]);
- free(values->value);
- free(values->pid);
- free(values->tid);
- free(values->counterrawid);
+ zfree(&values->value[i]);
+ zfree(&values->value);
+ zfree(&values->pid);
+ zfree(&values->tid);
+ zfree(&values->counterrawid);
for (i = 0; i < values->counters; i++)
- free(values->countername[i]);
- free(values->countername);
+ zfree(&values->countername[i]);
+ zfree(&values->countername);
}
static void perf_read_values__enlarge_threads(struct perf_read_values *values)
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index 3915982..0ddb3b8 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -103,7 +103,7 @@ struct dso *vdso__dso_findnew(struct list_head *head)
dso = dso__new(VDSO__MAP_NAME);
if (dso != NULL) {
dsos__add(head, dso);
- dso__set_long_name(dso, file);
+ dso__set_long_name(dso, file, false);
}
}
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index ee76544..8abbef1 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -61,6 +61,7 @@ QUIET_SUBDIR1 =
ifneq ($(findstring $(MAKEFLAGS),s),s)
ifneq ($(V),1)
QUIET_CC = @echo ' CC '$@;
+ QUIET_CC_FPIC = @echo ' CC FPIC '$@;
QUIET_AR = @echo ' AR '$@;
QUIET_LINK = @echo ' LINK '$@;
QUIET_MKDIR = @echo ' MKDIR '$@;
@@ -76,5 +77,8 @@ ifneq ($(findstring $(MAKEFLAGS),s),s)
+@echo ' DESCEND '$(1); \
mkdir -p $(OUTPUT)$(1) && \
$(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2)
+
+ QUIET_CLEAN = @printf ' CLEAN %s\n' $1;
+ QUIET_INSTALL = @printf ' INSTALL %s\n' $1;
endif
endif
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 999eab1..4063156 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -18,6 +18,7 @@ $| = 1;
my %opt;
my %repeat_tests;
my %repeats;
+my %evals;
#default opts
my %default = (
@@ -25,6 +26,7 @@ my %default = (
"TEST_TYPE" => "build",
"BUILD_TYPE" => "randconfig",
"MAKE_CMD" => "make",
+ "CLOSE_CONSOLE_SIGNAL" => "INT",
"TIMEOUT" => 120,
"TMP_DIR" => "/tmp/ktest/\${MACHINE}",
"SLEEP_TIME" => 60, # sleep time between tests
@@ -39,6 +41,7 @@ my %default = (
"CLEAR_LOG" => 0,
"BISECT_MANUAL" => 0,
"BISECT_SKIP" => 1,
+ "BISECT_TRIES" => 1,
"MIN_CONFIG_TYPE" => "boot",
"SUCCESS_LINE" => "login:",
"DETECT_TRIPLE_FAULT" => 1,
@@ -137,6 +140,7 @@ my $bisect_bad_commit = "";
my $reverse_bisect;
my $bisect_manual;
my $bisect_skip;
+my $bisect_tries;
my $config_bisect_good;
my $bisect_ret_good;
my $bisect_ret_bad;
@@ -163,6 +167,7 @@ my $timeout;
my $booted_timeout;
my $detect_triplefault;
my $console;
+my $close_console_signal;
my $reboot_success_line;
my $success_line;
my $stop_after_success;
@@ -273,6 +278,7 @@ my %option_map = (
"IGNORE_ERRORS" => \$ignore_errors,
"BISECT_MANUAL" => \$bisect_manual,
"BISECT_SKIP" => \$bisect_skip,
+ "BISECT_TRIES" => \$bisect_tries,
"CONFIG_BISECT_GOOD" => \$config_bisect_good,
"BISECT_RET_GOOD" => \$bisect_ret_good,
"BISECT_RET_BAD" => \$bisect_ret_bad,
@@ -285,6 +291,7 @@ my %option_map = (
"TIMEOUT" => \$timeout,
"BOOTED_TIMEOUT" => \$booted_timeout,
"CONSOLE" => \$console,
+ "CLOSE_CONSOLE_SIGNAL" => \$close_console_signal,
"DETECT_TRIPLE_FAULT" => \$detect_triplefault,
"SUCCESS_LINE" => \$success_line,
"REBOOT_SUCCESS_LINE" => \$reboot_success_line,
@@ -445,6 +452,27 @@ $config_help{"REBOOT_SCRIPT"} = << "EOF"
EOF
;
+sub _logit {
+ if (defined($opt{"LOG_FILE"})) {
+ open(OUT, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}";
+ print OUT @_;
+ close(OUT);
+ }
+}
+
+sub logit {
+ if (defined($opt{"LOG_FILE"})) {
+ _logit @_;
+ } else {
+ print @_;
+ }
+}
+
+sub doprint {
+ print @_;
+ _logit @_;
+}
+
sub read_prompt {
my ($cancel, $prompt) = @_;
@@ -662,6 +690,22 @@ sub set_value {
}
}
+sub set_eval {
+ my ($lvalue, $rvalue, $name) = @_;
+
+ my $prvalue = process_variables($rvalue);
+ my $arr;
+
+ if (defined($evals{$lvalue})) {
+ $arr = $evals{$lvalue};
+ } else {
+ $arr = [];
+ $evals{$lvalue} = $arr;
+ }
+
+ push @{$arr}, $rvalue;
+}
+
sub set_variable {
my ($lvalue, $rvalue) = @_;
@@ -947,6 +991,20 @@ sub __read_config {
$test_case = 1;
}
+ } elsif (/^\s*([A-Z_\[\]\d]+)\s*=~\s*(.*?)\s*$/) {
+
+ next if ($skip);
+
+ my $lvalue = $1;
+ my $rvalue = $2;
+
+ if ($default || $lvalue =~ /\[\d+\]$/) {
+ set_eval($lvalue, $rvalue, $name);
+ } else {
+ my $val = "$lvalue\[$test_num\]";
+ set_eval($val, $rvalue, $name);
+ }
+
} elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) {
next if ($skip);
@@ -1126,6 +1184,10 @@ sub __eval_option {
} elsif (defined($opt{$var})) {
$o = $opt{$var};
$retval = "$retval$o";
+ } elsif ($var eq "KERNEL_VERSION" && defined($make)) {
+ # special option KERNEL_VERSION uses kernel version
+ get_version();
+ $retval = "$retval$version";
} else {
$retval = "$retval\$\{$var\}";
}
@@ -1140,6 +1202,33 @@ sub __eval_option {
return $retval;
}
+sub process_evals {
+ my ($name, $option, $i) = @_;
+
+ my $option_name = "$name\[$i\]";
+ my $ev;
+
+ my $old_option = $option;
+
+ if (defined($evals{$option_name})) {
+ $ev = $evals{$option_name};
+ } elsif (defined($evals{$name})) {
+ $ev = $evals{$name};
+ } else {
+ return $option;
+ }
+
+ for my $e (@{$ev}) {
+ eval "\$option =~ $e";
+ }
+
+ if ($option ne $old_option) {
+ doprint("$name changed from '$old_option' to '$option'\n");
+ }
+
+ return $option;
+}
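+
+# Hypothetical example of the "=~" config syntax handled by set_eval() and
+# process_evals() above: a config line such as
+#
+#   BUILD_DIR =~ s,/release/,/debug/,
+#
+# stores the Perl substitution and applies it here when the option is
+# evaluated, rewriting the resulting BUILD_DIR value in place.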
+
sub eval_option {
my ($name, $option, $i) = @_;
@@ -1160,28 +1249,9 @@ sub eval_option {
$option = __eval_option($name, $option, $i);
}
- return $option;
-}
+ $option = process_evals($name, $option, $i);
-sub _logit {
- if (defined($opt{"LOG_FILE"})) {
- open(OUT, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}";
- print OUT @_;
- close(OUT);
- }
-}
-
-sub logit {
- if (defined($opt{"LOG_FILE"})) {
- _logit @_;
- } else {
- print @_;
- }
-}
-
-sub doprint {
- print @_;
- _logit @_;
+ return $option;
}
sub run_command;
@@ -1296,7 +1366,7 @@ sub close_console {
my ($fp, $pid) = @_;
doprint "kill child process $pid\n";
- kill 2, $pid;
+ kill $close_console_signal, $pid;
print "closing!\n";
close($fp);
@@ -2517,12 +2587,29 @@ sub run_bisect {
$buildtype = "useconfig:$minconfig";
}
- my $ret = run_bisect_test $type, $buildtype;
+ # If the user sets bisect_tries to less than 1, then no tries
+ # is a success.
+ my $ret = 1;
- if ($bisect_manual) {
+ # Still let the user manually decide that though.
+ if ($bisect_tries < 1 && $bisect_manual) {
$ret = answer_bisect;
}
+ for (my $i = 0; $i < $bisect_tries; $i++) {
+ if ($bisect_tries > 1) {
+ my $t = $i + 1;
+ doprint("Running bisect trial $t of $bisect_tries:\n");
+ }
+ $ret = run_bisect_test $type, $buildtype;
+
+ if ($bisect_manual) {
+ $ret = answer_bisect;
+ }
+
+ last if (!$ret);
+ }
+
# Are we looking for where it worked, not failed?
if ($reverse_bisect && $ret >= 0) {
$ret = !$ret;
@@ -3916,6 +4003,18 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
my $makecmd = set_test_option("MAKE_CMD", $i);
+ $outputdir = set_test_option("OUTPUT_DIR", $i);
+ $builddir = set_test_option("BUILD_DIR", $i);
+
+ chdir $builddir || die "can't change directory to $builddir";
+
+ if (!-d $outputdir) {
+ mkpath($outputdir) or
+ die "can't create $outputdir";
+ }
+
+ $make = "$makecmd O=$outputdir";
+
# Load all the options into their mapped variable names
foreach my $opt (keys %option_map) {
${$option_map{$opt}} = set_test_option($opt, $i);
@@ -3940,13 +4039,9 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
$start_minconfig = $minconfig;
}
- chdir $builddir || die "can't change directory to $builddir";
-
- foreach my $dir ($tmpdir, $outputdir) {
- if (!-d $dir) {
- mkpath($dir) or
- die "can't create $dir";
- }
+ if (!-d $tmpdir) {
+ mkpath($tmpdir) or
+ die "can't create $tmpdir";
}
$ENV{"SSH_USER"} = $ssh_user;
@@ -3955,7 +4050,6 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
$buildlog = "$tmpdir/buildlog-$machine";
$testlog = "$tmpdir/testlog-$machine";
$dmesg = "$tmpdir/dmesg-$machine";
- $make = "$makecmd O=$outputdir";
$output_config = "$outputdir/.config";
if (!$buildonly) {
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index 0a290fb..172eec4 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -328,6 +328,13 @@
# For a virtual machine with guest name "Guest".
#CONSOLE = virsh console Guest
+# Signal to send to kill the console.
+# ktest.pl will create a child process to monitor the console.
+# When it is done with the console, ktest will kill the child process
+# with this signal.
+# (default INT)
+#CLOSE_CONSOLE_SIGNAL = HUP
+
# Required version ending to differentiate the test
# from other linux builds on the system.
#LOCALVERSION = -test
@@ -1021,6 +1028,20 @@
# BISECT_BAD with BISECT_CHECK = good or
# BISECT_CHECK = bad, respectively.
#
+# BISECT_TRIES = 5 (optional, default 1)
+#
+# For those cases where it takes several tries to hit a bug,
+# BISECT_TRIES is useful. It is the number of times the
+# test is run before the kernel is declared good. The first failure
+# will stop the tries and mark the current SHA1 as bad.
+#
+# Note, as with all race bugs, there's no guarantee that if
+# it succeeds, it is really a good bisect. But it helps in case
+# the bug is somewhat reliable.
+#
+# You can set BISECT_TRIES to zero, and all tests will be considered
+# good, unless you also set BISECT_MANUAL.
+#
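+# A hypothetical bisect section exercising this option (illustrative
+# values only, not defaults) could look like:
+#
+#  TEST_START
+#  TEST_TYPE = bisect
+#  BISECT_TYPE = boot
+#  BISECT_GOOD = v3.12
+#  BISECT_BAD = v3.13-rc1
+#  BISECT_TRIES = 5
+#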
# BISECT_RET_GOOD = 0 (optional, default undefined)
#
# In case the specified test returns something other than just
diff --git a/tools/testing/selftests/rcutorture/.gitignore b/tools/testing/selftests/rcutorture/.gitignore
new file mode 100644
index 0000000..05838f6
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/.gitignore
@@ -0,0 +1,6 @@
+initrd
+linux-2.6
+b[0-9]*
+rcu-test-image
+res
+*.swp
diff --git a/tools/testing/selftests/rcutorture/bin/config2frag.sh b/tools/testing/selftests/rcutorture/bin/config2frag.sh
new file mode 100644
index 0000000..9f9ffcd
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/config2frag.sh
@@ -0,0 +1,25 @@
+#!/bin/sh
+# Usage: sh config2frag.sh < .config > configfrag
+#
+# Converts the "# CONFIG_XXX is not set" to "CONFIG_XXX=n" so that the
+# resulting file becomes a legitimate Kconfig fragment.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2013
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+LANG=C sed -e 's/^# CONFIG_\([a-zA-Z0-9_]*\) is not set$/CONFIG_\1=n/'
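+#
+# For illustration, the sed expression above performs rewrites of the form
+# (hypothetical fragment):
+#   in:  # CONFIG_PREEMPT is not set
+#   out: CONFIG_PREEMPT=n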
diff --git a/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh b/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh
new file mode 100755
index 0000000..43540f1
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+#
+# Extract the number of CPUs expected from the specified Kconfig-file
+# fragment by checking CONFIG_SMP and CONFIG_NR_CPUS. If the specified
+# file gives no clue, base the number on the number of idle CPUs on
+# the system.
+#
+# Usage: configNR_CPUS.sh config-frag
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2013
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+cf=$1
+if test ! -r $cf
+then
+ echo Unreadable config fragment $cf 1>&2
+ exit -1
+fi
+if grep -q '^CONFIG_SMP=n$' $cf
+then
+ echo 1
+ exit 0
+fi
+if grep -q '^CONFIG_NR_CPUS=' $cf
+then
+ grep '^CONFIG_NR_CPUS=' $cf |
+ sed -e 's/^CONFIG_NR_CPUS=\([0-9]*\).*$/\1/'
+ exit 0
+fi
+cpus2use.sh
diff --git a/tools/testing/selftests/rcutorture/bin/configcheck.sh b/tools/testing/selftests/rcutorture/bin/configcheck.sh
new file mode 100755
index 0000000..d686537
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/configcheck.sh
@@ -0,0 +1,54 @@
+#!/bin/sh
+# Usage: sh configcheck.sh .config .config-template
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2011
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+T=/tmp/abat-chk-config.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+cat $1 > $T/.config
+
+cat $2 | sed -e 's/\(.*\)=n/# \1 is not set/' -e 's/^#CHECK#//' |
+awk '
+BEGIN {
+ print "if grep -q \"" $0 "\" < '"$T/.config"'";
+ print "then";
+ print "\t:";
+ print "else";
+ if ($1 == "#") {
+ print "\tif grep -q \"" $2 "\" < '"$T/.config"'";
+ print "\tthen";
+ print "\t\tif test \"$firsttime\" = \"\""
+ print "\t\tthen"
+ print "\t\t\tfirsttime=1"
+ print "\t\tfi"
+ print "\t\techo \":" $2 ": improperly set\"";
+ print "\telse";
+ print "\t\t:";
+ print "\tfi";
+ } else {
+ print "\tif test \"$firsttime\" = \"\""
+ print "\tthen"
+ print "\t\tfirsttime=1"
+ print "\tfi"
+ print "\techo \":" $0 ": improperly set\"";
+ }
+ print "fi";
+ }' | sh
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
new file mode 100755
index 0000000..a1be6e6
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -0,0 +1,74 @@
+#!/bin/sh
+#
+# sh configinit.sh config-spec-file [ build output dir ]
+#
+# Create a .config file from the spec file. Run from the kernel source tree.
+# Exits with 0 if all went well, with 1 if all went well but the config
+# did not match, and some other number for other failures.
+#
+# The first argument is the .config specification file, which contains
+# desired settings, for example, "CONFIG_NO_HZ=y". For best results,
+# this should be a full pathname.
+#
+# The second argument is an optional path to a build output directory,
+# for example, "O=/tmp/foo". If this argument is omitted, the .config
+# file will be generated directly in the current directory.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2013
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+T=/tmp/configinit.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+# Capture config spec file.
+
+c=$1
+buildloc=$2
+builddir=
+if test -n $buildloc
+then
+ if echo $buildloc | grep -q '^O='
+ then
+ builddir=`echo $buildloc | sed -e 's/^O=//'`
+ if test ! -d $builddir
+ then
+ mkdir $builddir
+ fi
+ else
+ echo Bad build directory: \"$builddir\"
+ exit 2
+ fi
+fi
+
+sed -e 's/^\(CONFIG[0-9A-Z_]*\)=.*$/grep -v "^# \1" |/' < $c > $T/u.sh
+sed -e 's/^\(CONFIG[0-9A-Z_]*=\).*$/grep -v \1 |/' < $c >> $T/u.sh
+grep '^grep' < $T/u.sh > $T/upd.sh
+echo "cat - $c" >> $T/upd.sh
+make mrproper
+make $buildloc distclean > $builddir/Make.distclean 2>&1
+make $buildloc defconfig > $builddir/Make.defconfig.out 2>&1
+mv $builddir/.config $builddir/.config.sav
+sh $T/upd.sh < $builddir/.config.sav > $builddir/.config
+cp $builddir/.config $builddir/.config.new
+yes '' | make $buildloc oldconfig > $builddir/Make.modconfig.out 2>&1
+
+# Verify that the new config matches the specification.
+configcheck.sh $builddir/.config $c
+
+exit 0
diff --git a/tools/testing/selftests/rcutorture/bin/cpus2use.sh b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
new file mode 100755
index 0000000..abe14b7
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+#
+# Get an estimate of how CPU-hoggy to be.
+#
+# Usage: cpus2use.sh
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2013
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
+idlecpus=`mpstat | tail -1 | \
+ awk -v ncpus=$ncpus '{ print ncpus * ($7 + $12) / 100 }'`
+awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null '
+BEGIN {
+ cpus2use = idlecpus;
+ if (cpus2use < 1)
+ cpus2use = 1;
+ if (cpus2use < ncpus / 10)
+ cpus2use = ncpus / 10;
+ if (cpus2use == int(cpus2use))
+ cpus2use = int(cpus2use)
+ else
+ cpus2use = int(cpus2use) + 1
+ print cpus2use;
+}'
+
diff --git a/tools/testing/selftests/rcutorture/bin/functions.sh b/tools/testing/selftests/rcutorture/bin/functions.sh
new file mode 100644
index 0000000..587561d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/functions.sh
@@ -0,0 +1,198 @@
+#!/bin/bash
+#
+# Shell functions for the rest of the scripts.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2013
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+# bootparam_hotplug_cpu bootparam-string
+#
+# Returns 1 if the specified boot-parameter string tells rcutorture to
+# test CPU-hotplug operations.
+bootparam_hotplug_cpu () {
+ echo "$1" | grep -q "rcutorture\.onoff_"
+}
+
+# checkarg --argname argtype $# arg mustmatch cannotmatch
+#
+# Checks the specified argument "arg" against the mustmatch and cannotmatch
+# patterns.
+checkarg () {
+ if test $3 -le 1
+ then
+ echo $1 needs argument $2 matching \"$5\"
+ usage
+ fi
+ if echo "$4" | grep -q -e "$5"
+ then
+ :
+ else
+ echo $1 $2 \"$4\" must match \"$5\"
+ usage
+ fi
+ if echo "$4" | grep -q -e "$6"
+ then
+ echo $1 $2 \"$4\" must not match \"$6\"
+ usage
+ fi
+}
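+
+# For example, a hypothetical caller validating a numeric "--duration"
+# argument might invoke:
+#
+#	checkarg --duration "(minutes)" "$#" "$2" '^[0-9]*$' '^--'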
+
+# configfrag_boot_params bootparam-string config-fragment-file
+#
+# Adds boot parameters from the .boot file, if any.
+configfrag_boot_params () {
+ if test -r "$2.boot"
+ then
+ echo $1 `grep -v '^#' "$2.boot" | tr '\012' ' '`
+ else
+ echo $1
+ fi
+}
+
+# configfrag_hotplug_cpu config-fragment-file
+#
+# Returns 1 if the config fragment specifies hotplug CPU.
+configfrag_hotplug_cpu () {
+ if test ! -r "$1"
+ then
+ echo Unreadable config fragment "$1" 1>&2
+ exit -1
+ fi
+ grep -q '^CONFIG_HOTPLUG_CPU=y$' "$1"
+}
+
+# identify_qemu builddir
+#
+# Returns our best guess as to which qemu command is appropriate for
+# the kernel at hand. Override with the RCU_QEMU_CMD environment variable.
+identify_qemu () {
+ local u="`file "$1"`"
+ if test -n "$RCU_QEMU_CMD"
+ then
+ echo $RCU_QEMU_CMD
+ elif echo $u | grep -q x86-64
+ then
+ echo qemu-system-x86_64
+ elif echo $u | grep -q "Intel 80386"
+ then
+ echo qemu-system-i386
+ elif uname -a | grep -q ppc64
+ then
+ echo qemu-system-ppc64
+ else
+ echo Cannot figure out what qemu command to use! 1>&2
+ # Usually this will be one of /usr/bin/qemu-system-*
+ # Use RCU_QEMU_CMD environment variable or appropriate
+ # argument to top-level script.
+ exit 1
+ fi
+}
+
+# identify_qemu_append qemu-cmd
+#
+# Output arguments for the qemu "-append" string based on CPU type
+# and the RCU_QEMU_INTERACTIVE environment variable.
+identify_qemu_append () {
+ case "$1" in
+ qemu-system-x86_64|qemu-system-i386)
+ echo noapic selinux=0 initcall_debug debug
+ ;;
+ esac
+ if test -n "$RCU_QEMU_INTERACTIVE"
+ then
+ echo root=/dev/sda
+ else
+ echo console=ttyS0
+ fi
+}
+
+# identify_qemu_args qemu-cmd serial-file
+#
+# Output arguments for qemu arguments based on the RCU_QEMU_MAC
+# and RCU_QEMU_INTERACTIVE environment variables.
+identify_qemu_args () {
+ case "$1" in
+ qemu-system-x86_64|qemu-system-i386)
+ ;;
+ qemu-system-ppc64)
+ echo -enable-kvm -M pseries -cpu POWER7 -nodefaults
+ echo -device spapr-vscsi
+ if test -n "$RCU_QEMU_INTERACTIVE" -a -n "$RCU_QEMU_MAC"
+ then
+ echo -device spapr-vlan,netdev=net0,mac=$RCU_QEMU_MAC
+ echo -netdev bridge,br=br0,id=net0
+ elif test -n "$RCU_QEMU_INTERACTIVE"
+ then
+ echo -net nic -net user
+ fi
+ ;;
+ esac
+ if test -n "$RCU_QEMU_INTERACTIVE"
+ then
+ echo -monitor stdio -serial pty -S
+ else
+ echo -serial file:$2
+ fi
+}
+
+# identify_qemu_vcpus
+#
+# Returns the number of virtual CPUs available to the aggregate of the
+# guest OSes.
+identify_qemu_vcpus () {
+ lscpu | grep '^CPU(s):' | sed -e 's/CPU(s)://'
+}
+
+# print_bug
+#
+# Prints "BUG: " in red followed by remaining arguments
+print_bug () {
+ printf '\033[031mBUG: \033[m'
+ echo $*
+}
+
+# print_warning
+#
+# Prints "WARNING: " in yellow followed by remaining arguments
+print_warning () {
+ printf '\033[033mWARNING: \033[m'
+ echo $*
+}
+
+# specify_qemu_cpus qemu-cmd qemu-args #cpus
+#
+# Appends a string containing "-smp XXX" to qemu-args, unless the incoming
+# qemu-args already contains "-smp".
+specify_qemu_cpus () {
+ local nt;
+
+ if echo $2 | grep -q -e -smp
+ then
+ echo $2
+ else
+ case "$1" in
+ qemu-system-x86_64|qemu-system-i386)
+ echo $2 -smp $3
+ ;;
+ qemu-system-ppc64)
+ nt="`lscpu | grep '^NUMA node0' | sed -e 's/^[^,]*,\([0-9]*\),.*$/\1/'`"
+ echo $2 -smp cores=`expr \( $3 + $nt - 1 \) / $nt`,threads=$nt
+ ;;
+ esac
+ fi
+}
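+
+# Worked example for the arithmetic above: on ppc64, if lscpu's "NUMA node0"
+# line yields nt=4 (the script's estimate of threads per core), a request
+# for 8 CPUs gives cores=(8+4-1)/4=2, hence "-smp cores=2,threads=4".
+# On x86 the same request simply appends "-smp 8":
+#
+#	specify_qemu_cpus qemu-system-x86_64 "-nographic" 8
+#	# -> -nographic -smp 8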
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
new file mode 100755
index 0000000..197901e
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+#
+# Build a kvm-ready Linux kernel from the tree in the current directory.
+#
+# Usage: sh kvm-build.sh config-template build-dir more-configs
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2011
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+config_template=${1}
+if test -z "$config_template" -o ! -f "$config_template" -o ! -r "$config_template"
+then
+ echo "kvm-build.sh :$config_template: Not a readable file"
+ exit 1
+fi
+builddir=${2}
+if test -z "$builddir" -o ! -d "$builddir" -o ! -w "$builddir"
+then
+ echo "kvm-build.sh :$builddir: Not a writable directory, cannot build into it"
+ exit 1
+fi
+moreconfigs=${3}
+if test -z "$moreconfigs" -o ! -r "$moreconfigs"
+then
+ echo "kvm-build.sh :$moreconfigs: Not a readable file"
+ exit 1
+fi
+
+T=/tmp/test-linux.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+cat ${config_template} | grep -v CONFIG_RCU_TORTURE_TEST > $T/config
+cat << ___EOF___ >> $T/config
+CONFIG_INITRAMFS_SOURCE="$RCU_INITRD"
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_CONSOLE=y
+___EOF___
+cat $moreconfigs >> $T/config
+
+configinit.sh $T/config O=$builddir
+retval=$?
+if test $retval -gt 1
+then
+ exit 2
+fi
+ncpus=`cpus2use.sh`
+make O=$builddir -j$ncpus $RCU_KMAKE_ARG > $builddir/Make.out 2>&1
+retval=$?
+if test $retval -ne 0 || grep "rcu[^/]*": < $builddir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $builddir/Make.out
+then
+ echo Kernel build error
+ egrep "Stop|Error|error:|warning:" < $builddir/Make.out
+ echo Run aborted.
+ exit 3
+fi
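+
+# kvm-test-1-rcu.sh normally drives this script, supplying the extra-configs
+# file; a stand-alone sketch (hypothetical paths, an existing writable build
+# directory, RCU_INITRD set, configinit.sh and cpus2use.sh on the PATH):
+#
+#	echo CONFIG_RCU_TORTURE_TEST=y > /tmp/extra.config
+#	kvm-build.sh tools/testing/selftests/rcutorture/configs/TREE01 \
+#		tools/testing/selftests/rcutorture/b1 /tmp/extra.config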
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
new file mode 100755
index 0000000..baef09f
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+#
+# Given the results directories for previous KVM runs of rcutorture,
+# check the build and console output for errors. Given a directory
+# containing results directories, this recursively checks them all.
+#
+# Usage: sh kvm-recheck.sh resdir ...
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2011
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
+for rd in "$@"
+do
+ dirs=`find $rd -name Make.defconfig.out -print | sort | sed -e 's,/[^/]*$,,' | sort -u`
+ for i in $dirs
+ do
+ configfile=`echo $i | sed -e 's/^.*\///'`
+ echo $configfile
+ configcheck.sh $i/.config $i/ConfigFragment
+ parse-build.sh $i/Make.out $configfile
+ parse-rcutorture.sh $i/console.log $configfile
+ parse-console.sh $i/console.log $configfile
+ if test -r $i/Warnings
+ then
+ cat $i/Warnings
+ fi
+ done
+done
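+
+# kvm.sh invokes this automatically at the end of a run; to re-examine an
+# old run by hand (the datestamp below is hypothetical):
+#
+#	tools/testing/selftests/rcutorture/bin/kvm-recheck.sh \
+#		tools/testing/selftests/rcutorture/res/2013.12.01-00:00:00
+#
+# Every subdirectory containing a Make.defconfig.out file is checked.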
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-rcu.sh
new file mode 100755
index 0000000..151b237
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-rcu.sh
@@ -0,0 +1,192 @@
+#!/bin/bash
+#
+# Run a kvm-based test of the specified tree on the specified configs.
+# Fully automated run and error checking, no graphics console.
+#
+# Execute this in the source tree. Do not run it as a background task
+# because qemu does not seem to like that much.
+#
+# Usage: sh kvm-test-1-rcu.sh config builddir resdir minutes qemu-args bootargs
+#
+# qemu-args defaults to "" -- you will want "-nographic" if running headless.
+# bootargs defaults to "root=/dev/sda noapic selinux=0 console=ttyS0"
+# "initcall_debug debug rcutorture.stat_interval=15"
+# "rcutorture.shutdown_secs=$((minutes * 60))"
+# "rcutorture.rcutorture_runnable=1"
+#
+# Anything you specify for either qemu-args or bootargs is appended to
+# the default values. The "-smp" value is deduced from the contents of
+# the config fragment.
+#
+# More sophisticated argument parsing is clearly needed.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2011
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+grace=120
+
+T=/tmp/kvm-test-1-rcu.sh.$$
+trap 'rm -rf $T' 0
+
+. $KVM/bin/functions.sh
+. $KVPATH/ver_functions.sh
+
+config_template=${1}
+title=`echo $config_template | sed -e 's/^.*\///'`
+builddir=${2}
+if test -z "$builddir" -o ! -d "$builddir" -o ! -w "$builddir"
+then
+ echo "kvm-test-1-rcu.sh :$builddir: Not a writable directory, cannot build into it"
+ exit 1
+fi
+resdir=${3}
+if test -z "$resdir" -o ! -d "$resdir" -o ! -w "$resdir"
+then
+ echo "kvm-test-1-rcu.sh :$resdir: Not a writable directory, cannot build into it"
+ exit 1
+fi
+cp $config_template $resdir/ConfigFragment
+echo ' ---' `date`: Starting build
+echo ' ---' Kconfig fragment at: $config_template >> $resdir/log
+cat << '___EOF___' >> $T
+CONFIG_RCU_TORTURE_TEST=y
+___EOF___
+# Optimizations below this point
+# CONFIG_USB=n
+# CONFIG_SECURITY=n
+# CONFIG_NFS_FS=n
+# CONFIG_SOUND=n
+# CONFIG_INPUT_JOYSTICK=n
+# CONFIG_INPUT_TABLET=n
+# CONFIG_INPUT_TOUCHSCREEN=n
+# CONFIG_INPUT_MISC=n
+# CONFIG_INPUT_MOUSE=n
+# # CONFIG_NET=n # disables console access, so accept the slower build.
+# CONFIG_SCSI=n
+# CONFIG_ATA=n
+# CONFIG_FAT_FS=n
+# CONFIG_MSDOS_FS=n
+# CONFIG_VFAT_FS=n
+# CONFIG_ISO9660_FS=n
+# CONFIG_QUOTA=n
+# CONFIG_HID=n
+# CONFIG_CRYPTO=n
+# CONFIG_PCCARD=n
+# CONFIG_PCMCIA=n
+# CONFIG_CARDBUS=n
+# CONFIG_YENTA=n
+if kvm-build.sh $config_template $builddir $T
+then
+ cp $builddir/Make*.out $resdir
+ cp $builddir/.config $resdir
+ cp $builddir/arch/x86/boot/bzImage $resdir
+ parse-build.sh $resdir/Make.out $title
+else
+ cp $builddir/Make*.out $resdir
+ echo Build failed, not running KVM, see $resdir.
+ exit 1
+fi
+minutes=$4
+seconds=$(($minutes * 60))
+qemu_args=$5
+boot_args=$6
+
+cd $KVM
+kstarttime=`awk 'BEGIN { print systime() }' < /dev/null`
+echo ' ---' `date`: Starting kernel
+
+# Determine the appropriate flavor of qemu command.
+QEMU="`identify_qemu $builddir/vmlinux.o`"
+
+# Generate -smp qemu argument.
+cpu_count=`configNR_CPUS.sh $config_template`
+vcpus=`identify_qemu_vcpus`
+if test $cpu_count -gt $vcpus
+then
+ echo CPU count limited from $cpu_count to $vcpus
+ touch $resdir/Warnings
+ echo CPU count limited from $cpu_count to $vcpus >> $resdir/Warnings
+ cpu_count=$vcpus
+fi
+qemu_args="`specify_qemu_cpus "$QEMU" "$qemu_args" "$cpu_count"`"
+
+# Generate architecture-specific and interaction-specific qemu arguments
+qemu_args="$qemu_args `identify_qemu_args "$QEMU" "$builddir/console.log"`"
+
+# Generate qemu -append arguments
+qemu_append="`identify_qemu_append "$QEMU"`"
+
+# Pull in Kconfig-fragment boot parameters
+boot_args="`configfrag_boot_params "$boot_args" "$config_template"`"
+# Generate CPU-hotplug boot parameters
+boot_args="`rcutorture_param_onoff "$boot_args" $builddir/.config`"
+# Generate rcu_barrier() boot parameter
+boot_args="`rcutorture_param_n_barrier_cbs "$boot_args"`"
+# Pull in standard rcutorture boot arguments
+boot_args="$boot_args rcutorture.stat_interval=15 rcutorture.shutdown_secs=$seconds rcutorture.rcutorture_runnable=1"
+
+echo $QEMU $qemu_args -m 512 -kernel $builddir/arch/x86/boot/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
+if test -n "$RCU_BUILDONLY"
+then
+ echo Build-only run specified, boot/test omitted.
+ exit 0
+fi
+$QEMU $qemu_args -m 512 -kernel $builddir/arch/x86/boot/bzImage -append "$qemu_append $boot_args" &
+qemu_pid=$!
+commandcompleted=0
+echo Monitoring qemu job at pid $qemu_pid
+for ((i=0;i<$seconds;i++))
+do
+ if kill -0 $qemu_pid > /dev/null 2>&1
+ then
+ sleep 1
+ else
+ commandcompleted=1
+ kruntime=`awk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
+ if test $kruntime -lt $seconds
+ then
+ echo Completed in $kruntime vs. $seconds >> $resdir/Warnings 2>&1
+ else
+ echo ' ---' `date`: Kernel done
+ fi
+ break
+ fi
+done
+if test $commandcompleted -eq 0
+then
+ echo Grace period for qemu job at pid $qemu_pid
+ for ((i=0;i<=$grace;i++))
+ do
+ if kill -0 $qemu_pid > /dev/null 2>&1
+ then
+ sleep 1
+ else
+ break
+ fi
+ if test $i -eq $grace
+ then
+ kruntime=`awk 'BEGIN { print systime() - '"$kstarttime"' }'`
+ echo "!!! Hang at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1
+ kill -KILL $qemu_pid
+ fi
+ done
+fi
+
+cp $builddir/console.log $resdir
+parse-rcutorture.sh $resdir/console.log $title
+parse-console.sh $resdir/console.log $title
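+
+# This script is normally launched by kvm.sh, which exports the KVM and
+# KVPATH variables sourced above, puts $KVM/bin on the PATH, and creates
+# the build directory.  Under those same assumptions, a hand-run sketch
+# (hypothetical results directory, 10-minute run) would be roughly:
+#
+#	mkdir -p /tmp/res
+#	kvm-test-1-rcu.sh $KVM/configs/TREE01 $KVM/b1 /tmp/res 10 \
+#		"-nographic" "rcutorture.verbose=1"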
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
new file mode 100644
index 0000000..1b7923b
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -0,0 +1,210 @@
+#!/bin/bash
+#
+# Run a series of tests under KVM, by default the 13 configurations
+# listed in configs/CFLIST. These are not particularly well-selected or
+# well-tuned, but are the current set. Run from the top level of the
+# source tree.
+#
+# Edit the definitions below to set the locations of the various directories,
+# as well as the test duration.
+#
+# Usage: sh kvm.sh [ options ]
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2011
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+scriptname=$0
+args="$*"
+
+dur=30
+KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
+PATH=${KVM}/bin:$PATH; export PATH
+builddir="${KVM}/b1"
+RCU_INITRD="$KVM/initrd"; export RCU_INITRD
+RCU_KMAKE_ARG=""; export RCU_KMAKE_ARG
+resdir=""
+configs=""
+ds=`date +%Y.%m.%d-%H:%M:%S`
+kversion=""
+
+. functions.sh
+
+usage () {
+ echo "Usage: $scriptname optional arguments:"
+ echo " --bootargs kernel-boot-arguments"
+ echo " --builddir absolute-pathname"
+ echo " --buildonly"
+ echo " --configs \"config-file list\""
+ echo " --datestamp string"
+ echo " --duration minutes"
+ echo " --interactive"
+ echo " --kmake-arg kernel-make-arguments"
+ echo " --kversion vN.NN"
+ echo " --mac nn:nn:nn:nn:nn:nn"
+ echo " --no-initrd"
+ echo " --qemu-args qemu-system-..."
+ echo " --qemu-cmd qemu-system-..."
+ echo " --results absolute-pathname"
+ echo " --relbuilddir relative-pathname"
+ exit 1
+}
+
+while test $# -gt 0
+do
+ case "$1" in
+ --bootargs)
+ checkarg --bootargs "(list of kernel boot arguments)" "$#" "$2" '.*' '^--'
+ RCU_BOOTARGS="$2"
+ shift
+ ;;
+ --builddir)
+ checkarg --builddir "(absolute pathname)" "$#" "$2" '^/' '^error'
+ builddir=$2
+ gotbuilddir=1
+ shift
+ ;;
+ --buildonly)
+ RCU_BUILDONLY=1; export RCU_BUILDONLY
+ ;;
+ --configs)
+ checkarg --configs "(list of config files)" "$#" "$2" '^[^/]*$' '^--'
+ configs="$2"
+ shift
+ ;;
+ --datestamp)
+ checkarg --datestamp "(relative pathname)" "$#" "$2" '^[^/]*$' '^--'
+ ds=$2
+ shift
+ ;;
+ --duration)
+ checkarg --duration "(minutes)" $# "$2" '^[0-9]*$' '^error'
+ dur=$2
+ shift
+ ;;
+ --interactive)
+ RCU_QEMU_INTERACTIVE=1; export RCU_QEMU_INTERACTIVE
+ ;;
+ --kmake-arg)
+ checkarg --kmake-arg "(kernel make arguments)" $# "$2" '.*' '^error$'
+ RCU_KMAKE_ARG="$2"; export RCU_KMAKE_ARG
+ shift
+ ;;
+ --kversion)
+ checkarg --kversion "(kernel version)" $# "$2" '^v[0-9.]*$' '^error'
+ kversion=$2
+ shift
+ ;;
+ --mac)
+ checkarg --mac "(MAC address)" $# "$2" '^\([0-9a-fA-F]\{2\}:\)\{5\}[0-9a-fA-F]\{2\}$' error
+ RCU_QEMU_MAC=$2; export RCU_QEMU_MAC
+ shift
+ ;;
+ --no-initrd)
+ RCU_INITRD=""; export RCU_INITRD
+ ;;
+ --qemu-args)
+ checkarg --qemu-args "-qemu args" $# "$2" '^-' '^error'
+ RCU_QEMU_ARG="$2"
+ shift
+ ;;
+ --qemu-cmd)
+ checkarg --qemu-cmd "(qemu-system-...)" $# "$2" 'qemu-system-' '^--'
+ RCU_QEMU_CMD="$2"; export RCU_QEMU_CMD
+ shift
+ ;;
+ --relbuilddir)
+ checkarg --relbuilddir "(relative pathname)" "$#" "$2" '^[^/]*$' '^--'
+ relbuilddir=$2
+ gotrelbuilddir=1
+ builddir=${KVM}/${relbuilddir}
+ shift
+ ;;
+ --results)
+ checkarg --results "(absolute pathname)" "$#" "$2" '^/' '^error'
+ resdir=$2
+ shift
+ ;;
+ *)
+ echo Unknown argument $1
+ usage
+ ;;
+ esac
+ shift
+done
+
+CONFIGFRAG=${KVM}/configs; export CONFIGFRAG
+KVPATH=${CONFIGFRAG}/$kversion; export KVPATH
+
+if test -z "$configs"
+then
+ configs="`cat $CONFIGFRAG/$kversion/CFLIST`"
+fi
+
+if test -z "$resdir"
+then
+ resdir=$KVM/res
+ if ! test -e $resdir
+ then
+ mkdir $resdir || :
+ fi
+else
+ if ! test -e $resdir
+ then
+ mkdir -p "$resdir" || :
+ fi
+fi
+mkdir $resdir/$ds
+touch $resdir/$ds/log
+echo $scriptname $args >> $resdir/$ds/log
+
+pwd > $resdir/$ds/testid.txt
+if test -d .git
+then
+ git status >> $resdir/$ds/testid.txt
+ git rev-parse HEAD >> $resdir/$ds/testid.txt
+fi
+if test -z "$gotbuilddir" -a -z "$gotrelbuilddir"
+then
+ builddir=$KVM/b1
+fi
+if ! test -e $builddir
+then
+ mkdir $builddir || :
+fi
+
+for CF in $configs
+do
+ # Running TREE01 multiple times creates TREE01, TREE01.2, TREE01.3, ...
+ rd=$resdir/$ds/$CF
+ if test -d "${rd}"
+ then
+ n="`ls -d "${rd}"* | grep '\.[0-9]\+$' |
+ sed -e 's/^.*\.\([0-9]\+\)/\1/' |
+ sort -k1n | tail -1`"
+ if test -z "$n"
+ then
+ rd="${rd}.2"
+ else
+ n="`expr $n + 1`"
+ rd="${rd}.${n}"
+ fi
+ fi
+ mkdir "${rd}"
+ echo Results directory: $rd
+ kvm-test-1-rcu.sh $CONFIGFRAG/$kversion/$CF $builddir $rd $dur "-nographic $RCU_QEMU_ARG" "rcutorture.test_no_idle_hz=1 rcutorture.verbose=1 $RCU_BOOTARGS"
+done
+# Tracing: trace_event=rcu:rcu_grace_period,rcu:rcu_future_grace_period,rcu:rcu_grace_period_init,rcu:rcu_nocb_wake,rcu:rcu_preempt_task,rcu:rcu_unlock_preempted_task,rcu:rcu_quiescent_state_report,rcu:rcu_fqs,rcu:rcu_callback,rcu:rcu_kfree_callback,rcu:rcu_batch_start,rcu:rcu_invoke_callback,rcu:rcu_invoke_kfree_callback,rcu:rcu_batch_end,rcu:rcu_torture_read,rcu:rcu_barrier
+
+echo " --- `date` Test summary:"
+kvm-recheck.sh $resdir/$ds
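+
+# For example, run from the top of the kernel source tree:
+#
+#	sh tools/testing/selftests/rcutorture/bin/kvm.sh
+#	sh tools/testing/selftests/rcutorture/bin/kvm.sh --duration 10 \
+#		--configs "TREE01 TINY01" --bootargs "rcutorture.verbose=1"
+#
+# The first runs every configuration in configs/CFLIST for the default 30
+# minutes each; the second restricts the run to two configurations for 10
+# minutes each.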
diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh
new file mode 100755
index 0000000..5432309
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh
@@ -0,0 +1,57 @@
+#!/bin/sh
+#
+# Check the build output from an rcutorture run for goodness.
+# The "file" is a pathname on the local system, and "title" is
+# a text string for error-message purposes.
+#
+# The file must contain kernel build output.
+#
+# Usage:
+# sh parse-build.sh file title
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2011
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+T=$1
+title=$2
+
+. functions.sh
+
+if grep -q CC < $T
+then
+ :
+else
+ print_bug $title no build
+ exit 1
+fi
+
+if grep -q "error:" < $T
+then
+ print_bug $title build errors:
+ grep "error:" < $T
+ exit 2
+fi
+
+if egrep -q "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T
+then
+ print_warning $title build warnings:
+ egrep "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T
+ exit 2
+fi
+exit 0
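+
+# kvm-test-1-rcu.sh runs "parse-build.sh $resdir/Make.out $title"; by hand
+# (hypothetical path) that is simply:
+#
+#	parse-build.sh /tmp/res/TREE01/Make.out TREE01
+#
+# Exit status is 0 for a clean build, 1 if no build output was found, and
+# 2 if errors or rcu-related warnings were detected.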
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh
new file mode 100755
index 0000000..4185d4c
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+#
+# Check the console output from an rcutorture run for oopses.
+# The "file" is a pathname on the local system, and "title" is
+# a text string for error-message purposes.
+#
+# Usage:
+# sh parse-console.sh file title
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2011
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+T=/tmp/parse-console.sh.$$
+trap 'rm -f $T' 0
+
+file="$1"
+title="$2"
+
+. functions.sh
+
+egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
+if test -s $T
+then
+ print_warning Assertion failure in $file $title
+ cat $T
+fi
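+
+# Example (hypothetical path):
+#
+#	parse-console.sh /tmp/res/TREE01/console.log TREE01
+#
+# prints a warning plus the offending lines if the console log contains
+# oopses, WARN()s, or call traces, and produces no output otherwise.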
diff --git a/tools/testing/selftests/rcutorture/bin/parse-rcutorture.sh b/tools/testing/selftests/rcutorture/bin/parse-rcutorture.sh
new file mode 100755
index 0000000..dd0a275
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/parse-rcutorture.sh
@@ -0,0 +1,106 @@
+#!/bin/sh
+#
+# Check the console output from an rcutorture run for goodness.
+# The "file" is a pathname on the local system, and "title" is
+# a text string for error-message purposes.
+#
+# The file must contain rcutorture output, but can be interspersed
+# with other dmesg text.
+#
+# Usage:
+# sh parse-rcutorture.sh file title
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2011
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+T=/tmp/parse-rcutorture.sh.$$
+file="$1"
+title="$2"
+
+trap 'rm -f $T.seq' 0
+
+. functions.sh
+
+# check that the rcutorture output file is present and readable
+
+if test -f "$file" -a -r "$file"
+then
+ :
+else
+ echo $title unreadable rcutorture output file: $file
+ exit 1
+fi
+
+# check for abject failure
+
+if grep -q FAILURE $file || grep -q -e '-torture.*!!!' $file
+then
+ nerrs=`grep --binary-files=text '!!!' $file | tail -1 | awk '{for (i=NF-8;i<=NF;i++) sum+=$i; } END {print sum}'`
+ print_bug $title FAILURE, $nerrs instances
+ echo " " $url
+ exit
+fi
+
+grep --binary-files=text 'torture:.*ver:' $file | grep --binary-files=text -v '(null)' | sed -e 's/^(initramfs)[^]]*] //' -e 's/^\[[^]]*] //' |
+awk '
+BEGIN {
+ ver = 0;
+ badseq = 0;
+ }
+
+ {
+ if (!badseq && ($5 + 0 != $5 || $5 <= ver)) {
+ badseqno1 = ver;
+ badseqno2 = $5;
+ badseqnr = NR;
+ badseq = 1;
+ }
+ ver = $5
+ }
+
+END {
+ if (badseq) {
+ if (badseqno1 == badseqno2 && badseqno2 == ver)
+ print "RCU GP HANG at " ver " rcutorture stat " badseqnr;
+ else
+ print "BAD SEQ " badseqno1 ":" badseqno2 " last:" ver " RCU version " badseqnr;
+ }
+ }' > $T.seq
+
+if grep -q SUCCESS $file
+then
+ if test -s $T.seq
+ then
+ print_warning $title `cat $T.seq`
+ echo " " $file
+ exit 2
+ fi
+else
+ if grep -q RCU_HOTPLUG $file
+ then
+ print_warning HOTPLUG FAILURES $title `cat $T.seq`
+ echo " " $file
+ exit 3
+ fi
+ echo $title no success message, `grep --binary-files=text 'ver:' $file | wc -l` successful RCU version messages
+ if test -s $T.seq
+ then
+ print_warning $title `cat $T.seq`
+ fi
+ exit 2
+fi
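+
+# The awk script above tracks console lines roughly of the form
+# "rcu-torture: rtc: <ptr> ver: <n> ...", using the fifth field as the
+# version number.  A version that fails to increase monotonically is
+# reported as "BAD SEQ", and a version stuck at one value as "RCU GP HANG";
+# a SUCCESS line plus a clean sequence means the run passed.  By hand
+# (hypothetical path):
+#
+#	parse-rcutorture.sh /tmp/res/TREE01/console.log TREE01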
diff --git a/tools/testing/selftests/rcutorture/configs/CFLIST b/tools/testing/selftests/rcutorture/configs/CFLIST
new file mode 100644
index 0000000..cd3d29c
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/CFLIST
@@ -0,0 +1,13 @@
+TREE01
+TREE02
+TREE03
+TREE04
+TREE05
+TREE06
+TREE07
+TREE08
+TREE09
+SRCU-N
+SRCU-P
+TINY01
+TINY02
diff --git a/tools/testing/selftests/rcutorture/configs/SRCU-N b/tools/testing/selftests/rcutorture/configs/SRCU-N
new file mode 100644
index 0000000..10a0e27
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/SRCU-N
@@ -0,0 +1,8 @@
+CONFIG_RCU_TRACE=n
+CONFIG_SMP=y
+CONFIG_NR_CPUS=8
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/SRCU-N.boot b/tools/testing/selftests/rcutorture/configs/SRCU-N.boot
new file mode 100644
index 0000000..238bfe3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/SRCU-N.boot
@@ -0,0 +1 @@
+rcutorture.torture_type=srcu
diff --git a/tools/testing/selftests/rcutorture/configs/SRCU-P b/tools/testing/selftests/rcutorture/configs/SRCU-P
new file mode 100644
index 0000000..6650e00
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/SRCU-P
@@ -0,0 +1,8 @@
+CONFIG_RCU_TRACE=n
+CONFIG_SMP=y
+CONFIG_NR_CPUS=8
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/SRCU-P.boot b/tools/testing/selftests/rcutorture/configs/SRCU-P.boot
new file mode 100644
index 0000000..238bfe3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/SRCU-P.boot
@@ -0,0 +1 @@
+rcutorture.torture_type=srcu
diff --git a/tools/testing/selftests/rcutorture/configs/TINY01 b/tools/testing/selftests/rcutorture/configs/TINY01
new file mode 100644
index 0000000..0c2823f
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TINY01
@@ -0,0 +1,13 @@
+CONFIG_SMP=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TINY_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_TRACE=n
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_PREEMPT_COUNT=n
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TINY02 b/tools/testing/selftests/rcutorture/configs/TINY02
new file mode 100644
index 0000000..e5072d7
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TINY02
@@ -0,0 +1,13 @@
+CONFIG_SMP=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TINY_RCU=y
+CONFIG_HZ_PERIODIC=y
+CONFIG_NO_HZ_IDLE=n
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_TRACE=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_PREEMPT_COUNT=y
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE01 b/tools/testing/selftests/rcutorture/configs/TREE01
new file mode 100644
index 0000000..141119a
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TREE01
@@ -0,0 +1,23 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_TRACE=y
+CONFIG_HOTPLUG_CPU=y
+CONFIG_RCU_FANOUT=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ZERO=y
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_PROVE_RCU_DELAY=n
+CONFIG_RCU_CPU_STALL_INFO=n
+CONFIG_RCU_CPU_STALL_VERBOSE=n
+CONFIG_RCU_BOOST=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE01.boot b/tools/testing/selftests/rcutorture/configs/TREE01.boot
new file mode 100644
index 0000000..0fc8a34
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TREE01.boot
@@ -0,0 +1 @@
+rcutorture.torture_type=rcu_bh
diff --git a/tools/testing/selftests/rcutorture/configs/TREE02 b/tools/testing/selftests/rcutorture/configs/TREE02
new file mode 100644
index 0000000..2d4d096
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TREE02
@@ -0,0 +1,26 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_FAST_NO_HZ=n
+CONFIG_RCU_TRACE=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_RCU_FANOUT=3
+CONFIG_RCU_FANOUT_LEAF=3
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_RCU_NOCB_CPU=n
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_PROVE_LOCKING=n
+CONFIG_PROVE_RCU_DELAY=n
+CONFIG_RCU_CPU_STALL_INFO=n
+CONFIG_RCU_CPU_STALL_VERBOSE=y
+CONFIG_RCU_BOOST=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE03 b/tools/testing/selftests/rcutorture/configs/TREE03
new file mode 100644
index 0000000..a47de5b
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TREE03
@@ -0,0 +1,23 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_HZ_PERIODIC=y
+CONFIG_NO_HZ_IDLE=n
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_TRACE=y
+CONFIG_HOTPLUG_CPU=y
+CONFIG_RCU_FANOUT=4
+CONFIG_RCU_FANOUT_LEAF=4
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_RCU_NOCB_CPU=n
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_PROVE_RCU_DELAY=n
+CONFIG_RCU_CPU_STALL_INFO=n
+CONFIG_RCU_CPU_STALL_VERBOSE=n
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_BOOST_PRIO=2
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE04 b/tools/testing/selftests/rcutorture/configs/TREE04
new file mode 100644
index 0000000..8d839b8
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TREE04
@@ -0,0 +1,25 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=n
+CONFIG_NO_HZ_FULL=y
+CONFIG_NO_HZ_FULL_ALL=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_TRACE=y
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_RCU_FANOUT=2
+CONFIG_RCU_FANOUT_LEAF=2
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_RCU_NOCB_CPU=n
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_PROVE_RCU_DELAY=n
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_RCU_CPU_STALL_VERBOSE=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE04.boot b/tools/testing/selftests/rcutorture/configs/TREE04.boot
new file mode 100644
index 0000000..0fc8a34
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TREE04.boot
@@ -0,0 +1 @@
+rcutorture.torture_type=rcu_bh
diff --git a/tools/testing/selftests/rcutorture/configs/TREE05 b/tools/testing/selftests/rcutorture/configs/TREE05
new file mode 100644
index 0000000..b5ba72e
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TREE05
@@ -0,0 +1,25 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_FAST_NO_HZ=n
+CONFIG_RCU_TRACE=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_RCU_FANOUT=6
+CONFIG_RCU_FANOUT_LEAF=6
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_NONE=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_PROVE_RCU_DELAY=y
+CONFIG_RCU_CPU_STALL_INFO=n
+CONFIG_RCU_CPU_STALL_VERBOSE=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE05.boot b/tools/testing/selftests/rcutorture/configs/TREE05.boot
new file mode 100644
index 0000000..3b42b8b
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TREE05.boot
@@ -0,0 +1 @@
+rcutorture.torture_type=sched
diff --git a/tools/testing/selftests/rcutorture/configs/TREE06 b/tools/testing/selftests/rcutorture/configs/TREE06
new file mode 100644
index 0000000..7c95ab4
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TREE06
@@ -0,0 +1,26 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_FAST_NO_HZ=n
+CONFIG_RCU_TRACE=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_RCU_FANOUT=6
+CONFIG_RCU_FANOUT_LEAF=6
+CONFIG_RCU_FANOUT_EXACT=y
+CONFIG_RCU_NOCB_CPU=n
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_PROVE_RCU_DELAY=n
+CONFIG_RCU_CPU_STALL_INFO=n
+CONFIG_RCU_CPU_STALL_VERBOSE=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE07 b/tools/testing/selftests/rcutorture/configs/TREE07
new file mode 100644
index 0000000..1467404
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TREE07
@@ -0,0 +1,24 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=16
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=n
+CONFIG_NO_HZ_FULL=y
+CONFIG_NO_HZ_FULL_ALL=y
+CONFIG_NO_HZ_FULL_SYSIDLE=y
+CONFIG_RCU_FAST_NO_HZ=n
+CONFIG_RCU_TRACE=y
+CONFIG_HOTPLUG_CPU=y
+CONFIG_RCU_FANOUT=2
+CONFIG_RCU_FANOUT_LEAF=2
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_RCU_NOCB_CPU=n
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_PROVE_RCU_DELAY=n
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_RCU_CPU_STALL_VERBOSE=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE08 b/tools/testing/selftests/rcutorture/configs/TREE08
new file mode 100644
index 0000000..7d097a6
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TREE08
@@ -0,0 +1,26 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=16
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_FAST_NO_HZ=n
+CONFIG_RCU_TRACE=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_RCU_FANOUT=3
+CONFIG_RCU_FANOUT_EXACT=y
+CONFIG_RCU_FANOUT_LEAF=2
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_PROVE_RCU_DELAY=n
+CONFIG_RCU_CPU_STALL_INFO=n
+CONFIG_RCU_CPU_STALL_VERBOSE=n
+CONFIG_RCU_BOOST=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE08-T b/tools/testing/selftests/rcutorture/configs/TREE08-T
new file mode 100644
index 0000000..442c4e4
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TREE08-T
@@ -0,0 +1,26 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=16
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_FAST_NO_HZ=n
+CONFIG_RCU_TRACE=y
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_RCU_FANOUT=3
+CONFIG_RCU_FANOUT_EXACT=y
+CONFIG_RCU_FANOUT_LEAF=2
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_PROVE_RCU_DELAY=n
+CONFIG_RCU_CPU_STALL_INFO=n
+CONFIG_RCU_CPU_STALL_VERBOSE=n
+CONFIG_RCU_BOOST=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE09 b/tools/testing/selftests/rcutorture/configs/TREE09
new file mode 100644
index 0000000..0d1ec0d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/TREE09
@@ -0,0 +1,21 @@
+CONFIG_SMP=n
+CONFIG_NR_CPUS=1
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_TRACE=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_RCU_NOCB_CPU=n
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_PROVE_RCU_DELAY=n
+CONFIG_RCU_CPU_STALL_INFO=n
+CONFIG_RCU_CPU_STALL_VERBOSE=n
+CONFIG_RCU_BOOST=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/CFLIST b/tools/testing/selftests/rcutorture/configs/v0.0/CFLIST
new file mode 100644
index 0000000..1822394
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/CFLIST
@@ -0,0 +1,14 @@
+P1-S-T-NH-SD-SMP-HP
+P2-2-t-nh-sd-SMP-hp
+P3-3-T-nh-SD-SMP-hp
+P4-A-t-NH-sd-SMP-HP
+P5-U-T-NH-sd-SMP-hp
+N1-S-T-NH-SD-SMP-HP
+N2-2-t-nh-sd-SMP-hp
+N3-3-T-nh-SD-SMP-hp
+N4-A-t-NH-sd-SMP-HP
+N5-U-T-NH-sd-SMP-hp
+PT1-nh
+PT2-NH
+NT1-nh
+NT3-NH
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/N1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/v0.0/N1-S-T-NH-SD-SMP-HP
new file mode 100644
index 0000000..d3ef873
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/N1-S-T-NH-SD-SMP-HP
@@ -0,0 +1,18 @@
+CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=8
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/N2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v0.0/N2-2-t-nh-sd-SMP-hp
new file mode 100644
index 0000000..02e4185
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/N2-2-t-nh-sd-SMP-hp
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=4
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/N3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/v0.0/N3-3-T-nh-SD-SMP-hp
new file mode 100644
index 0000000..b3100f6
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/N3-3-T-nh-SD-SMP-hp
@@ -0,0 +1,22 @@
+CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=2
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/N4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/v0.0/N4-A-t-NH-sd-SMP-HP
new file mode 100644
index 0000000..c56b445
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/N4-A-t-NH-sd-SMP-HP
@@ -0,0 +1,18 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/N5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v0.0/N5-U-T-NH-sd-SMP-hp
new file mode 100644
index 0000000..90d924f
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/N5-U-T-NH-sd-SMP-hp
@@ -0,0 +1,22 @@
+CONFIG_RCU_TRACE=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=y
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/NT1-nh b/tools/testing/selftests/rcutorture/configs/v0.0/NT1-nh
new file mode 100644
index 0000000..023f312
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/NT1-nh
@@ -0,0 +1,23 @@
+#CHECK#CONFIG_TINY_RCU=y
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=n
+#
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/NT3-NH b/tools/testing/selftests/rcutorture/configs/v0.0/NT3-NH
new file mode 100644
index 0000000..6fd0235
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/NT3-NH
@@ -0,0 +1,20 @@
+#CHECK#CONFIG_TINY_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=y
+#
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/P1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/v0.0/P1-S-T-NH-SD-SMP-HP
new file mode 100644
index 0000000..f72402d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/P1-S-T-NH-SD-SMP-HP
@@ -0,0 +1,19 @@
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=8
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/P2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v0.0/P2-2-t-nh-sd-SMP-hp
new file mode 100644
index 0000000..0f3b667
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/P2-2-t-nh-sd-SMP-hp
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=4
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/P3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/v0.0/P3-3-T-nh-SD-SMP-hp
new file mode 100644
index 0000000..b035e14
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/P3-3-T-nh-SD-SMP-hp
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=2
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/P4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/v0.0/P4-A-t-NH-sd-SMP-HP
new file mode 100644
index 0000000..3ccf6a9
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/P4-A-t-NH-sd-SMP-HP
@@ -0,0 +1,22 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_RT_MUTEXES=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_BOOST_PRIO=2
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v0.0/P5-U-T-NH-sd-SMP-hp
new file mode 100644
index 0000000..ef624ce
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/P5-U-T-NH-sd-SMP-hp
@@ -0,0 +1,28 @@
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=y
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_PROVE_RCU_DELAY=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_RT_MUTEXES=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_BOOST_PRIO=2
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/PT1-nh b/tools/testing/selftests/rcutorture/configs/v0.0/PT1-nh
new file mode 100644
index 0000000..e3361c3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/PT1-nh
@@ -0,0 +1,23 @@
+CONFIG_TINY_PREEMPT_RCU=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_BOOST_PRIO=2
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=n
+#
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/PT2-NH b/tools/testing/selftests/rcutorture/configs/v0.0/PT2-NH
new file mode 100644
index 0000000..64abfc3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/PT2-NH
@@ -0,0 +1,22 @@
+CONFIG_TINY_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=y
+#
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/v0.0/ver_functions.sh
new file mode 100644
index 0000000..e805253
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v0.0/ver_functions.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+#
+# Kernel-version-dependent shell functions for the rest of the scripts.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2013
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+# rcutorture_param_n_barrier_cbs bootparam-string
+#
+# Adds n_barrier_cbs rcutorture module parameter to kernels having it.
+rcutorture_param_n_barrier_cbs () {
+ echo $1
+}
+
+# rcutorture_param_onoff bootparam-string config-file
+#
+# Adds onoff rcutorture module parameters to kernels having it.
+rcutorture_param_onoff () {
+ echo $1
+}
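+
+# The v0.0 variants above are deliberate pass-throughs: kernels of that
+# vintage presumably lack the onoff_* and n_barrier_cbs rcutorture module
+# parameters, so kvm-test-1-rcu.sh's call, e.g.
+#
+#	boot_args="`rcutorture_param_onoff "$boot_args" $builddir/.config`"
+#
+# leaves the boot-parameter string unchanged for this kernel version.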
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/CFLIST b/tools/testing/selftests/rcutorture/configs/v3.12/CFLIST
new file mode 100644
index 0000000..da4cbc66
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/CFLIST
@@ -0,0 +1,17 @@
+sysidleY.2013.06.19a
+sysidleN.2013.06.19a
+P1-S-T-NH-SD-SMP-HP
+P2-2-t-nh-sd-SMP-hp
+P3-3-T-nh-SD-SMP-hp
+P4-A-t-NH-sd-SMP-HP
+P5-U-T-NH-sd-SMP-hp
+P6---t-nh-SD-smp-hp
+N1-S-T-NH-SD-SMP-HP
+N2-2-t-nh-sd-SMP-hp
+N3-3-T-nh-SD-SMP-hp
+N4-A-t-NH-sd-SMP-HP
+N5-U-T-NH-sd-SMP-hp
+PT1-nh
+PT2-NH
+NT1-nh
+NT3-NH
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.12/N1-S-T-NH-SD-SMP-HP
new file mode 100644
index 0000000..d81e11d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/N1-S-T-NH-SD-SMP-HP
@@ -0,0 +1,19 @@
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=8
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.12/N2-2-t-nh-sd-SMP-hp
new file mode 100644
index 0000000..02e4185
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/N2-2-t-nh-sd-SMP-hp
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=4
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.12/N3-3-T-nh-SD-SMP-hp
new file mode 100644
index 0000000..b3100f6
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/N3-3-T-nh-SD-SMP-hp
@@ -0,0 +1,22 @@
+CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=2
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.12/N4-A-t-NH-sd-SMP-HP
new file mode 100644
index 0000000..c56b445
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/N4-A-t-NH-sd-SMP-HP
@@ -0,0 +1,18 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.12/N5-U-T-NH-sd-SMP-hp
new file mode 100644
index 0000000..90d924f
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/N5-U-T-NH-sd-SMP-hp
@@ -0,0 +1,22 @@
+CONFIG_RCU_TRACE=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=y
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N6---t-nh-SD-smp-hp b/tools/testing/selftests/rcutorture/configs/v3.12/N6---t-nh-SD-smp-hp
new file mode 100644
index 0000000..0ccc36d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/N6---t-nh-SD-smp-hp
@@ -0,0 +1,19 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_NR_CPUS=1
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N7-4-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.12/N7-4-T-NH-SD-SMP-HP
new file mode 100644
index 0000000..3f640cf
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/N7-4-T-NH-SD-SMP-HP
@@ -0,0 +1,26 @@
+CONFIG_RCU_TRACE=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=2
+CONFIG_NR_CPUS=16
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_NONE=y
+CONFIG_RCU_NOCB_CPU_ZERO=n
+CONFIG_RCU_NOCB_CPU_ALL=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N8-2-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.12/N8-2-T-NH-SD-SMP-HP
new file mode 100644
index 0000000..285da2d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/N8-2-T-NH-SD-SMP-HP
@@ -0,0 +1,22 @@
+CONFIG_RCU_TRACE=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=14
+CONFIG_NR_CPUS=16
+CONFIG_RCU_FANOUT_EXACT=y
+CONFIG_HOTPLUG_CPU=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/NT1-nh b/tools/testing/selftests/rcutorture/configs/v3.12/NT1-nh
new file mode 100644
index 0000000..023f312
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/NT1-nh
@@ -0,0 +1,23 @@
+#CHECK#CONFIG_TINY_RCU=y
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=n
+#
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/NT3-NH b/tools/testing/selftests/rcutorture/configs/v3.12/NT3-NH
new file mode 100644
index 0000000..6fd0235
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/NT3-NH
@@ -0,0 +1,20 @@
+#CHECK#CONFIG_TINY_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=y
+#
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.12/P1-S-T-NH-SD-SMP-HP
new file mode 100644
index 0000000..9647c44
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/P1-S-T-NH-SD-SMP-HP
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_NO_HZ=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=8
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.12/P2-2-t-nh-sd-SMP-hp
new file mode 100644
index 0000000..0f3b667
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/P2-2-t-nh-sd-SMP-hp
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=4
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.12/P3-3-T-nh-SD-SMP-hp
new file mode 100644
index 0000000..b035e14
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/P3-3-T-nh-SD-SMP-hp
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=2
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.12/P4-A-t-NH-sd-SMP-HP
new file mode 100644
index 0000000..3ccf6a9
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/P4-A-t-NH-sd-SMP-HP
@@ -0,0 +1,22 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_RT_MUTEXES=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_BOOST_PRIO=2
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.12/P5-U-T-NH-sd-SMP-hp
new file mode 100644
index 0000000..ef624ce
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/P5-U-T-NH-sd-SMP-hp
@@ -0,0 +1,28 @@
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=y
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_PROVE_RCU_DELAY=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_RT_MUTEXES=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_BOOST_PRIO=2
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P6---t-nh-SD-smp-hp b/tools/testing/selftests/rcutorture/configs/v3.12/P6---t-nh-SD-smp-hp
new file mode 100644
index 0000000..f4c9175
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/P6---t-nh-SD-smp-hp
@@ -0,0 +1,18 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=n
+CONFIG_SMP=n
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP
new file mode 100644
index 0000000..77a8c5b
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP
@@ -0,0 +1,30 @@
+CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=2
+CONFIG_NR_CPUS=16
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_NONE=n
+CONFIG_RCU_NOCB_CPU_ZERO=n
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_SLUB=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP-all b/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP-all
new file mode 100644
index 0000000..0eecebc
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP-all
@@ -0,0 +1,30 @@
+CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=2
+CONFIG_NR_CPUS=16
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_NONE=y
+CONFIG_RCU_NOCB_CPU_ZERO=n
+CONFIG_RCU_NOCB_CPU_ALL=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_SLUB=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP-none b/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP-none
new file mode 100644
index 0000000..0eecebc
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP-none
@@ -0,0 +1,30 @@
+CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=2
+CONFIG_NR_CPUS=16
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_NONE=y
+CONFIG_RCU_NOCB_CPU_ZERO=n
+CONFIG_RCU_NOCB_CPU_ALL=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_SLUB=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-hp
new file mode 100644
index 0000000..588bc70
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-hp
@@ -0,0 +1,30 @@
+CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=2
+CONFIG_NR_CPUS=16
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_NONE=n
+CONFIG_RCU_NOCB_CPU_ZERO=y
+CONFIG_RCU_NOCB_CPU_ALL=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_SLUB=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/PT1-nh b/tools/testing/selftests/rcutorture/configs/v3.12/PT1-nh
new file mode 100644
index 0000000..e3361c3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/PT1-nh
@@ -0,0 +1,23 @@
+CONFIG_TINY_PREEMPT_RCU=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_BOOST_PRIO=2
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=n
+#
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/PT2-NH b/tools/testing/selftests/rcutorture/configs/v3.12/PT2-NH
new file mode 100644
index 0000000..64abfc3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.12/PT2-NH
@@ -0,0 +1,22 @@
+CONFIG_TINY_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=y
+#
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/CFLIST b/tools/testing/selftests/rcutorture/configs/v3.3/CFLIST
new file mode 100644
index 0000000..1822394
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/CFLIST
@@ -0,0 +1,14 @@
+P1-S-T-NH-SD-SMP-HP
+P2-2-t-nh-sd-SMP-hp
+P3-3-T-nh-SD-SMP-hp
+P4-A-t-NH-sd-SMP-HP
+P5-U-T-NH-sd-SMP-hp
+N1-S-T-NH-SD-SMP-HP
+N2-2-t-nh-sd-SMP-hp
+N3-3-T-nh-SD-SMP-hp
+N4-A-t-NH-sd-SMP-HP
+N5-U-T-NH-sd-SMP-hp
+PT1-nh
+PT2-NH
+NT1-nh
+NT3-NH
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/N1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.3/N1-S-T-NH-SD-SMP-HP
new file mode 100644
index 0000000..d81e11d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/N1-S-T-NH-SD-SMP-HP
@@ -0,0 +1,19 @@
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=8
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/N2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.3/N2-2-t-nh-sd-SMP-hp
new file mode 100644
index 0000000..02e4185
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/N2-2-t-nh-sd-SMP-hp
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=4
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/N3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.3/N3-3-T-nh-SD-SMP-hp
new file mode 100644
index 0000000..b3100f6
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/N3-3-T-nh-SD-SMP-hp
@@ -0,0 +1,22 @@
+CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=2
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/N4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.3/N4-A-t-NH-sd-SMP-HP
new file mode 100644
index 0000000..c56b445
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/N4-A-t-NH-sd-SMP-HP
@@ -0,0 +1,18 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/N5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.3/N5-U-T-NH-sd-SMP-hp
new file mode 100644
index 0000000..90d924f
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/N5-U-T-NH-sd-SMP-hp
@@ -0,0 +1,22 @@
+CONFIG_RCU_TRACE=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=y
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/NT1-nh b/tools/testing/selftests/rcutorture/configs/v3.3/NT1-nh
new file mode 100644
index 0000000..023f312
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/NT1-nh
@@ -0,0 +1,23 @@
+#CHECK#CONFIG_TINY_RCU=y
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=n
+#
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/NT3-NH b/tools/testing/selftests/rcutorture/configs/v3.3/NT3-NH
new file mode 100644
index 0000000..6fd0235
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/NT3-NH
@@ -0,0 +1,20 @@
+#CHECK#CONFIG_TINY_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=y
+#
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/P1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.3/P1-S-T-NH-SD-SMP-HP
new file mode 100644
index 0000000..9647c44
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/P1-S-T-NH-SD-SMP-HP
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_NO_HZ=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=8
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/P2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.3/P2-2-t-nh-sd-SMP-hp
new file mode 100644
index 0000000..0f3b667
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/P2-2-t-nh-sd-SMP-hp
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=4
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/P3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.3/P3-3-T-nh-SD-SMP-hp
new file mode 100644
index 0000000..b035e14
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/P3-3-T-nh-SD-SMP-hp
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=2
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/P4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.3/P4-A-t-NH-sd-SMP-HP
new file mode 100644
index 0000000..3ccf6a9
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/P4-A-t-NH-sd-SMP-HP
@@ -0,0 +1,22 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_RT_MUTEXES=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_BOOST_PRIO=2
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.3/P5-U-T-NH-sd-SMP-hp
new file mode 100644
index 0000000..ef624ce
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/P5-U-T-NH-sd-SMP-hp
@@ -0,0 +1,28 @@
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=y
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_PROVE_RCU_DELAY=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_RT_MUTEXES=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_BOOST_PRIO=2
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/PT1-nh b/tools/testing/selftests/rcutorture/configs/v3.3/PT1-nh
new file mode 100644
index 0000000..e3361c3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/PT1-nh
@@ -0,0 +1,23 @@
+CONFIG_TINY_PREEMPT_RCU=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_BOOST_PRIO=2
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=n
+#
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/PT2-NH b/tools/testing/selftests/rcutorture/configs/v3.3/PT2-NH
new file mode 100644
index 0000000..64abfc3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/PT2-NH
@@ -0,0 +1,22 @@
+CONFIG_TINY_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=y
+#
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/v3.3/ver_functions.sh
new file mode 100644
index 0000000..c37432f
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.3/ver_functions.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+#
+# Kernel-version-dependent shell functions for the rest of the scripts.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2013
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+# rcutorture_param_n_barrier_cbs bootparam-string
+#
+# Adds n_barrier_cbs rcutorture module parameter to kernels having it.
+rcutorture_param_n_barrier_cbs () {
+ echo $1
+}
+
+# rcutorture_param_onoff bootparam-string config-file
+#
+# Adds onoff rcutorture module parameters to kernels having it.
+rcutorture_param_onoff () {
+ if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
+ then
+ echo CPU-hotplug kernel, adding rcutorture onoff.
+ echo $1 rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
+ else
+ echo $1
+ fi
+}
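For illustration, a minimal sketch of how this v3.3 variant behaves when
sourced from the top of the kernel tree (rcutorture.shuffle_interval is
just an arbitrary example parameter): the pass-through above implies that
v3.3 lacks the n_barrier_cbs module parameter, so the boot-parameter
string comes back unchanged.

------------------------------------------------------------------------
#!/bin/bash
# Sketch only: exercise the v3.3 helper by hand.
. tools/testing/selftests/rcutorture/configs/v3.3/ver_functions.sh

rcutorture_param_n_barrier_cbs "rcutorture.shuffle_interval=5"
# prints: rcutorture.shuffle_interval=5
------------------------------------------------------------------------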
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/CFLIST b/tools/testing/selftests/rcutorture/configs/v3.5/CFLIST
new file mode 100644
index 0000000..1822394
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/CFLIST
@@ -0,0 +1,14 @@
+P1-S-T-NH-SD-SMP-HP
+P2-2-t-nh-sd-SMP-hp
+P3-3-T-nh-SD-SMP-hp
+P4-A-t-NH-sd-SMP-HP
+P5-U-T-NH-sd-SMP-hp
+N1-S-T-NH-SD-SMP-HP
+N2-2-t-nh-sd-SMP-hp
+N3-3-T-nh-SD-SMP-hp
+N4-A-t-NH-sd-SMP-HP
+N5-U-T-NH-sd-SMP-hp
+PT1-nh
+PT2-NH
+NT1-nh
+NT3-NH
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/N1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.5/N1-S-T-NH-SD-SMP-HP
new file mode 100644
index 0000000..d81e11d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/N1-S-T-NH-SD-SMP-HP
@@ -0,0 +1,19 @@
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=8
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/N2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.5/N2-2-t-nh-sd-SMP-hp
new file mode 100644
index 0000000..02e4185
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/N2-2-t-nh-sd-SMP-hp
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=4
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/N3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.5/N3-3-T-nh-SD-SMP-hp
new file mode 100644
index 0000000..b3100f6
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/N3-3-T-nh-SD-SMP-hp
@@ -0,0 +1,22 @@
+CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=2
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/N4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.5/N4-A-t-NH-sd-SMP-HP
new file mode 100644
index 0000000..c56b445
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/N4-A-t-NH-sd-SMP-HP
@@ -0,0 +1,18 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/N5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.5/N5-U-T-NH-sd-SMP-hp
new file mode 100644
index 0000000..90d924f
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/N5-U-T-NH-sd-SMP-hp
@@ -0,0 +1,22 @@
+CONFIG_RCU_TRACE=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=y
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TREE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/NT1-nh b/tools/testing/selftests/rcutorture/configs/v3.5/NT1-nh
new file mode 100644
index 0000000..023f312
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/NT1-nh
@@ -0,0 +1,23 @@
+#CHECK#CONFIG_TINY_RCU=y
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=n
+#
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/NT3-NH b/tools/testing/selftests/rcutorture/configs/v3.5/NT3-NH
new file mode 100644
index 0000000..6fd0235
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/NT3-NH
@@ -0,0 +1,20 @@
+#CHECK#CONFIG_TINY_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=y
+#
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/P1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.5/P1-S-T-NH-SD-SMP-HP
new file mode 100644
index 0000000..9647c44
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/P1-S-T-NH-SD-SMP-HP
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_NO_HZ=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=8
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/P2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.5/P2-2-t-nh-sd-SMP-hp
new file mode 100644
index 0000000..0f3b667
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/P2-2-t-nh-sd-SMP-hp
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=4
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/P3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.5/P3-3-T-nh-SD-SMP-hp
new file mode 100644
index 0000000..b035e14
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/P3-3-T-nh-SD-SMP-hp
@@ -0,0 +1,20 @@
+CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=n
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=2
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/P4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/v3.5/P4-A-t-NH-sd-SMP-HP
new file mode 100644
index 0000000..3ccf6a9
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/P4-A-t-NH-sd-SMP-HP
@@ -0,0 +1,22 @@
+CONFIG_RCU_TRACE=n
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_RT_MUTEXES=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_BOOST_PRIO=2
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/v3.5/P5-U-T-NH-sd-SMP-hp
new file mode 100644
index 0000000..ef624ce
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/P5-U-T-NH-sd-SMP-hp
@@ -0,0 +1,28 @@
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_NO_HZ=y
+CONFIG_SMP=y
+CONFIG_RCU_FANOUT=6
+CONFIG_NR_CPUS=8
+CONFIG_RCU_FANOUT_EXACT=y
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_PROVE_RCU_DELAY=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_RT_MUTEXES=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_BOOST_PRIO=2
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/PT1-nh b/tools/testing/selftests/rcutorture/configs/v3.5/PT1-nh
new file mode 100644
index 0000000..e3361c3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/PT1-nh
@@ -0,0 +1,23 @@
+CONFIG_TINY_PREEMPT_RCU=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_BOOST_PRIO=2
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=n
+#
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/PT2-NH b/tools/testing/selftests/rcutorture/configs/v3.5/PT2-NH
new file mode 100644
index 0000000..64abfc3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/PT2-NH
@@ -0,0 +1,22 @@
+CONFIG_TINY_PREEMPT_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_MODULE_UNLOAD=y
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+#
+CONFIG_SMP=n
+#
+CONFIG_HOTPLUG_CPU=n
+#
+CONFIG_NO_HZ=y
+#
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_PRINTK_TIME=y
+
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/v3.5/ver_functions.sh
new file mode 100644
index 0000000..6a5f13a
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/v3.5/ver_functions.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# Kernel-version-dependent shell functions for the rest of the scripts.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2013
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+# rcutorture_param_n_barrier_cbs bootparam-string
+#
+# Adds n_barrier_cbs rcutorture module parameter to kernels having it.
+rcutorture_param_n_barrier_cbs () {
+ if echo $1 | grep -q "rcutorture\.n_barrier_cbs"
+ then
+ echo $1
+ else
+ echo $1 rcutorture.n_barrier_cbs=4
+ fi
+}
+
+# rcutorture_param_onoff bootparam-string config-file
+#
+# Adds onoff rcutorture module parameters to kernels having it.
+rcutorture_param_onoff () {
+ if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
+ then
+ echo CPU-hotplug kernel, adding rcutorture onoff.
+ echo $1 rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
+ else
+ echo $1
+ fi
+}
diff --git a/tools/testing/selftests/rcutorture/configs/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/ver_functions.sh
new file mode 100644
index 0000000..5e40ead
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/ver_functions.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# Kernel-version-dependent shell functions for the rest of the scripts.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2013
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+# rcutorture_param_n_barrier_cbs bootparam-string
+#
+# Adds n_barrier_cbs rcutorture module parameter to kernels having it.
+rcutorture_param_n_barrier_cbs () {
+ if echo $1 | grep -q "rcutorture\.n_barrier_cbs"
+ then
+ echo $1
+ else
+ echo $1 rcutorture.n_barrier_cbs=4
+ fi
+}
+
+# rcutorture_param_onoff bootparam-string config-file
+#
+# Adds onoff rcutorture module parameters to kernels having it.
+rcutorture_param_onoff () {
+ if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
+ then
+ echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
+ echo $1 rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
+ else
+ echo $1
+ fi
+}
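A similar sketch for this default variant, which appends the
n_barrier_cbs module parameter unless the caller already supplied one
(again assuming the file is sourced from the top of the kernel tree,
with rcutorture.shuffle_interval used purely as an example parameter):

------------------------------------------------------------------------
#!/bin/bash
# Sketch only: the default helper adds n_barrier_cbs=4 when absent ...
. tools/testing/selftests/rcutorture/configs/ver_functions.sh

rcutorture_param_n_barrier_cbs "rcutorture.shuffle_interval=5"
# prints: rcutorture.shuffle_interval=5 rcutorture.n_barrier_cbs=4

# ... and leaves an explicit setting alone.
rcutorture_param_n_barrier_cbs "rcutorture.n_barrier_cbs=2"
# prints: rcutorture.n_barrier_cbs=2
------------------------------------------------------------------------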
diff --git a/tools/testing/selftests/rcutorture/doc/TINY_RCU.txt b/tools/testing/selftests/rcutorture/doc/TINY_RCU.txt
new file mode 100644
index 0000000..28db67b
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/doc/TINY_RCU.txt
@@ -0,0 +1,40 @@
+This document gives a brief rationale for the TINY_RCU test cases.
+
+
+Kconfig Parameters:
+
+CONFIG_DEBUG_LOCK_ALLOC -- Do all three and none of the three.
+CONFIG_PREEMPT_COUNT
+CONFIG_RCU_TRACE
+
+The theory here is that randconfig testing will hit the other six possible
+combinations of these parameters.
+
+
+Kconfig Parameters Ignored:
+
+CONFIG_DEBUG_OBJECTS_RCU_HEAD
+CONFIG_PROVE_RCU
+
+ In common code tested by TREE_RCU test cases.
+
+CONFIG_NO_HZ_FULL_SYSIDLE
+CONFIG_RCU_NOCB_CPU
+CONFIG_RCU_USER_QS
+
+ Meaningless for TINY_RCU.
+
+CONFIG_RCU_STALL_COMMON
+CONFIG_RCU_TORTURE_TEST
+
+ Redundant with CONFIG_RCU_TRACE.
+
+CONFIG_HOTPLUG_CPU
+CONFIG_PREEMPT
+CONFIG_PREEMPT_RCU
+CONFIG_SMP
+CONFIG_TINY_RCU
+CONFIG_TREE_PREEMPT_RCU
+CONFIG_TREE_RCU
+
+ All forced by CONFIG_TINY_RCU.
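As a concrete sketch of the matrix above, the "all three" case can be
written as a fragment layered on top of a !SMP/!PREEMPT (TINY_RCU) base
such as NT1-nh; the file name below is only a placeholder:

------------------------------------------------------------------------
# Sketch: all three varied Kconfig parameters enabled at once.
cat > TINY-all-three << 'EOF'
CONFIG_DEBUG_LOCK_ALLOC=y
CONFIG_PREEMPT_COUNT=y
CONFIG_RCU_TRACE=y
EOF
------------------------------------------------------------------------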
diff --git a/tools/testing/selftests/rcutorture/doc/TREE_RCU-Kconfig.txt b/tools/testing/selftests/rcutorture/doc/TREE_RCU-Kconfig.txt
new file mode 100644
index 0000000..adbb76c
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/doc/TREE_RCU-Kconfig.txt
@@ -0,0 +1,95 @@
+This document gives a brief rationale for the TREE_RCU-related test
+cases, a group that includes TREE_PREEMPT_RCU.
+
+
+Kconfig Parameters:
+
+CONFIG_DEBUG_LOCK_ALLOC -- Do three, covering CONFIG_PROVE_LOCKING & not.
+CONFIG_DEBUG_OBJECTS_RCU_HEAD -- Do one.
+CONFIG_HOTPLUG_CPU -- Do half. (Every second.)
+CONFIG_HZ_PERIODIC -- Do one.
+CONFIG_NO_HZ_IDLE -- Do those not otherwise specified. (Groups of two.)
+CONFIG_NO_HZ_FULL -- Do two, one with CONFIG_NO_HZ_FULL_SYSIDLE.
+CONFIG_NO_HZ_FULL_SYSIDLE -- Do one.
+CONFIG_PREEMPT -- Do half. (First three and #8.)
+CONFIG_PROVE_LOCKING -- Do all but two, covering CONFIG_PROVE_RCU and not.
+CONFIG_PROVE_RCU -- Do all but one under CONFIG_PROVE_LOCKING.
+CONFIG_PROVE_RCU_DELAY -- Do one.
+CONFIG_RCU_BOOST -- Do one of TREE_PREEMPT_RCU.
+CONFIG_RCU_BOOST_PRIO -- Set to 2 for _BOOST testing.
+CONFIG_RCU_CPU_STALL_INFO -- Do one with and without _VERBOSE.
+CONFIG_RCU_CPU_STALL_VERBOSE -- Do one with and without _INFO.
+CONFIG_RCU_FANOUT -- Cover hierarchy as currently, but overlap with others.
+CONFIG_RCU_FANOUT_EXACT -- Do one.
+CONFIG_RCU_FANOUT_LEAF -- Do one non-default.
+CONFIG_RCU_FAST_NO_HZ -- Do one, but not with CONFIG_RCU_NOCB_CPU_ALL.
+CONFIG_RCU_NOCB_CPU -- Do three, see below.
+CONFIG_RCU_NOCB_CPU_ALL -- Do one.
+CONFIG_RCU_NOCB_CPU_NONE -- Do one.
+CONFIG_RCU_NOCB_CPU_ZERO -- Do one.
+CONFIG_RCU_TRACE -- Do half.
+CONFIG_SMP -- Need one !SMP for TREE_PREEMPT_RCU.
+RCU-bh: Do one with PREEMPT and one with !PREEMPT.
+RCU-sched: Do one with PREEMPT but not BOOST.
+
+
+Hierarchy:
+
+TREE01. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=8, CONFIG_RCU_FANOUT_EXACT=n.
+TREE02. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=3, CONFIG_RCU_FANOUT_EXACT=n,
+ CONFIG_RCU_FANOUT_LEAF=3.
+TREE03. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=4, CONFIG_RCU_FANOUT_EXACT=n,
+ CONFIG_RCU_FANOUT_LEAF=4.
+TREE04. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=2, CONFIG_RCU_FANOUT_EXACT=n,
+ CONFIG_RCU_FANOUT_LEAF=2.
+TREE05. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=6, CONFIG_RCU_FANOUT_EXACT=n
+ CONFIG_RCU_FANOUT_LEAF=6.
+TREE06. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=6, CONFIG_RCU_FANOUT_EXACT=y
+ CONFIG_RCU_FANOUT_LEAF=6.
+TREE07. CONFIG_NR_CPUS=16, CONFIG_RCU_FANOUT=2, CONFIG_RCU_FANOUT_EXACT=n,
+ CONFIG_RCU_FANOUT_LEAF=2.
+TREE08. CONFIG_NR_CPUS=16, CONFIG_RCU_FANOUT=3, CONFIG_RCU_FANOUT_EXACT=y,
+ CONFIG_RCU_FANOUT_LEAF=2.
+TREE09. CONFIG_NR_CPUS=1.
+
+
+Kconfig Parameters Ignored:
+
+CONFIG_64BIT
+
+ Used only to check CONFIG_RCU_FANOUT value, inspection suffices.
+
+CONFIG_NO_HZ_FULL_SYSIDLE_SMALL
+
+ Defer until Frederic uses this.
+
+CONFIG_PREEMPT_COUNT
+CONFIG_PREEMPT_RCU
+
+ Redundant with CONFIG_PREEMPT, ignore.
+
+CONFIG_RCU_BOOST_DELAY
+
+ Inspection suffices, ignore.
+
+CONFIG_RCU_CPU_STALL_TIMEOUT
+
+ Inspection suffices, ignore.
+
+CONFIG_RCU_STALL_COMMON
+
+ Implied by TREE_RCU and TREE_PREEMPT_RCU.
+
+CONFIG_RCU_TORTURE_TEST
+CONFIG_RCU_TORTURE_TEST_RUNNABLE
+
+ Always used in KVM testing.
+
+CONFIG_RCU_USER_QS
+
+ Redundant with CONFIG_NO_HZ_FULL.
+
+CONFIG_TREE_PREEMPT_RCU
+CONFIG_TREE_RCU
+
+ These are controlled by CONFIG_PREEMPT.
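For concreteness, the TREE04 geometry from the hierarchy list above comes
out as the fragment below (a sketch with a placeholder file name; the
real scenario files also carry the usual torture-test boilerplate shown
in the configs earlier in this series):

------------------------------------------------------------------------
# Sketch: TREE04 hierarchy shape only, boilerplate omitted.
cat > TREE04-geometry << 'EOF'
CONFIG_NR_CPUS=8
CONFIG_RCU_FANOUT=2
CONFIG_RCU_FANOUT_EXACT=n
CONFIG_RCU_FANOUT_LEAF=2
EOF
------------------------------------------------------------------------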
diff --git a/tools/testing/selftests/rcutorture/doc/initrd.txt b/tools/testing/selftests/rcutorture/doc/initrd.txt
new file mode 100644
index 0000000..49d134c
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/doc/initrd.txt
@@ -0,0 +1,90 @@
+This document describes one way to create the initrd directory hierarchy
+in order to allow an initrd to be built into your kernel. The trick
+here is to steal the initrd file used on your Linux laptop, Ubuntu in
+this case. There are probably much better ways of doing this.
+
+That said, here are the commands:
+
+------------------------------------------------------------------------
+zcat /initrd.img > /tmp/initrd.img.zcat
+mkdir initrd
+cd initrd
+cpio -id < /tmp/initrd.img.zcat
+------------------------------------------------------------------------
+
+Interestingly enough, if you are running rcutorture, you don't really
+need userspace in many cases. Running without userspace has the
+advantage of allowing you to test your kernel independently of the
+distro in place, the root-filesystem layout, and so on. To make this
+happen, put the following script in the initrd tree's "/init" file,
+with mode 0755.
+
+------------------------------------------------------------------------
+#!/bin/sh
+
+[ -d /dev ] || mkdir -m 0755 /dev
+[ -d /root ] || mkdir -m 0700 /root
+[ -d /sys ] || mkdir /sys
+[ -d /proc ] || mkdir /proc
+[ -d /tmp ] || mkdir /tmp
+mkdir -p /var/lock
+mount -t sysfs -o nodev,noexec,nosuid sysfs /sys
+mount -t proc -o nodev,noexec,nosuid proc /proc
+# Some things don't work properly without /etc/mtab.
+ln -sf /proc/mounts /etc/mtab
+
+# Note that this only becomes /dev on the real filesystem if udev's scripts
+# are used, which they will be, but it's worth pointing out.
+if ! mount -t devtmpfs -o mode=0755 udev /dev; then
+ echo "W: devtmpfs not available, falling back to tmpfs for /dev"
+ mount -t tmpfs -o mode=0755 udev /dev
+ [ -e /dev/console ] || mknod --mode=600 /dev/console c 5 1
+ [ -e /dev/kmsg ] || mknod --mode=644 /dev/kmsg c 1 11
+ [ -e /dev/null ] || mknod --mode=666 /dev/null c 1 3
+fi
+
+mkdir /dev/pts
+mount -t devpts -o noexec,nosuid,gid=5,mode=0620 devpts /dev/pts || true
+mount -t tmpfs -o "nosuid,size=20%,mode=0755" tmpfs /run
+mkdir /run/initramfs
+# compatibility symlink for the pre-oneiric locations
+ln -s /run/initramfs /dev/.initramfs
+
+# Export relevant variables
+export ROOT=
+export ROOTDELAY=
+export ROOTFLAGS=
+export ROOTFSTYPE=
+export IP=
+export BOOT=
+export BOOTIF=
+export UBIMTD=
+export break=
+export init=/sbin/init
+export quiet=n
+export readonly=y
+export rootmnt=/root
+export debug=
+export panic=
+export blacklist=
+export resume=
+export resume_offset=
+export recovery=
+
+for i in /sys/devices/system/cpu/cpu*/online
+do
+ case $i in
+ '/sys/devices/system/cpu/cpu0/online')
+ ;;
+ '/sys/devices/system/cpu/cpu*/online')
+ ;;
+ *)
+ echo 1 > $i
+ ;;
+ esac
+done
+
+while :
+do
+ sleep 10
+done
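One way to wire the resulting directory into the build is sketched below
with placeholder paths; scripts/config ships with the kernel, and any
other method of setting CONFIG_INITRAMFS_SOURCE works just as well:

------------------------------------------------------------------------
# Sketch: embed the unpacked "initrd" directory into the kernel image.
cd /path/to/linux
scripts/config --enable BLK_DEV_INITRD \
	--set-str INITRAMFS_SOURCE /path/to/initrd
make oldconfig
make bzImage
------------------------------------------------------------------------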
diff --git a/tools/testing/selftests/rcutorture/doc/rcu-test-image.txt b/tools/testing/selftests/rcutorture/doc/rcu-test-image.txt
new file mode 100644
index 0000000..66efb59
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/doc/rcu-test-image.txt
@@ -0,0 +1,42 @@
+This document describes one way to create the rcu-test-image file
+that contains the filesystem used by the guest-OS kernel. There are
+probably much better ways of doing this, and this filesystem could no
+doubt be smaller. It is probably also possible to simply download
+an appropriate image from any number of places.
+
+That said, here are the commands:
+
+------------------------------------------------------------------------
+dd if=/dev/zero of=rcu-test-image bs=400M count=1
+mkfs.ext3 ./rcu-test-image
+sudo mount -o loop ./rcu-test-image /mnt
+
+# Replace "precise" below with your favorite Ubuntu release.
+# Empirical evidence says this image will work for 64-bit, but...
+# Note that debootstrap does take a few minutes to run. Or longer.
+sudo debootstrap --verbose --arch i386 precise /mnt http://archive.ubuntu.com/ubuntu
+cat << '___EOF___' | sudo dd of=/mnt/etc/fstab
+# UNCONFIGURED FSTAB FOR BASE SYSTEM
+#
+/dev/vda / ext3 defaults 1 1
+dev /dev tmpfs rw 0 0
+tmpfs /dev/shm tmpfs defaults 0 0
+devpts /dev/pts devpts gid=5,mode=620 0 0
+sysfs /sys sysfs defaults 0 0
+proc /proc proc defaults 0 0
+___EOF___
+sudo umount /mnt
+------------------------------------------------------------------------
+
+
+References:
+
+ http://sripathikodi.blogspot.com/2010/02/creating-kvm-bootable-fedora-system.html
+ https://help.ubuntu.com/community/KVM/CreateGuests
+ https://help.ubuntu.com/community/JeOSVMBuilder
+ http://wiki.libvirt.org/page/UbuntuKVMWalkthrough
+ http://www.moe.co.uk/2011/01/07/pci_add_option_rom-failed-to-find-romfile-pxe-rtl8139-bin/ -- "apt-get install kvm-pxe"
+ http://www.landley.net/writing/rootfs-howto.html
+ http://en.wikipedia.org/wiki/Initrd
+ http://en.wikipedia.org/wiki/Cpio
+ http://wiki.libvirt.org/page/UbuntuKVMWalkthrough
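To sanity-check the result by hand, something along the following lines
boots the image under KVM; the bzImage path is a placeholder and the
exact flags will vary with the qemu in use:

------------------------------------------------------------------------
# Sketch: virtio-blk exposes the image as /dev/vda, matching the
# fstab installed above.
qemu-system-x86_64 -nographic -enable-kvm -m 512 \
	-drive file=rcu-test-image,if=virtio \
	-kernel /path/to/bzImage \
	-append "console=ttyS0 root=/dev/vda rw"
------------------------------------------------------------------------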
diff --git a/tools/vm/Makefile b/tools/vm/Makefile
index 24e9ddd..3d907da 100644
--- a/tools/vm/Makefile
+++ b/tools/vm/Makefile
@@ -2,21 +2,21 @@
#
TARGETS=page-types slabinfo
-LK_DIR = ../lib/lk
-LIBLK = $(LK_DIR)/liblk.a
+LIB_DIR = ../lib/api
+LIBS = $(LIB_DIR)/libapikfs.a
CC = $(CROSS_COMPILE)gcc
CFLAGS = -Wall -Wextra -I../lib/
-LDFLAGS = $(LIBLK)
+LDFLAGS = $(LIBS)
-$(TARGETS): liblk
+$(TARGETS): $(LIBS)
-liblk:
- make -C $(LK_DIR)
+$(LIBS):
+ make -C $(LIB_DIR)
%: %.c
$(CC) $(CFLAGS) -o $@ $< $(LDFLAGS)
clean:
$(RM) page-types slabinfo
- make -C ../lib/lk clean
+ make -C $(LIB_DIR) clean
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index d5e9d6d..f9be24d 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -36,7 +36,7 @@
#include <sys/statfs.h>
#include "../../include/uapi/linux/magic.h"
#include "../../include/uapi/linux/kernel-page-flags.h"
-#include <lk/debugfs.h>
+#include <api/fs/debugfs.h>
#ifndef MAX_PATH
# define MAX_PATH 256